code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import copy
import logging
import numpy
import theano
from theano import tensor
from theano.gradient import disconnected_grad
from blocks.bricks import (
Bias, Identity, Initializable, MLP, Tanh, Softmax, Random)
from blocks.bricks.attention import SequenceContentAttention
from blocks.bricks.base import application
from blocks.bricks.recurrent import (
BaseRecurrent, RecurrentStack, recurrent)
from blocks_extras.bricks.sequence_generator2 import (
SequenceGenerator, SoftmaxReadout, Feedback)
from blocks_extras.bricks.attention2 import AttentionRecurrent
from blocks.bricks.lookup import LookupTable
from blocks.graph import ComputationGraph
from blocks.model import Model
from blocks.filter import VariableFilter
from blocks.serialization import load_parameters
from blocks.utils import dict_union, dict_subset
from lvsr.bricks import (
Encoder, InitializableSequence, EditDistanceReward, BleuReward,
RecurrentWithExtraInput, ConvEncoder)
from lvsr.bricks.readouts import (
ReinforceReadout, CriticReadout, ActorCriticReadout)
from lvsr.bricks.attention import SequenceContentAndConvAttention
from lvsr.utils import global_push_initialization_config
from lvsr.beam_search import BeamSearch
logger = logging.getLogger(__name__)
class Bottom(Initializable):
    """Merge possibly many input sources into a single sequence.

    The bottom brick is responsible for allocating the Theano input
    variables, both for single examples and for batches of sequences.
    In speech recognition this is typically the identity transformation
    or a small MLP.

    Attributes
    ----------
    vector_input_sources : list of str
    discrete_input_sources : list of str

    Parameters
    ----------
    input_dims : dict
        Maps each vector input source to its dimensionality.
    input_num_chars : dict
        Maps each discrete input source to its range of values.

    """
    vector_input_sources = []
    discrete_input_sources = []

    def __init__(self, input_dims, input_num_chars, **kwargs):
        super(Bottom, self).__init__(**kwargs)
        self.input_num_chars = input_num_chars
        self.input_dims = input_dims
class LookupBottom(Bottom):
    """Bottom for discrete token inputs, realized as an embedding lookup."""
    discrete_input_sources = ['inputs']

    def __init__(self, dim, **kwargs):
        super(LookupBottom, self).__init__(**kwargs)
        self.dim = dim
        # Symbolic input variables for batches and for single examples.
        self.mask = tensor.matrix('inputs_mask')
        self.batch_inputs = {
            'inputs': tensor.lmatrix('inputs')}
        self.single_inputs = {
            'inputs': tensor.lvector('inputs')}
        lookup = LookupTable(self.input_num_chars['inputs'], self.dim)
        self.children = [lookup]

    @application(inputs=['inputs'], outputs=['outputs'])
    def apply(self, inputs):
        # Embed the token indices via the lookup table.
        return self.children[0].apply(inputs)

    def batch_size(self, inputs):
        return inputs.shape[1]

    def num_time_steps(self, inputs):
        return inputs.shape[0]

    def single_to_batch_inputs(self, inputs):
        # Add a unit batch axis to every input sequence.
        batched = {}
        for name, value in inputs.items():
            batched[name] = (value[:, None, :] if value.ndim == 2
                             else value[:, None])
        inputs_mask = tensor.ones((self.num_time_steps(**batched),
                                   self.batch_size(**batched)))
        return batched, inputs_mask

    def get_dim(self, name):
        if name != 'outputs':
            return super(LookupBottom, self).get_dim(name)
        return self.dim
class SpeechBottom(Bottom):
    """A Bottom specialized for speech recognition.

    Accepts a single input source, the recordings, and optionally runs
    the feature vectors through an MLP; otherwise they are passed
    through unchanged.

    """
    vector_input_sources = ['recordings']

    def __init__(self, activation, dims=None, **kwargs):
        super(SpeechBottom, self).__init__(**kwargs)
        self.num_features = self.input_dims['recordings']
        activation = Tanh() if activation is None else activation
        if not dims:
            # No MLP requested: the features go through unchanged.
            processor = Identity(name='bottom')
            self.output_dim = self.num_features
        else:
            processor = MLP([activation] * len(dims),
                            [self.num_features] + dims,
                            name="bottom")
            self.output_dim = processor.output_dim
        self.children.append(processor)
        # Symbolic input variables for batches and for single examples.
        self.mask = tensor.matrix('recordings_mask')
        self.batch_inputs = {
            'recordings': tensor.tensor3('recordings')}
        self.single_inputs = {
            'recordings': tensor.matrix('recordings')}

    @application(inputs=['recordings'], outputs=['outputs'])
    def apply(self, recordings):
        return self.children[0].apply(recordings)

    def batch_size(self, recordings):
        return recordings.shape[1]

    def num_time_steps(self, recordings):
        return recordings.shape[0]

    def single_to_batch_inputs(self, inputs):
        # Add a unit batch axis to every input sequence.
        batched = {}
        for name, value in inputs.items():
            batched[name] = (value[:, None, :] if value.ndim == 2
                             else value[:, None])
        inputs_mask = tensor.ones((self.num_time_steps(**batched),
                                   self.batch_size(**batched)))
        return batched, inputs_mask

    def get_dim(self, name):
        if name != 'outputs':
            return super(SpeechBottom, self).get_dim(name)
        return self.output_dim
def _downsize_dim(value, times):
if isinstance(value, int):
return value / times
elif isinstance(value, list):
value = list(value)
for i in range(len(value)):
value[i] /= times
return value
raise ValueError
def _downsize_config(config, times):
    """Shrink all dimension-related options of a network config in place.

    Both the top-level dimension options and the ones nested under
    'bottom' are divided by `times`. Returns the (mutated) config for
    convenience.
    """
    top_level_options = ('dim_dec', 'dim_matcher', 'dim_output_embedding',
                         'dims_bidir', 'post_merge_dims')
    for option in top_level_options:
        if config.get(option) is not None:
            config[option] = _downsize_dim(config[option], times)
    bottom_config = config['bottom']
    for option in ('dim', 'dims'):
        if bottom_config.get(option) is not None:
            bottom_config[option] = _downsize_dim(bottom_config[option], times)
    return config
class EncoderDecoder(Initializable, Random):
    """Encapsulate all reusable logic.

    This class plays a few roles: (a) it's a top brick that knows
    how to combine bottom, bidirectional and recognizer network, (b)
    it has the inputs variables and can build whole computation graphs
    starting with them (c) it hides compilation of Theano functions
    and initialization of beam search. I find it simpler to have it all
    in one place for research code.

    Parameters
    ----------
    All defining the structure and the dimensions of the model. Typically
    receives everything from the "net" section of the config.

    """
    def __init__(self,
                 input_dims,
                 input_num_chars,
                 bos_label, eos_label,
                 num_labels,
                 dim_dec, dims_bidir,
                 enc_transition, dec_transition,
                 use_states_for_readout,
                 attention_type,
                 criterion,
                 bottom,
                 lm=None, token_map=None,
                 bidir=True, window_size=None,
                 max_length=None, subsample=None,
                 dims_top=None, extra_input_dim=None,
                 prior=None, conv_n=None,
                 post_merge_activation=None,
                 post_merge_dims=None,
                 dim_matcher=None,
                 embed_outputs=True,
                 dim_output_embedding=None,
                 reuse_bottom_lookup_table=False,
                 dec_stack=1,
                 conv_num_filters=1,
                 data_prepend_eos=True,
                 # softmax is the default set in SequenceContentAndConvAttention
                 energy_normalizer=None,
                 # for speech this is the approximate phoneme duration in frames
                 max_decoded_length_scale=1,
                 # for criterions involving generation of outputs, whether
                 # or not they should be generated by the recognizer itself
                 generate_predictions=True,
                 compute_targets=True,
                 extra_generation_steps=3,
                 **kwargs):
        # Snapshot of the constructor arguments: used further below to
        # instantiate the critic network with a (possibly downsized)
        # copy of this configuration.
        all_arguments = copy.deepcopy(locals())
        all_arguments.update(copy.deepcopy(kwargs))
        del all_arguments['kwargs']
        del all_arguments['self']
        if post_merge_activation is None:
            post_merge_activation = Tanh()
        super(EncoderDecoder, self).__init__(**kwargs)
        self.bos_label = bos_label
        self.eos_label = eos_label
        self.data_prepend_eos = data_prepend_eos
        # Optional initialization overrides, consumed by
        # push_initialization_config when set by the caller.
        self.rec_weights_init = None
        self.initial_states_init = None
        self.enc_transition = enc_transition
        self.dec_transition = dec_transition
        self.dec_stack = dec_stack
        self.criterion = criterion
        self.generate_predictions = generate_predictions
        self.extra_generation_steps = extra_generation_steps
        self.compute_targets = compute_targets
        self.max_decoded_length_scale = max_decoded_length_scale
        post_merge_activation = post_merge_activation
        if dim_matcher is None:
            dim_matcher = dim_dec
        # The bottom part, before BiRNN
        bottom_class = bottom.pop('bottom_class')
        bottom = bottom_class(
            input_dims=input_dims, input_num_chars=input_num_chars,
            name='bottom',
            **bottom)
        # BiRNN
        if dims_bidir:
            if not subsample:
                subsample = [1] * len(dims_bidir)
            encoder = Encoder(self.enc_transition, dims_bidir,
                              bottom.get_dim(bottom.apply.outputs[0]),
                              subsample, bidir=bidir)
        elif window_size:
            encoder = ConvEncoder(
                max_length, bottom.get_dim(bottom.apply.outputs[0]), window_size)
        else:
            raise ValueError("Don't know which Encoder to use")
        dim_encoded = encoder.get_dim(encoder.apply.outputs[0])
        # The top part, on top of BiRNN but before the attention
        if dims_top:
            top = MLP([Tanh()],
                      [dim_encoded] + dims_top + [dim_encoded], name="top")
        else:
            top = Identity(name='top')
        # Decoder transition: either a single RNN or a stack.
        if dec_stack == 1:
            transition = self.dec_transition(
                dim=dim_dec, activation=Tanh(), name="transition")
        else:
            assert not extra_input_dim
            transitions = [self.dec_transition(dim=dim_dec,
                                               activation=Tanh(),
                                               name="transition_{}".format(trans_level))
                           for trans_level in xrange(dec_stack)]
            transition = RecurrentStack(transitions=transitions,
                                        skip_connections=True)
        # Choose attention mechanism according to the configuration
        if attention_type == "content":
            attention = SequenceContentAttention(
                state_names=transition.apply.states,
                attended_dim=dim_encoded, match_dim=dim_matcher,
                name="cont_att")
        elif attention_type == "content_and_conv":
            attention = SequenceContentAndConvAttention(
                state_names=transition.apply.states,
                conv_n=conv_n,
                conv_num_filters=conv_num_filters,
                attended_dim=dim_encoded, match_dim=dim_matcher,
                prior=prior,
                energy_normalizer=energy_normalizer,
                name="conv_att")
        else:
            raise ValueError("Unknown attention type {}"
                             .format(attention_type))
        if not embed_outputs:
            raise ValueError("embed_outputs=False is not supported any more")
        # Output embedding: a fresh lookup table, or the bottom's one
        # (used by the critic, whose inputs are the labels themselves).
        if not reuse_bottom_lookup_table:
            embedding = LookupTable(num_labels + 1,
                                    dim_dec if
                                    dim_output_embedding is None
                                    else dim_output_embedding)
        else:
            embedding = bottom.children[0]
        feedback = Feedback(
            embedding=embedding,
            output_names=[s for s in transition.apply.sequences
                          if s != 'mask'])
        # Create a readout
        readout_config = dict(
            num_tokens=num_labels,
            input_names=(transition.apply.states if use_states_for_readout else [])
            + [attention.take_glimpses.outputs[0]],
            name="readout")
        if post_merge_dims:
            readout_config['merge_dim'] = post_merge_dims[0]
            readout_config['post_merge'] = InitializableSequence([
                Bias(post_merge_dims[0]).apply,
                post_merge_activation.apply,
                MLP([post_merge_activation] * (len(post_merge_dims) - 1) + [Identity()],
                    # MLP was designed to support Maxout as activation
                    # (because Maxout in a way is not one). However
                    # a single layer Maxout network works with the trick below.
                    # For deeper Maxout network one has to use the
                    # Sequence brick.
                    [d//getattr(post_merge_activation, 'num_pieces', 1)
                     for d in post_merge_dims] + [num_labels]).apply,
            ], name='post_merge')
        # Select the reward brick for criteria that use rewards.
        if 'reward' in criterion and criterion['name'] != 'log_likelihood':
            if criterion['reward'] == 'edit_distance':
                readout_config['reward_brick'] = EditDistanceReward(
                    self.bos_label, self.eos_label)
            elif criterion['reward'] == 'delta_edit_distance':
                readout_config['reward_brick'] = EditDistanceReward(
                    self.bos_label, self.eos_label, deltas=True)
            elif criterion['reward'] == 'bleu':
                readout_config['reward_brick'] = BleuReward(
                    self.bos_label, self.eos_label, deltas=False)
            elif criterion['reward'] == 'delta_bleu':
                readout_config['reward_brick'] = BleuReward(
                    self.bos_label, self.eos_label, deltas=True)
            else:
                raise ValueError("Unknown reward type")
        # Select the readout class and its extra configuration
        # according to the training criterion.
        if criterion['name'] == 'log_likelihood':
            readout_class = SoftmaxReadout
        elif criterion['name'] == 'critic':
            readout_class = CriticReadout
            criterion_copy = dict(criterion)
            del criterion_copy['name']
            readout_config.update(**criterion_copy)
        elif criterion['name'] == 'reinforce':
            readout_class = ReinforceReadout
            readout_config['merge_names'] = list(readout_config['input_names'])
            readout_config['entropy'] = criterion.get('entropy')
            readout_config['input_names'] += ['attended', 'attended_mask']
        elif criterion['name'] in ['sarsa', 'actor_critic']:
            readout_class = ActorCriticReadout
            if criterion['name'] == 'actor_critic':
                # Build the critic as a second EncoderDecoder that reads
                # the groundtruth labels instead of the recordings.
                critic_arguments = dict(all_arguments)
                # No worries, the critic will not compute log likelihood
                # values; it gets a 'critic' criterion of its own.
                critic_arguments['criterion'] = {
                    'name': 'critic',
                    'value_softmax': criterion.get('value_softmax'),
                    'same_value_for_wrong': criterion.get('same_value_for_wrong'),
                    'groundtruth_word_bonus': criterion.get('groundtruth_word_bonus'),
                    'dueling_outputs': criterion.get('dueling_outputs')}
                critic_arguments['name'] = 'critic'
                if criterion.get('critic_uses_actor_states'):
                    critic_arguments['extra_input_dim'] = dim_dec
                if (criterion.get('value_softmax')
                        or criterion.get('same_value_for_wrong')
                        or criterion.get('dueling_outputs')):
                    # Add an extra output for the critic
                    critic_arguments['num_labels'] = num_labels + 1
                if criterion.get('force_bidir'):
                    critic_arguments['dims_bidir'] = [dim_dec]
                critic_arguments['reuse_bottom_lookup_table'] = True
                critic_arguments['input_num_chars'] = {'inputs': num_labels}
                if criterion.get('downsize_critic'):
                    critic_arguments = _downsize_config(
                        critic_arguments, criterion['downsize_critic'])
                critic = EncoderDecoder(**critic_arguments)
                readout_config['critic'] = critic
            readout_config['merge_names'] = list(readout_config['input_names'])
            readout_config['freeze_actor'] = criterion.get('freeze_actor')
            readout_config['freeze_critic'] = criterion.get('freeze_critic')
            readout_config['critic_uses_actor_states'] = criterion.get('critic_uses_actor_states')
            readout_config['critic_uses_groundtruth'] = criterion.get('critic_uses_groundtruth')
            readout_config['critic_burnin_steps'] = criterion.get('critic_burnin_steps')
            readout_config['critic_loss'] = criterion.get('critic_loss')
            readout_config['discount'] = criterion.get('discount')
            readout_config['entropy_reward_coof'] = criterion.get('entropy_reward_coof')
            readout_config['cross_entropy_reward_coof'] = criterion.get('cross_entropy_reward_coof')
            readout_config['value_penalty'] = criterion.get('value_penalty')
            readout_config['value_penalty_type'] = criterion.get('value_penalty_type')
            readout_config['critic_policy_t'] = criterion.get('critic_policy_t')
            readout_config['bos_token'] = bos_label
            readout_config['accumulate_outputs'] = criterion.get('accumulate_outputs')
            readout_config['use_value_biases'] = criterion.get('use_value_biases')
            readout_config['actor_grad_estimate'] = criterion.get('actor_grad_estimate')
            readout_config['input_names'] += ['attended', 'attended_mask']
            # Note, that settings below are for the "clean" mode.
            # When get_cost_graph() is run with training=True, they
            # are temporarily overriden with the "real" settings from
            # "criterion"
            readout_config['compute_targets'] = True
            readout_config['trpo_coef'] = 0.0
            readout_config['solve_bellman'] = True
        else:
            raise ValueError("Unknown criterion {}".format(criterion['name']))
        readout = readout_class(**readout_config)
        if lm:
            raise ValueError("LM is currently not supported")
        # NOTE(review): this local name shadows the imported `recurrent`
        # decorator within __init__ only.
        recurrent = AttentionRecurrent(transition, attention)
        if extra_input_dim:
            recurrent = RecurrentWithExtraInput(
                recurrent, "extra_inputs", extra_input_dim, name="with_extra_inputs")
        generator = SequenceGenerator(
            recurrent=recurrent, readout=readout, feedback=feedback,
            name="generator")
        # Remember child bricks
        self.encoder = encoder
        self.bottom = bottom
        self.top = top
        self.generator = generator
        self.softmax = Softmax()
        self.children = [encoder, top, bottom, generator, self.softmax]
        # Create input variables
        self.inputs = self.bottom.batch_inputs
        self.inputs_mask = self.bottom.mask
        self.labels = tensor.lmatrix('labels')
        self.labels_mask = tensor.matrix("labels_mask")
        self.predicted_labels = tensor.lmatrix('predicted_labels')
        self.predicted_mask = tensor.matrix('predicted_mask')
        self.prefix_labels = tensor.lmatrix('prefix_labels')
        self.prefix_steps = tensor.lscalar('prefix_steps')
        self.single_inputs = self.bottom.single_inputs
        self.single_labels = tensor.lvector('labels')
        self.single_predicted_labels = tensor.lvector('predicted_labels')
        self.n_steps = tensor.lscalar('n_steps')
        # Configure mixed_generate
        if criterion['name'] == 'actor_critic':
            critic = self.generator.readout.critic
            self.mixed_generate.sequences = []
            # The critic's recurrent states/outputs/contexts are carried
            # along with a 'critic_' prefix.
            self.mixed_generate.states = (
                ['step'] +
                self.generator.recurrent.apply.states +
                ['critic_' + name for name in critic.generator.recurrent.apply.states])
            self.mixed_generate.outputs = (
                ['samples', 'step'] +
                self.generator.recurrent.apply.outputs +
                ['critic_' + name for name in critic.generator.recurrent.apply.outputs])
            self.mixed_generate.contexts = (
                self.generator.recurrent.apply.contexts +
                ['critic_' + name for name in critic.generator.recurrent.apply.contexts]
                + ['groundtruth', 'groundtruth_mask'])
            self.initial_states.outputs = self.mixed_generate.states
        # Configure prefix_generate
        self.prefix_generate.sequences = []
        self.prefix_generate.states = ['step'] + self.generator.recurrent.apply.states
        self.prefix_generate.outputs = ['samples', 'step'] + self.generator.recurrent.apply.outputs
        self.prefix_generate.contexts = self.generator.recurrent.apply.contexts
def push_initialization_config(self):
super(EncoderDecoder, self).push_initialization_config()
if self.rec_weights_init:
rec_weights_config = {'weights_init': self.rec_weights_init,
'recurrent_weights_init': self.rec_weights_init}
global_push_initialization_config(self,
rec_weights_config,
BaseRecurrent)
if self.initial_states_init:
global_push_initialization_config(self,
{'initial_states_init': self.initial_states_init})
@application
def costs(self, **kwargs):
# pop inputs we know about
prediction = kwargs.pop('prediction')
prediction_mask = kwargs.pop('prediction_mask')
groundtruth = kwargs.pop('groundtruth', None)
groundtruth_mask = kwargs.pop('groundtruth_mask', None)
inputs_mask = kwargs.pop('inputs_mask')
extra_inputs = kwargs.pop('extra_inputs', None)
# the rest is for bottom
bottom_processed = self.bottom.apply(**kwargs)
encoded, encoded_mask = self.encoder.apply(
input_=bottom_processed, mask=inputs_mask)
encoded = self.top.apply(encoded)
costs_kwargs = dict(
prediction=prediction, prediction_mask=prediction_mask,
groundtruth=groundtruth, groundtruth_mask=groundtruth_mask,
attended=encoded, attended_mask=encoded_mask)
if extra_inputs:
costs_kwargs['extra_inputs'] = extra_inputs
return self.generator.costs(**costs_kwargs)
@application
def generate(self, return_initial_states=False, **kwargs):
inputs_mask = kwargs.pop('inputs_mask')
n_steps = kwargs.pop('n_steps')
encoded, encoded_mask = self.encoder.apply(
input_=self.bottom.apply(**kwargs),
mask=inputs_mask)
encoded = self.top.apply(encoded)
return self.generator.generate(
n_steps=n_steps if n_steps is not None else self.n_steps,
batch_size=encoded.shape[1],
attended=encoded,
attended_mask=encoded_mask,
return_initial_states=return_initial_states,
as_dict=True)
    @recurrent
    def prefix_generate(self, return_initial_states=True, **kwargs):
        """One step of generation constrained by a fixed prefix.

        While the step counter is below ``self.prefix_steps`` the output
        is forced to the corresponding label of ``self.prefix_labels``;
        afterwards the readout's own samples are used.
        """
        step = kwargs.pop('step')
        sampling_inputs = dict_subset(
            kwargs, self.generator.readout.sample.inputs)
        samples, scores = self.generator.readout.sample(**sampling_inputs)
        # Within the prefix, overwrite the sampled labels with the
        # prescribed ones.
        prefix_mask = tensor.lt(step, self.prefix_steps)
        samples = (prefix_mask * self.prefix_labels[step[0]]
                   + (1 - prefix_mask) * samples)
        feedback = self.generator.feedback.apply(samples, as_dict=True)
        states_contexts = dict_subset(
            kwargs,
            self.generator.recurrent.apply.states
            + self.generator.recurrent.apply.contexts)
        states_outputs = self.generator.recurrent.apply(
            as_dict=True, iterate=False,
            **dict_union(feedback, states_contexts))
        # The output order must match `prefix_generate.outputs` as
        # configured in __init__.
        return ([samples, step + 1]
                + states_outputs.values())
    @recurrent
    def mixed_generate(self, return_initial_states=True, **kwargs):
        """One generation step of the actor-critic epsilon mixture.

        With probability ``epsilon`` the next label is drawn from a
        sharpened softmax of the critic's outputs, otherwise from the
        actor's distribution. Both the actor's and the critic's
        recurrent networks are then advanced with the chosen sample.
        """
        critic = self.generator.readout.critic
        groundtruth = kwargs.pop('groundtruth')
        groundtruth_mask = kwargs.pop('groundtruth_mask')
        step = kwargs.pop('step')
        sampling_inputs = dict_subset(
            kwargs, self.generator.readout.sample.inputs)
        actor_scores = self.generator.readout.scores(**sampling_inputs)
        critic_inputs = {
            name: kwargs['critic_' + name]
            for name in critic.generator.readout.merge_names}
        critic_outputs = critic.generator.readout.outputs(
            groundtruth, groundtruth_mask, **critic_inputs)
        epsilon = numpy.array(self.generator.readout.epsilon,
                              dtype=theano.config.floatX)
        actor_probs = tensor.exp(actor_scores)
        # This is a poor man's 1-hot argmax: scaling by 1000 makes the
        # softmax extremely peaked at the critic's best output.
        critic_probs = self.softmax.apply(critic_outputs * 1000)
        probs = (actor_probs * (tensor.constant(1) - epsilon)
                 + critic_probs * epsilon)
        # Sample from the mixture by inverse transform sampling on the
        # cumulative probabilities.
        x = self.theano_rng.uniform(size=(probs.shape[0],))
        samples = (tensor.gt(x[:, None], tensor.cumsum(probs, axis=1))
                   .astype(theano.config.floatX)
                   .sum(axis=1)
                   .astype('int64'))
        samples = tensor.minimum(samples, probs.shape[1] - 1)
        actor_feedback = self.generator.feedback.apply(samples, as_dict=True)
        actor_states_contexts = dict_subset(
            kwargs,
            self.generator.recurrent.apply.states
            + self.generator.recurrent.apply.contexts)
        actor_states_outputs = self.generator.recurrent.apply(
            as_dict=True, iterate=False,
            **dict_union(actor_feedback, actor_states_contexts))
        critic_feedback = critic.generator.feedback.apply(samples, as_dict=True)
        critic_states_contexts = {
            name: kwargs['critic_' + name]
            for name in
            critic.generator.recurrent.apply.states
            + critic.generator.recurrent.apply.contexts}
        critic_apply_kwargs = dict(
            as_dict=True, iterate=False,
            **dict_union(critic_feedback, critic_states_contexts))
        if self.generator.readout.critic_uses_actor_states:
            critic_apply_kwargs['extra_inputs'] = actor_states_outputs['states']
        critic_states_outputs = critic.generator.recurrent.apply(**critic_apply_kwargs)
        # The output order must match `mixed_generate.outputs` as
        # configured in __init__.
        return ([samples, step + 1]
                + actor_states_outputs.values()
                + critic_states_outputs.values())
@application
def initial_states(self, batch_size, *args, **kwargs):
critic = self.generator.readout.critic
result = ([tensor.zeros((batch_size,), dtype='int64')]
+ self.generator.initial_states(batch_size, *args, **kwargs))
critic_kwargs = {name[7:]: kwargs[name] for name in kwargs if name.startswith('critic_')}
# This method can be called for two different recurrent application method,
# "mixed_generate" and "prefix_generate". That's why this dirty hack is needed.
if critic_kwargs:
result += critic.generator.initial_states(batch_size, **critic_kwargs)
return result
def get_dim(self, name):
critic = self.generator.readout.critic
if name.startswith('critic_'):
return critic.generator.get_dim(name[7:])
elif name == 'step':
return 0
else:
return self.generator.get_dim(name)
    @application
    def mask_for_prediction(self, prediction, groundtruth_mask=None,
                            extra_generation_steps=None):
        """Compute a mask that keeps predictions up to the first EOS.

        The mask is 1 up to and including the first occurrence of
        ``self.eos_label`` in each column of `prediction` and 0
        afterwards. When `groundtruth_mask` is given, sequences are
        additionally cut at the groundtruth length plus
        `extra_generation_steps`.
        """
        prediction_mask = tensor.lt(
            tensor.cumsum(tensor.eq(prediction, self.eos_label)
                          .astype(theano.config.floatX), axis=0),
            1).astype(theano.config.floatX)
        # Shift the mask one step so that the first EOS itself is still
        # covered, and force the first row to ones.
        prediction_mask = tensor.roll(prediction_mask, 1, 0)
        prediction_mask = tensor.set_subtensor(
            prediction_mask[0, :], tensor.ones_like(prediction_mask[0, :]))
        if groundtruth_mask:
            max_lengths = groundtruth_mask.sum(axis=0) + extra_generation_steps
            prediction_mask *= tensor.lt(
                tensor.arange(prediction.shape[0])[:, None], max_lengths[None, :])
        return prediction_mask
def load_params(self, path):
cg = self.get_cost_graph()
with open(path, 'r') as src:
param_values = load_parameters(src)
Model(cg.outputs).set_parameter_values(param_values)
    def get_generate_graph(self, use_mask=True, n_steps=None,
                           return_initial_states=False,
                           use_softmax_t=False):
        """Build the generation graph over the recognizer inputs.

        When `use_softmax_t` is set, the readout temporarily uses the
        softmax temperature from the criterion configuration; the
        temperature is always restored to 1 before returning.
        """
        if use_softmax_t:
            self.generator.readout.softmax_t = self.criterion.get('softmax_t', 1.0)
        inputs_mask = None
        if use_mask:
            inputs_mask = self.inputs_mask
        result = self.generate(
            n_steps=n_steps, inputs_mask=inputs_mask,
            return_initial_states=return_initial_states,
            **self.inputs)
        # Restore the default temperature.
        self.generator.readout.softmax_t = 1.
        return result
    def get_mixed_generate_graph(self, n_steps=None,
                                 return_initial_states=False):
        """Build the graph for mixed actor-critic generation.

        The actor attends to the encoded recognizer inputs, while the
        critic attends to its own encoding of the groundtruth labels.
        """
        critic = self.generator.readout.critic
        attended, attended_mask = self.encoder.apply(
            input_=self.bottom.apply(**self.inputs),
            mask=self.inputs_mask)
        attended = self.top.apply(attended)
        critic_attended, critic_attended_mask = critic.encoder.apply(
            input_=critic.bottom.apply(inputs=self.labels),
            mask=self.labels_mask)
        critic_attended = critic.top.apply(critic_attended)
        return self.mixed_generate(
            n_steps=n_steps, batch_size=attended.shape[1],
            return_initial_states=return_initial_states, as_dict=True,
            attended=attended, attended_mask=attended_mask,
            critic_attended=critic_attended, critic_attended_mask=critic_attended_mask,
            groundtruth=self.labels, groundtruth_mask=self.labels_mask)
def get_prefix_generate_graph(self, n_steps=None,
return_initial_states=False):
attended, attended_mask = self.encoder.apply(
input_=self.bottom.apply(**self.inputs),
mask=self.inputs_mask)
attended = self.top.apply(attended)
return self.prefix_generate(
n_steps=n_steps, batch_size=attended.shape[1],
return_initial_states=return_initial_states, as_dict=True,
attended=attended, attended_mask=attended_mask)
    def get_cost_graph(self, batch=True, use_prediction=False,
                       training=False, groundtruth_as_predictions=False,
                       with_mixed_generation=False):
        """Build the complete cost computation graph.

        Parameters
        ----------
        batch : bool
            Use the batch input variables; otherwise single-example
            variables are converted into a batch of one.
        use_prediction : bool
            Use the Theano input variable for predictions instead of
            generating them.
        training : bool
            Temporarily switch the readout to the "real" settings from
            the criterion configuration (actor-critic only); they are
            reset to the clean defaults before returning.
        groundtruth_as_predictions : bool
            Score the groundtruth as if it were the prediction.
        with_mixed_generation : bool
            Generate with the epsilon-greedy actor-critic mixture.
        """
        # "use_predictions" means use the Theano input variable
        # for predictions.
        readout = self.generator.readout
        if training and self.criterion['name'] == 'actor_critic':
            logger.debug("Switching to training mode")
            readout.compute_targets = self.compute_targets
            readout.trpo_coef = self.criterion.get('trpo_coef', 0.0)
            if 'solve_bellman' in self.criterion:
                readout.solve_bellman = self.criterion['solve_bellman']
        if with_mixed_generation and 'epsilon' in self.criterion:
            readout.epsilon = self.criterion['epsilon']
        if batch:
            inputs, inputs_mask = self.inputs, self.inputs_mask
            groundtruth, groundtruth_mask = self.labels, self.labels_mask
            prediction, prediction_mask = self.predicted_labels, self.predicted_mask
        else:
            inputs, inputs_mask = self.bottom.single_to_batch_inputs(
                self.single_inputs)
            groundtruth = self.single_labels[:, None]
            groundtruth_mask = self.mask_for_prediction(groundtruth)
            prediction = self.single_predicted_labels[:, None]
            prediction_mask = self.mask_for_prediction(prediction)
        if self.cost_involves_generation() and not groundtruth_as_predictions:
            # Generation-based criteria: predictions either come from
            # the recognizer itself or from the input variables.
            if ((training and self.generate_predictions) or
                    (not training and not use_prediction)):
                generation_routine = (self.get_mixed_generate_graph
                                      if with_mixed_generation
                                      else self.get_generate_graph)
                generated = generation_routine(
                    n_steps=self.labels.shape[0] + self.extra_generation_steps)
                # No gradient flows through the sampled predictions.
                prediction = disconnected_grad(generated['samples'])
                prediction_mask = self.mask_for_prediction(
                    prediction, groundtruth_mask, self.extra_generation_steps)
            else:
                logger.debug("Using provided predictions")
            cost = self.costs(inputs_mask=inputs_mask,
                              prediction=prediction, prediction_mask=prediction_mask,
                              groundtruth=groundtruth, groundtruth_mask=groundtruth_mask,
                              **inputs)
        else:
            if use_prediction:
                cost = self.costs(inputs_mask=inputs_mask,
                                  prediction=prediction, prediction_mask=prediction_mask,
                                  **inputs)
            else:
                cost = self.costs(inputs_mask=inputs_mask,
                                  prediction=groundtruth, prediction_mask=groundtruth_mask,
                                  groundtruth=groundtruth, groundtruth_mask=groundtruth_mask,
                                  **inputs)
        cost_cg = ComputationGraph(cost)
        # This *has to* be done only when
        # "training" or "with_mixed_generation" is True,
        # but it does not hurt to do it every time.
        logger.debug("Switching back to the normal mode")
        readout = self.generator.readout
        readout.compute_targets = True
        readout.trpo_coef = 0.0
        readout.solve_bellman = True
        readout.epsilon = 0.
        return cost_cg
    def analyze(self, inputs, groundtruth, prediction):
        """Compute cost and alignment."""
        if not hasattr(self, "_analyze"):
            # Compile the analysis function on first use and cache it.
            input_variables = list(self.single_inputs.values())
            input_variables.append(self.single_labels)
            input_variables.append(self.single_predicted_labels)
            cg = self.get_cost_graph(batch=False, use_prediction=True)
            costs = cg.outputs[0]
            weights, = VariableFilter(
                bricks=[self.generator], name="weights")(cg)
            energies = VariableFilter(
                bricks=[self.generator], name="energies")(cg)
            # Not every attention mechanism exposes energies; fall back
            # to zeros of the same shape as the weights.
            energies_output = [energies[0][:, 0, :] if energies
                               else tensor.zeros_like(weights)]
            self._analyze = theano.function(
                input_variables,
                [costs[0], weights[:, 0, :]] + energies_output,
                on_unused_input='warn')
        input_values_dict = dict(inputs)
        input_values_dict['labels'] = groundtruth
        input_values_dict['predicted_labels'] = prediction
        return self._analyze(**input_values_dict)
def init_beam_search(self, beam_size):
"""Compile beam search and set the beam size.
See Blocks issue #500.
"""
if hasattr(self, '_beam_search') and self.beam_size == beam_size:
# Only recompile if the user wants a different beam size
return
self.beam_size = beam_size
generated = self.get_generate_graph(use_mask=False, n_steps=3)
cg = ComputationGraph(generated.values())
samples, = VariableFilter(
applications=[self.generator.generate], name="samples")(cg)
self._beam_search = BeamSearch(beam_size, samples)
self._beam_search.compile()
    def beam_search(self, inputs, **kwargs):
        """Run beam search for a single example.

        Returns the found output sequences together with their costs.
        """
        # When a recognizer is unpickled, self.beam_size is available
        # but beam search has to be recompiled.
        self.init_beam_search(self.beam_size)
        inputs = dict(inputs)
        max_length = int(self.bottom.num_time_steps(**inputs) /
                         self.max_decoded_length_scale)
        search_inputs = {}
        # Add a unit batch axis to every known input source.
        for var in self.inputs.values():
            search_inputs[var] = inputs.pop(var.name)[:, numpy.newaxis, ...]
        if inputs:
            raise Exception(
                'Unknown inputs passed to beam search: {}'.format(
                    inputs.keys()))
        outputs, search_costs = self._beam_search.search(
            search_inputs, self.eos_label,
            max_length,
            ignore_first_eol=self.data_prepend_eos,
            **kwargs)
        return outputs, search_costs
def init_generate(self):
generated = self.get_generate_graph(use_mask=False)
cg = ComputationGraph(generated['samples'])
self._do_generate = cg.get_theano_function()
def sample(self, inputs, n_steps=None):
if not hasattr(self, '_do_generate'):
self.init_generate()
batch, unused_mask = self.bottom.single_to_batch_inputs(inputs)
batch['n_steps'] = n_steps if n_steps is not None \
else int(self.bottom.num_time_steps(**batch) /
self.max_decoded_length_scale)
sample = self._do_generate(**batch)[0]
sample = list(sample[:, 0])
if self.eos_label in sample:
sample = sample[:sample.index(self.eos_label) + 1]
return sample
def __getstate__(self):
state = dict(self.__dict__)
for attr in ['_analyze', '_beam_search']:
state.pop(attr, None)
return state
def __setstate__(self, state):
self.__dict__.update(state)
# To use bricks used on a GPU first on a CPU later
try:
emitter = self.generator.readout.emitter
del emitter._theano_rng
except:
pass
def cost_involves_generation(self):
return self.criterion['name'] in ['reinforce', 'sarsa', 'actor_critic']
| [
"logging.getLogger",
"theano.tensor.exp",
"lvsr.bricks.attention.SequenceContentAndConvAttention",
"theano.tensor.lscalar",
"theano.tensor.roll",
"blocks.bricks.attention.SequenceContentAttention",
"lvsr.beam_search.BeamSearch",
"theano.tensor.zeros_like",
"numpy.array",
"lvsr.utils.global_push_in... | [((1233, 1260), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1250, 1260), False, 'import logging\n'), ((2675, 2726), 'blocks.bricks.base.application', 'application', ([], {'inputs': "['inputs']", 'outputs': "['outputs']"}), "(inputs=['inputs'], outputs=['outputs'])\n", (2686, 2726), False, 'from blocks.bricks.base import application\n'), ((4476, 4531), 'blocks.bricks.base.application', 'application', ([], {'inputs': "['recordings']", 'outputs': "['outputs']"}), "(inputs=['recordings'], outputs=['outputs'])\n", (4487, 4531), False, 'from blocks.bricks.base import application\n'), ((2402, 2430), 'theano.tensor.matrix', 'tensor.matrix', (['"""inputs_mask"""'], {}), "('inputs_mask')\n", (2415, 2430), False, 'from theano import tensor\n'), ((4265, 4297), 'theano.tensor.matrix', 'tensor.matrix', (['"""recordings_mask"""'], {}), "('recordings_mask')\n", (4278, 4297), False, 'from theano import tensor\n'), ((12203, 12306), 'blocks_extras.bricks.sequence_generator2.Feedback', 'Feedback', ([], {'embedding': 'embedding', 'output_names': "[s for s in transition.apply.sequences if s != 'mask']"}), "(embedding=embedding, output_names=[s for s in transition.apply.\n sequences if s != 'mask'])\n", (12211, 12306), False, 'from blocks_extras.bricks.sequence_generator2 import SequenceGenerator, SoftmaxReadout, Feedback\n'), ((18815, 18856), 'blocks_extras.bricks.attention2.AttentionRecurrent', 'AttentionRecurrent', (['transition', 'attention'], {}), '(transition, attention)\n', (18833, 18856), False, 'from blocks_extras.bricks.attention2 import AttentionRecurrent\n'), ((19040, 19136), 'blocks_extras.bricks.sequence_generator2.SequenceGenerator', 'SequenceGenerator', ([], {'recurrent': 'recurrent', 'readout': 'readout', 'feedback': 'feedback', 'name': '"""generator"""'}), "(recurrent=recurrent, readout=readout, feedback=feedback,\n name='generator')\n", (19057, 19136), False, 'from 
blocks_extras.bricks.sequence_generator2 import SequenceGenerator, SoftmaxReadout, Feedback\n'), ((19332, 19341), 'blocks.bricks.Softmax', 'Softmax', ([], {}), '()\n', (19339, 19341), False, 'from blocks.bricks import Bias, Identity, Initializable, MLP, Tanh, Softmax, Random\n'), ((19562, 19586), 'theano.tensor.lmatrix', 'tensor.lmatrix', (['"""labels"""'], {}), "('labels')\n", (19576, 19586), False, 'from theano import tensor\n'), ((19614, 19642), 'theano.tensor.matrix', 'tensor.matrix', (['"""labels_mask"""'], {}), "('labels_mask')\n", (19627, 19642), False, 'from theano import tensor\n'), ((19676, 19710), 'theano.tensor.lmatrix', 'tensor.lmatrix', (['"""predicted_labels"""'], {}), "('predicted_labels')\n", (19690, 19710), False, 'from theano import tensor\n'), ((19741, 19772), 'theano.tensor.matrix', 'tensor.matrix', (['"""predicted_mask"""'], {}), "('predicted_mask')\n", (19754, 19772), False, 'from theano import tensor\n'), ((19802, 19833), 'theano.tensor.lmatrix', 'tensor.lmatrix', (['"""prefix_labels"""'], {}), "('prefix_labels')\n", (19816, 19833), False, 'from theano import tensor\n'), ((19862, 19892), 'theano.tensor.lscalar', 'tensor.lscalar', (['"""prefix_steps"""'], {}), "('prefix_steps')\n", (19876, 19892), False, 'from theano import tensor\n'), ((19978, 20002), 'theano.tensor.lvector', 'tensor.lvector', (['"""labels"""'], {}), "('labels')\n", (19992, 20002), False, 'from theano import tensor\n'), ((20042, 20076), 'theano.tensor.lvector', 'tensor.lvector', (['"""predicted_labels"""'], {}), "('predicted_labels')\n", (20056, 20076), False, 'from theano import tensor\n'), ((20100, 20125), 'theano.tensor.lscalar', 'tensor.lscalar', (['"""n_steps"""'], {}), "('n_steps')\n", (20114, 20125), False, 'from theano import tensor\n'), ((23840, 23897), 'blocks.utils.dict_subset', 'dict_subset', (['kwargs', 'self.generator.readout.sample.inputs'], {}), '(kwargs, self.generator.readout.sample.inputs)\n', (23851, 23897), False, 'from blocks.utils import dict_union, 
dict_subset\n'), ((24008, 24042), 'theano.tensor.lt', 'tensor.lt', (['step', 'self.prefix_steps'], {}), '(step, self.prefix_steps)\n', (24017, 24042), False, 'from theano import tensor\n'), ((24253, 24358), 'blocks.utils.dict_subset', 'dict_subset', (['kwargs', '(self.generator.recurrent.apply.states + self.generator.recurrent.apply.\n contexts)'], {}), '(kwargs, self.generator.recurrent.apply.states + self.generator.\n recurrent.apply.contexts)\n', (24264, 24358), False, 'from blocks.utils import dict_union, dict_subset\n'), ((24920, 24977), 'blocks.utils.dict_subset', 'dict_subset', (['kwargs', 'self.generator.readout.sample.inputs'], {}), '(kwargs, self.generator.readout.sample.inputs)\n', (24931, 24977), False, 'from blocks.utils import dict_union, dict_subset\n'), ((25333, 25404), 'numpy.array', 'numpy.array', (['self.generator.readout.epsilon'], {'dtype': 'theano.config.floatX'}), '(self.generator.readout.epsilon, dtype=theano.config.floatX)\n', (25344, 25404), False, 'import numpy\n'), ((25457, 25481), 'theano.tensor.exp', 'tensor.exp', (['actor_scores'], {}), '(actor_scores)\n', (25467, 25481), False, 'from theano import tensor\n'), ((25964, 26007), 'theano.tensor.minimum', 'tensor.minimum', (['samples', '(probs.shape[1] - 1)'], {}), '(samples, probs.shape[1] - 1)\n', (25978, 26007), False, 'from theano import tensor\n'), ((26119, 26224), 'blocks.utils.dict_subset', 'dict_subset', (['kwargs', '(self.generator.recurrent.apply.states + self.generator.recurrent.apply.\n contexts)'], {}), '(kwargs, self.generator.recurrent.apply.states + self.generator.\n recurrent.apply.contexts)\n', (26130, 26224), False, 'from blocks.utils import dict_union, dict_subset\n'), ((28558, 28592), 'theano.tensor.roll', 'tensor.roll', (['prediction_mask', '(1)', '(0)'], {}), '(prediction_mask, 1, 0)\n', (28569, 28592), False, 'from theano import tensor\n'), ((34269, 34291), 'blocks.graph.ComputationGraph', 'ComputationGraph', (['cost'], {}), '(cost)\n', (34285, 34291), False, 'from 
blocks.graph import ComputationGraph\n'), ((36444, 36474), 'lvsr.beam_search.BeamSearch', 'BeamSearch', (['beam_size', 'samples'], {}), '(beam_size, samples)\n', (36454, 36474), False, 'from lvsr.beam_search import BeamSearch\n'), ((37507, 37545), 'blocks.graph.ComputationGraph', 'ComputationGraph', (["generated['samples']"], {}), "(generated['samples'])\n", (37523, 37545), False, 'from blocks.graph import ComputationGraph\n'), ((2483, 2507), 'theano.tensor.lmatrix', 'tensor.lmatrix', (['"""inputs"""'], {}), "('inputs')\n", (2497, 2507), False, 'from theano import tensor\n'), ((2562, 2586), 'theano.tensor.lvector', 'tensor.lvector', (['"""inputs"""'], {}), "('inputs')\n", (2576, 2586), False, 'from theano import tensor\n'), ((2614, 2667), 'blocks.bricks.lookup.LookupTable', 'LookupTable', (["self.input_num_chars['inputs']", 'self.dim'], {}), "(self.input_num_chars['inputs'], self.dim)\n", (2625, 2667), False, 'from blocks.bricks.lookup import LookupTable\n'), ((3889, 3895), 'blocks.bricks.Tanh', 'Tanh', ([], {}), '()\n', (3893, 3895), False, 'from blocks.bricks import Bias, Identity, Initializable, MLP, Tanh, Softmax, Random\n'), ((4136, 4159), 'blocks.bricks.Identity', 'Identity', ([], {'name': '"""bottom"""'}), "(name='bottom')\n", (4144, 4159), False, 'from blocks.bricks import Bias, Identity, Initializable, MLP, Tanh, Softmax, Random\n'), ((4354, 4382), 'theano.tensor.tensor3', 'tensor.tensor3', (['"""recordings"""'], {}), "('recordings')\n", (4368, 4382), False, 'from theano import tensor\n'), ((4441, 4468), 'theano.tensor.matrix', 'tensor.matrix', (['"""recordings"""'], {}), "('recordings')\n", (4454, 4468), False, 'from theano import tensor\n'), ((8293, 8314), 'copy.deepcopy', 'copy.deepcopy', (['kwargs'], {}), '(kwargs)\n', (8306, 8314), False, 'import copy\n'), ((8465, 8471), 'blocks.bricks.Tanh', 'Tanh', ([], {}), '()\n', (8469, 8471), False, 'from blocks.bricks import Bias, Identity, Initializable, MLP, Tanh, Softmax, Random\n'), ((10294, 10314), 
'blocks.bricks.Identity', 'Identity', ([], {'name': '"""top"""'}), "(name='top')\n", (10302, 10314), False, 'from blocks.bricks import Bias, Identity, Initializable, MLP, Tanh, Softmax, Random\n'), ((10814, 10876), 'blocks.bricks.recurrent.RecurrentStack', 'RecurrentStack', ([], {'transitions': 'transitions', 'skip_connections': '(True)'}), '(transitions=transitions, skip_connections=True)\n', (10828, 10876), False, 'from blocks.bricks.recurrent import BaseRecurrent, RecurrentStack, recurrent\n'), ((11049, 11181), 'blocks.bricks.attention.SequenceContentAttention', 'SequenceContentAttention', ([], {'state_names': 'transition.apply.states', 'attended_dim': 'dim_encoded', 'match_dim': 'dim_matcher', 'name': '"""cont_att"""'}), "(state_names=transition.apply.states, attended_dim=\n dim_encoded, match_dim=dim_matcher, name='cont_att')\n", (11073, 11181), False, 'from blocks.bricks.attention import SequenceContentAttention\n'), ((11948, 12046), 'blocks.bricks.lookup.LookupTable', 'LookupTable', (['(num_labels + 1)', '(dim_dec if dim_output_embedding is None else dim_output_embedding)'], {}), '(num_labels + 1, dim_dec if dim_output_embedding is None else\n dim_output_embedding)\n', (11959, 12046), False, 'from blocks.bricks.lookup import LookupTable\n'), ((18909, 19007), 'lvsr.bricks.RecurrentWithExtraInput', 'RecurrentWithExtraInput', (['recurrent', '"""extra_inputs"""', 'extra_input_dim'], {'name': '"""with_extra_inputs"""'}), "(recurrent, 'extra_inputs', extra_input_dim, name=\n 'with_extra_inputs')\n", (18932, 19007), False, 'from lvsr.bricks import Encoder, InitializableSequence, EditDistanceReward, BleuReward, RecurrentWithExtraInput, ConvEncoder\n'), ((21689, 21763), 'lvsr.utils.global_push_initialization_config', 'global_push_initialization_config', (['self', 'rec_weights_config', 'BaseRecurrent'], {}), '(self, rec_weights_config, BaseRecurrent)\n', (21722, 21763), False, 'from lvsr.utils import global_push_initialization_config\n'), ((21905, 22000), 
'lvsr.utils.global_push_initialization_config', 'global_push_initialization_config', (['self', "{'initial_states_init': self.initial_states_init}"], {}), "(self, {'initial_states_init': self.\n initial_states_init})\n", (21938, 22000), False, 'from lvsr.utils import global_push_initialization_config\n'), ((28676, 28715), 'theano.tensor.ones_like', 'tensor.ones_like', (['prediction_mask[0, :]'], {}), '(prediction_mask[0, :])\n', (28692, 28715), False, 'from theano import tensor\n'), ((29115, 29135), 'blocks.serialization.load_parameters', 'load_parameters', (['src'], {}), '(src)\n', (29130, 29135), False, 'from blocks.serialization import load_parameters\n'), ((35493, 35601), 'theano.function', 'theano.function', (['input_variables', '([costs[0], weights[:, 0, :]] + energies_output)'], {'on_unused_input': '"""warn"""'}), "(input_variables, [costs[0], weights[:, 0, :]] +\n energies_output, on_unused_input='warn')\n", (35508, 35601), False, 'import theano\n'), ((36328, 36398), 'blocks.filter.VariableFilter', 'VariableFilter', ([], {'applications': '[self.generator.generate]', 'name': '"""samples"""'}), "(applications=[self.generator.generate], name='samples')\n", (36342, 36398), False, 'from blocks.filter import VariableFilter\n'), ((11301, 11548), 'lvsr.bricks.attention.SequenceContentAndConvAttention', 'SequenceContentAndConvAttention', ([], {'state_names': 'transition.apply.states', 'conv_n': 'conv_n', 'conv_num_filters': 'conv_num_filters', 'attended_dim': 'dim_encoded', 'match_dim': 'dim_matcher', 'prior': 'prior', 'energy_normalizer': 'energy_normalizer', 'name': '"""conv_att"""'}), "(state_names=transition.apply.states, conv_n\n =conv_n, conv_num_filters=conv_num_filters, attended_dim=dim_encoded,\n match_dim=dim_matcher, prior=prior, energy_normalizer=energy_normalizer,\n name='conv_att')\n", (11332, 11548), False, 'from lvsr.bricks.attention import SequenceContentAndConvAttention\n'), ((13643, 13693), 'lvsr.bricks.EditDistanceReward', 'EditDistanceReward', 
(['self.bos_label', 'self.eos_label'], {}), '(self.bos_label, self.eos_label)\n', (13661, 13693), False, 'from lvsr.bricks import Encoder, InitializableSequence, EditDistanceReward, BleuReward, RecurrentWithExtraInput, ConvEncoder\n'), ((24503, 24540), 'blocks.utils.dict_union', 'dict_union', (['feedback', 'states_contexts'], {}), '(feedback, states_contexts)\n', (24513, 24540), False, 'from blocks.utils import dict_union, dict_subset\n'), ((26375, 26424), 'blocks.utils.dict_union', 'dict_union', (['actor_feedback', 'actor_states_contexts'], {}), '(actor_feedback, actor_states_contexts)\n', (26385, 26424), False, 'from blocks.utils import dict_union, dict_subset\n'), ((26810, 26861), 'blocks.utils.dict_union', 'dict_union', (['critic_feedback', 'critic_states_contexts'], {}), '(critic_feedback, critic_states_contexts)\n', (26820, 26861), False, 'from blocks.utils import dict_union, dict_subset\n'), ((27369, 27411), 'theano.tensor.zeros', 'tensor.zeros', (['(batch_size,)'], {'dtype': '"""int64"""'}), "((batch_size,), dtype='int64')\n", (27381, 27411), False, 'from theano import tensor\n'), ((29144, 29161), 'blocks.model.Model', 'Model', (['cg.outputs'], {}), '(cg.outputs)\n', (29149, 29161), False, 'from blocks.model import Model\n'), ((33288, 33327), 'theano.gradient.disconnected_grad', 'disconnected_grad', (["generated['samples']"], {}), "(generated['samples'])\n", (33305, 33327), False, 'from theano.gradient import disconnected_grad\n'), ((35158, 35213), 'blocks.filter.VariableFilter', 'VariableFilter', ([], {'bricks': '[self.generator]', 'name': '"""weights"""'}), "(bricks=[self.generator], name='weights')\n", (35172, 35213), False, 'from blocks.filter import VariableFilter\n'), ((35258, 35314), 'blocks.filter.VariableFilter', 'VariableFilter', ([], {'bricks': '[self.generator]', 'name': '"""energies"""'}), "(bricks=[self.generator], name='energies')\n", (35272, 35314), False, 'from blocks.filter import VariableFilter\n'), ((10177, 10183), 'blocks.bricks.Tanh', 
'Tanh', ([], {}), '()\n', (10181, 10183), False, 'from blocks.bricks import Bias, Identity, Initializable, MLP, Tanh, Softmax, Random\n'), ((10429, 10435), 'blocks.bricks.Tanh', 'Tanh', ([], {}), '()\n', (10433, 10435), False, 'from blocks.bricks import Bias, Identity, Initializable, MLP, Tanh, Softmax, Random\n'), ((13827, 13890), 'lvsr.bricks.EditDistanceReward', 'EditDistanceReward', (['self.bos_label', 'self.eos_label'], {'deltas': '(True)'}), '(self.bos_label, self.eos_label, deltas=True)\n', (13845, 13890), False, 'from lvsr.bricks import Encoder, InitializableSequence, EditDistanceReward, BleuReward, RecurrentWithExtraInput, ConvEncoder\n'), ((25623, 25641), 'theano.tensor.constant', 'tensor.constant', (['(1)'], {}), '(1)\n', (25638, 25641), False, 'from theano import tensor\n'), ((28884, 28918), 'theano.tensor.arange', 'tensor.arange', (['prediction.shape[0]'], {}), '(prediction.shape[0])\n', (28897, 28918), False, 'from theano import tensor\n'), ((35436, 35462), 'theano.tensor.zeros_like', 'tensor.zeros_like', (['weights'], {}), '(weights)\n', (35453, 35462), False, 'from theano import tensor\n'), ((10627, 10633), 'blocks.bricks.Tanh', 'Tanh', ([], {}), '()\n', (10631, 10633), False, 'from blocks.bricks import Bias, Identity, Initializable, MLP, Tanh, Softmax, Random\n'), ((12797, 12821), 'blocks.bricks.Bias', 'Bias', (['post_merge_dims[0]'], {}), '(post_merge_dims[0])\n', (12801, 12821), False, 'from blocks.bricks import Bias, Identity, Initializable, MLP, Tanh, Softmax, Random\n'), ((14009, 14065), 'lvsr.bricks.BleuReward', 'BleuReward', (['self.bos_label', 'self.eos_label'], {'deltas': '(False)'}), '(self.bos_label, self.eos_label, deltas=False)\n', (14019, 14065), False, 'from lvsr.bricks import Encoder, InitializableSequence, EditDistanceReward, BleuReward, RecurrentWithExtraInput, ConvEncoder\n'), ((14190, 14245), 'lvsr.bricks.BleuReward', 'BleuReward', (['self.bos_label', 'self.eos_label'], {'deltas': '(True)'}), '(self.bos_label, self.eos_label, 
deltas=True)\n', (14200, 14245), False, 'from lvsr.bricks import Encoder, InitializableSequence, EditDistanceReward, BleuReward, RecurrentWithExtraInput, ConvEncoder\n'), ((28384, 28421), 'theano.tensor.eq', 'tensor.eq', (['prediction', 'self.eos_label'], {}), '(prediction, self.eos_label)\n', (28393, 28421), False, 'from theano import tensor\n'), ((12950, 12960), 'blocks.bricks.Identity', 'Identity', ([], {}), '()\n', (12958, 12960), False, 'from blocks.bricks import Bias, Identity, Initializable, MLP, Tanh, Softmax, Random\n'), ((25798, 25826), 'theano.tensor.cumsum', 'tensor.cumsum', (['probs'], {'axis': '(1)'}), '(probs, axis=1)\n', (25811, 25826), False, 'from theano import tensor\n')] |
import numpy as np
import matplotlib.pyplot as plt
def plot_hist_sum(data, r_labels, ylabel, xlabel, title, x_tick_labels=None, adjacent_bars=True):
    """Plot pre-aggregated bar heights, one series per row of ``data``.

    Args:
        data: 2-D array-like of shape (n_rows, n_bins); each row holds the
            bar heights of one series.  # assumes numpy-style indexing -- TODO confirm
        r_labels: legend label for each row of ``data`` (also sets n_rows).
        ylabel: y-axis label.
        xlabel: x-axis label.
        title: figure title.
        x_tick_labels: optional tick labels for the bins; defaults to 1..n_bins.
        adjacent_bars: if True, draw the series side by side within each bin;
            otherwise overlay (superimpose) them with transparency.
    """
    n_rows = len(r_labels)
    total_bars_width = 0.8
    bar_width = total_bars_width / n_rows
    # bin centres 1..n_bins, where n_bins = data.shape[1]
    bins = np.arange(data.shape[1]) + 1
    _, ax = plt.subplots(figsize=(10, 6))
    for i in range(n_rows):
        if adjacent_bars:
            # shift each series so the whole group of bars is centred on the bin
            offset = i * bar_width - total_bars_width / 2 + bar_width / 2
            ax.bar(x=bins + offset, height=data[i], width=bar_width,
                   align='center', label=r_labels[i])
        else:  # overlapping\superimposed bars
            ax.bar(x=bins, height=data[i], alpha=0.75, width=total_bars_width,
                   align='center', label=r_labels[i])
    ax.legend(loc='best')
    ax.set_ylabel(ylabel)
    ax.set_xlabel(xlabel)
    ax.set_xticks(bins)
    if x_tick_labels is not None:
        ax.set_xticklabels(x_tick_labels)
    ax.set_title(title)
def plot_hist_count(data, r_labels, ylabel, xlabel, title, x_tick_labels=None, adjacent_bars=True):
    """Plot a histogram of raw observations, one series per row of ``data``.

    Args:
        data: 2-D array-like of shape (n_rows, n_samples); each row holds the
            raw values of one series to be binned.  # assumes numpy-style indexing -- TODO confirm
        r_labels: legend label for each row of ``data`` (also sets n_rows).
        ylabel: y-axis label.
        xlabel: x-axis label.
        title: figure title.
        x_tick_labels: optional tick labels for the bins; defaults to 1..n_bins.
        adjacent_bars: if True, let matplotlib draw the series side by side;
            otherwise overlay (superimpose) them with transparency.
    """
    n_rows = len(r_labels)
    # bin edges 1..n_bins, where n_bins = data.shape[1]
    bins = np.arange(data.shape[1]) + 1
    _, ax = plt.subplots(figsize=(10, 6))
    if adjacent_bars:
        # passing the 2-D array makes matplotlib draw grouped bars itself
        ax.hist(x=data, bins=bins, label=r_labels)
    else:  # overlapping\superimposed bars
        for i in range(n_rows):
            ax.hist(x=data[i], bins=bins, alpha=0.5, label=r_labels[i])
    ax.legend(loc='best')
    ax.set_ylabel(ylabel)
    ax.set_xlabel(xlabel)
    ax.set_xticks(bins)
    if x_tick_labels is not None:
        ax.set_xticklabels(x_tick_labels)
    ax.set_title(title)
| [
"matplotlib.pyplot.subplots",
"numpy.arange"
] | [((425, 454), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (437, 454), True, 'import matplotlib.pyplot as plt\n'), ((1398, 1427), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (1410, 1427), True, 'import matplotlib.pyplot as plt\n'), ((360, 384), 'numpy.arange', 'np.arange', (['data.shape[1]'], {}), '(data.shape[1])\n', (369, 384), True, 'import numpy as np\n'), ((1333, 1357), 'numpy.arange', 'np.arange', (['data.shape[1]'], {}), '(data.shape[1])\n', (1342, 1357), True, 'import numpy as np\n')] |
""" Tests for the midgard.math.planetary_motion module"""
# Third party imports
import numpy as np
# Midgard imports
from midgard.data import time
from midgard.math import planetary_motion
def t_jd_gps():
    """Construct a GPS-scale Time fixture for Julian date 2457448.5."""
    return time.Time(2457448.5, fmt="jd", scale="gps")
def test_findsun():
    """Compare findsun() output against precomputed reference coordinates."""
    expected = np.array([-148102175.801620, -7980320.884334, -19534142.160482])
    actual = planetary_motion.findsun(t_jd_gps())
    np.testing.assert_allclose(actual, expected, rtol=0, atol=1e-6)
def test_gsdtime_sun():
    """Compare gsdtime_sun() output against precomputed reference values."""
    actual = planetary_motion.gsdtime_sun(t_jd_gps())
    expected = (
        np.array([159.228953]),   # gstr
        np.array([340.840280]),   # slong
        np.array([342.313290]),   # sra
        np.array([-7.502975]),    # sdec
    )
    for got, want in zip(actual, expected):
        np.testing.assert_allclose(got, want, rtol=0, atol=1e-6)
| [
"midgard.data.time.Time",
"numpy.array",
"numpy.testing.assert_allclose"
] | [((266, 309), 'midgard.data.time.Time', 'time.Time', (['(2457448.5)'], {'scale': '"""gps"""', 'fmt': '"""jd"""'}), "(2457448.5, scale='gps', fmt='jd')\n", (275, 309), False, 'from midgard.data import time\n'), ((406, 469), 'numpy.array', 'np.array', (['[-148102175.80162, -7980320.884334, -19534142.160482]'], {}), '([-148102175.80162, -7980320.884334, -19534142.160482])\n', (414, 469), True, 'import numpy as np\n'), ((475, 548), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['sun_pos', 'expected_sun_pos'], {'rtol': '(0)', 'atol': '(1e-06)'}), '(sun_pos, expected_sun_pos, rtol=0, atol=1e-06)\n', (501, 548), True, 'import numpy as np\n'), ((761, 783), 'numpy.array', 'np.array', (['[159.228953]'], {}), '([159.228953])\n', (769, 783), True, 'import numpy as np\n'), ((805, 826), 'numpy.array', 'np.array', (['[340.84028]'], {}), '([340.84028])\n', (813, 826), True, 'import numpy as np\n'), ((847, 868), 'numpy.array', 'np.array', (['[342.31329]'], {}), '([342.31329])\n', (855, 868), True, 'import numpy as np\n'), ((890, 911), 'numpy.array', 'np.array', (['[-7.502975]'], {}), '([-7.502975])\n', (898, 911), True, 'import numpy as np\n'), ((917, 984), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['gstr', 'expected_gstr'], {'rtol': '(0)', 'atol': '(1e-06)'}), '(gstr, expected_gstr, rtol=0, atol=1e-06)\n', (943, 984), True, 'import numpy as np\n'), ((988, 1057), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['slong', 'expected_slong'], {'rtol': '(0)', 'atol': '(1e-06)'}), '(slong, expected_slong, rtol=0, atol=1e-06)\n', (1014, 1057), True, 'import numpy as np\n'), ((1061, 1126), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['sra', 'expected_sra'], {'rtol': '(0)', 'atol': '(1e-06)'}), '(sra, expected_sra, rtol=0, atol=1e-06)\n', (1087, 1126), True, 'import numpy as np\n'), ((1130, 1197), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['sdec', 'expected_sdec'], {'rtol': '(0)', 'atol': 
'(1e-06)'}), '(sdec, expected_sdec, rtol=0, atol=1e-06)\n', (1156, 1197), True, 'import numpy as np\n')] |
from src.params.ParamsPING import *
from src.izhikevich_simulation.IzhikevichNetworkOutcome import *
from src.params.ParamsFrequencies import *
import numpy as np
from math import floor, pi
from scipy import fft
import matplotlib.pyplot as plt
from collections import Counter
from tqdm import tqdm
import warnings
class SpikingFrequencyComputer:
    """
    Estimates the dominant oscillation frequency of PING networks from their
    excitatory spike trains, via plain FFT or wavelet time-frequency analysis.
    """

    def compute_for_all_pings(
            self, simulation_outcome: "IzhikevichNetworkOutcome", params_freqs: "ParamsFrequencies"
    ) -> list[int]:
        """
        Computes the dominant frequency of every PING network in the simulation.

        :param simulation_outcome: spikes and grid geometry produced by the simulation.
        :param params_freqs: frequency band and wavelet parameters.
        :return: one dominant frequency per PING network.
        """
        frequencies = []
        # (epoch, neuron_id) pairs; converted to an array once instead of twice
        # per PING network as before
        spikes = np.array(simulation_outcome.spikes)
        for ping_network in (pbar := tqdm(simulation_outcome.grid_geometry.ping_networks)):
            pbar.set_description("Frequency distribution per PING")
            # select spikes of excitatory neurons belonging to this PING network
            spikes_in_ping_mask = np.isin(spikes.T[1], ping_network.ids[NeuronTypes.EX])
            # epochs at which those excitatory neurons fired
            spikes_times_in_ping = spikes[spikes_in_ping_mask].T[0]
            spikes_ex_per_times = [
                np.count_nonzero(spikes_times_in_ping == t) for t in range(simulation_outcome.simulation_time)
            ]
            # drop the first 299 epochs -- presumably a start-up transient; TODO confirm
            signal = np.array(spikes_ex_per_times[299:])
            frequency = self.tfr_single_ping(
                signal=signal,
                simulation_time=simulation_outcome.simulation_time,
                params_freqs=params_freqs
            )
            frequencies.append(frequency)
        return frequencies

    def plot_ping_frequencies(self, frequencies):
        """
        Plots and saves a histogram of the dominant PING frequencies.

        :param frequencies: dominant frequency of each PING network.
        """
        # TODO:: make pretty
        print("Plotting current-frequency.....", end="")
        path = "../plots/test-freq-in-pings.png"  # NOTE(review): hard-coded output path
        fig, ax = plt.subplots(figsize=(30, 30))
        ax.tick_params(axis='both', which='major', labelsize=50)
        plt.hist(frequencies, color="#ACDDE7", rwidth=0.7)
        fig.savefig(path, bbox_inches='tight')
        print(end="\r", flush=True)
        print(f"Plotting ended, result: {path[3:]}")

    def fft_single_ping(
            self, signal: "np.ndarray[int, int]", params_freqs: "ParamsFrequencies"
    ) -> int:
        """
        Finds the frequency with the largest FFT magnitude inside the band
        of interest.

        :param signal: spike counts per epoch; the 1 ms sampling period below
            makes the resulting frequencies come out in Hz.
        :param params_freqs: its `frequencies` attribute bounds the band.
        :return: the dominant frequency within the band.
        """
        fft_data = fft.fft(signal)
        # d = 1 ms sampling period => frequency bins in Hz
        freqs = fft.fftfreq(len(signal), d=1 / 1000)
        # indices of the (positive) frequency bins inside the band of interest
        gamma_indices = np.argwhere(
            (freqs >= params_freqs.frequencies[0]) &
            (freqs <= params_freqs.frequencies[-1])
        ).flatten()
        max_i = np.argmax(np.abs(fft_data[gamma_indices]))
        # fix: the original applied np.abs twice to the selected frequency;
        # one application is sufficient and yields the same value
        return np.abs(freqs[gamma_indices][max_i])

    def tfr_single_ping(
            self, signal: "np.ndarray[int, int]", simulation_time: int, params_freqs: "ParamsFrequencies"
    ) -> int:
        """
        Finds the dominant frequency via wavelet-based time-frequency analysis:
        the signal is convolved (in the frequency domain) with each complex
        wavelet, and the frequency with the highest mean magnitude wins.

        :param signal: number of excitatory neurons fired at the relevant
            epochs of the simulation.
        :param simulation_time: number of epochs the simulation ran for.
        :param params_freqs: wavelets, candidate frequencies and trim size.
        :return: the candidate frequency with the largest mean power.
        """
        t = [i / 0.001 for i in range(1, simulation_time + 1)]
        t = t[298:]
        # NOTE(review): t is trimmed at 298 while the signal built in
        # compute_for_all_pings starts at epoch 299 -- confirm the alignment
        # the size of the data + zero padding
        nr_points = len(params_freqs.wt) + len(signal) - 1
        fft_data = fft.fft(signal, nr_points)
        tfr = np.zeros((len(params_freqs.frequencies), len(t)), dtype="complex_") * np.nan
        for fi in range(len(params_freqs.frequencies)):
            fft_wavelet = fft.fft(params_freqs.complex_wavelets[fi], nr_points)
            fft_wavelet = fft_wavelet / max(fft_wavelet)
            # convolution theorem: multiply spectra, then inverse-transform
            tmp = fft.ifft(fft_wavelet * fft_data, nr_points)
            # trim the edges, these are the bits we included by zero padding
            tfr[
                np.argwhere(np.array(params_freqs.frequencies) == params_freqs.frequencies[fi]).flatten(), :
            ] = tmp[params_freqs.half_wave_size: -params_freqs.half_wave_size + 1]
        with warnings.catch_warnings():
            # nanmean over untouched (all-NaN) rows warns; that is expected here
            warnings.simplefilter("ignore", category=RuntimeWarning)
            mx_i = int(np.argmax(np.nanmean(np.abs(tfr), 1)))
        return params_freqs.frequencies[mx_i]
| [
"numpy.abs",
"scipy.fft.ifft",
"matplotlib.pyplot.hist",
"tqdm.tqdm",
"warnings.catch_warnings",
"numpy.count_nonzero",
"numpy.array",
"numpy.argwhere",
"warnings.simplefilter",
"scipy.fft.fft",
"matplotlib.pyplot.subplots"
] | [((1780, 1810), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(30, 30)'}), '(figsize=(30, 30))\n', (1792, 1810), True, 'import matplotlib.pyplot as plt\n'), ((1885, 1935), 'matplotlib.pyplot.hist', 'plt.hist', (['frequencies'], {'color': '"""#ACDDE7"""', 'rwidth': '(0.7)'}), "(frequencies, color='#ACDDE7', rwidth=0.7)\n", (1893, 1935), True, 'import matplotlib.pyplot as plt\n'), ((2352, 2367), 'scipy.fft.fft', 'fft.fft', (['signal'], {}), '(signal)\n', (2359, 2367), False, 'from scipy import fft\n'), ((2713, 2729), 'numpy.abs', 'np.abs', (['freq_max'], {}), '(freq_max)\n', (2719, 2729), True, 'import numpy as np\n'), ((2746, 2766), 'numpy.abs', 'np.abs', (['freq_max_abs'], {}), '(freq_max_abs)\n', (2752, 2766), True, 'import numpy as np\n'), ((3501, 3527), 'scipy.fft.fft', 'fft.fft', (['signal', 'nr_points'], {}), '(signal, nr_points)\n', (3508, 3527), False, 'from scipy import fft\n'), ((594, 646), 'tqdm.tqdm', 'tqdm', (['simulation_outcome.grid_geometry.ping_networks'], {}), '(simulation_outcome.grid_geometry.ping_networks)\n', (598, 646), False, 'from tqdm import tqdm\n'), ((1266, 1301), 'numpy.array', 'np.array', (['spikes_ex_per_times[299:]'], {}), '(spikes_ex_per_times[299:])\n', (1274, 1301), True, 'import numpy as np\n'), ((2610, 2641), 'numpy.abs', 'np.abs', (['fft_data[gamma_indices]'], {}), '(fft_data[gamma_indices])\n', (2616, 2641), True, 'import numpy as np\n'), ((3704, 3757), 'scipy.fft.fft', 'fft.fft', (['params_freqs.complex_wavelets[fi]', 'nr_points'], {}), '(params_freqs.complex_wavelets[fi], nr_points)\n', (3711, 3757), False, 'from scipy import fft\n'), ((3834, 3877), 'scipy.fft.ifft', 'fft.ifft', (['(fft_wavelet * fft_data)', 'nr_points'], {}), '(fft_wavelet * fft_data, nr_points)\n', (3842, 3877), False, 'from scipy import fft\n'), ((4174, 4199), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (4197, 4199), False, 'import warnings\n'), ((4213, 4269), 'warnings.simplefilter', 
'warnings.simplefilter', (['"""ignore"""'], {'category': 'RuntimeWarning'}), "('ignore', category=RuntimeWarning)\n", (4234, 4269), False, 'import warnings\n'), ((1136, 1179), 'numpy.count_nonzero', 'np.count_nonzero', (['(spikes_times_in_ping == t)'], {}), '(spikes_times_in_ping == t)\n', (1152, 1179), True, 'import numpy as np\n'), ((2446, 2544), 'numpy.argwhere', 'np.argwhere', (['((freqs >= params_freqs.frequencies[0]) & (freqs <= params_freqs.\n frequencies[-1]))'], {}), '((freqs >= params_freqs.frequencies[0]) & (freqs <= params_freqs\n .frequencies[-1]))\n', (2457, 2544), True, 'import numpy as np\n'), ((847, 882), 'numpy.array', 'np.array', (['simulation_outcome.spikes'], {}), '(simulation_outcome.spikes)\n', (855, 882), True, 'import numpy as np\n'), ((1022, 1057), 'numpy.array', 'np.array', (['simulation_outcome.spikes'], {}), '(simulation_outcome.spikes)\n', (1030, 1057), True, 'import numpy as np\n'), ((4314, 4325), 'numpy.abs', 'np.abs', (['tfr'], {}), '(tfr)\n', (4320, 4325), True, 'import numpy as np\n'), ((3996, 4030), 'numpy.array', 'np.array', (['params_freqs.frequencies'], {}), '(params_freqs.frequencies)\n', (4004, 4030), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
def rolling_window_sequences(X, window_size, target_size, value_column, time_column):
    """Slice a sorted timeseries DataFrame into overlapping input/target windows.

    Consecutive windows overlap by ``window_size - 1`` rows, producing arrays
    suitable for training a timeseries forecasting model. Assumes ``X`` is
    sorted by timestamp.

    Args:
        X (pandas.DataFrame): sorted dataframe holding the timestamp and value
            columns; timestamps are in UNIX format (seconds).
        window_size (int): number of rows in each input window.
        target_size (int): number of rows in each forecasting target.
        value_column (string): name of the column holding the values.
        time_column (string): name of the column holding the timestamps.

    Returns:
        (numpy.ndarray): input windows, each with ``window_size`` rows.
        (numpy.ndarray): forecasting targets, each with ``target_size`` rows.
        (numpy.ndarray): one timestamp per window (the row just after each
            window's first target row).

    NOTE(review): the loop stops at ``len(X) - window_size - target_size``
    exclusive, so the final possible window is never emitted -- confirm
    whether that is intentional before relying on the output length.
    """
    inputs, targets, timestamps = [], [], []
    values = X[value_column]
    times = X[time_column]
    n_windows = len(X) - window_size - target_size
    for start in range(n_windows):
        split = start + window_size
        inputs.append(values.iloc[start:split].values.reshape([-1, 1]))
        targets.append(values.iloc[split:split + target_size].values)
        timestamps.append(times.iloc[split + 1])
    return np.asarray(inputs), np.asarray(targets), np.asarray(timestamps)
def time_segments_average(X, interval, value_column, time_column):
    """Aggregate a sorted timeseries by averaging over fixed-length intervals.

    Averaging starts from the smallest timestamp in the dataframe and proceeds
    in steps of ``interval`` seconds until the largest timestamp is covered.
    Assumes the input is timeseries sorted.

    Fix: the original masked each interval as ``(start, upper]``, which silently
    excluded the very first sample (the row whose timestamp equals the minimum)
    from every average, and its ``while start_ts < end_time`` loop relied on
    that off-by-one to cover the last sample. Intervals are now the standard
    half-open ``[start, upper)`` and the loop runs while ``start_ts <= end_time``
    so both endpoints are aggregated exactly once.

    Args:
        X (pandas.DataFrame): sorted dataframe holding the timestamp and value
            columns; timestamps are in UNIX format (seconds).
        interval (int): number of seconds in the desired interval.
        value_column (string): name of the column holding the values.
        time_column (string): name of the column holding the timestamps.

    Returns:
        pandas.DataFrame: two columns (timestamp and value), where each
            timestamp is the starting time of an interval and the value is
            the mean of the samples falling inside it (NaN for empty
            intervals, since ``mean(skipna=True)`` of nothing is NaN).
    """
    start_ts = X[time_column].iloc[0]   # min value
    end_time = X[time_column].iloc[-1]  # max value in dataframe
    accepted_points = []
    while start_ts <= end_time:
        # average the values in the half-open window [start_ts, start_ts + interval)
        upper_ts = start_ts + interval
        mask = (X[time_column] >= start_ts) & (X[time_column] < upper_ts)
        average_value = X.loc[mask][value_column].mean(skipna=True)
        accepted_points.append([start_ts, average_value])
        start_ts = upper_ts  # advance to the next interval
    return pd.DataFrame(accepted_points, columns=[time_column, value_column])
| [
"pandas.DataFrame",
"numpy.asarray"
] | [((3295, 3361), 'pandas.DataFrame', 'pd.DataFrame', (['accepted_points'], {'columns': '[time_column, value_column]'}), '(accepted_points, columns=[time_column, value_column])\n', (3307, 3361), True, 'import pandas as pd\n'), ((1601, 1621), 'numpy.asarray', 'np.asarray', (['output_X'], {}), '(output_X)\n', (1611, 1621), True, 'import numpy as np\n'), ((1623, 1636), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (1633, 1636), True, 'import numpy as np\n'), ((1638, 1654), 'numpy.asarray', 'np.asarray', (['time'], {}), '(time)\n', (1648, 1654), True, 'import numpy as np\n')] |
# @Date : 2019-10-22
# @Author : <NAME>
import os
import numpy as np
import math
import torch
import torch.nn as nn
from torchvision.utils import make_grid
from imageio import imsave
from tqdm import tqdm
from copy import deepcopy
import logging
from utils.inception_score import get_inception_score
from utils.fid_score import calculate_fid_given_paths
from utils.genotype import alpha2genotype, beta2genotype, draw_graph_D, draw_graph_G
logger = logging.getLogger(__name__)
def train(args, gen_net: nn.Module, dis_net: nn.Module, gen_optimizer, dis_optimizer, gen_avg_param, train_loader,
          epoch,
          writer_dict, lr_schedulers, architect_gen=None, architect_dis=None):
    """Run one epoch of hinge-loss GAN training, optionally interleaved with
    architecture-search updates for the generator (G) and discriminator (D).

    Args:
        args: hyper-parameter namespace (latent_dim, gen_bs, n_critic,
            print_freq, amending_coefficient, max_epoch_D, max_iter_D,
            derive_per_epoch, draw_arch, path_helper, ...).
        gen_net: generator; expected to expose ``.module`` (e.g. a
            DataParallel wrapper) — accessed below when deriving genotypes.
        dis_net: discriminator, likewise wrapped.
        gen_optimizer, dis_optimizer: optimizers for G and D weights.
        gen_avg_param: tensors holding the EMA copy of G's weights,
            updated in place on every G step.
        train_loader: iterable of (images, labels) batches; labels unused.
        epoch: current epoch index (logging only).
        writer_dict: dict with a TensorBoard 'writer' and the mutable
            'train_global_steps' counter (incremented once per batch here).
        lr_schedulers: optional (gen_scheduler, dis_scheduler) pair stepped
            on every G update.
        architect_gen, architect_dis: optional architecture-search objects;
            when given, their .step() runs on held-out data / fresh noise.
    """
    writer = writer_dict['writer']
    gen_step = 0  # counts G updates; also guards the verbose log so g_loss exists

    # train mode
    gen_net = gen_net.train()
    dis_net = dis_net.train()

    for iter_idx, (imgs, _) in enumerate(tqdm(train_loader)):
        global_steps = writer_dict['train_global_steps']

        real_imgs = imgs.type(torch.cuda.FloatTensor)
        # First half of the batch trains D's weights; second half is reserved
        # for the architecture-search step.
        real_imgs_w = real_imgs[:imgs.shape[0] // 2]
        real_imgs_arch = real_imgs[imgs.shape[0] // 2:]

        # sample noise
        z = torch.cuda.FloatTensor(np.random.normal(0, 1, (imgs.shape[0] // 2, args.latent_dim)))

        # search arch of D
        if architect_dis:
            # sample noise
            search_z = torch.cuda.FloatTensor(np.random.normal(0, 1, (imgs.shape[0] // 2, args.latent_dim)))
            if args.amending_coefficient:
                architect_dis.step(dis_net, real_imgs_arch, gen_net, search_z, real_imgs_train=real_imgs_w, train_z=z,
                                   eta=args.amending_coefficient)
            else:
                architect_dis.step(dis_net, real_imgs_arch, gen_net, search_z)

        # train weights of D
        dis_optimizer.zero_grad()
        real_validity = dis_net(real_imgs_w)
        fake_imgs = gen_net(z).detach()  # detach: no gradient flows into G here
        assert fake_imgs.size() == real_imgs_w.size()
        fake_validity = dis_net(fake_imgs)
        # Hinge loss
        d_loss = torch.mean(nn.ReLU(inplace=True)(1.0 - real_validity)) + \
                 torch.mean(nn.ReLU(inplace=True)(1 + fake_validity))
        d_loss.backward()
        dis_optimizer.step()
        writer.add_scalar('d_loss', d_loss.item(), global_steps)

        # sample noise
        gen_z = torch.cuda.FloatTensor(np.random.normal(0, 1, (args.gen_bs, args.latent_dim)))

        # search arch of G (same every-n_critic cadence as the weight update)
        if architect_gen:
            if global_steps % args.n_critic == 0:
                # sample noise
                search_z = torch.cuda.FloatTensor(np.random.normal(0, 1, (args.gen_bs, args.latent_dim)))
                if args.amending_coefficient:
                    architect_gen.step(search_z, gen_net, dis_net, train_z=gen_z, eta=args.amending_coefficient)
                else:
                    architect_gen.step(search_z, gen_net, dis_net)

        # train weights of G
        if global_steps % args.n_critic == 0:
            gen_optimizer.zero_grad()
            gen_imgs = gen_net(gen_z)
            fake_validity = dis_net(gen_imgs)
            # Hinge loss (generator side: maximize D's score on fakes)
            g_loss = -torch.mean(fake_validity)
            g_loss.backward()
            gen_optimizer.step()

            # learning rate
            if lr_schedulers:
                gen_scheduler, dis_scheduler = lr_schedulers
                g_lr = gen_scheduler.step(global_steps)
                d_lr = dis_scheduler.step(global_steps)
                writer.add_scalar('LR/g_lr', g_lr, global_steps)
                writer.add_scalar('LR/d_lr', d_lr, global_steps)

            # moving average weight (EMA of G's parameters)
            # NOTE(review): add_(0.001, p.data) is the deprecated
            # add_(scalar, tensor) overload; newer torch expects
            # add_(p.data, alpha=0.001).
            for p, avg_p in zip(gen_net.parameters(), gen_avg_param):
                avg_p.mul_(0.999).add_(0.001, p.data)
            writer.add_scalar('g_loss', g_loss.item(), global_steps)
            gen_step += 1

        # verbose (gen_step > 0 guarantees g_loss has been assigned)
        if gen_step and iter_idx % args.print_freq == 0:
            tqdm.write(
                '[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]' %
                (
                    epoch, args.max_epoch_D, iter_idx % len(train_loader), len(train_loader), d_loss.item(), g_loss.item()))

        writer_dict['train_global_steps'] = global_steps + 1

        if architect_gen:
            # deriving arch of G/D during searching
            # FIX: compute derive_freq_iter only when derive_per_epoch > 0;
            # the original divided by args.derive_per_epoch before checking
            # it, raising ZeroDivisionError when it was 0.
            if args.derive_per_epoch > 0:
                derive_freq_iter = math.floor((args.max_iter_D / args.max_epoch_D) / args.derive_per_epoch)
                if iter_idx % derive_freq_iter == 0:
                    genotype_G = alpha2genotype(gen_net.module.alphas_normal, gen_net.module.alphas_up, save=True,
                                                file_path=os.path.join(args.path_helper['genotypes_path'],
                                                                       str(epoch) + '_' + str(iter_idx) + '_G.npy'))
                    genotype_D = beta2genotype(dis_net.module.alphas_normal, dis_net.module.alphas_down, save=True,
                                               file_path=os.path.join(args.path_helper['genotypes_path'],
                                                                      str(epoch) + '_' + str(iter_idx) + '_D.npy'))
                    if args.draw_arch:
                        draw_graph_G(genotype_G, save=True,
                                     file_path=os.path.join(args.path_helper['graph_vis_path'],
                                                           str(epoch) + '_' + str(iter_idx) + '_G'))
                        draw_graph_D(genotype_D, save=True,
                                     file_path=os.path.join(args.path_helper['graph_vis_path'],
                                                           str(epoch) + '_' + str(iter_idx) + '_D'))
def validate(args, fixed_z, fid_stat, gen_net: nn.Module, writer_dict):
    """Evaluate the generator: sample images, compute Inception Score and FID,
    and log grid/scalars to TensorBoard.

    Args:
        args: namespace with path_helper, num_eval_imgs, eval_batch_size,
            latent_dim.
        fixed_z: fixed latent batch used only for the logged image grid.
        fid_stat: path to precomputed FID statistics for the reference set.
        gen_net: generator network (switched to eval mode here).
        writer_dict: dict with 'writer' and the mutable 'valid_global_steps'
            counter (incremented once per call).

    Returns:
        (mean, std, fid_score): Inception Score mean/std and the FID value.
    """
    writer = writer_dict['writer']
    global_steps = writer_dict['valid_global_steps']

    # eval mode
    gen_net = gen_net.eval()

    # generate images
    sample_imgs = gen_net(fixed_z)
    img_grid = make_grid(sample_imgs, nrow=10, normalize=True, scale_each=True)

    # get fid and inception score
    # Sampled images are written to disk because the FID scorer reads a path.
    fid_buffer_dir = os.path.join(args.path_helper['sample_path'], 'fid_buffer')
    os.makedirs(fid_buffer_dir, exist_ok=True)

    eval_iter = args.num_eval_imgs // args.eval_batch_size
    img_list = list()
    for iter_idx in tqdm(range(eval_iter), desc='sample images'):
        z = torch.cuda.FloatTensor(np.random.normal(0, 1, (args.eval_batch_size, args.latent_dim)))

        # generate a batch of images
        # Map generator output from [-1, 1] to uint8 [0, 255] and move the
        # channel axis last (NCHW -> NHWC) for image saving.
        gen_imgs = gen_net(z).mul_(127.5).add_(127.5).clamp_(0.0, 255.0).permute(0, 2, 3, 1).to('cpu',
                                                                                                torch.uint8).numpy()
        for img_idx, img in enumerate(gen_imgs):
            file_name = os.path.join(fid_buffer_dir, f'iter{iter_idx}_b{img_idx}.png')
            imsave(file_name, img)
        img_list.extend(list(gen_imgs))

    # get inception score
    logger.info('=> calculate inception score')
    mean, std = get_inception_score(img_list)

    # get fid score
    logger.info('=> calculate fid score')
    fid_score = calculate_fid_given_paths([fid_buffer_dir, fid_stat], inception_path=None)

    # del buffer
    # NOTE(review): shells out to `rm -r`, which is POSIX-only; shutil.rmtree
    # would be portable.
    os.system('rm -r {}'.format(fid_buffer_dir))

    writer.add_image('sampled_images', img_grid, global_steps)
    writer.add_scalar('Inception_score/mean', mean, global_steps)
    writer.add_scalar('Inception_score/std', std, global_steps)
    writer.add_scalar('FID_score', fid_score, global_steps)

    writer_dict['valid_global_steps'] = global_steps + 1
    return mean, std, fid_score
class LinearLrDecay(object):
    """Linearly anneal an optimizer's learning rate.

    The rate stays at ``start_lr`` until ``decay_start_step``, decreases
    linearly to ``end_lr`` at ``decay_end_step``, and stays there afterwards.
    """

    def __init__(self, optimizer, start_lr, end_lr, decay_start_step, decay_end_step):
        assert start_lr > end_lr
        self.optimizer = optimizer
        self.start_lr = start_lr
        self.end_lr = end_lr
        self.decay_start_step = decay_start_step
        self.decay_end_step = decay_end_step
        # lr decrease per step inside the decay window
        self.delta = (start_lr - end_lr) / (decay_end_step - decay_start_step)

    def step(self, current_step):
        """Set every param group's lr for ``current_step`` and return it."""
        if current_step >= self.decay_end_step:
            lr = self.end_lr
        elif current_step > self.decay_start_step:
            lr = self.start_lr - self.delta * (current_step - self.decay_start_step)
        else:
            lr = self.start_lr
        for group in self.optimizer.param_groups:
            group['lr'] = lr
        return lr
def load_params(model, new_param):
    """Overwrite each parameter of ``model`` in place with the matching tensor
    from ``new_param`` (paired positionally with ``model.parameters()``)."""
    for target, source in zip(model.parameters(), new_param):
        target.data.copy_(source)
def copy_params(model):
    """Return independent deep copies of the model's raw parameter tensors
    (their ``.data``), e.g. for maintaining an EMA snapshot."""
    snapshot = [param.data for param in model.parameters()]
    return deepcopy(snapshot)
| [
"logging.getLogger",
"numpy.random.normal",
"torch.nn.ReLU",
"os.makedirs",
"math.floor",
"imageio.imsave",
"torch.mean",
"tqdm.tqdm",
"os.path.join",
"utils.inception_score.get_inception_score",
"torchvision.utils.make_grid",
"utils.fid_score.calculate_fid_given_paths"
] | [((455, 482), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (472, 482), False, 'import logging\n'), ((5942, 6006), 'torchvision.utils.make_grid', 'make_grid', (['sample_imgs'], {'nrow': '(10)', 'normalize': '(True)', 'scale_each': '(True)'}), '(sample_imgs, nrow=10, normalize=True, scale_each=True)\n', (5951, 6006), False, 'from torchvision.utils import make_grid\n'), ((6063, 6122), 'os.path.join', 'os.path.join', (["args.path_helper['sample_path']", '"""fid_buffer"""'], {}), "(args.path_helper['sample_path'], 'fid_buffer')\n", (6075, 6122), False, 'import os\n'), ((6127, 6169), 'os.makedirs', 'os.makedirs', (['fid_buffer_dir'], {'exist_ok': '(True)'}), '(fid_buffer_dir, exist_ok=True)\n', (6138, 6169), False, 'import os\n'), ((6978, 7007), 'utils.inception_score.get_inception_score', 'get_inception_score', (['img_list'], {}), '(img_list)\n', (6997, 7007), False, 'from utils.inception_score import get_inception_score\n'), ((7087, 7161), 'utils.fid_score.calculate_fid_given_paths', 'calculate_fid_given_paths', (['[fid_buffer_dir, fid_stat]'], {'inception_path': 'None'}), '([fid_buffer_dir, fid_stat], inception_path=None)\n', (7112, 7161), False, 'from utils.fid_score import calculate_fid_given_paths\n'), ((868, 886), 'tqdm.tqdm', 'tqdm', (['train_loader'], {}), '(train_loader)\n', (872, 886), False, 'from tqdm import tqdm\n'), ((1169, 1230), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(imgs.shape[0] // 2, args.latent_dim)'], {}), '(0, 1, (imgs.shape[0] // 2, args.latent_dim))\n', (1185, 1230), True, 'import numpy as np\n'), ((2343, 2397), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(args.gen_bs, args.latent_dim)'], {}), '(0, 1, (args.gen_bs, args.latent_dim))\n', (2359, 2397), True, 'import numpy as np\n'), ((4325, 4395), 'math.floor', 'math.floor', (['(args.max_iter_D / args.max_epoch_D / args.derive_per_epoch)'], {}), '(args.max_iter_D / args.max_epoch_D / args.derive_per_epoch)\n', (4335, 4395), 
False, 'import math\n'), ((6353, 6416), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(args.eval_batch_size, args.latent_dim)'], {}), '(0, 1, (args.eval_batch_size, args.latent_dim))\n', (6369, 6416), True, 'import numpy as np\n'), ((6749, 6811), 'os.path.join', 'os.path.join', (['fid_buffer_dir', 'f"""iter{iter_idx}_b{img_idx}.png"""'], {}), "(fid_buffer_dir, f'iter{iter_idx}_b{img_idx}.png')\n", (6761, 6811), False, 'import os\n'), ((6824, 6846), 'imageio.imsave', 'imsave', (['file_name', 'img'], {}), '(file_name, img)\n', (6830, 6846), False, 'from imageio import imsave\n'), ((1359, 1420), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(imgs.shape[0] // 2, args.latent_dim)'], {}), '(0, 1, (imgs.shape[0] // 2, args.latent_dim))\n', (1375, 1420), True, 'import numpy as np\n'), ((3132, 3157), 'torch.mean', 'torch.mean', (['fake_validity'], {}), '(fake_validity)\n', (3142, 3157), False, 'import torch\n'), ((2041, 2062), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2048, 2062), True, 'import torch.nn as nn\n'), ((2117, 2138), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2124, 2138), True, 'import torch.nn as nn\n'), ((2583, 2637), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(args.gen_bs, args.latent_dim)'], {}), '(0, 1, (args.gen_bs, args.latent_dim))\n', (2599, 2637), True, 'import numpy as np\n')] |
import os
from copy import deepcopy
import numpy as np
import torch
import goalrepresent as gr
from goalrepresent.helper.randomhelper import set_seed
from goalrepresent.models import PCAModel
class BCSpectrumFourierModel():
@staticmethod
def default_config():
default_config = gr.Config()
default_config.set_BC_range = True
return default_config
def __init__(self, config=None, **kwargs):
set_seed(0)
self.config = gr.config.update_config(kwargs, config, self.__class__.default_config())
self.threshold = 50
self.n_sections = 20
self.n_orientations = 1
self.n_latents = 8
self.img_size = (256, 256)
self.regions_masks = self.get_regions_masks()
checkpoint_filepath = os.path.join(os.path.dirname(__file__), 'reference_dataset_pca_fourier_spectrum_descriptors_model.pickle')
self.pca_model = PCAModel.load_model(checkpoint_filepath)
if self.config.set_BC_range:
# computed on external reference dataset of 20 000 images -> np.percentile(0.01 - 0.99)
range = np.load(os.path.join(os.path.dirname(__file__), 'reference_dataset_pca_fourier_spectrum_descriptors_range.npz'))
self.BC_range = [range['low'], range['high']]
else:
self.BC_range = [np.zeros(self.n_latents), np.ones(self.n_latents)]
return
    def get_regions_masks(self):
        """Partition the (half) Fourier plane into polar regions.

        Builds n_sections radial rings covering [0, R] (R = img_size[0] // 2)
        crossed with n_orientations angular wedges, and returns one boolean
        mask per region over an (R, 2R) half-plane grid.

        Returns:
            list of boolean torch tensors, one mask per (ring, wedge) region,
            n_sections * n_orientations in total.
        """
        regions_masks = []
        # create sectors
        R = self.img_size[0] // 2
        # Radial intervals [k/n * R, (k+1)/n * R) for each ring k.
        section_regions = [(ring_idx / self.n_sections * R, (ring_idx + 1) / self.n_sections * R) for ring_idx in
                           range(self.n_sections)]
        # concatenate first and last regions
        # First wedge starts at -pi/2; `offset` marks where the regular
        # wedges begin so the edge wedge can wrap around to +pi/2 below.
        orientation_regions = [(-np.pi / 2, -np.pi / 2 + 1 / self.n_orientations * np.pi / 2)]
        offset = -np.pi / 2 + 1 / self.n_orientations * np.pi / 2
        orientation_regions += [
            (offset + wedge_idx / self.n_orientations * np.pi, offset + (wedge_idx + 1) / self.n_orientations * np.pi)
            for
            wedge_idx in range(self.n_orientations - 1)]
        # NOTE(review): torch.range is deprecated (endpoint-inclusive);
        # torch.arange(0, R) / torch.arange(-R, R) would be the modern
        # float-careful equivalents.
        grid_x, grid_y = torch.meshgrid(torch.range(0, R - 1, 1), torch.range(-R, R - 1, 1))
        grid_r = (grid_x ** 2 + grid_y ** 2).sqrt()
        # NOTE(review): grid_x == 0 on the first row makes grid_y / grid_x
        # inf/NaN before atan; presumably those cells fall outside every
        # mask — TODO confirm.
        grid_theta = torch.atan(grid_y / grid_x)
        # fill feature vector
        for section_region in section_regions:
            r1 = section_region[0]
            r2 = section_region[1]
            # edge region
            # The first wedge also captures angles wrapping past -offset up
            # to +pi/2 (the other half of the split edge wedge).
            theta1 = orientation_regions[0][0]
            theta2 = orientation_regions[0][1]
            region_mask = (grid_r >= r1) & (grid_r < r2) & (((grid_theta >= theta1) & (grid_theta < theta2)) | (
                    (grid_theta >= -offset) & (grid_theta <= np.pi / 2)))
            regions_masks.append(region_mask)
            # inner region
            for orientation_region in orientation_regions[1:]:
                theta1 = orientation_region[0]
                theta2 = orientation_region[1]
                region_mask = (grid_r >= r1) & (grid_r < r2) & (grid_theta >= theta1) & (grid_theta < theta2)
                regions_masks.append(region_mask)
        return regions_masks
def roll_n(self, X, axis, n):
f_idx = tuple(slice(None, None, None) if i != axis else slice(0, n, None)
for i in range(X.dim()))
b_idx = tuple(slice(None, None, None) if i != axis else slice(n, None, None)
for i in range(X.dim()))
front = X[f_idx]
back = X[b_idx]
return torch.cat([back, front], axis)
    def spectrum_fourier_descriptors(self, image):
        """Compute per-region mean/std statistics of the image's Fourier
        power spectrum.

        Args:
            image: 2-D numpy array (a single-channel image).

        Returns:
            numpy vector of length 2 * n_sections * n_orientations holding
            (mean, std) of the thresholded power spectrum for each polar
            region from ``self.regions_masks``.
        """
        image = torch.from_numpy(image)
        if torch.cuda.is_available():
            image = image.cuda()
        # power spectrum
        # NOTE(review): torch.rfft was removed in PyTorch 1.8; the modern
        # equivalent is torch.fft.fft2(image, norm='ortho') with
        # abs()**2 for the power spectrum.
        spectrum = torch.rfft(image, signal_ndim=2, onesided=False, normalized=True)
        power_spectrum = (spectrum[:, :, 0] ** 2 + spectrum[:, :, 1] ** 2)
        # shift to be centered (like fftshift: DC component in the middle)
        power_spectrum = self.roll_n(power_spectrum, 1, power_spectrum.size(1) // 2)
        power_spectrum = self.roll_n(power_spectrum, 0, power_spectrum.size(0) // 2)
        # remove unrelevant frequencies (zero out everything below the mean)
        power_spectrum[power_spectrum < power_spectrum.mean()] = 0
        # Keep the lower half-plane only; the spectrum of a real image is
        # symmetric, so half suffices (masks are built on a half-plane grid).
        half_power_spectrum = power_spectrum[power_spectrum.size(0) // 2:, :]
        # feature vector
        n_regions = self.n_sections * self.n_orientations
        feature_vector = torch.zeros(2 * n_regions)
        cur_region_idx = 0
        for region_mask in self.regions_masks:
            # NOTE(review): the per-iteration deepcopy looks unnecessary —
            # boolean indexing already returns a new tensor.
            region_power_spectrum = deepcopy(half_power_spectrum)[region_mask]
            feature_vector[2 * cur_region_idx] = region_power_spectrum.mean()
            feature_vector[2 * cur_region_idx + 1] = region_power_spectrum.std()
            cur_region_idx += 1
        return feature_vector.cpu().numpy()
def calc_embedding(self, x, **kwargs):
# x: numpy H*W
coefficients = self.spectrum_fourier_descriptors(x.cpu().squeeze().numpy())
z = self.pca_model.calc_embedding(coefficients.reshape(1, -1)).squeeze()
normalized_z = (z - self.BC_range[0]) / (self.BC_range[1] - self.BC_range[0])
normalized_z = torch.from_numpy(normalized_z).unsqueeze(0)
return normalized_z | [
"numpy.ones",
"goalrepresent.helper.randomhelper.set_seed",
"goalrepresent.Config",
"torch.rfft",
"goalrepresent.models.PCAModel.load_model",
"torch.from_numpy",
"os.path.dirname",
"numpy.zeros",
"torch.cuda.is_available",
"torch.range",
"copy.deepcopy",
"torch.zeros",
"torch.atan",
"torch... | [((298, 309), 'goalrepresent.Config', 'gr.Config', ([], {}), '()\n', (307, 309), True, 'import goalrepresent as gr\n'), ((439, 450), 'goalrepresent.helper.randomhelper.set_seed', 'set_seed', (['(0)'], {}), '(0)\n', (447, 450), False, 'from goalrepresent.helper.randomhelper import set_seed\n'), ((915, 955), 'goalrepresent.models.PCAModel.load_model', 'PCAModel.load_model', (['checkpoint_filepath'], {}), '(checkpoint_filepath)\n', (934, 955), False, 'from goalrepresent.models import PCAModel\n'), ((2279, 2306), 'torch.atan', 'torch.atan', (['(grid_y / grid_x)'], {}), '(grid_y / grid_x)\n', (2289, 2306), False, 'import torch\n'), ((3551, 3581), 'torch.cat', 'torch.cat', (['[back, front]', 'axis'], {}), '([back, front], axis)\n', (3560, 3581), False, 'import torch\n'), ((3651, 3674), 'torch.from_numpy', 'torch.from_numpy', (['image'], {}), '(image)\n', (3667, 3674), False, 'import torch\n'), ((3686, 3711), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3709, 3711), False, 'import torch\n'), ((3791, 3856), 'torch.rfft', 'torch.rfft', (['image'], {'signal_ndim': '(2)', 'onesided': '(False)', 'normalized': '(True)'}), '(image, signal_ndim=2, onesided=False, normalized=True)\n', (3801, 3856), False, 'import torch\n'), ((4429, 4455), 'torch.zeros', 'torch.zeros', (['(2 * n_regions)'], {}), '(2 * n_regions)\n', (4440, 4455), False, 'import torch\n'), ((796, 821), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (811, 821), False, 'import os\n'), ((2153, 2177), 'torch.range', 'torch.range', (['(0)', '(R - 1)', '(1)'], {}), '(0, R - 1, 1)\n', (2164, 2177), False, 'import torch\n'), ((2179, 2204), 'torch.range', 'torch.range', (['(-R)', '(R - 1)', '(1)'], {}), '(-R, R - 1, 1)\n', (2190, 2204), False, 'import torch\n'), ((1328, 1352), 'numpy.zeros', 'np.zeros', (['self.n_latents'], {}), '(self.n_latents)\n', (1336, 1352), True, 'import numpy as np\n'), ((1354, 1377), 'numpy.ones', 'np.ones', (['self.n_latents'], {}), 
'(self.n_latents)\n', (1361, 1377), True, 'import numpy as np\n'), ((4567, 4596), 'copy.deepcopy', 'deepcopy', (['half_power_spectrum'], {}), '(half_power_spectrum)\n', (4575, 4596), False, 'from copy import deepcopy\n'), ((5188, 5218), 'torch.from_numpy', 'torch.from_numpy', (['normalized_z'], {}), '(normalized_z)\n', (5204, 5218), False, 'import torch\n'), ((1135, 1160), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1150, 1160), False, 'import os\n')] |
#!/usr/bin/env python
import numpy as np
import pytest
import raytracing as rt
def test_ray_penetrates_grid_1d_pos_x():
    """A ray entering before the grid and exiting past it (+x) marks every
    cell as a miss."""
    start = np.array([[-1.23]])
    end = np.array([[6.78]])
    shape = np.array([5])
    size = np.array([0.45])
    map_gt = rt.gridmap(shape, size)
    map_gt.misses[:5] = 1
    grid = rt.gridmap(shape, size)  # renamed from `map`: don't shadow the builtin
    rt.trace1d(start, end, grid)
    assert map_gt == grid
def test_ray_penetrates_grid_1d_neg_x():
    """A ray crossing the whole grid in the -x direction marks every cell
    as a miss."""
    start = np.array([[1.98]])
    end = np.array([[-6.71]])
    shape = np.array([100])
    size = np.array([0.01])
    map_gt = rt.gridmap(shape, size)
    map_gt.misses = np.ones(100)
    grid = rt.gridmap(shape, size)  # renamed from `map`: don't shadow the builtin
    rt.trace1d(start, end, grid)
    assert map_gt == grid
def test_ray_starts_and_ends_in_grid_1d_pos_x():
    """A ray contained in the grid (+x): misses along the way, a hit in the
    cell where it terminates."""
    start = np.array([[0.671]])
    end = np.array([[0.985]])
    shape = np.array([100])
    size = np.array([0.01])
    map_gt = rt.gridmap(shape, size)
    map_gt.misses[67:98] = 1
    map_gt.hits[98] = 1
    grid = rt.gridmap(shape, size)  # renamed from `map`: don't shadow the builtin
    rt.trace1d(start, end, grid)
    assert map_gt == grid
def test_ray_starts_and_ends_in_grid_1d_neg_x():
    """A ray contained in the grid (-x): misses along the way, a hit in the
    cell where it terminates."""
    start = np.array([[0.985]])
    end = np.array([[0.671]])
    shape = np.array([100])
    size = np.array([0.01])
    map_gt = rt.gridmap(shape, size)
    map_gt.misses[98:67:-1] = 1
    map_gt.hits[67] = 1
    grid = rt.gridmap(shape, size)  # renamed from `map`: don't shadow the builtin
    rt.trace1d(start, end, grid)
    assert map_gt == grid
def test_ray_starts_in_grid_and_ends_outside_1d_pos_x():
    """A ray starting inside and leaving through the +x boundary marks only
    misses (no terminal hit inside the grid)."""
    start = np.array([[12.1]])
    end = np.array([[1009.7]])
    shape = np.array([51])
    size = np.array([3.0])
    map_gt = rt.gridmap(shape, size)
    map_gt.misses[4:51] = 1
    grid = rt.gridmap(shape, size)  # renamed from `map`: don't shadow the builtin
    rt.trace1d(start, end, grid)
    assert map_gt == grid
def test_ray_starts_in_grid_and_ends_outside_1d_neg_x():
    """A ray starting inside and leaving through the -x boundary marks only
    misses (no terminal hit inside the grid)."""
    start = np.array([[109.7]])
    end = np.array([[-12.1]])
    shape = np.array([51])
    size = np.array([3.0])
    map_gt = rt.gridmap(shape, size)
    map_gt.misses[:37] = 1
    grid = rt.gridmap(shape, size)  # renamed from `map`: don't shadow the builtin
    rt.trace1d(start, end, grid)
    assert map_gt == grid
def test_identical_start_and_end_2d_in_cell():
    """A zero-length ray inside a cell registers a single hit there."""
    point = np.array([[0.23, 0.25]])
    shape = np.array([50, 50])
    size = np.array([1.0, 1.0])
    map_gt = rt.gridmap(shape, size)
    map_gt.hits[0, 0] = 1
    grid = rt.gridmap(shape, size)  # renamed from `map`: don't shadow the builtin
    rt.trace2d(point, point, grid)
    assert map_gt == grid
def test_identical_start_and_end_2d_on_grid_line():
    """A zero-length ray lying exactly on a grid line still hits cell (0, 0)."""
    point = np.array([[0.0, 0.0]])
    shape = np.array([50, 50])
    size = np.array([1.0, 1.0])
    map_gt = rt.gridmap(shape, size)
    map_gt.hits[0, 0] = 1
    grid = rt.gridmap(shape, size)  # renamed from `map`: don't shadow the builtin
    rt.trace2d(point, point, grid)
    assert map_gt == grid
def test_identical_start_and_end_2d_outside_grid():
    """A zero-length ray outside the grid leaves the map untouched."""
    point = np.array([[100.0, 100.0]])
    shape = np.array([50, 50])
    size = np.array([1.0, 1.0])
    map_gt = rt.gridmap(shape, size)
    grid = rt.gridmap(shape, size)  # renamed from `map`: don't shadow the builtin
    rt.trace2d(point, point, grid)
    assert map_gt == grid
def test_parallel_ray_tracing_2d():
    """Tracing several rays in one batched call accumulates the expected
    hits and misses per cell."""
    start = np.array(
        [[-1.5, +1.5],
         [+1.0, -2.0],
         [+3.5, -1.0],
         [+7.5, +1.0],
         [+5.5, +4.5],
         [-0.5, +2.0]])
    end = np.array(
        [[+2.5, +1.5],
         [+1.0, -0.5],
         [+3.5, +1.5],
         [+4.5, +0.5],
         [+5.5, +2.5],
         [+1.0, +3.5]])
    shape = np.array([6, 3])
    size = np.array([1, 1])
    map_gt = rt.gridmap(shape, size)
    map_gt.misses[:2, 1] += 1
    map_gt.hits[2, 1] += 1
    map_gt.misses[3, 0] += 1
    map_gt.hits[3, 1] += 1
    map_gt.misses[5, 0] += 1
    map_gt.hits[4, 0] += 1
    map_gt.hits[5, 2] += 1
    map_gt.misses[0, 2] += 1
    grid = rt.gridmap(shape, size)  # renamed from `map`: don't shadow the builtin
    rt.trace2d(start, end, grid)
    assert map_gt == grid
# Allow running this test module directly (outside an external pytest call).
if __name__ == '__main__':
    pytest.main()
| [
"numpy.ones",
"pytest.main",
"numpy.array",
"raytracing.gridmap",
"raytracing.trace2d",
"raytracing.trace1d"
] | [((135, 154), 'numpy.array', 'np.array', (['[[-1.23]]'], {}), '([[-1.23]])\n', (143, 154), True, 'import numpy as np\n'), ((165, 183), 'numpy.array', 'np.array', (['[[6.78]]'], {}), '([[6.78]])\n', (173, 183), True, 'import numpy as np\n'), ((196, 209), 'numpy.array', 'np.array', (['[5]'], {}), '([5])\n', (204, 209), True, 'import numpy as np\n'), ((221, 237), 'numpy.array', 'np.array', (['[0.45]'], {}), '([0.45])\n', (229, 237), True, 'import numpy as np\n'), ((252, 275), 'raytracing.gridmap', 'rt.gridmap', (['shape', 'size'], {}), '(shape, size)\n', (262, 275), True, 'import raytracing as rt\n'), ((317, 340), 'raytracing.gridmap', 'rt.gridmap', (['shape', 'size'], {}), '(shape, size)\n', (327, 340), True, 'import raytracing as rt\n'), ((345, 372), 'raytracing.trace1d', 'rt.trace1d', (['start', 'end', 'map'], {}), '(start, end, map)\n', (355, 372), True, 'import raytracing as rt\n'), ((455, 473), 'numpy.array', 'np.array', (['[[1.98]]'], {}), '([[1.98]])\n', (463, 473), True, 'import numpy as np\n'), ((484, 503), 'numpy.array', 'np.array', (['[[-6.71]]'], {}), '([[-6.71]])\n', (492, 503), True, 'import numpy as np\n'), ((516, 531), 'numpy.array', 'np.array', (['[100]'], {}), '([100])\n', (524, 531), True, 'import numpy as np\n'), ((543, 559), 'numpy.array', 'np.array', (['[0.01]'], {}), '([0.01])\n', (551, 559), True, 'import numpy as np\n'), ((574, 597), 'raytracing.gridmap', 'rt.gridmap', (['shape', 'size'], {}), '(shape, size)\n', (584, 597), True, 'import raytracing as rt\n'), ((618, 630), 'numpy.ones', 'np.ones', (['(100)'], {}), '(100)\n', (625, 630), True, 'import numpy as np\n'), ((642, 665), 'raytracing.gridmap', 'rt.gridmap', (['shape', 'size'], {}), '(shape, size)\n', (652, 665), True, 'import raytracing as rt\n'), ((670, 697), 'raytracing.trace1d', 'rt.trace1d', (['start', 'end', 'map'], {}), '(start, end, map)\n', (680, 697), True, 'import raytracing as rt\n'), ((788, 807), 'numpy.array', 'np.array', (['[[0.671]]'], {}), '([[0.671]])\n', (796, 
807), True, 'import numpy as np\n'), ((818, 837), 'numpy.array', 'np.array', (['[[0.985]]'], {}), '([[0.985]])\n', (826, 837), True, 'import numpy as np\n'), ((850, 865), 'numpy.array', 'np.array', (['[100]'], {}), '([100])\n', (858, 865), True, 'import numpy as np\n'), ((877, 893), 'numpy.array', 'np.array', (['[0.01]'], {}), '([0.01])\n', (885, 893), True, 'import numpy as np\n'), ((908, 931), 'raytracing.gridmap', 'rt.gridmap', (['shape', 'size'], {}), '(shape, size)\n', (918, 931), True, 'import raytracing as rt\n'), ((996, 1019), 'raytracing.gridmap', 'rt.gridmap', (['shape', 'size'], {}), '(shape, size)\n', (1006, 1019), True, 'import raytracing as rt\n'), ((1024, 1051), 'raytracing.trace1d', 'rt.trace1d', (['start', 'end', 'map'], {}), '(start, end, map)\n', (1034, 1051), True, 'import raytracing as rt\n'), ((1142, 1161), 'numpy.array', 'np.array', (['[[0.985]]'], {}), '([[0.985]])\n', (1150, 1161), True, 'import numpy as np\n'), ((1172, 1191), 'numpy.array', 'np.array', (['[[0.671]]'], {}), '([[0.671]])\n', (1180, 1191), True, 'import numpy as np\n'), ((1204, 1219), 'numpy.array', 'np.array', (['[100]'], {}), '([100])\n', (1212, 1219), True, 'import numpy as np\n'), ((1231, 1247), 'numpy.array', 'np.array', (['[0.01]'], {}), '([0.01])\n', (1239, 1247), True, 'import numpy as np\n'), ((1262, 1285), 'raytracing.gridmap', 'rt.gridmap', (['shape', 'size'], {}), '(shape, size)\n', (1272, 1285), True, 'import raytracing as rt\n'), ((1353, 1376), 'raytracing.gridmap', 'rt.gridmap', (['shape', 'size'], {}), '(shape, size)\n', (1363, 1376), True, 'import raytracing as rt\n'), ((1381, 1408), 'raytracing.trace1d', 'rt.trace1d', (['start', 'end', 'map'], {}), '(start, end, map)\n', (1391, 1408), True, 'import raytracing as rt\n'), ((1507, 1525), 'numpy.array', 'np.array', (['[[12.1]]'], {}), '([[12.1]])\n', (1515, 1525), True, 'import numpy as np\n'), ((1536, 1556), 'numpy.array', 'np.array', (['[[1009.7]]'], {}), '([[1009.7]])\n', (1544, 1556), True, 'import numpy as 
np\n'), ((1569, 1583), 'numpy.array', 'np.array', (['[51]'], {}), '([51])\n', (1577, 1583), True, 'import numpy as np\n'), ((1595, 1610), 'numpy.array', 'np.array', (['[3.0]'], {}), '([3.0])\n', (1603, 1610), True, 'import numpy as np\n'), ((1625, 1648), 'raytracing.gridmap', 'rt.gridmap', (['shape', 'size'], {}), '(shape, size)\n', (1635, 1648), True, 'import raytracing as rt\n'), ((1688, 1711), 'raytracing.gridmap', 'rt.gridmap', (['shape', 'size'], {}), '(shape, size)\n', (1698, 1711), True, 'import raytracing as rt\n'), ((1716, 1743), 'raytracing.trace1d', 'rt.trace1d', (['start', 'end', 'map'], {}), '(start, end, map)\n', (1726, 1743), True, 'import raytracing as rt\n'), ((1842, 1861), 'numpy.array', 'np.array', (['[[109.7]]'], {}), '([[109.7]])\n', (1850, 1861), True, 'import numpy as np\n'), ((1872, 1891), 'numpy.array', 'np.array', (['[[-12.1]]'], {}), '([[-12.1]])\n', (1880, 1891), True, 'import numpy as np\n'), ((1904, 1918), 'numpy.array', 'np.array', (['[51]'], {}), '([51])\n', (1912, 1918), True, 'import numpy as np\n'), ((1930, 1945), 'numpy.array', 'np.array', (['[3.0]'], {}), '([3.0])\n', (1938, 1945), True, 'import numpy as np\n'), ((1960, 1983), 'raytracing.gridmap', 'rt.gridmap', (['shape', 'size'], {}), '(shape, size)\n', (1970, 1983), True, 'import raytracing as rt\n'), ((2022, 2045), 'raytracing.gridmap', 'rt.gridmap', (['shape', 'size'], {}), '(shape, size)\n', (2032, 2045), True, 'import raytracing as rt\n'), ((2050, 2077), 'raytracing.trace1d', 'rt.trace1d', (['start', 'end', 'map'], {}), '(start, end, map)\n', (2060, 2077), True, 'import raytracing as rt\n'), ((2166, 2190), 'numpy.array', 'np.array', (['[[0.23, 0.25]]'], {}), '([[0.23, 0.25]])\n', (2174, 2190), True, 'import numpy as np\n'), ((2203, 2221), 'numpy.array', 'np.array', (['[50, 50]'], {}), '([50, 50])\n', (2211, 2221), True, 'import numpy as np\n'), ((2233, 2253), 'numpy.array', 'np.array', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (2241, 2253), True, 'import numpy as np\n'), 
((2268, 2291), 'raytracing.gridmap', 'rt.gridmap', (['shape', 'size'], {}), '(shape, size)\n', (2278, 2291), True, 'import raytracing as rt\n'), ((2328, 2351), 'raytracing.gridmap', 'rt.gridmap', (['shape', 'size'], {}), '(shape, size)\n', (2338, 2351), True, 'import raytracing as rt\n'), ((2356, 2385), 'raytracing.trace2d', 'rt.trace2d', (['point', 'point', 'map'], {}), '(point, point, map)\n', (2366, 2385), True, 'import raytracing as rt\n'), ((2479, 2501), 'numpy.array', 'np.array', (['[[0.0, 0.0]]'], {}), '([[0.0, 0.0]])\n', (2487, 2501), True, 'import numpy as np\n'), ((2514, 2532), 'numpy.array', 'np.array', (['[50, 50]'], {}), '([50, 50])\n', (2522, 2532), True, 'import numpy as np\n'), ((2544, 2564), 'numpy.array', 'np.array', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (2552, 2564), True, 'import numpy as np\n'), ((2579, 2602), 'raytracing.gridmap', 'rt.gridmap', (['shape', 'size'], {}), '(shape, size)\n', (2589, 2602), True, 'import raytracing as rt\n'), ((2639, 2662), 'raytracing.gridmap', 'rt.gridmap', (['shape', 'size'], {}), '(shape, size)\n', (2649, 2662), True, 'import raytracing as rt\n'), ((2667, 2696), 'raytracing.trace2d', 'rt.trace2d', (['point', 'point', 'map'], {}), '(point, point, map)\n', (2677, 2696), True, 'import raytracing as rt\n'), ((2790, 2816), 'numpy.array', 'np.array', (['[[100.0, 100.0]]'], {}), '([[100.0, 100.0]])\n', (2798, 2816), True, 'import numpy as np\n'), ((2829, 2847), 'numpy.array', 'np.array', (['[50, 50]'], {}), '([50, 50])\n', (2837, 2847), True, 'import numpy as np\n'), ((2859, 2879), 'numpy.array', 'np.array', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (2867, 2879), True, 'import numpy as np\n'), ((2894, 2917), 'raytracing.gridmap', 'rt.gridmap', (['shape', 'size'], {}), '(shape, size)\n', (2904, 2917), True, 'import raytracing as rt\n'), ((2929, 2952), 'raytracing.gridmap', 'rt.gridmap', (['shape', 'size'], {}), '(shape, size)\n', (2939, 2952), True, 'import raytracing as rt\n'), ((2957, 2986), 'raytracing.trace2d', 
'rt.trace2d', (['point', 'point', 'map'], {}), '(point, point, map)\n', (2967, 2986), True, 'import raytracing as rt\n'), ((3068, 3167), 'numpy.array', 'np.array', (['[[-1.5, +1.5], [+1.0, -2.0], [+3.5, -1.0], [+7.5, +1.0], [+5.5, +4.5], [-\n 0.5, +2.0]]'], {}), '([[-1.5, +1.5], [+1.0, -2.0], [+3.5, -1.0], [+7.5, +1.0], [+5.5, +\n 4.5], [-0.5, +2.0]])\n', (3076, 3167), True, 'import numpy as np\n'), ((3232, 3331), 'numpy.array', 'np.array', (['[[+2.5, +1.5], [+1.0, -0.5], [+3.5, +1.5], [+4.5, +0.5], [+5.5, +2.5], [+\n 1.0, +3.5]]'], {}), '([[+2.5, +1.5], [+1.0, -0.5], [+3.5, +1.5], [+4.5, +0.5], [+5.5, +\n 2.5], [+1.0, +3.5]])\n', (3240, 3331), True, 'import numpy as np\n'), ((3393, 3409), 'numpy.array', 'np.array', (['[6, 3]'], {}), '([6, 3])\n', (3401, 3409), True, 'import numpy as np\n'), ((3421, 3437), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (3429, 3437), True, 'import numpy as np\n'), ((3452, 3475), 'raytracing.gridmap', 'rt.gridmap', (['shape', 'size'], {}), '(shape, size)\n', (3462, 3475), True, 'import raytracing as rt\n'), ((3713, 3736), 'raytracing.gridmap', 'rt.gridmap', (['shape', 'size'], {}), '(shape, size)\n', (3723, 3736), True, 'import raytracing as rt\n'), ((3741, 3768), 'raytracing.trace2d', 'rt.trace2d', (['start', 'end', 'map'], {}), '(start, end, map)\n', (3751, 3768), True, 'import raytracing as rt\n'), ((3828, 3841), 'pytest.main', 'pytest.main', ([], {}), '()\n', (3839, 3841), False, 'import pytest\n')] |
import numpy as np
# Exponential fit: regress log(y) on x, so that y ~= exp(a[0]*x + a[1]).
x = np.array([75, 138, 679])
y = np.array([3.45, 2.7, 0.3])
a = np.polyfit(x, np.log(y),1)  # degree-1 fit in log space: [slope, intercept]
demand_erraticity = 140
print(a)
# Predicted y at x = demand_erraticity, rounded to the nearest integer.
print(round(np.exp(a[0] *demand_erraticity + a[1])))
print(isinstance(int(np.round(2.99)), int)) | [
"numpy.exp",
"numpy.array",
"numpy.log",
"numpy.round"
] | [((24, 48), 'numpy.array', 'np.array', (['[75, 138, 679]'], {}), '([75, 138, 679])\n', (32, 48), True, 'import numpy as np\n'), ((53, 79), 'numpy.array', 'np.array', (['[3.45, 2.7, 0.3]'], {}), '([3.45, 2.7, 0.3])\n', (61, 79), True, 'import numpy as np\n'), ((98, 107), 'numpy.log', 'np.log', (['y'], {}), '(y)\n', (104, 107), True, 'import numpy as np\n'), ((156, 195), 'numpy.exp', 'np.exp', (['(a[0] * demand_erraticity + a[1])'], {}), '(a[0] * demand_erraticity + a[1])\n', (162, 195), True, 'import numpy as np\n'), ((219, 233), 'numpy.round', 'np.round', (['(2.99)'], {}), '(2.99)\n', (227, 233), True, 'import numpy as np\n')] |
import gym
import itertools
import matplotlib
import numpy as np
import sys
import tensorflow as tf
import collections
from time import time
import os.path
from fourInARowWrapper import FourInARowWrapper
# Make the repository root importable when this file is run from its own dir.
if "../" not in sys.path:
    sys.path.append("../")
from lib import plotting

matplotlib.style.use('ggplot')
# Module-level environment shared by the training code below.
# NOTE(review): the argument 1 presumably selects the starting player —
# confirm against FourInARowWrapper.
env = FourInARowWrapper(1)
def invertBoard(inBoard):
    """Return a copy of the board with the two player channels swapped.

    Args:
        inBoard: numpy array whose last axis holds one plane per player
            (channel 0 and channel 1); presumably shape (7, 6, 2) here.

    Returns:
        A new array with channels 0 and 1 exchanged; the input is left
        untouched.
    """
    # Vectorized channel swap instead of the original per-cell Python loops.
    invertedBoard = np.array(inBoard)
    invertedBoard[..., 0] = inBoard[..., 1]
    invertedBoard[..., 1] = inBoard[..., 0]
    return invertedBoard
class ConvolutionalNetwork():
    """Shared feature network mapping a hot-encoded 7x6x2 board to a dense
    feature vector consumed by the policy and value heads.

    Despite the name, the live architecture is fully connected: the board
    is normalized, flattened, then passed through two leaky-ReLU dense
    layers with dropout on the output.  (An earlier conv/deconv variant
    was commented out; that dead code has been removed.)
    """
    def __init__(self, scope="conv_net"):
        with tf.variable_scope(scope):
            # Batch of hot-encoded boards: (batch, columns, rows, players).
            self.board = tf.placeholder(tf.float32, (None, 7, 6, 2), "board")
            # Fixed normalization/shift of the raw 0/1 board planes.
            self.board_norm = tf.nn.batch_normalization(x=self.board, mean=0, variance=1, offset=1, scale=1, variance_epsilon=1e-7)
            self.board_flat = tf.reshape(self.board_norm, (-1, 7*6*2))
            # First dense layer; the activation is applied separately below.
            self.board_and_out = tf.contrib.layers.fully_connected(
                inputs=self.board_flat,
                num_outputs=800,
                activation_fn=None,
                weights_initializer=tf.random_normal_initializer(mean=0.0, stddev=1),
                scope="layer_1"
            )
            self.board_and_out_relu = tf.nn.leaky_relu(features=self.board_and_out, alpha=0.1)
            self.outLayer_pre = tf.contrib.layers.fully_connected(
                inputs=self.board_and_out_relu,
                num_outputs=500,
                activation_fn=None,
                weights_initializer=tf.random_normal_initializer(mean=0.0, stddev=1),
                scope="outLayer"
            )
            self.outLayer_pre_relu = tf.nn.leaky_relu(features=self.outLayer_pre, alpha=0.1)
            # NOTE(review): keep_prob is a constant, so dropout is also
            # active at prediction time — confirm this is intended.
            self.outLayer = tf.contrib.layers.dropout(
                self.outLayer_pre_relu,
                keep_prob=0.9,
            )
class Trainer():
    """Joint optimizer for the actor (policy) and critic (value) heads.

    Builds a single weighted loss ``policyLossFactor * policy.loss +
    valueLossFactor * value.loss`` and minimizes it with Adam.
    """
    def __init__(self, scope="trainer", learning_rate=0.001, convNet=None, policy=None, policyLossFactor=0.1, value=None, valueLossFactor=0.1):
        with tf.variable_scope(scope):
            self.policy = policy
            self.value = value
            self.convNet = convNet
            self.policyLoss = policy.loss
            self.valueLoss = value.loss
            # Combined actor-critic objective minimized by one train op.
            self.loss = policyLossFactor * policy.loss + valueLossFactor * value.loss
            self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
            # NOTE(review): the RMSProp optimizer is built but never used.
            self.optimizerRMSProp = tf.train.RMSPropOptimizer(learning_rate=learning_rate)
            self.train_op = self.optimizer.minimize(
                self.loss, global_step=tf.contrib.framework.get_global_step())
    def update(self, board, td_target, td_error, action, avaliableColumns, sess=None):
        """Run one gradient step on a batch; returns (loss, policy_loss, value_loss)."""
        sess = sess or tf.get_default_session()
        feed_dict = {self.policy.target: td_error, self.policy.action: action,
                     self.policy.validColumnsFilter: avaliableColumns,
                     self.value.target: td_target, self.convNet.board: board}
        _, loss, pol_loss, value_loss = sess.run([self.train_op, self.loss, self.policyLoss, self.valueLoss], feed_dict)
        return loss, pol_loss, value_loss
    def evalFilters(self, board, sess=None):
        """Evaluate the deconvolution visualizations for a single board.

        NOTE(review): ``convNet.deconv1`` / ``convNet.deconv2_1`` are only
        defined in code that is commented out in ConvolutionalNetwork, so
        calling this would raise AttributeError — confirm before use.
        """
        sess = sess or tf.get_default_session()
        board_exp = np.expand_dims(board, axis=0)
        feed_dict = {self.convNet.board: board_exp}
        layer1, layer2 = sess.run([self.convNet.deconv1, self.convNet.deconv2_1], feed_dict)
        return layer1, layer2
class PolicyEstimator():
    """
    Policy Function approximator.

    Produces a categorical distribution over the 7 columns, masked by the
    currently playable columns, on top of the shared feature network.
    """
    def __init__(self, scope="policy_estimator", entropyFactor=0.1, shared_layers=None):
        # NOTE(review): uses the module-level global ``env`` below for the
        # action-space size, not a constructor argument.
        with tf.variable_scope(scope):
            self.shared_layers = shared_layers
            if shared_layers is not None:
                self.board = shared_layers.board
                self.input = shared_layers.outLayer
                #self.player = shared_layers.player
            else:
                print("Needs shared_layers parameter")
                # NOTE(review): returning a non-None value from __init__
                # raises TypeError in Python; this error path is broken.
                return -1
            # TD error fed in as the policy-gradient weight.
            self.target = tf.placeholder(dtype=tf.float32, name="target")
            # 0/1 mask of columns that are not full.
            self.validColumnsFilter = tf.placeholder(dtype=tf.float32, shape=(None, 7), name="validColumnsFilter")
            self.l1 = tf.contrib.layers.fully_connected(
                inputs=self.input,
                num_outputs=100,
                activation_fn=None,
                weights_initializer=tf.random_normal_initializer(mean=0.0, stddev=1),
                scope="l2"
            )
            self.l1 = tf.nn.leaky_relu(features=self.l1, alpha=0.1)
            # self.l1_dropout = tf.contrib.layers.dropout(
            #     self.l1,
            #     keep_prob=0.9,
            # )
            self.mu = tf.contrib.layers.fully_connected(
                inputs=self.l1,
                num_outputs=env.action_space.high-env.action_space.low,
                activation_fn=tf.nn.sigmoid,
                weights_initializer=tf.random_normal_initializer(mean=0.0, stddev=1),
                scope="mu")
            self.mu = tf.squeeze(self.mu)
            # Mask out full columns; the epsilon keeps probabilities nonzero
            # so the renormalization below never divides by zero.
            self.mu = tf.multiply(self.mu, self.validColumnsFilter) + 1e-6
            self.mu = tf.divide(self.mu, tf.reduce_sum(self.mu))
            self.dist = tf.contrib.distributions.Categorical(probs=self.mu, dtype=tf.float32)
            # Draw sample
            self.action = self.dist.sample()
            # Loss and train op
            self.loss = -self.dist.log_prob(self.action) * self.target
            # Add cross entropy cost to encourage exploration
            self.loss -= entropyFactor * self.dist.entropy()
    def predict(self, env, sess=None):
        """Sample an action for the current state; returns (action, probabilities).

        The board is mirrored (planes swapped) when player 2 is to move so
        the network always sees the position from player 1's perspective.
        """
        sess = sess or tf.get_default_session()
        player = np.expand_dims(env.state[0], axis=0)
        if player[0][0] == 1:
            board = np.expand_dims(env.state[1], axis=0)
        else:
            board = np.expand_dims(invertBoard(env.state[1]), axis=0)
        action, mu = sess.run([self.action, self.mu], {self.shared_layers.board: board, self.validColumnsFilter: np.expand_dims(env.getAvaliableColumns(), axis=0)})
        return action, mu
class ValueEstimator():
    """
    Value Function approximator.

    Scalar state-value head (the critic) on top of the shared feature
    network.
    """
    def __init__(self, scope="value_estimator", shared_layers=None):
        with tf.variable_scope(scope):
            self.shared_layers = shared_layers
            if shared_layers is not None:
                self.board = shared_layers.board
                self.input = shared_layers.outLayer
            else:
                print("Needs shared_layers parameter")
                # NOTE(review): returning a non-None value from __init__
                # raises TypeError in Python; this error path is broken.
                return -1
            #self.player = tf.placeholder(tf.float32, (None, 2), "player")
            # TD target for the squared-error critic loss.
            self.target = tf.placeholder(dtype=tf.float32, shape=(None, 1), name="target")
            self.l1 = tf.contrib.layers.fully_connected(
                inputs=self.input,
                num_outputs=100,
                activation_fn=None,
                weights_initializer=tf.random_normal_initializer(mean=0.0, stddev=1),
                scope="l2"
            )
            self.l1 = tf.nn.leaky_relu(features=self.l1, alpha=0.1)
            # self.l1_dropout = tf.contrib.layers.dropout(
            #     self.l1,
            #     keep_prob=0.7,
            # )
            # This is just linear classifier
            self.output_layer = tf.contrib.layers.fully_connected(
                inputs=self.l1,
                num_outputs=1,
                activation_fn=None,
                weights_initializer=tf.random_normal_initializer(mean=0.0, stddev=1),
                scope="output_layer")
            self.value_estimate = tf.squeeze(self.output_layer)
            self.loss = tf.squared_difference(self.value_estimate, self.target)
    def predict(self, state, sess=None):
        """Return the estimated state value, viewing the board from player 1."""
        sess = sess or tf.get_default_session()
        player = np.expand_dims(state[0], axis=0)
        if player[0][0] == 1:
            board = np.expand_dims(state[1], axis=0)
        else:
            # Swap planes so player 2 positions reuse the same network.
            board = np.expand_dims(invertBoard(state[1]), axis=0)
        #state = featurize_state(state)
        return sess.run(self.value_estimate, {self.shared_layers.board: board})
def actor_critic(env, estimator_policy_X, estimator_value_X, trainer_X, num_episodes, discount_factor=1.0, player2=True, positiveRewardFactor=1.0, negativeRewardFactor=1.0, batch_size=1):
    """
    Actor Critic Algorithm. Optimizes the policy
    function approximator using policy gradient.
    Args:
        env: OpenAI environment.
        estimator_policy_X: Policy Function to be optimized
        estimator_value_X: Value function approximator, used as a critic
        trainer_X: our training class
        num_episodes: Number of episodes to run for
        discount_factor: Time-discount factor
        player2: True if computer plays player2, False if user does
        positiveRewardFactor: Factor bla bla bla reward
        negativeRewardFactor: Factor bla bla bla
        batch_size: Batch size
    Returns:
        An EpisodeStats object with two numpy arrays for episode_lengths and episode_rewards.
    """
    # Keeps track of useful statistics
    stats = plotting.EpisodeStats(
        episode_lengths=np.zeros(num_episodes),
        episode_rewards=np.zeros(num_episodes),
        episode_td_error=np.zeros(num_episodes),
        episode_value_loss=np.zeros(num_episodes),
        episode_policy_loss=np.zeros(num_episodes),
        episode_kl_divergence=np.zeros(num_episodes))
    Transition = collections.namedtuple("Transition", ["state", "action", "reward", "next_state", "done"])
    # Pre-allocated minibatch buffers; flushed to the trainer once
    # batch_pos_X reaches batch_size.
    batch_board_X = np.zeros((batch_size, 7, 6, 2))
    batch_player_X = np.zeros((batch_size, 2))
    batch_td_target_X = np.zeros((batch_size, 1))
    batch_td_error_X =np.zeros((batch_size, 1))
    batch_action_X =np.zeros((batch_size, 1))
    batch_avaliableColumns_X = np.zeros((batch_size, 7))
    batch_pos_X = 0
    game = 1
    for i_episode in range(num_episodes):
        # Reset the environment and pick the first action
        # Alternate which player starts each episode.
        state = env.reset(i_episode % 2 + 1)
        # Opponent difficulty cycles through levels 1..4.
        robotLevel = i_episode%4 + 1
        episode = []
        probas = None
        last_turn = False
        done = False
        last_state = None
        action = None
        reward = None
        # if game % 5000 == 10:
        #     player2 = True
        # elif game % 5000 == 0:
        #     player2 = False
        # Hand the last few games over to a human opponent.
        if game == num_episodes-3:
            player2 = False
        # One step in the environment
        for t in itertools.count():
            # Save avaliable columns
            if not done:
                avaliableColumns = env.getAvaliableColumns()
                currentPlayerBeforeStep = env.getCurrentPlayer()
                action_tmp = action
            # Take a step
            if currentPlayerBeforeStep == 1 and not done or currentPlayerBeforeStep == 2 and player2 and not done:
                action, probas = estimator_policy_X.predict(env)
                action = action[0]
                probas = probas[0]
            elif not done:
                # Human input path (player 2 when player2 is False).
                try:
                    action = int(input("Give a column number: ")) - 1
                except ValueError:
                    print("Wrong input! Setting action to 1")
                    action = 0
                probas = None
            if currentPlayerBeforeStep == 2 and player2 and not done:
                next_state, reward, step_done, action = env.robotStep(robotLevel)
            elif not done:
                next_state, reward, step_done, _ = env.step(action)
            if not done:
                if game == num_episodes-3:
                    pass
                    #layer1, layer2 = trainer_X.evalFilters(next_state[1])
                    #plotting.plotNNFilter(next_state[1], layer1, layer2)
                if step_done:
                    pass
                # Reward is negated for the opponent's transition.
                if t > 0:
                    state_tmp = last_state
                    last_state = state
                    reward_tmp = -reward*negativeRewardFactor
                else:
                    state_tmp = state
                    last_state = state
                    reward_tmp = -reward*negativeRewardFactor
            elif done and not last_turn:
                # Terminal bonus credited to the second-to-last transition's state.
                state_tmp = episode[-2].next_state
                reward_tmp = reward*positiveRewardFactor
            else:
                break
            if t > 0:
                episode.append(Transition(
                    state=state_tmp, action=action_tmp, reward=reward_tmp, next_state=next_state, done=done))
                player = None
                if episode[-1].state[0][0] == 1:
                    player = "X"
                elif episode[-1].state[0][1] == 1:
                    player = "O"
                # Update statistics
                stats.episode_lengths[i_episode] = t
                # If player 0 (X)
                # NOTE(review): the "or True" makes this branch unconditional.
                if episode[-1].state[0][0] == 1 or True:
                    if episode[-1].state[0][0] == 1:
                        stats.episode_rewards[i_episode] += episode[-1].reward
                    # Calculate TD Target
                    value_next = estimator_value_X.predict(episode[-1].next_state)
                    td_target = episode[-1].reward + discount_factor * value_next
                    td_error = td_target - estimator_value_X.predict(episode[-1].state)
                    # Store boards from player 1's perspective.
                    if episode[-1].state[0][0] == 1:
                        batch_board_X[batch_pos_X] = episode[-1].state[1]
                    else:
                        batch_board_X[batch_pos_X] = invertBoard(episode[-1].state[1])
                    batch_player_X[batch_pos_X] = episode[-1].state[0]
                    batch_td_target_X[batch_pos_X] = td_target
                    batch_td_error_X[batch_pos_X] = td_error
                    batch_action_X[batch_pos_X] = episode[-1].action
                    batch_avaliableColumns_X[batch_pos_X] = avaliableColumns
                    batch_pos_X += 1
                # else:
                #     value_next = estimator_value_O.predict(episode[-1].next_state, )
                #     td_target = episode[-1].reward + discount_factor * value_next
                #     td_error = td_target - estimator_value_O.predict(episode[-1].state)
                #
                #     batch_player_O[batch_pos_O] = episode[-1].state[0]
                #     batch_board_O[batch_pos_O] = episode[-1].state[1]
                #     batch_td_target_O[batch_pos_O] = td_target
                #     batch_td_error_O[batch_pos_O] = td_error
                #     batch_action_O[batch_pos_O] = episode[-1].action
                #     batch_avaliableColumns_O[batch_pos_O] = avaliableColumns
                #
                #     batch_pos_O += 1
                stats.episode_td_error[i_episode] += td_error
                if batch_pos_X == batch_size:
                    # Update both networks
                    loss_X, policyLoss, valueLoss = trainer_X.update(batch_board_X, batch_td_target_X, batch_td_error_X, batch_action_X, batch_avaliableColumns_X)
                    loss_X = loss_X[0][0]
                    policyLoss = policyLoss[0][0]
                    valueLoss = valueLoss[0][0]
                    batch_pos_X = 0
                    print("Updates X network. Loss:", loss_X)
                    stats.episode_value_loss[i_episode] += valueLoss
                # if batch_pos_O == batch_size:
                #     # Update both networks
                #     loss_O = trainer_O.update(batch_board_O, batch_td_target_O, batch_td_error_O, batch_action_O,
                #                               batch_avaliableColumns_O)
                #     loss_O = loss_O[0][0]
                #     batch_pos_O = 0
                #
                #     print("Updates X network. Loss:", loss_O)
                #     stats.episode_value_loss[i_episode] += loss_O
                # KL divergence between consecutive policies, for monitoring.
                if probas is not None and last_probas is not None:
                    kl_div = 0
                    for i in range(probas.size):
                        kl_div += probas[i]*np.log(probas[i]/last_probas[i])
                    stats.episode_kl_divergence[i_episode] += kl_div
                # Print out which step we're on, useful for debugging.
                print(
                    "\rPlayer {}: Action {}, Reward {:<4}, TD Error {:<20}, TD Target {:<20}, Value Next {:<20}, at Step {:<5} @ Game {} @ Episode {}/{} ({})".format(
                        player, int(episode[-1].action + 1), episode[-1].reward, td_error, td_target, value_next, t,
                        game, i_episode + 1, num_episodes, stats.episode_rewards[i_episode - 1]), end="")
                if player == "X" and episode[-1].reward > 0 and robotLevel > 1:# or i_episode % 100 == 0:
                    for i in range(t):
                        print("Player:", batch_player_X[batch_pos_X-t+i], "Action:", int(batch_action_X[batch_pos_X-t+i])+1 )
                    print("Robot level:", robotLevel)
                    env.renderHotEncodedState( ((1, 0), batch_board_X[batch_pos_X-1]) )
                if game == num_episodes or env.getCurrentPlayer() == 2 and not player2:
                    env.render()
                    if probas is not None:
                        out = "  "
                        for i in range(probas.size):
                            out += "%03d " % int(probas[i]*100+0.5)
                        print(out)
                last_probas = probas
            if done:
                last_turn = True
                game += 1
            if step_done:
                done = True
            state = next_state
    return stats
# --- Build the graph, restore/initialize parameters, train, and save. ---
tf.reset_default_graph()
start = time()
batch_size = 2000
global_step = tf.Variable(0, name="global_step", trainable=False)
# Shared feature network plus actor/critic heads and their joint trainer.
conv_net_X = ConvolutionalNetwork("X_convNet")
policy_estimator_X = PolicyEstimator("X_policy", entropyFactor=1e-5, shared_layers=conv_net_X)
value_estimator_X = ValueEstimator("X_value", shared_layers=conv_net_X)
trainer_X = Trainer("X_trainer", learning_rate=1e-3, convNet=conv_net_X, policy=policy_estimator_X, policyLossFactor=1, value=value_estimator_X, valueLossFactor=1e-2)
variables = tf.contrib.slim.get_variables_to_restore()
# Split checkpoint handling by top-level scope name.
# NOTE(review): variables_to_restore is only printed; the saver below
# restores/saves *all* variables — confirm this is intended.
variables_to_restore = [v for v in variables if v.name.split('/')[0]!='trainer' and v.name.split('/')[0]!='policy_estimator' and v.name.split('/')[0]!='value_estimator']
variables_to_init = [v for v in variables if v.name.split('/')[0]!='conv_net']
for v in variables_to_restore:
    print(v)
saver = tf.train.Saver(variables)
with tf.Session() as sess:
    try:
        saver.restore(sess, "tmp/model10.ckpt")
        sess.run(tf.initializers.variables(variables_to_init))
        print("Restoring parameters")
    except ValueError:
        # No usable checkpoint: start from scratch.
        sess.run(tf.initializers.global_variables())
        print("Initializing parameters")
    stats = actor_critic(env, policy_estimator_X, value_estimator_X, trainer_X, 10000, discount_factor=0.99, player2=True, positiveRewardFactor=1, negativeRewardFactor=1, batch_size=batch_size)
    #filters = sess.run(conv_net_X.filter1)
    save_path = saver.save(sess, "tmp/model10.ckpt")
    print("Saving parameters")
end = time()
# NOTE(review): message says "5.000 games" but 10000 episodes are run above.
print("It took:", end-start, "seconds to do 5.000 games")
plotting.plot_episode_stats(stats, smoothing_window=100)
| [
"tensorflow.contrib.framework.get_global_step",
"tensorflow.reduce_sum",
"numpy.log",
"tensorflow.get_default_session",
"tensorflow.multiply",
"numpy.array",
"matplotlib.style.use",
"sys.path.append",
"tensorflow.squared_difference",
"tensorflow.Session",
"tensorflow.placeholder",
"tensorflow.... | [((303, 333), 'matplotlib.style.use', 'matplotlib.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (323, 333), False, 'import matplotlib\n'), ((343, 363), 'fourInARowWrapper.FourInARowWrapper', 'FourInARowWrapper', (['(1)'], {}), '(1)\n', (360, 363), False, 'from fourInARowWrapper import FourInARowWrapper\n'), ((21519, 21543), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (21541, 21543), True, 'import tensorflow as tf\n'), ((21555, 21561), 'time.time', 'time', ([], {}), '()\n', (21559, 21561), False, 'from time import time\n'), ((21600, 21651), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'name': '"""global_step"""', 'trainable': '(False)'}), "(0, name='global_step', trainable=False)\n", (21611, 21651), True, 'import tensorflow as tf\n'), ((22052, 22094), 'tensorflow.contrib.slim.get_variables_to_restore', 'tf.contrib.slim.get_variables_to_restore', ([], {}), '()\n', (22092, 22094), True, 'import tensorflow as tf\n'), ((22405, 22430), 'tensorflow.train.Saver', 'tf.train.Saver', (['variables'], {}), '(variables)\n', (22419, 22430), True, 'import tensorflow as tf\n'), ((23084, 23090), 'time.time', 'time', ([], {}), '()\n', (23088, 23090), False, 'from time import time\n'), ((23155, 23211), 'lib.plotting.plot_episode_stats', 'plotting.plot_episode_stats', (['stats'], {'smoothing_window': '(100)'}), '(stats, smoothing_window=100)\n', (23182, 23211), False, 'from lib import plotting\n'), ((249, 271), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (264, 271), False, 'import sys\n'), ((414, 431), 'numpy.array', 'np.array', (['inBoard'], {}), '(inBoard)\n', (422, 431), True, 'import numpy as np\n'), ((13144, 13237), 'collections.namedtuple', 'collections.namedtuple', (['"""Transition"""', "['state', 'action', 'reward', 'next_state', 'done']"], {}), "('Transition', ['state', 'action', 'reward',\n 'next_state', 'done'])\n", (13166, 13237), False, 'import collections\n'), ((13257, 13288), 
'numpy.zeros', 'np.zeros', (['(batch_size, 7, 6, 2)'], {}), '((batch_size, 7, 6, 2))\n', (13265, 13288), True, 'import numpy as np\n'), ((13311, 13336), 'numpy.zeros', 'np.zeros', (['(batch_size, 2)'], {}), '((batch_size, 2))\n', (13319, 13336), True, 'import numpy as np\n'), ((13362, 13387), 'numpy.zeros', 'np.zeros', (['(batch_size, 1)'], {}), '((batch_size, 1))\n', (13370, 13387), True, 'import numpy as np\n'), ((13411, 13436), 'numpy.zeros', 'np.zeros', (['(batch_size, 1)'], {}), '((batch_size, 1))\n', (13419, 13436), True, 'import numpy as np\n'), ((13458, 13483), 'numpy.zeros', 'np.zeros', (['(batch_size, 1)'], {}), '((batch_size, 1))\n', (13466, 13483), True, 'import numpy as np\n'), ((13516, 13541), 'numpy.zeros', 'np.zeros', (['(batch_size, 7)'], {}), '((batch_size, 7))\n', (13524, 13541), True, 'import numpy as np\n'), ((22439, 22451), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (22449, 22451), True, 'import tensorflow as tf\n'), ((6765, 6794), 'numpy.expand_dims', 'np.expand_dims', (['board'], {'axis': '(0)'}), '(board, axis=0)\n', (6779, 6794), True, 'import numpy as np\n'), ((9281, 9317), 'numpy.expand_dims', 'np.expand_dims', (['env.state[0]'], {'axis': '(0)'}), '(env.state[0], axis=0)\n', (9295, 9317), True, 'import numpy as np\n'), ((11463, 11495), 'numpy.expand_dims', 'np.expand_dims', (['state[0]'], {'axis': '(0)'}), '(state[0], axis=0)\n', (11477, 11495), True, 'import numpy as np\n'), ((14198, 14215), 'itertools.count', 'itertools.count', ([], {}), '()\n', (14213, 14215), False, 'import itertools\n'), ((812, 836), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), '(scope)\n', (829, 836), True, 'import tensorflow as tf\n'), ((864, 916), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(None, 7, 6, 2)', '"""board"""'], {}), "(tf.float32, (None, 7, 6, 2), 'board')\n", (878, 916), True, 'import tensorflow as tf\n'), ((1211, 1318), 'tensorflow.nn.batch_normalization', 'tf.nn.batch_normalization', ([], {'x': 
'self.board', 'mean': '(0)', 'variance': '(1)', 'offset': '(1)', 'scale': '(1)', 'variance_epsilon': '(1e-07)'}), '(x=self.board, mean=0, variance=1, offset=1, scale\n =1, variance_epsilon=1e-07)\n', (1236, 1318), True, 'import tensorflow as tf\n'), ((1344, 1388), 'tensorflow.reshape', 'tf.reshape', (['self.board_norm', '(-1, 7 * 6 * 2)'], {}), '(self.board_norm, (-1, 7 * 6 * 2))\n', (1354, 1388), True, 'import tensorflow as tf\n'), ((4694, 4750), 'tensorflow.nn.leaky_relu', 'tf.nn.leaky_relu', ([], {'features': 'self.board_and_out', 'alpha': '(0.1)'}), '(features=self.board_and_out, alpha=0.1)\n', (4710, 4750), True, 'import tensorflow as tf\n'), ((5117, 5172), 'tensorflow.nn.leaky_relu', 'tf.nn.leaky_relu', ([], {'features': 'self.outLayer_pre', 'alpha': '(0.1)'}), '(features=self.outLayer_pre, alpha=0.1)\n', (5133, 5172), True, 'import tensorflow as tf\n'), ((5204, 5268), 'tensorflow.contrib.layers.dropout', 'tf.contrib.layers.dropout', (['self.outLayer_pre_relu'], {'keep_prob': '(0.9)'}), '(self.outLayer_pre_relu, keep_prob=0.9)\n', (5229, 5268), True, 'import tensorflow as tf\n'), ((5500, 5524), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), '(scope)\n', (5517, 5524), True, 'import tensorflow as tf\n'), ((5831, 5882), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (5853, 5882), True, 'import tensorflow as tf\n'), ((5920, 5974), 'tensorflow.train.RMSPropOptimizer', 'tf.train.RMSPropOptimizer', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (5945, 5974), True, 'import tensorflow as tf\n'), ((6223, 6247), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (6245, 6247), True, 'import tensorflow as tf\n'), ((6717, 6741), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (6739, 6741), True, 'import tensorflow as tf\n'), ((7166, 7190), 'tensorflow.variable_scope', 
'tf.variable_scope', (['scope'], {}), '(scope)\n', (7183, 7190), True, 'import tensorflow as tf\n'), ((7570, 7617), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'name': '"""target"""'}), "(dtype=tf.float32, name='target')\n", (7584, 7617), True, 'import tensorflow as tf\n'), ((7657, 7733), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '(None, 7)', 'name': '"""validColumnsFilter"""'}), "(dtype=tf.float32, shape=(None, 7), name='validColumnsFilter')\n", (7671, 7733), True, 'import tensorflow as tf\n'), ((8056, 8101), 'tensorflow.nn.leaky_relu', 'tf.nn.leaky_relu', ([], {'features': 'self.l1', 'alpha': '(0.1)'}), '(features=self.l1, alpha=0.1)\n', (8072, 8101), True, 'import tensorflow as tf\n'), ((8596, 8615), 'tensorflow.squeeze', 'tf.squeeze', (['self.mu'], {}), '(self.mu)\n', (8606, 8615), True, 'import tensorflow as tf\n'), ((8789, 8858), 'tensorflow.contrib.distributions.Categorical', 'tf.contrib.distributions.Categorical', ([], {'probs': 'self.mu', 'dtype': 'tf.float32'}), '(probs=self.mu, dtype=tf.float32)\n', (8825, 8858), True, 'import tensorflow as tf\n'), ((9236, 9260), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (9258, 9260), True, 'import tensorflow as tf\n'), ((9372, 9408), 'numpy.expand_dims', 'np.expand_dims', (['env.state[1]'], {'axis': '(0)'}), '(env.state[1], axis=0)\n', (9386, 9408), True, 'import numpy as np\n'), ((9857, 9881), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), '(scope)\n', (9874, 9881), True, 'import tensorflow as tf\n'), ((10284, 10348), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '(None, 1)', 'name': '"""target"""'}), "(dtype=tf.float32, shape=(None, 1), name='target')\n", (10298, 10348), True, 'import tensorflow as tf\n'), ((10671, 10716), 'tensorflow.nn.leaky_relu', 'tf.nn.leaky_relu', ([], {'features': 'self.l1', 'alpha': '(0.1)'}), '(features=self.l1, alpha=0.1)\n', (10687, 
10716), True, 'import tensorflow as tf\n'), ((11239, 11268), 'tensorflow.squeeze', 'tf.squeeze', (['self.output_layer'], {}), '(self.output_layer)\n', (11249, 11268), True, 'import tensorflow as tf\n'), ((11294, 11349), 'tensorflow.squared_difference', 'tf.squared_difference', (['self.value_estimate', 'self.target'], {}), '(self.value_estimate, self.target)\n', (11315, 11349), True, 'import tensorflow as tf\n'), ((11420, 11444), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (11442, 11444), True, 'import tensorflow as tf\n'), ((11550, 11582), 'numpy.expand_dims', 'np.expand_dims', (['state[1]'], {'axis': '(0)'}), '(state[1], axis=0)\n', (11564, 11582), True, 'import numpy as np\n'), ((12841, 12863), 'numpy.zeros', 'np.zeros', (['num_episodes'], {}), '(num_episodes)\n', (12849, 12863), True, 'import numpy as np\n'), ((12890, 12912), 'numpy.zeros', 'np.zeros', (['num_episodes'], {}), '(num_episodes)\n', (12898, 12912), True, 'import numpy as np\n'), ((12940, 12962), 'numpy.zeros', 'np.zeros', (['num_episodes'], {}), '(num_episodes)\n', (12948, 12962), True, 'import numpy as np\n'), ((12992, 13014), 'numpy.zeros', 'np.zeros', (['num_episodes'], {}), '(num_episodes)\n', (13000, 13014), True, 'import numpy as np\n'), ((13045, 13067), 'numpy.zeros', 'np.zeros', (['num_episodes'], {}), '(num_episodes)\n', (13053, 13067), True, 'import numpy as np\n'), ((13100, 13122), 'numpy.zeros', 'np.zeros', (['num_episodes'], {}), '(num_episodes)\n', (13108, 13122), True, 'import numpy as np\n'), ((22538, 22582), 'tensorflow.initializers.variables', 'tf.initializers.variables', (['variables_to_init'], {}), '(variables_to_init)\n', (22563, 22582), True, 'import tensorflow as tf\n'), ((8641, 8686), 'tensorflow.multiply', 'tf.multiply', (['self.mu', 'self.validColumnsFilter'], {}), '(self.mu, self.validColumnsFilter)\n', (8652, 8686), True, 'import tensorflow as tf\n'), ((8738, 8760), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self.mu'], {}), 
'(self.mu)\n', (8751, 8760), True, 'import tensorflow as tf\n'), ((22665, 22699), 'tensorflow.initializers.global_variables', 'tf.initializers.global_variables', ([], {}), '()\n', (22697, 22699), True, 'import tensorflow as tf\n'), ((4555, 4603), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'mean': '(0.0)', 'stddev': '(1)'}), '(mean=0.0, stddev=1)\n', (4583, 4603), True, 'import tensorflow as tf\n'), ((4978, 5026), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'mean': '(0.0)', 'stddev': '(1)'}), '(mean=0.0, stddev=1)\n', (5006, 5026), True, 'import tensorflow as tf\n'), ((6069, 6107), 'tensorflow.contrib.framework.get_global_step', 'tf.contrib.framework.get_global_step', ([], {}), '()\n', (6105, 6107), True, 'import tensorflow as tf\n'), ((7938, 7986), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'mean': '(0.0)', 'stddev': '(1)'}), '(mean=0.0, stddev=1)\n', (7966, 7986), True, 'import tensorflow as tf\n'), ((8492, 8540), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'mean': '(0.0)', 'stddev': '(1)'}), '(mean=0.0, stddev=1)\n', (8520, 8540), True, 'import tensorflow as tf\n'), ((10553, 10601), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'mean': '(0.0)', 'stddev': '(1)'}), '(mean=0.0, stddev=1)\n', (10581, 10601), True, 'import tensorflow as tf\n'), ((11113, 11161), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'mean': '(0.0)', 'stddev': '(1)'}), '(mean=0.0, stddev=1)\n', (11141, 11161), True, 'import tensorflow as tf\n'), ((19928, 19962), 'numpy.log', 'np.log', (['(probas[i] / last_probas[i])'], {}), '(probas[i] / last_probas[i])\n', (19934, 19962), True, 'import numpy as np\n')] |
import numpy
import csb.test as test
from csb.numeric import log
from csb.statistics.pdf.parameterized import ParameterizedDensity
from csb.statistics.pdf.parameterized import ParameterValueError, ParameterizationError
from csb.statistics.pdf.parameterized import AbstractParameter, Parameter, NonVirtualParameter
class Location(NonVirtualParameter):
    """Location (mean) parameter; non-virtual, so it is always set directly."""
    def _validate(self, value):
        # Accept anything float() can convert; conversion errors propagate.
        return float(value)
class Scale(Parameter):
    """Standard deviation, derived from a bound "precision" parameter as
    sigma = 1 / sqrt(precision)."""

    def _validate(self, value):
        # Any float-convertible value is accepted as-is.
        return float(value)

    def _compute(self, base_value):
        # A precision of zero corresponds to an infinite standard deviation.
        if base_value == 0.0:
            return numpy.inf
        return 1.0 / base_value ** 0.5

    def bind_to(self, base):
        # Guard: only a parameter named "precision" is a valid base.
        if base.name != "precision":
            raise ValueError(base)
        super(Scale, self).bind_to(base)
class DoubleScale(Parameter):
    """Derived parameter whose value is exactly twice its base's value."""

    def _validate(self, value):
        # Coerce to float; invalid input raises from float() itself.
        return float(value)

    def _compute(self, base_value):
        # Doubling is an exact operation in binary floating point.
        return 2.0 * base_value
class Precision(Parameter):
    """Non-negative precision (inverse variance) parameter."""
    def _validate(self, value):
        # Reject negative precisions before conversion; zero is allowed
        # (it maps to an infinite Scale).
        if value < 0:
            raise ParameterValueError(self.name, value)
        return float(value)
class FancyGaussian(ParameterizedDensity):
    """Gaussian density parameterized by mean and precision, where the
    sigma parameter is virtual: it is bound to (computed from) precision."""
    def __init__(self, mu=0, precision=1):
        super(FancyGaussian, self).__init__()
        self._register('mu')
        self._register('sigma')
        self._register('precision')
        loc = Location(mu)
        prec = Precision(precision)
        # sigma's initial value (0) is irrelevant: binding makes it a
        # virtual parameter recomputed from prec.
        sigma = Scale(0)
        sigma.bind_to(prec)
        self.set_params(mu=loc, sigma=sigma, precision=prec)
    @property
    def mu(self):
        # Current mean value.
        return self['mu'].value
    @property
    def sigma(self):
        # Standard deviation, derived from precision via the bound Scale.
        return self['sigma'].value
    @property
    def precision(self):
        # Inverse variance.
        return self['precision'].value
    def log_prob(self, x):
        """Return the log density of the normal distribution at x."""
        mu = self.mu
        sigma = self.sigma
        return log(1.0 / numpy.sqrt(2 * numpy.pi * sigma ** 2)) - (x - mu) ** 2 / (2 * sigma ** 2)
@test.unit
class TestAbstractGenericParameter(test.Case):
    """
    Use AbstractParameter as a generic class which accepts values
    of any type.
    """
    def setUp(self):
        # A marker type with no behavior, used as the only accepted value type.
        class Value(object):
            pass
        # Parameter restricted to Value instances; anything else is a TypeError.
        class Param(AbstractParameter):
            def _validate(self, value):
                if not isinstance(value, Value):
                    raise TypeError(value)
                return value
        self.value = Value()
        self.param = Param(self.value)
    def testValue(self):
        # The exact object passed in is stored (identity, not equality).
        self.assertIs(self.param.value, self.value)
    def testSet(self):
        # Non-Value inputs are rejected by _validate.
        self.assertRaises(TypeError, self.param.set, 3)
@test.unit
class TestParameter(test.Case):
"""
This is the main test case with complete coverage for AbstractParameter's
methods and behavior. Covers also Parameter.
computed -- leaf
/
base -- computed2
\
computed3
"""
    def setUp(self):
        # Build the dependency graph from the class docstring diagram:
        # base feeds computed/computed2/computed3; leaf derives from computed.
        self.base = Precision(1.2)
        self.computed = Scale(100, base=self.base)
        self.computed2 = Scale(200, base=self.base)
        self.computed3 = Scale(300, base=self.base)
        self.leaf = DoubleScale(400, base=self.computed)
    # NOTE(review): method name has a typo ("Construcor"); kept as-is since
    # unittest discovers it by its "test" prefix regardless.
    def testConstrucor(self):
        # make sure newly constructed parameters are left in a consistent state
        # to avoid unnecessary consistency updates
        self.assertTrue(self.base._consistent)
        self.assertTrue(Scale(1)._consistent)
    def testName(self):
        # Default names come from the Parameter subclasses; an explicit
        # name argument overrides them.
        self.assertEqual(self.base.name, "precision")
        self.assertEqual(self.computed.name, "scale")
        self.assertEqual(Scale(name="TesT").name, "TesT")
    def testValue(self):
        """Derived values follow sigma = 1/sqrt(precision); leaf doubles it."""
        self.assertEqual(self.base.value, 1.2)
        self.assertEqual(self.computed.value, 1.0 / numpy.sqrt(self.base.value))
        self.assertEqual(self.computed2.value, 1.0 / numpy.sqrt(self.base.value))
        self.assertEqual(self.leaf.value, self.computed.value * 2)
        # turn self.base into a virtual parameter
        self.base.bind_to(Precision(12.2))
        self.assertEqual(self.base.value, 12.2)
    def testIsVirtual(self):
        # A parameter is virtual exactly when it is bound to a base.
        self.assertFalse(self.base.is_virtual)
        self.assertTrue(self.computed.is_virtual)
        self.base.bind_to(Precision(12.2))
        self.assertTrue(self.base.is_virtual)
def testSet(self):
base_initial_value = self.base._value
# recompute all derivatives from the initial value of base
self.assertEqual(self.computed._value, 100)
self.leaf._ensure_consistency()
self.computed2._ensure_consistency()
self.computed3._ensure_consistency()
# set self.base - it should remain consistent because it is not computed
self.assertTrue(self.base._consistent)
self.base.set(2.2)
self.assertTrue(self.base._consistent)
self.assertEqual(self.base.value, 2.2)
# self.computed and self.leaf should be inconsistent now that their base is updated
self.assertFalse(self.computed._consistent)
self.assertFalse(self.leaf._consistent)
self.assertEqual(self.computed._value, 1.0 / numpy.sqrt(base_initial_value))
self.assertEqual(self.leaf._value, 2.0 / numpy.sqrt(base_initial_value))
# retrieving self.computed's value should trigger updates up to self.computed
recomputed = self.computed.value
self.assertTrue(self.computed._consistent)
self.assertEqual(recomputed, 1.0 / numpy.sqrt(self.base._value))
# self.leaf is still inconsistent
self.assertFalse(self.leaf._consistent)
self.assertEqual(self.leaf._value, 2.0 / numpy.sqrt(base_initial_value))
self.assertIs(self.leaf._nearest_consistent_base()[-1], self.computed)
# until we request its value
recomputed = self.leaf.value
self.assertTrue(self.leaf._consistent)
self.assertEqual(recomputed, 2.0 / numpy.sqrt(self.base._value))
self.assertEqual(recomputed, 2.0 * self.computed._value)
# make sure the other two branches are still inconsistent
initial_value = 1.0 / numpy.sqrt(base_initial_value)
self.assertEqual(self.computed2._value, initial_value)
self.assertEqual(self.computed3._value, initial_value)
# until they get used
recomputed = self.computed2.value
self.assertTrue(self.computed2._consistent)
self.assertEqual(recomputed, 1.0 / numpy.sqrt(self.base._value))
# attempt to set self.computed - not allowed
self.assertRaises(ParameterizationError, self.computed.set, 2)
# attempt to set a negative Precision
self.assertRaises(ParameterValueError, self.base.set, -2)
# attempt to assigned non-float - not allowed in the Parameter specialization
self.assertRaises(ParameterValueError, Parameter().set, object())
def testBindTo(self):
# can't bind self.base to itself
self.assertRaises(ParameterizationError, self.base.bind_to, self.base)
# deeper circular dependency
self.assertRaises(ParameterizationError, self.base.bind_to, self.computed)
# self.base is not virtual and therefore must be consistent
self.assertTrue(self.base._consistent)
# make it virtual - should get inconsistent now
self.base.bind_to(Precision(12.2))
self.assertFalse(self.base._consistent)
self.assertTrue(self.base.is_virtual)
# retrieving its value should trigger the consistency cascade
self.assertEqual(self.base.value, 12.2)
self.assertTrue(self.base._consistent)
def testFindBaseParameter(self):
self.assertIs(self.base.find_base_parameter(), self.base)
self.assertIs(self.computed.find_base_parameter(), self.base)
@test.unit
class TestNonVirtualParameter(test.Case):
"""
Make sure explicit NonVirtualParameter-s are updatable and
refuse binding requests
"""
def setUp(self):
self.param = Location()
def testConstructor(self):
base = Parameter()
self.assertRaises(ParameterizationError, lambda: Location(base=base))
def testIsVirtual(self):
self.assertFalse(self.param.is_virtual)
def testBindTo(self):
base = Parameter()
self.assertRaises(ParameterizationError, self.param.bind_to, base)
def testSet(self):
self.param.set(22)
self.assertEqual(self.param.value, 22)
@test.unit
class TestParameterizedDensity(test.Case):
def setUp(self):
self.pdf = FancyGaussian(2, 5)
def testConstructor(self):
class Density(ParameterizedDensity):
def __init__(self, p):
super(Density, self).__init__()
self._register('p')
self.set_params(p=p)
def log_prob(self, x):
return x
self.assertRaises(TypeError, Density, 4)
def testProperties(self):
self.assertEqual(self.pdf.mu, 2)
self.assertEqual(self.pdf.precision, 5)
self.assertAlmostEqual(self.pdf.sigma, 0.4472, places=3)
def testParameterChaining(self):
self.assertEqual(self.pdf.precision, 5)
self.assertAlmostEqual(self.pdf.sigma, 0.4472, places=3)
self.pdf['precision'].set(2)
self.assertEqual(self.pdf.precision, 2)
self.assertAlmostEqual(self.pdf.sigma, 0.7071, places=3)
def testAssignment(self):
self.pdf['sigma'] = Scale(55)
self.assertEqual(self.pdf.sigma, 55)
self.assertEqual(self.pdf['sigma'].name, 'scale')
def assign(i):
self.pdf['sigma'] = i
self.assertRaises(TypeError, assign, 55)
if __name__ == '__main__':
test.Console()
| [
"csb.statistics.pdf.parameterized.ParameterValueError",
"csb.statistics.pdf.parameterized.Parameter",
"numpy.sqrt",
"csb.test.Console"
] | [((10393, 10407), 'csb.test.Console', 'test.Console', ([], {}), '()\n', (10405, 10407), True, 'import csb.test as test\n'), ((8545, 8556), 'csb.statistics.pdf.parameterized.Parameter', 'Parameter', ([], {}), '()\n', (8554, 8556), False, 'from csb.statistics.pdf.parameterized import AbstractParameter, Parameter, NonVirtualParameter\n'), ((8771, 8782), 'csb.statistics.pdf.parameterized.Parameter', 'Parameter', ([], {}), '()\n', (8780, 8782), False, 'from csb.statistics.pdf.parameterized import AbstractParameter, Parameter, NonVirtualParameter\n'), ((1165, 1202), 'csb.statistics.pdf.parameterized.ParameterValueError', 'ParameterValueError', (['self.name', 'value'], {}), '(self.name, value)\n', (1184, 1202), False, 'from csb.statistics.pdf.parameterized import ParameterValueError, ParameterizationError\n'), ((6482, 6512), 'numpy.sqrt', 'numpy.sqrt', (['base_initial_value'], {}), '(base_initial_value)\n', (6492, 6512), False, 'import numpy\n'), ((4060, 4087), 'numpy.sqrt', 'numpy.sqrt', (['self.base.value'], {}), '(self.base.value)\n', (4070, 4087), False, 'import numpy\n'), ((4142, 4169), 'numpy.sqrt', 'numpy.sqrt', (['self.base.value'], {}), '(self.base.value)\n', (4152, 4169), False, 'import numpy\n'), ((5506, 5536), 'numpy.sqrt', 'numpy.sqrt', (['base_initial_value'], {}), '(base_initial_value)\n', (5516, 5536), False, 'import numpy\n'), ((5587, 5617), 'numpy.sqrt', 'numpy.sqrt', (['base_initial_value'], {}), '(base_initial_value)\n', (5597, 5617), False, 'import numpy\n'), ((5840, 5868), 'numpy.sqrt', 'numpy.sqrt', (['self.base._value'], {}), '(self.base._value)\n', (5850, 5868), False, 'import numpy\n'), ((6009, 6039), 'numpy.sqrt', 'numpy.sqrt', (['base_initial_value'], {}), '(base_initial_value)\n', (6019, 6039), False, 'import numpy\n'), ((6291, 6319), 'numpy.sqrt', 'numpy.sqrt', (['self.base._value'], {}), '(self.base._value)\n', (6301, 6319), False, 'import numpy\n'), ((6806, 6834), 'numpy.sqrt', 'numpy.sqrt', (['self.base._value'], {}), 
'(self.base._value)\n', (6816, 6834), False, 'import numpy\n'), ((7246, 7257), 'csb.statistics.pdf.parameterized.Parameter', 'Parameter', ([], {}), '()\n', (7255, 7257), False, 'from csb.statistics.pdf.parameterized import AbstractParameter, Parameter, NonVirtualParameter\n'), ((2061, 2098), 'numpy.sqrt', 'numpy.sqrt', (['(2 * numpy.pi * sigma ** 2)'], {}), '(2 * numpy.pi * sigma ** 2)\n', (2071, 2098), False, 'import numpy\n')] |
import numpy as np
import random
from collections import namedtuple, deque
from DQNmodel import QNetwork
import torch
import torch.nn.functional as F
import torch.optim as optim
BUFFER_SIZE = int(1e5) # replay buffer size
BATCH_SIZE = 64 # minibatch size
GAMMA = 0.99 # discount factor
TAU = 1e-3 # for soft update of target parameters
LR = 5e-4 # learning rate
UPDATE_EVERY = 4 # how often to update the network
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Agent():
"""Interacts with and learns from the environment."""
def __init__(self, state_size, action_size, seed):
"""Initialize an Agent object.
Params
======
state_size (int): dimension of each state
action_size (int): dimension of each action
seed (int): random seed
"""
self.state_size = state_size
self.action_size = action_size
self.seed = random.seed(seed)
# Q-Network
self.qnetwork1 = QNetwork(state_size, action_size, seed).to(device)
self.qnetwork2 = QNetwork(state_size, action_size, seed).to(device)
self.optimizer1 = optim.Adam(self.qnetwork1.parameters(), lr=LR)
self.optimizer2 = optim.Adam(self.qnetwork2.parameters(), lr=LR)
# Replay memory
self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, seed)
# Initialize time step (for updating every UPDATE_EVERY steps)
self.t_step = 0
def step(self, state, action, reward, next_state, done):
# Save experience in replay memory
self.memory.add(state, action, reward, next_state, done)
# Learn every UPDATE_EVERY time steps.
self.t_step = (self.t_step + 1) % UPDATE_EVERY
if self.t_step == 0:
# If enough samples are available in memory, get random subset and learn
if len(self.memory) > BATCH_SIZE:
experiences = self.memory.sample()
self.learn(experiences, GAMMA)
def act(self, state, eps=0.):
"""Returns actions for given state as per current policy.
Params
======
state (array_like): current state
eps (float): epsilon, for epsilon-greedy action selection
"""
state = torch.from_numpy(state).float().unsqueeze(0).to(device)
self.qnetwork1.eval()
with torch.no_grad():
action_values = self.qnetwork1(state)
self.qnetwork1.train()
# Epsilon-greedy action selection
if random.random() > eps:
return np.argmax(action_values.cpu().data.numpy())
else:
return random.choice(np.arange(self.action_size))
def learn(self, experiences, gamma):
"""Update value parameters using given batch of experience tuples.
Params
======
experiences (Tuple[torch.Variable]): tuple of (s, a, r, s', done) tuples
gamma (float): discount factor
"""
states, actions, rewards, next_states, dones = experiences
## TODO: compute and minimize the loss
"*** YOUR CODE HERE ***"
Q1_curr = self.qnetwork1(next_states).gather(1, actions)
Q1_next = self.qnetwork1(next_states).detach().max(1)[0]
Q2_curr = self.qnetwork2(next_states).gather(1, actions)
Q2_next = self.qnetwork2(next_states).detach().max(1)[0]
Q_next = torch.min(Q1_next, Q2_next).unsqueeze(1)
Q_expected = rewards + (1 - dones) * gamma * Q_next
loss1 = F.mse_loss(Q_expected, Q1_curr)
loss2 = F.mse_loss(Q_expected, Q2_curr)
# Minimize the loss
self.optimizer1.zero_grad()
loss1.backward()
self.optimizer1.step()
self.optimizer2.zero_grad()
loss2.backward()
self.optimizer2.step()
class ReplayBuffer:
"""Fixed-size buffer to store experience tuples."""
def __init__(self, action_size, buffer_size, batch_size, seed):
"""Initialize a ReplayBuffer object.
Params
======
action_size (int): dimension of each action
buffer_size (int): maximum size of buffer
batch_size (int): size of each training batch
seed (int): random seed
"""
self.action_size = action_size
self.memory = deque(maxlen=buffer_size)
self.batch_size = batch_size
self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])
self.seed = random.seed(seed)
def add(self, state, action, reward, next_state, done):
"""Add a new experience to memory."""
e = self.experience(state, action, reward, next_state, done)
self.memory.append(e)
def sample(self):
"""Randomly sample a batch of experiences from memory."""
experiences = random.sample(self.memory, k=self.batch_size)
states = torch.from_numpy(np.vstack([e.state for e in experiences if e is not None])).float().to(device)
actions = torch.from_numpy(np.vstack([e.action for e in experiences if e is not None])).long().to(device)
rewards = torch.from_numpy(np.vstack([e.reward for e in experiences if e is not None])).float().to(device)
next_states = torch.from_numpy(np.vstack([e.next_state for e in experiences if e is not None])).float().to(device)
dones = torch.from_numpy(np.vstack([e.done for e in experiences if e is not None]).astype(np.uint8)).float().to(device)
return (states, actions, rewards, next_states, dones)
def __len__(self):
"""Return the current size of internal memory."""
return len(self.memory) | [
"random.sample",
"torch.nn.functional.mse_loss",
"collections.deque",
"collections.namedtuple",
"DQNmodel.QNetwork",
"random.seed",
"torch.min",
"torch.from_numpy",
"torch.cuda.is_available",
"numpy.vstack",
"torch.no_grad",
"random.random",
"numpy.arange"
] | [((506, 531), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (529, 531), False, 'import torch\n'), ((1006, 1023), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (1017, 1023), False, 'import random\n'), ((3648, 3679), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['Q_expected', 'Q1_curr'], {}), '(Q_expected, Q1_curr)\n', (3658, 3679), True, 'import torch.nn.functional as F\n'), ((3696, 3727), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['Q_expected', 'Q2_curr'], {}), '(Q_expected, Q2_curr)\n', (3706, 3727), True, 'import torch.nn.functional as F\n'), ((4476, 4501), 'collections.deque', 'deque', ([], {'maxlen': 'buffer_size'}), '(maxlen=buffer_size)\n', (4481, 4501), False, 'from collections import namedtuple, deque\n'), ((4567, 4660), 'collections.namedtuple', 'namedtuple', (['"""Experience"""'], {'field_names': "['state', 'action', 'reward', 'next_state', 'done']"}), "('Experience', field_names=['state', 'action', 'reward',\n 'next_state', 'done'])\n", (4577, 4660), False, 'from collections import namedtuple, deque\n'), ((4677, 4694), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (4688, 4694), False, 'import random\n'), ((5020, 5065), 'random.sample', 'random.sample', (['self.memory'], {'k': 'self.batch_size'}), '(self.memory, k=self.batch_size)\n', (5033, 5065), False, 'import random\n'), ((2468, 2483), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2481, 2483), False, 'import torch\n'), ((2620, 2635), 'random.random', 'random.random', ([], {}), '()\n', (2633, 2635), False, 'import random\n'), ((1070, 1109), 'DQNmodel.QNetwork', 'QNetwork', (['state_size', 'action_size', 'seed'], {}), '(state_size, action_size, seed)\n', (1078, 1109), False, 'from DQNmodel import QNetwork\n'), ((1146, 1185), 'DQNmodel.QNetwork', 'QNetwork', (['state_size', 'action_size', 'seed'], {}), '(state_size, action_size, seed)\n', (1154, 1185), False, 'from DQNmodel import QNetwork\n'), ((2753, 2780), 'numpy.arange', 'np.arange', 
(['self.action_size'], {}), '(self.action_size)\n', (2762, 2780), True, 'import numpy as np\n'), ((3505, 3532), 'torch.min', 'torch.min', (['Q1_next', 'Q2_next'], {}), '(Q1_next, Q2_next)\n', (3514, 3532), False, 'import torch\n'), ((5101, 5159), 'numpy.vstack', 'np.vstack', (['[e.state for e in experiences if e is not None]'], {}), '([e.state for e in experiences if e is not None])\n', (5110, 5159), True, 'import numpy as np\n'), ((5215, 5274), 'numpy.vstack', 'np.vstack', (['[e.action for e in experiences if e is not None]'], {}), '([e.action for e in experiences if e is not None])\n', (5224, 5274), True, 'import numpy as np\n'), ((5329, 5388), 'numpy.vstack', 'np.vstack', (['[e.reward for e in experiences if e is not None]'], {}), '([e.reward for e in experiences if e is not None])\n', (5338, 5388), True, 'import numpy as np\n'), ((5448, 5511), 'numpy.vstack', 'np.vstack', (['[e.next_state for e in experiences if e is not None]'], {}), '([e.next_state for e in experiences if e is not None])\n', (5457, 5511), True, 'import numpy as np\n'), ((2369, 2392), 'torch.from_numpy', 'torch.from_numpy', (['state'], {}), '(state)\n', (2385, 2392), False, 'import torch\n'), ((5565, 5622), 'numpy.vstack', 'np.vstack', (['[e.done for e in experiences if e is not None]'], {}), '([e.done for e in experiences if e is not None])\n', (5574, 5622), True, 'import numpy as np\n')] |
"""
A script to convert between the J2000 and the sun.
"""
from sunpy.coordinates.sun import sky_position as sun_position
import sunpy.coordinates.sun as sun_coord
import numpy as np
def j2000xy(RA,DEC,t_sun):
[RA_sun, DEC_sun] = sun_position(t_sun,False)
rotate_angel = sun_coord.P(t_sun)
# shift the center and transfer into arcsec
x_shift = -(RA - RA_sun.degree) * 3600
y_shift = (DEC - DEC_sun.degree) * 3600
# rotate xy according to the position angle
xx = x_shift * np.cos(-rotate_angel.rad) - y_shift * np.sin(-rotate_angel.rad)
yy = x_shift * np.sin(-rotate_angel.rad) + y_shift * np.cos(-rotate_angel.rad)
return [xx,yy]
| [
"numpy.sin",
"sunpy.coordinates.sun.P",
"numpy.cos",
"sunpy.coordinates.sun.sky_position"
] | [((237, 263), 'sunpy.coordinates.sun.sky_position', 'sun_position', (['t_sun', '(False)'], {}), '(t_sun, False)\n', (249, 263), True, 'from sunpy.coordinates.sun import sky_position as sun_position\n'), ((282, 300), 'sunpy.coordinates.sun.P', 'sun_coord.P', (['t_sun'], {}), '(t_sun)\n', (293, 300), True, 'import sunpy.coordinates.sun as sun_coord\n'), ((507, 532), 'numpy.cos', 'np.cos', (['(-rotate_angel.rad)'], {}), '(-rotate_angel.rad)\n', (513, 532), True, 'import numpy as np\n'), ((545, 570), 'numpy.sin', 'np.sin', (['(-rotate_angel.rad)'], {}), '(-rotate_angel.rad)\n', (551, 570), True, 'import numpy as np\n'), ((590, 615), 'numpy.sin', 'np.sin', (['(-rotate_angel.rad)'], {}), '(-rotate_angel.rad)\n', (596, 615), True, 'import numpy as np\n'), ((628, 653), 'numpy.cos', 'np.cos', (['(-rotate_angel.rad)'], {}), '(-rotate_angel.rad)\n', (634, 653), True, 'import numpy as np\n')] |
import argparse
import os.path as osp
import random
from time import perf_counter as t
import yaml
from yaml import SafeLoader
import torch
import torch_geometric.transforms as T
import torch.nn.functional as F
import torch.nn as nn
from torch_geometric.datasets import Planetoid, CitationFull
from torch_geometric.utils import dropout_adj, to_undirected, is_undirected
from torch_geometric.nn import GCNConv
import numpy as np
from torch_geometric.utils import to_undirected, to_scipy_sparse_matrix
from datasets import get_citation_dataset
from model_digcl import Encoder, Model, drop_feature
from eval_digcl import label_classification
from get_adj import *
import warnings
warnings.filterwarnings('ignore')
def train(model: Model, x, edge_index):
model.train()
optimizer.zero_grad()
edge_index_1, edge_weight_1 = cal_fast_appr(
alpha_1, edge_index, x.shape[0], x.dtype)
edge_index_2, edge_weight_2 = cal_fast_appr(
alpha_2, edge_index, x.shape[0], x.dtype)
x_1 = drop_feature(x, drop_feature_rate_1)
x_2 = drop_feature(x, drop_feature_rate_2)
z1 = model(x_1, edge_index_1, edge_weight_1)
z2 = model(x_2, edge_index_2, edge_weight_2)
loss = model.loss(z1, z2, batch_size=0)
loss.backward()
optimizer.step()
return loss.item()
def test(model: Model, dataset, x, edge_index, edge_weight, y, final=False):
model.eval()
z = model(x, edge_index, edge_weight)
label_classification(z, y, data)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, default='DBLP')
parser.add_argument('--gpu_id', type=int, default=0)
parser.add_argument('--config', type=str, default='config_digcl.yaml')
parser.add_argument('--alpha', type=float, default=0.1)
parser.add_argument('--recache', action="store_true",
help="clean up the old adj data", default=True)
parser.add_argument('--normalize-features',
action="store_true", default=True)
parser.add_argument('--adj-type', type=str, default='or')
parser.add_argument('--curr-type', type=str, default='log')
args = parser.parse_args()
assert args.gpu_id in range(0, 8)
torch.cuda.set_device(args.gpu_id)
config = yaml.load(open(args.config), Loader=SafeLoader)[args.dataset]
torch.manual_seed(config['seed'])
random.seed(2021)
learning_rate = config['learning_rate']
num_hidden = config['num_hidden']
num_proj_hidden = config['num_proj_hidden']
activation = ({'relu': F.relu, 'prelu': nn.PReLU(), 'rrelu': nn.RReLU()})[
config['activation']]
base_model = ({'GCNConv': GCNConv})[config['base_model']]
num_layers = config['num_layers']
alpha_1 = 0.1
drop_feature_rate_1 = config['drop_feature_rate_1']
drop_feature_rate_2 = config['drop_feature_rate_2']
tau = config['tau']
num_epochs = config['num_epochs']
weight_decay = config['weight_decay']
path = osp.join(osp.expanduser('.'), 'datasets')
print(args.normalize_features)
dataset = get_citation_dataset(
args.dataset, args.alpha, args.recache, args.normalize_features, args.adj_type)
print("Num of edges ", dataset[0].num_edges)
data = dataset[0]
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
data = data.to(device)
edge_index_init, edge_weight_init = cal_fast_appr(
alpha_1, data.edge_index, data.x.shape[0], data.x.dtype)
encoder = Encoder(dataset.num_features, num_hidden, activation,
base_model=base_model, k=num_layers).to(device)
model = Model(encoder, num_hidden, num_proj_hidden, tau).to(device)
optimizer = torch.optim.Adam(
model.parameters(), lr=learning_rate, weight_decay=weight_decay)
start = t()
prev = start
for epoch in range(1, num_epochs + 1):
a = 0.9
b = 0.1
if args.curr_type == 'linear':
alpha_2 = a-(a-b)/(num_epochs+1)*epoch
elif args.curr_type == 'exp':
alpha_2 = a - (a-b)/(np.exp(3)-1) * \
(np.exp(3*epoch/(num_epochs+1))-1)
elif args.curr_type == 'log':
alpha_2 = a - (a-b)*(1/3*np.log(epoch/(num_epochs+1)+np.exp(-3)))
elif args.curr_type == 'fixed':
alpha_2 = 0.9
else:
print('wrong curr type')
exit()
loss = train(model, data.x, data.edge_index)
now = t()
print(f'(T) | Epoch={epoch:03d}, loss={loss:.4f}, '
f'this epoch {now - prev:.4f}, total {now - start:.4f}')
prev = now
print("=== Final ===")
test(model, dataset, data.x, edge_index_init,
edge_weight_init, data.y, final=True)
| [
"torch.manual_seed",
"datasets.get_citation_dataset",
"argparse.ArgumentParser",
"eval_digcl.label_classification",
"model_digcl.Model",
"time.perf_counter",
"random.seed",
"torch.nn.PReLU",
"numpy.exp",
"torch.cuda.is_available",
"model_digcl.Encoder",
"torch.nn.RReLU",
"torch.cuda.set_devi... | [((681, 714), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (704, 714), False, 'import warnings\n'), ((1011, 1047), 'model_digcl.drop_feature', 'drop_feature', (['x', 'drop_feature_rate_1'], {}), '(x, drop_feature_rate_1)\n', (1023, 1047), False, 'from model_digcl import Encoder, Model, drop_feature\n'), ((1058, 1094), 'model_digcl.drop_feature', 'drop_feature', (['x', 'drop_feature_rate_2'], {}), '(x, drop_feature_rate_2)\n', (1070, 1094), False, 'from model_digcl import Encoder, Model, drop_feature\n'), ((1446, 1478), 'eval_digcl.label_classification', 'label_classification', (['z', 'y', 'data'], {}), '(z, y, data)\n', (1466, 1478), False, 'from eval_digcl import label_classification\n'), ((1521, 1546), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1544, 1546), False, 'import argparse\n'), ((2239, 2273), 'torch.cuda.set_device', 'torch.cuda.set_device', (['args.gpu_id'], {}), '(args.gpu_id)\n', (2260, 2273), False, 'import torch\n'), ((2355, 2388), 'torch.manual_seed', 'torch.manual_seed', (["config['seed']"], {}), "(config['seed'])\n", (2372, 2388), False, 'import torch\n'), ((2393, 2410), 'random.seed', 'random.seed', (['(2021)'], {}), '(2021)\n', (2404, 2410), False, 'import random\n'), ((3090, 3195), 'datasets.get_citation_dataset', 'get_citation_dataset', (['args.dataset', 'args.alpha', 'args.recache', 'args.normalize_features', 'args.adj_type'], {}), '(args.dataset, args.alpha, args.recache, args.\n normalize_features, args.adj_type)\n', (3110, 3195), False, 'from datasets import get_citation_dataset\n'), ((3825, 3828), 'time.perf_counter', 't', ([], {}), '()\n', (3826, 3828), True, 'from time import perf_counter as t\n'), ((3008, 3027), 'os.path.expanduser', 'osp.expanduser', (['"""."""'], {}), "('.')\n", (3022, 3027), True, 'import os.path as osp\n'), ((4471, 4474), 'time.perf_counter', 't', ([], {}), '()\n', (4472, 4474), True, 'from time import 
perf_counter as t\n'), ((2586, 2596), 'torch.nn.PReLU', 'nn.PReLU', ([], {}), '()\n', (2594, 2596), True, 'import torch.nn as nn\n'), ((2607, 2617), 'torch.nn.RReLU', 'nn.RReLU', ([], {}), '()\n', (2615, 2617), True, 'import torch.nn as nn\n'), ((3309, 3334), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3332, 3334), False, 'import torch\n'), ((3509, 3603), 'model_digcl.Encoder', 'Encoder', (['dataset.num_features', 'num_hidden', 'activation'], {'base_model': 'base_model', 'k': 'num_layers'}), '(dataset.num_features, num_hidden, activation, base_model=base_model,\n k=num_layers)\n', (3516, 3603), False, 'from model_digcl import Encoder, Model, drop_feature\n'), ((3645, 3693), 'model_digcl.Model', 'Model', (['encoder', 'num_hidden', 'num_proj_hidden', 'tau'], {}), '(encoder, num_hidden, num_proj_hidden, tau)\n', (3650, 3693), False, 'from model_digcl import Encoder, Model, drop_feature\n'), ((4116, 4152), 'numpy.exp', 'np.exp', (['(3 * epoch / (num_epochs + 1))'], {}), '(3 * epoch / (num_epochs + 1))\n', (4122, 4152), True, 'import numpy as np\n'), ((4082, 4091), 'numpy.exp', 'np.exp', (['(3)'], {}), '(3)\n', (4088, 4091), True, 'import numpy as np\n'), ((4253, 4263), 'numpy.exp', 'np.exp', (['(-3)'], {}), '(-3)\n', (4259, 4263), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8
"""
This script loads a template file and fills in IDs in columns where they are missing
author: <NAME> for Knocean Inc., 22 September 2020
"""
import pandas as pd
import numpy as np
from pathlib import Path
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument(
"-t", "--template", dest="template_file", help="Template file", metavar="FILE", required=True
)
parser.add_argument(
"-m",
"--metadata-table",
dest="metadata_table_path",
help="Path to the IHCC metadata table",
metavar="FILE",
)
parser.add_argument(
"-l",
"--length-id",
dest="length_of_id",
help="How many characters should your id by at most?",
metavar="FILE",
default=7,
)
args = parser.parse_args()
# Get the data dictionary id. If a metada data file is supplied,
# get it from there, else use the path to uppercase
if args.metadata_table_path:
df = pd.read_csv(args.metadata_table_path, header=None, sep="\t")
dd_id = df[df.iloc[:, 0] == "Cohort ID"].iloc[:, 1].iloc[0]
else:
dd_id = Path(args.template_file).stem
dd_id = dd_id.upper()
print("Generating IDs for data dictionary: %s" % dd_id)
NUM_PADDED_ZERO = args.length_of_id
MAX_ID = int("9" * NUM_PADDED_ZERO)
PREFIX = "%s:" % dd_id
COL_TERM_ID = "Term ID"
COL_LABEL = "Label"
df = pd.read_csv(args.template_file, sep="\t", dtype=str)
len_pre = len(df)
highest_current_id = 0
if COL_TERM_ID in df.columns:
df_nn = df[df[COL_TERM_ID].notnull()]
ids = df_nn[df_nn[COL_TERM_ID].str.startswith(PREFIX)][COL_TERM_ID].tolist()
ids = [i.replace(PREFIX, "") for i in ids]
ids_int = [int(i) for i in ids if i.isdigit()]
if ids_int:
highest_current_id = max(ids_int)
else:
df[COL_TERM_ID] = ""
for index, row in df.iterrows():
value = row[COL_TERM_ID]
if row[COL_LABEL] or (value.dtype == float and not np.isnan(value)):
# print(str(value) + " " + str(row[COL_LABEL]))
if (type(value) != str) or (not value.startswith(PREFIX)):
highest_current_id = highest_current_id + 1
if highest_current_id > MAX_ID:
raise RuntimeError(
"The maximum number of digits is exhausted (%d), "
+ "you need to pick a larger range!" % NUM_PADDED_ZERO
)
highest_current_id_str = str(highest_current_id)
df.at[index, COL_TERM_ID] = "%s%s" % (
PREFIX,
highest_current_id_str.zfill(NUM_PADDED_ZERO),
)
if len_pre != len(df):
raise RuntimeError(
"The size of the dictionary changed " + "during the process - something went wrong (KTD)."
)
# Save template
with open(args.template_file, "w") as write_csv:
write_csv.write(df.to_csv(sep="\t", index=False))
| [
"numpy.isnan",
"pathlib.Path",
"argparse.ArgumentParser",
"pandas.read_csv"
] | [((294, 310), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (308, 310), False, 'from argparse import ArgumentParser\n'), ((1340, 1392), 'pandas.read_csv', 'pd.read_csv', (['args.template_file'], {'sep': '"""\t"""', 'dtype': 'str'}), "(args.template_file, sep='\\t', dtype=str)\n", (1351, 1392), True, 'import pandas as pd\n'), ((939, 999), 'pandas.read_csv', 'pd.read_csv', (['args.metadata_table_path'], {'header': 'None', 'sep': '"""\t"""'}), "(args.metadata_table_path, header=None, sep='\\t')\n", (950, 999), True, 'import pandas as pd\n'), ((1082, 1106), 'pathlib.Path', 'Path', (['args.template_file'], {}), '(args.template_file)\n', (1086, 1106), False, 'from pathlib import Path\n'), ((1895, 1910), 'numpy.isnan', 'np.isnan', (['value'], {}), '(value)\n', (1903, 1910), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import tensorflow as tf
import numpy as np
import functools
def doublewrap(function):
"""
A decorator decorator, allowing to use the decorator to be used without
parentheses if not arguments are provided. All arguments must be optional.
"""
@functools.wraps(function)
def decorator(*args, **kwargs):
if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
return function(args[0])
else:
return lambda wrapee: function(wrapee, *args, **kwargs)
return decorator
@doublewrap
def define_scope(function, scope=None, *args, **kwargs):
"""
A decorator for functions that define TensorFlow operations. The wrapped
function will only be executed once. Subsequent calls to it will directly
return the result so that operations are added to the graph only once.
The operations added by the function live within a tf.variable_scope(). If
this decorator is used with arguments, they will be forwarded to the
variable scope. The scope name defaults to the name of the wrapped
function.
"""
attribute = '_cache_' + function.__name__
name = scope or function.__name__
@property
@functools.wraps(function)
def decorator(self):
if not hasattr(self, attribute):
with tf.variable_scope(self.scene_scope + "_" + name, *args,
**kwargs):
setattr(self, attribute, function(self))
return getattr(self, attribute)
return decorator
# weight initialization based on muupan's code
# https://github.com/muupan/async-rl/blob/master/a3c_ale.py
def fc_weight_variable(shape, name='W_fc'):
input_channels = shape[0]
d = 1.0 / np.sqrt(input_channels)
initial = tf.random_uniform(shape, minval=-d, maxval=d)
return tf.Variable(initial, name=name)
def fc_bias_variable(shape, input_channels, name='bias_fc'):
d = 1.0 / np.sqrt(input_channels)
initial = tf.random_uniform(shape, minval=-d, maxval=d)
return tf.Variable(initial, name=name)
| [
"numpy.sqrt",
"tensorflow.variable_scope",
"tensorflow.Variable",
"functools.wraps",
"tensorflow.random_uniform"
] | [((289, 314), 'functools.wraps', 'functools.wraps', (['function'], {}), '(function)\n', (304, 314), False, 'import functools\n'), ((1220, 1245), 'functools.wraps', 'functools.wraps', (['function'], {}), '(function)\n', (1235, 1245), False, 'import functools\n'), ((1787, 1832), 'tensorflow.random_uniform', 'tf.random_uniform', (['shape'], {'minval': '(-d)', 'maxval': 'd'}), '(shape, minval=-d, maxval=d)\n', (1804, 1832), True, 'import tensorflow as tf\n'), ((1844, 1875), 'tensorflow.Variable', 'tf.Variable', (['initial'], {'name': 'name'}), '(initial, name=name)\n', (1855, 1875), True, 'import tensorflow as tf\n'), ((1991, 2036), 'tensorflow.random_uniform', 'tf.random_uniform', (['shape'], {'minval': '(-d)', 'maxval': 'd'}), '(shape, minval=-d, maxval=d)\n', (2008, 2036), True, 'import tensorflow as tf\n'), ((2048, 2079), 'tensorflow.Variable', 'tf.Variable', (['initial'], {'name': 'name'}), '(initial, name=name)\n', (2059, 2079), True, 'import tensorflow as tf\n'), ((1749, 1772), 'numpy.sqrt', 'np.sqrt', (['input_channels'], {}), '(input_channels)\n', (1756, 1772), True, 'import numpy as np\n'), ((1953, 1976), 'numpy.sqrt', 'np.sqrt', (['input_channels'], {}), '(input_channels)\n', (1960, 1976), True, 'import numpy as np\n'), ((1329, 1394), 'tensorflow.variable_scope', 'tf.variable_scope', (["(self.scene_scope + '_' + name)", '*args'], {}), "(self.scene_scope + '_' + name, *args, **kwargs)\n", (1346, 1394), True, 'import tensorflow as tf\n')] |
"""
Code modified from PyTorch DCGAN examples: https://github.com/pytorch/examples/tree/master/dcgan
"""
from __future__ import print_function
import argparse
import os
import numpy as np
import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
from utils import denormalize, weights_init, compute_acc
from network import _netG, _netD, _netD_CIFAR10, _netG_CIFAR10
from folder import ImageFolder
import tqdm
import torchvision.utils as vutils
def main():
    """Generate class-conditional images with a trained AC-GAN.

    Loads generator/discriminator checkpoints for ``--eval_epoch`` from
    ``--outf``, then repeatedly samples the generator one image at a time,
    keeping a sample only when the discriminator's auxiliary classifier
    assigns it to the requested class. Accepted images are written to
    ``<outf>/gen_images/c<class>/``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', required=True, help='cifar10 | imagenet')
    parser.add_argument('--nz', type=int, default=110, help='size of the latent z vector')
    parser.add_argument('--eval_epoch', type=int, default=None)
    parser.add_argument('--cuda', action='store_true', help='enables cuda')
    parser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use')
    parser.add_argument('--n_images', type=int, default=1, help='number of images you want to generate')
    parser.add_argument('--outf', default='./training_data', help='folder to output images and model checkpoints')
    parser.add_argument('--gpu_id', type=int, default=0, help='The ID of the specified GPU')
    parser.add_argument('--manualSeed', type=int, default=0, help='manual seed')
    parser.add_argument('--num_classes', type=int, default=10, help='Number of classes for AC-GAN')
    opt = parser.parse_args()

    # specify the gpu id if using only 1 gpu
    if opt.ngpu == 1:
        os.environ['CUDA_VISIBLE_DEVICES'] = str(opt.gpu_id)

    os.makedirs(opt.outf, exist_ok=True)

    # Seed every RNG we use so generation is reproducible.
    # NOTE(review): the default of --manualSeed is 0, so this branch is
    # currently dead; kept for compatibility if the default is removed.
    if opt.manualSeed is None:
        opt.manualSeed = random.randint(1, 10000)
    print("Random Seed: ", opt.manualSeed)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)
    if opt.cuda:
        torch.cuda.manual_seed_all(opt.manualSeed)

    cudnn.benchmark = True

    if torch.cuda.is_available() and not opt.cuda:
        print("WARNING: You have a CUDA device, so you should probably run with --cuda")

    # some hyper parameters
    ngpu = int(opt.ngpu)
    nz = int(opt.nz)
    num_classes = int(opt.num_classes)

    # Instantiate the architectures matching the dataset the model was trained on.
    if opt.dataset == 'imagenet':
        netG = _netG(ngpu, nz)
        netD = _netD(ngpu, num_classes)
    else:
        netG = _netG_CIFAR10(ngpu, nz)
        netD = _netD_CIFAR10(ngpu, num_classes)

    # Load the requested checkpoints. Fail loudly with the underlying error:
    # the previous bare `except` hid every failure (missing file, shape
    # mismatch, corrupt checkpoint) behind a meaningless NotImplementedError.
    netG_path = os.path.join(opt.outf, f'netG_epoch_{opt.eval_epoch}.pth')
    netD_path = os.path.join(opt.outf, f'netD_epoch_{opt.eval_epoch}.pth')
    try:
        netG.load_state_dict(torch.load(netG_path))
        netD.load_state_dict(torch.load(netD_path))
    except (OSError, RuntimeError) as err:
        raise RuntimeError(
            f'failed to load checkpoints for epoch {opt.eval_epoch} '
            f'from {opt.outf}') from err

    noise = torch.FloatTensor(1, nz, 1, 1)
    aux_label = torch.LongTensor(1)
    if opt.cuda:
        netG.cuda()
        netD.cuda()
        noise, aux_label = noise.cuda(), aux_label.cuda()

    num_generated_images = [0 for _ in range(num_classes)]

    # Create one output directory per class up front.
    gen_root = os.path.join(opt.outf, 'gen_images')
    os.makedirs(gen_root, exist_ok=True)
    for cls_gen in range(num_classes):
        os.makedirs(os.path.join(gen_root, f'c{cls_gen}'), exist_ok=True)

    i = 0
    while sum(num_generated_images) < opt.n_images:
        cls_gen = i % num_classes  # which class you want to generate
        if num_generated_images[cls_gen] <= (opt.n_images // num_classes):
            # Encode the class as a one-hot prefix of the latent vector.
            class_onehot = np.zeros(num_classes)
            class_onehot[cls_gen] = 1
            noise_ = np.random.normal(0, 1, (1, nz))
            noise_[0, :num_classes] = class_onehot
            noise.data.copy_(torch.from_numpy(noise_).view(1, nz, 1, 1))
            fake = netG(noise)
            # Keep the sample only if the discriminator's auxiliary head
            # classifies it as the requested class.
            if torch.argmax(netD(fake)[1]) == cls_gen:
                print(f'\r [{sum(num_generated_images)}/{opt.n_images}] saving images complete', end='')
                # save image
                vutils.save_image(
                    denormalize(fake, opt.dataset).squeeze(0),
                    os.path.join(gen_root, f'c{cls_gen}', f'{i}.png'))
                num_generated_images[cls_gen] += 1
            else:
                print(f'fail to save class {cls_gen} when i is {i}')
        i += 1


if __name__ == '__main__':
    main()
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"numpy.random.normal",
"random.randint",
"argparse.ArgumentParser",
"os.makedirs",
"network._netG",
"torch.LongTensor",
"network._netD_CIFAR10",
"os.path.join",
"random.seed",
"torch.from_numpy",
"network._netG_CIFAR10",
"numpy.zeros",
"... | [((523, 548), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (546, 548), False, 'import argparse\n'), ((1797, 1824), 'random.seed', 'random.seed', (['opt.manualSeed'], {}), '(opt.manualSeed)\n', (1808, 1824), False, 'import random\n'), ((1829, 1862), 'torch.manual_seed', 'torch.manual_seed', (['opt.manualSeed'], {}), '(opt.manualSeed)\n', (1846, 1862), False, 'import torch\n'), ((2864, 2894), 'torch.FloatTensor', 'torch.FloatTensor', (['(1)', 'nz', '(1)', '(1)'], {}), '(1, nz, 1, 1)\n', (2881, 2894), False, 'import torch\n'), ((2911, 2930), 'torch.LongTensor', 'torch.LongTensor', (['(1)'], {}), '(1)\n', (2927, 2930), False, 'import torch\n'), ((1614, 1635), 'os.makedirs', 'os.makedirs', (['opt.outf'], {}), '(opt.outf)\n', (1625, 1635), False, 'import os\n'), ((1725, 1749), 'random.randint', 'random.randint', (['(1)', '(10000)'], {}), '(1, 10000)\n', (1739, 1749), False, 'import random\n'), ((1888, 1930), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['opt.manualSeed'], {}), '(opt.manualSeed)\n', (1914, 1930), False, 'import torch\n'), ((1965, 1990), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1988, 1990), False, 'import torch\n'), ((2315, 2330), 'network._netG', '_netG', (['ngpu', 'nz'], {}), '(ngpu, nz)\n', (2320, 2330), False, 'from network import _netG, _netD, _netD_CIFAR10, _netG_CIFAR10\n'), ((2356, 2379), 'network._netG_CIFAR10', '_netG_CIFAR10', (['ngpu', 'nz'], {}), '(ngpu, nz)\n', (2369, 2379), False, 'from network import _netG, _netD, _netD_CIFAR10, _netG_CIFAR10\n'), ((2429, 2453), 'network._netD', '_netD', (['ngpu', 'num_classes'], {}), '(ngpu, num_classes)\n', (2434, 2453), False, 'from network import _netG, _netD, _netD_CIFAR10, _netG_CIFAR10\n'), ((2479, 2511), 'network._netD_CIFAR10', '_netD_CIFAR10', (['ngpu', 'num_classes'], {}), '(ngpu, num_classes)\n', (2492, 2511), False, 'from network import _netG, _netD, _netD_CIFAR10, _netG_CIFAR10\n'), ((2561, 2619), 'os.path.join', 
'os.path.join', (['opt.outf', 'f"""netG_epoch_{opt.eval_epoch}.pth"""'], {}), "(opt.outf, f'netG_epoch_{opt.eval_epoch}.pth')\n", (2573, 2619), False, 'import os\n'), ((2655, 2713), 'os.path.join', 'os.path.join', (['opt.outf', 'f"""netD_epoch_{opt.eval_epoch}.pth"""'], {}), "(opt.outf, f'netD_epoch_{opt.eval_epoch}.pth')\n", (2667, 2713), False, 'import os\n'), ((3134, 3170), 'os.path.join', 'os.path.join', (['opt.outf', '"""gen_images"""'], {}), "(opt.outf, 'gen_images')\n", (3146, 3170), False, 'import os\n'), ((3189, 3225), 'os.path.join', 'os.path.join', (['opt.outf', '"""gen_images"""'], {}), "(opt.outf, 'gen_images')\n", (3201, 3225), False, 'import os\n'), ((3633, 3654), 'numpy.zeros', 'np.zeros', (['num_classes'], {}), '(num_classes)\n', (3641, 3654), True, 'import numpy as np\n'), ((3712, 3743), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(1, nz)'], {}), '(0, 1, (1, nz))\n', (3728, 3743), True, 'import numpy as np\n'), ((3816, 3840), 'torch.from_numpy', 'torch.from_numpy', (['noise_'], {}), '(noise_)\n', (3832, 3840), False, 'import torch\n'), ((3295, 3346), 'os.path.join', 'os.path.join', (['opt.outf', '"""gen_images"""', 'f"""c{cls_gen}"""'], {}), "(opt.outf, 'gen_images', f'c{cls_gen}')\n", (3307, 3346), False, 'import os\n'), ((3368, 3419), 'os.path.join', 'os.path.join', (['opt.outf', '"""gen_images"""', 'f"""c{cls_gen}"""'], {}), "(opt.outf, 'gen_images', f'c{cls_gen}')\n", (3380, 3419), False, 'import os\n'), ((4189, 4252), 'os.path.join', 'os.path.join', (['opt.outf', '"""gen_images"""', 'f"""c{cls_gen}"""', 'f"""{i}.png"""'], {}), "(opt.outf, 'gen_images', f'c{cls_gen}', f'{i}.png')\n", (4201, 4252), False, 'import os\n'), ((4148, 4178), 'utils.denormalize', 'denormalize', (['fake', 'opt.dataset'], {}), '(fake, opt.dataset)\n', (4159, 4178), False, 'from utils import denormalize, weights_init, compute_acc\n')] |
"""
The game of Reversi. Warning: this game is not coded in an optimal
way, the AI will be slow.
"""
import numpy as np
from easyAI import TwoPlayersGame
def to_string(a):
    """Convert a 0-based (row, col) pair to board notation, e.g. (0, 0) -> 'A1'."""
    return "ABCDEFGH"[a[0]] + str(a[1] + 1)


def to_array(s):
    """Convert board notation like 'A1' back to a 0-based numpy [row, col] array."""
    return np.array(["ABCDEFGH".index(s[0]), int(s[1]) - 1])
class Reversi(TwoPlayersGame):
    """
    See the rules on http://en.wikipedia.org/wiki/Reversi
    Here for simplicity we suppose that the game ends when a
    player cannot play, but it would take just a few more lines to
    implement the real ending rules, by which the game ends when both
    players can't play.
    This implementation will make a slow and dumb AI and could be sped
    up by adding a way of unmaking moves (method unmake_moves) and
    coding some parts in C (this is left as an exercise :) )
    """

    def __init__(self, players, board=None):
        """Set up the game.

        A pre-built ``board`` (8x8 int array, 0=empty, 1/2=player pieces)
        may be supplied; previously this parameter existed but was
        silently ignored. When omitted, the standard four-piece starting
        position is used.
        """
        self.players = players
        if board is None:
            board = np.zeros((8, 8), dtype=int)
            board[3, [3, 4]] = [1, 2]
            board[4, [3, 4]] = [2, 1]
        self.board = board
        self.nplayer = 1

    def possible_moves(self):
        """ Only moves that lead to flipped pieces are allowed """
        return [to_string((i, j)) for i in range(8) for j in range(8)
                if (self.board[i, j] == 0)
                and (pieces_flipped(self.board, (i, j), self.nplayer) != [])]

    def make_move(self, pos):
        """ Put the piece at position ``pos`` and flip the pieces that
        must be flipped """
        pos = to_array(pos)
        flipped = pieces_flipped(self.board, pos, self.nplayer)
        for i, j in flipped:
            self.board[i, j] = self.nplayer
        self.board[pos[0], pos[1]] = self.nplayer

    def show(self):
        """ Prints the board in a fancy (?) way """
        print('\n'+'\n'.join([' 1 2 3 4 5 6 7 8']+ ['ABCDEFGH'[k] +
              ' '+' '.join([['.','1','2','X'][self.board[k][i]]
              for i in range(8)]) for k in range(8)]+['']))

    def is_over(self):
        """ The game is considered over when someone cannot play. That
        may not be the actual rule but it is simpler to code :). Of
        course it would be possible to implement that a player can pass
        if it cannot play (by adding the move 'pass')"""
        return self.possible_moves() == []

    def scoring(self):
        """
        In the beginning of the game (less than 32 pieces) much
        importance is given to placing pieces on the border. After this
        point, only the number of pieces of each player counts
        """
        if np.sum(self.board == 0) > 32:  # less than half the board is full
            # Cast the boolean masks to int before subtracting: NumPy
            # forbids the `-` operator on boolean arrays, so the original
            # `(player - opponent)` raised a TypeError on modern NumPy.
            player = (self.board == self.nplayer).astype(int)
            opponent = (self.board == self.nopponent).astype(int)
            return ((player - opponent) * BOARD_SCORE).sum()
        else:
            npieces_player = np.sum(self.board == self.nplayer)
            npieces_opponent = np.sum(self.board == self.nopponent)
            return npieces_player - npieces_opponent
# This board is used by the AI to give more importance to the border:
# corners score 9, other edge cells 3, interior cells 1.
BOARD_SCORE = np.array( [[9,3,3,3,3,3,3,9],
                        [3,1,1,1,1,1,1,3],
                        [3,1,1,1,1,1,1,3],
                        [3,1,1,1,1,1,1,3],
                        [3,1,1,1,1,1,1,3],
                        [3,1,1,1,1,1,1,3],
                        [3,1,1,1,1,1,1,3],
                        [9,3,3,3,3,3,3,9]])
# The eight neighbour offsets (horizontal, vertical, diagonal);
# (0, 0) is excluded since a cell is not its own neighbour.
DIRECTIONS = [ np.array([i,j]) for i in [-1,0,1] for j in [-1,0,1]
               if (i!=0 or j!=0)]
def pieces_flipped(board, pos, nplayer):
    """
    Return the positions of the pieces that would be flipped if player
    ``nplayer`` placed a piece on ``board`` at position ``pos``.

    Walks outward from ``pos`` in each of the eight DIRECTIONS, collecting
    opponent pieces; a run is flipped only if it is terminated by one of
    ``nplayer``'s own pieces. This is slow and could be coded in C or Cython.
    """
    flipped = []
    for direction in DIRECTIONS:
        probe = pos + direction
        candidates = []
        while 0 <= probe[0] <= 7 and 0 <= probe[1] <= 7:
            cell = board[probe[0], probe[1]]
            if cell == 3 - nplayer:
                # Opponent piece: remember a copy of this position and
                # keep walking in the same direction.
                candidates.append(+probe)
            elif cell == nplayer:
                # Run is bracketed by our own piece: all candidates flip.
                flipped.extend(candidates)
                break
            else:
                # Empty cell terminates the run with no flips.
                break
            probe = probe + direction
    return flipped
if __name__ == "__main__":
    from easyAI import Human_Player, AI_Player, Negamax

    # Demo: two depth-4 Negamax AIs play each other.
    first_ai = AI_Player(Negamax(4))
    second_ai = AI_Player(Negamax(4))
    game = Reversi([first_ai, second_ai])
    game.play()

    # scoring() is evaluated from the point of view of the player to move.
    final_score = game.scoring()
    if final_score > 0:
        print("player %d wins." % game.nplayer)
    elif final_score < 0:
        print("player %d wins." % game.nopponent)
    else:
        print("Draw.")
| [
"easyAI.Negamax",
"numpy.array",
"numpy.zeros",
"numpy.sum"
] | [((3016, 3243), 'numpy.array', 'np.array', (['[[9, 3, 3, 3, 3, 3, 3, 9], [3, 1, 1, 1, 1, 1, 1, 3], [3, 1, 1, 1, 1, 1, 1, \n 3], [3, 1, 1, 1, 1, 1, 1, 3], [3, 1, 1, 1, 1, 1, 1, 3], [3, 1, 1, 1, 1,\n 1, 1, 3], [3, 1, 1, 1, 1, 1, 1, 3], [9, 3, 3, 3, 3, 3, 3, 9]]'], {}), '([[9, 3, 3, 3, 3, 3, 3, 9], [3, 1, 1, 1, 1, 1, 1, 3], [3, 1, 1, 1, \n 1, 1, 1, 3], [3, 1, 1, 1, 1, 1, 1, 3], [3, 1, 1, 1, 1, 1, 1, 3], [3, 1,\n 1, 1, 1, 1, 1, 3], [3, 1, 1, 1, 1, 1, 1, 3], [9, 3, 3, 3, 3, 3, 3, 9]])\n', (3024, 3243), True, 'import numpy as np\n'), ((3371, 3387), 'numpy.array', 'np.array', (['[i, j]'], {}), '([i, j])\n', (3379, 3387), True, 'import numpy as np\n'), ((914, 941), 'numpy.zeros', 'np.zeros', (['(8, 8)'], {'dtype': 'int'}), '((8, 8), dtype=int)\n', (922, 941), True, 'import numpy as np\n'), ((2520, 2543), 'numpy.sum', 'np.sum', (['(self.board == 0)'], {}), '(self.board == 0)\n', (2526, 2543), True, 'import numpy as np\n'), ((2779, 2813), 'numpy.sum', 'np.sum', (['(self.board == self.nplayer)'], {}), '(self.board == self.nplayer)\n', (2785, 2813), True, 'import numpy as np\n'), ((2843, 2879), 'numpy.sum', 'np.sum', (['(self.board == self.nopponent)'], {}), '(self.board == self.nopponent)\n', (2849, 2879), True, 'import numpy as np\n'), ((4294, 4304), 'easyAI.Negamax', 'Negamax', (['(4)'], {}), '(4)\n', (4301, 4304), False, 'from easyAI import Human_Player, AI_Player, Negamax\n'), ((4317, 4327), 'easyAI.Negamax', 'Negamax', (['(4)'], {}), '(4)\n', (4324, 4327), False, 'from easyAI import Human_Player, AI_Player, Negamax\n')] |
import os.path
import numpy as np
import math
from collections import namedtuple
from typing import Dict, Any, Tuple, List, Optional
from models.adaptive_model import AdaptiveModel
from models.standard_model import StandardModel
from dataset.dataset import Dataset, DataSeries
from utils.file_utils import save_by_file_suffix, read_by_file_suffix
from utils.sequence_model_utils import SequenceModelType
from utils.constants import OUTPUT, LOGITS, SEQ_LENGTH, SKIP_GATES, PHASE_GATES, STOP_OUTPUT_NAME
# Log-file name template; the three fields are filled in by callers
# elsewhere in the project — TODO confirm their exact meaning.
LOG_FILE_FMT = 'model-{0}-{1}-{2}.jsonl.gz'
# Container for inference outputs: per-sample predictions and labels,
# stop/halt probabilities or sample fractions (model-dependent; may be
# None), and accuracy per level.
ModelResults = namedtuple('ModelResults', ['predictions', 'labels', 'stop_probs', 'accuracy'])
# Minibatch size used by every execute_* function below.
BATCH_SIZE = 64
def clip(x: int, bounds: Tuple[int, int]) -> int:
    """Clamp ``x`` into the inclusive range ``[bounds[0], bounds[1]]``."""
    lower, upper = bounds
    if x > upper:
        return upper
    if x < lower:
        return lower
    return x
def save_test_log(accuracy: float, power: float, valid_accuracy: Optional[float], budget: float, system_name: str, key: str, output_file: str):
    """Record one test result under ``key`` / formatted budget in the log file.

    The existing log at ``output_file`` (if any) is loaded, the new entry is
    merged in, and the whole log is written back.
    """
    log: Dict[str, Dict[str, Any]] = dict()
    if os.path.exists(output_file):
        # The serialized log is stored as a single-element sequence.
        log = list(read_by_file_suffix(output_file))[0]

    entry = log.setdefault(key, dict())
    entry['{0:.4f}'.format(budget)] = {
        'ACCURACY': accuracy,
        'AVG_POWER': power,
        'VALID_ACCURACY': valid_accuracy,
        'BUDGET': budget,
        'SYSTEM_NAME': system_name
    }

    save_by_file_suffix([log], output_file)
def get_budget_index(budget: float, valid_accuracy: np.ndarray, max_time: int, power_estimates: np.ndarray, allow_violations: bool) -> int:
    """
    Selects the single model level which should yield the best overall accuracy.
    This decision is based on the validation accuracy for each level.

    Args:
        budget: The current avg power budget
        valid_accuracy: A [L] array containing the validation accuracy for each model level
        max_time: The number of timesteps
        power_estimates: A [L] array of power estimates for each level
        allow_violations: Index selected in a manner which allows for budget violations if such
            violations will lead to better end-to-end accuracy.
    Returns:
        The "optimal" model level.
    """
    if not allow_violations:
        # Restrict attention to levels whose power fits the budget; if none
        # fits, fall back to the cheapest level.
        within_budget = power_estimates <= budget
        if np.any(within_budget):
            return np.argmax(valid_accuracy * within_budget.astype(float))
        return np.argmin(power_estimates)

    # Violations allowed: project how long each level can run on the total
    # energy budget and pick the level maximizing expected correct steps.
    energy_budget = budget * max_time
    best_index, best_acc = 0, 0.0
    for level_idx in range(valid_accuracy.shape[0]):
        projected_steps = min(energy_budget / power_estimates[level_idx], max_time)
        estimated_acc = (valid_accuracy[level_idx] * projected_steps) / max_time
        if estimated_acc > best_acc:
            best_index, best_acc = level_idx, estimated_acc
    return best_index
def concat_model_results(model_results: List[ModelResults]) -> ModelResults:
    """
    Merge per-model results into a single ModelResults tuple. Useful for
    Skip RNN and Phased RNN systems in which each output is a separate model.

    Predictions are concatenated along the level axis to give a [N, L]
    array; stop_probs and accuracy become per-model lists, and the labels
    of the first result are reused (all models share the same labels).
    """
    stacked = np.concatenate([res.predictions for res in model_results], axis=1)  # [N, L]
    return ModelResults(predictions=stacked,
                        labels=model_results[0].labels,
                        stop_probs=[res.stop_probs for res in model_results],
                        accuracy=[res.accuracy for res in model_results])
def execute_adaptive_model(model: AdaptiveModel, dataset: Dataset, series: DataSeries) -> ModelResults:
    """
    Executes the neural network on the given data series. We do this in a separate step
    to avoid recomputing for multiple budgets. Executing the neural network is relatively expensive.

    Args:
        model: The adaptive model used to perform inference
        dataset: The dataset to perform inference on
        series: The data series to extract. This is usually the TEST set.
    Returns:
        A model result tuple with [N, L] predictions, [N, 1] labels,
        [N, L] stop probabilities and per-level accuracy ([L]).
    """
    level_predictions: List[np.ndarray] = []
    stop_probs: List[np.ndarray] = []
    labels: List[np.ndarray] = []
    # NOTE(review): num_outputs is assigned but never used in this function.
    num_outputs = model.num_outputs
    # Operations to execute
    ops = [LOGITS, STOP_OUTPUT_NAME]
    # Make the batch generator. Don't shuffle so we have consistent results.
    data_generator = dataset.minibatch_generator(series=series,
                                                batch_size=BATCH_SIZE,
                                                metadata=model.metadata,
                                                should_shuffle=False)
    for batch_num, batch in enumerate(data_generator):
        # Compute the predicted log probabilities
        feed_dict = model.batch_to_feed_dict(batch, is_train=False, epoch_num=0)
        model_results = model.execute(feed_dict, ops=ops)
        # Concatenate logits into a [B, L, C] array (logit_ops is already ordered by level).
        # For reference, L is the number of levels and C is the number of classes
        logits = model_results[LOGITS]
        stop_output = model_results[STOP_OUTPUT_NAME]  # [B, L]
        stop_probs.append(stop_output)
        # Compute the predictions for each level
        level_pred = np.argmax(logits, axis=-1)  # [B, L]
        level_predictions.append(level_pred)
        labels.append(np.array(batch[OUTPUT]).reshape(-1, 1))
    # Stack the per-batch arrays into full-dataset arrays.
    level_predictions = np.concatenate(level_predictions, axis=0)
    labels = np.concatenate(labels, axis=0)  # [N, 1]
    stop_probs = np.concatenate(stop_probs, axis=0)
    # Per-level accuracy: labels broadcast across the L prediction columns.
    level_accuracy = np.average(np.isclose(level_predictions, labels).astype(float), axis=0)
    return ModelResults(predictions=level_predictions, labels=labels, stop_probs=stop_probs, accuracy=level_accuracy)
def execute_standard_model(model: StandardModel, dataset: Dataset, series: DataSeries) -> ModelResults:
    """
    Runs the standard (non-adaptive) model over the whole data series.

    Inference is performed once, separately from any budget evaluation,
    because executing the neural network is the expensive step.

    Args:
        model: The standard model used to perform inference
        dataset: The dataset to perform inference on
        series: The data series to extract. This is usually the TEST set.
    Returns:
        A model result tuple containing the inference results
        (stop_probs is None for standard models).
    """
    batch_predictions: List[np.ndarray] = []
    batch_labels: List[np.ndarray] = []

    # Iterate deterministically (no shuffling) so results are reproducible.
    batches = dataset.minibatch_generator(series=series,
                                          batch_size=BATCH_SIZE,
                                          metadata=model.metadata,
                                          should_shuffle=False)
    for batch in batches:
        feed_dict = model.batch_to_feed_dict(batch, is_train=False, epoch_num=0)
        outputs = model.execute(feed_dict, [LOGITS])

        # Highest-scoring class per sample, [B, L]
        batch_predictions.append(np.argmax(outputs[LOGITS], axis=-1))
        batch_labels.append(np.array(batch[OUTPUT]).reshape(-1, 1))

    predictions = np.concatenate(batch_predictions, axis=0)
    labels = np.concatenate(batch_labels, axis=0)  # [N, 1]
    accuracy = np.average(np.isclose(predictions, labels).astype(float), axis=0)

    return ModelResults(predictions=predictions, labels=labels, stop_probs=None, accuracy=accuracy)
def execute_skip_rnn_model(model: StandardModel, dataset: Dataset, series: DataSeries) -> ModelResults:
    """
    Executes the neural network on the given data series. We do this in a separate step
    to avoid recomputing for multiple budgets. Executing the neural network is relatively expensive.

    Args:
        model: The Skip RNN standard model used to perform inference
        dataset: The dataset to perform inference on
        series: The data series to extract. This is usually the TEST set.
    Returns:
        A model result tuple containing the inference results. The fraction of
        samples that consumed each number of timesteps (a [T] array) is
        placed in the stop_probs element.
    """
    assert model.model_type == SequenceModelType.SKIP_RNN, 'Must provide a Skip RNN'
    seq_length = model.seq_length
    predictions: List[np.ndarray] = []
    labels: List[np.ndarray] = []
    # Histogram over the number of timesteps consumed per sample.
    sample_counts = np.zeros(shape=(seq_length, ), dtype=np.int64)
    # Make the batch generator. Don't shuffle so we have consistent results.
    data_generator = dataset.minibatch_generator(series=series,
                                                batch_size=BATCH_SIZE,
                                                metadata=model.metadata,
                                                should_shuffle=False)
    for batch_num, batch in enumerate(data_generator):
        # Compute the predicted log probabilities
        feed_dict = model.batch_to_feed_dict(batch, is_train=False, epoch_num=0)
        model_results = model.execute(feed_dict, [LOGITS, SKIP_GATES])
        # Compute the predictions for each level
        pred = np.argmax(model_results[LOGITS], axis=-1)  # [B]
        predictions.append(pred.reshape(-1, 1))
        # Collect the number of samples processed for each batch element. We subtract 1
        # because it is impossible for the models to consume zero samples.
        num_samples = np.sum(model_results[SKIP_GATES], axis=-1).astype(int) - 1  # [B]
        counts = np.bincount(num_samples, minlength=seq_length)
        sample_counts += counts  # [T]
        labels.append(np.array(batch[OUTPUT]).reshape(-1, 1))
    # Save results as attributes
    predictions = np.concatenate(predictions, axis=0)
    labels = np.concatenate(labels, axis=0)  # [N, 1]
    accuracy = np.average(np.isclose(predictions, labels).astype(float), axis=0)
    # Normalize the sample counts into fractions summing to 1.
    sample_counts = sample_counts.astype(float)
    sample_fractions = sample_counts / np.sum(sample_counts)
    return ModelResults(predictions=predictions, labels=labels, stop_probs=sample_fractions, accuracy=accuracy)
def execute_phased_rnn_model(model: StandardModel, dataset: Dataset, series: DataSeries) -> ModelResults:
    """
    Executes the neural network on the given data series. We do this in a separate step
    to avoid recomputing for multiple budgets. Executing the neural network is relatively expensive.

    Args:
        model: The Phased RNN standard model used to perform inference
        dataset: The dataset to perform inference on
        series: The data series to extract. This is usually the TEST set.
    Returns:
        A model result tuple containing the inference results. The fraction of
        samples that consumed each number of timesteps (a [T] array) is
        placed in the stop_probs element.
    """
    assert model.model_type == SequenceModelType.PHASED_RNN, 'Must provide a Phased RNN'
    seq_length = model.seq_length
    predictions: List[np.ndarray] = []
    labels: List[np.ndarray] = []
    # Histogram over the number of timesteps consumed per sample.
    sample_counts = np.zeros(shape=(seq_length, ), dtype=np.int64)
    # Make the batch generator. Don't shuffle so we have consistent results.
    data_generator = dataset.minibatch_generator(series=series,
                                                batch_size=BATCH_SIZE,
                                                metadata=model.metadata,
                                                should_shuffle=False)
    for batch_num, batch in enumerate(data_generator):
        # Compute the predicted log probabilities
        feed_dict = model.batch_to_feed_dict(batch, is_train=False, epoch_num=0)
        model_results = model.execute(feed_dict, [LOGITS, PHASE_GATES])
        # Compute the predictions for each level
        pred = np.argmax(model_results[LOGITS], axis=-1)  # [B]
        predictions.append(pred.reshape(-1, 1))
        # Collect the number of samples processed for each batch element. We subtract 1
        # because it is impossible for the models to consume zero samples.
        phase_gates = model_results[PHASE_GATES]  # [B, T]
        num_samples = np.count_nonzero(phase_gates, axis=-1) - 1  # [B]
        counts = np.bincount(num_samples, minlength=seq_length)
        sample_counts += counts  # [T]
        labels.append(np.array(batch[OUTPUT]).reshape(-1, 1))
    # Save results as attributes
    predictions = np.concatenate(predictions, axis=0)
    labels = np.concatenate(labels, axis=0)  # [N, 1]
    accuracy = np.average(np.isclose(predictions, labels).astype(float), axis=0)
    # Normalize the sample counts into fractions summing to 1.
    sample_counts = sample_counts.astype(float)
    sample_fractions = sample_counts / np.sum(sample_counts)
    return ModelResults(predictions=predictions, labels=labels, stop_probs=sample_fractions, accuracy=accuracy)
| [
"utils.file_utils.read_by_file_suffix",
"collections.namedtuple",
"numpy.isclose",
"numpy.argmax",
"numpy.any",
"utils.file_utils.save_by_file_suffix",
"numpy.count_nonzero",
"numpy.sum",
"numpy.zeros",
"numpy.array",
"numpy.concatenate",
"numpy.argmin",
"numpy.bincount"
] | [((563, 642), 'collections.namedtuple', 'namedtuple', (['"""ModelResults"""', "['predictions', 'labels', 'stop_probs', 'accuracy']"], {}), "('ModelResults', ['predictions', 'labels', 'stop_probs', 'accuracy'])\n", (573, 642), False, 'from collections import namedtuple\n'), ((1447, 1491), 'utils.file_utils.save_by_file_suffix', 'save_by_file_suffix', (['[test_log]', 'output_file'], {}), '([test_log], output_file)\n', (1466, 1491), False, 'from utils.file_utils import save_by_file_suffix, read_by_file_suffix\n'), ((3598, 3660), 'numpy.concatenate', 'np.concatenate', (['[r.predictions for r in model_results]'], {'axis': '(1)'}), '([r.predictions for r in model_results], axis=1)\n', (3612, 3660), True, 'import numpy as np\n'), ((6004, 6045), 'numpy.concatenate', 'np.concatenate', (['level_predictions'], {'axis': '(0)'}), '(level_predictions, axis=0)\n', (6018, 6045), True, 'import numpy as np\n'), ((6059, 6089), 'numpy.concatenate', 'np.concatenate', (['labels'], {'axis': '(0)'}), '(labels, axis=0)\n', (6073, 6089), True, 'import numpy as np\n'), ((6117, 6151), 'numpy.concatenate', 'np.concatenate', (['stop_probs'], {'axis': '(0)'}), '(stop_probs, axis=0)\n', (6131, 6151), True, 'import numpy as np\n'), ((7922, 7963), 'numpy.concatenate', 'np.concatenate', (['level_predictions'], {'axis': '(0)'}), '(level_predictions, axis=0)\n', (7936, 7963), True, 'import numpy as np\n'), ((7977, 8007), 'numpy.concatenate', 'np.concatenate', (['labels'], {'axis': '(0)'}), '(labels, axis=0)\n', (7991, 8007), True, 'import numpy as np\n'), ((9091, 9136), 'numpy.zeros', 'np.zeros', ([], {'shape': '(seq_length,)', 'dtype': 'np.int64'}), '(shape=(seq_length,), dtype=np.int64)\n', (9099, 9136), True, 'import numpy as np\n'), ((10387, 10422), 'numpy.concatenate', 'np.concatenate', (['predictions'], {'axis': '(0)'}), '(predictions, axis=0)\n', (10401, 10422), True, 'import numpy as np\n'), ((10436, 10466), 'numpy.concatenate', 'np.concatenate', (['labels'], {'axis': '(0)'}), '(labels, 
axis=0)\n', (10450, 10466), True, 'import numpy as np\n'), ((11689, 11734), 'numpy.zeros', 'np.zeros', ([], {'shape': '(seq_length,)', 'dtype': 'np.int64'}), '(shape=(seq_length,), dtype=np.int64)\n', (11697, 11734), True, 'import numpy as np\n'), ((13030, 13065), 'numpy.concatenate', 'np.concatenate', (['predictions'], {'axis': '(0)'}), '(predictions, axis=0)\n', (13044, 13065), True, 'import numpy as np\n'), ((13079, 13109), 'numpy.concatenate', 'np.concatenate', (['labels'], {'axis': '(0)'}), '(labels, axis=0)\n', (13093, 13109), True, 'import numpy as np\n'), ((3039, 3064), 'numpy.any', 'np.any', (['budget_comparison'], {}), '(budget_comparison)\n', (3045, 3064), True, 'import numpy as np\n'), ((5801, 5827), 'numpy.argmax', 'np.argmax', (['logits'], {'axis': '(-1)'}), '(logits, axis=-1)\n', (5810, 5827), True, 'import numpy as np\n'), ((7704, 7745), 'numpy.argmax', 'np.argmax', (['model_results[LOGITS]'], {'axis': '(-1)'}), '(model_results[LOGITS], axis=-1)\n', (7713, 7745), True, 'import numpy as np\n'), ((9820, 9861), 'numpy.argmax', 'np.argmax', (['model_results[LOGITS]'], {'axis': '(-1)'}), '(model_results[LOGITS], axis=-1)\n', (9829, 9861), True, 'import numpy as np\n'), ((10186, 10232), 'numpy.bincount', 'np.bincount', (['num_samples'], {'minlength': 'seq_length'}), '(num_samples, minlength=seq_length)\n', (10197, 10232), True, 'import numpy as np\n'), ((10680, 10701), 'numpy.sum', 'np.sum', (['sample_counts'], {}), '(sample_counts)\n', (10686, 10701), True, 'import numpy as np\n'), ((12419, 12460), 'numpy.argmax', 'np.argmax', (['model_results[LOGITS]'], {'axis': '(-1)'}), '(model_results[LOGITS], axis=-1)\n', (12428, 12460), True, 'import numpy as np\n'), ((12828, 12874), 'numpy.bincount', 'np.bincount', (['num_samples'], {'minlength': 'seq_length'}), '(num_samples, minlength=seq_length)\n', (12839, 12874), True, 'import numpy as np\n'), ((13323, 13344), 'numpy.sum', 'np.sum', (['sample_counts'], {}), '(sample_counts)\n', (13329, 13344), True, 'import 
numpy as np\n'), ((3208, 3234), 'numpy.argmax', 'np.argmax', (['masked_accuracy'], {}), '(masked_accuracy)\n', (3217, 3234), True, 'import numpy as np\n'), ((3274, 3300), 'numpy.argmin', 'np.argmin', (['power_estimates'], {}), '(power_estimates)\n', (3283, 3300), True, 'import numpy as np\n'), ((12761, 12799), 'numpy.count_nonzero', 'np.count_nonzero', (['phase_gates'], {'axis': '(-1)'}), '(phase_gates, axis=-1)\n', (12777, 12799), True, 'import numpy as np\n'), ((1075, 1107), 'utils.file_utils.read_by_file_suffix', 'read_by_file_suffix', (['output_file'], {}), '(output_file)\n', (1094, 1107), False, 'from utils.file_utils import save_by_file_suffix, read_by_file_suffix\n'), ((6184, 6221), 'numpy.isclose', 'np.isclose', (['level_predictions', 'labels'], {}), '(level_predictions, labels)\n', (6194, 6221), True, 'import numpy as np\n'), ((8051, 8088), 'numpy.isclose', 'np.isclose', (['level_predictions', 'labels'], {}), '(level_predictions, labels)\n', (8061, 8088), True, 'import numpy as np\n'), ((10503, 10534), 'numpy.isclose', 'np.isclose', (['predictions', 'labels'], {}), '(predictions, labels)\n', (10513, 10534), True, 'import numpy as np\n'), ((13146, 13177), 'numpy.isclose', 'np.isclose', (['predictions', 'labels'], {}), '(predictions, labels)\n', (13156, 13177), True, 'import numpy as np\n'), ((5906, 5929), 'numpy.array', 'np.array', (['batch[OUTPUT]'], {}), '(batch[OUTPUT])\n', (5914, 5929), True, 'import numpy as np\n'), ((7824, 7847), 'numpy.array', 'np.array', (['batch[OUTPUT]'], {}), '(batch[OUTPUT])\n', (7832, 7847), True, 'import numpy as np\n'), ((10103, 10145), 'numpy.sum', 'np.sum', (['model_results[SKIP_GATES]'], {'axis': '(-1)'}), '(model_results[SKIP_GATES], axis=-1)\n', (10109, 10145), True, 'import numpy as np\n'), ((10295, 10318), 'numpy.array', 'np.array', (['batch[OUTPUT]'], {}), '(batch[OUTPUT])\n', (10303, 10318), True, 'import numpy as np\n'), ((12938, 12961), 'numpy.array', 'np.array', (['batch[OUTPUT]'], {}), '(batch[OUTPUT])\n', 
(12946, 12961), True, 'import numpy as np\n')] |
'''
Resampling methods
==================
'''
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import datasets
import sklearn.linear_model as lm
from sklearn.model_selection import train_test_split, KFold, PredefinedSplit
from sklearn.model_selection import cross_val_score, GridSearchCV
import sklearn.metrics as metrics
# Simulated regression data: 100 samples, 100 features, only 10 informative.
X, y = datasets.make_regression(
    n_samples=100, n_features=100, n_informative=10, random_state=42)
# %%
# Train, validation and test sets
# -------------------------------
#
# Machine learning algorithms overfit training data. Predictive performances **MUST** be evaluated on an independent hold-out dataset.
#
# .. figure:: ../images/train_val_test_cv.png
# :alt: Train, validation and test sets.
#
# 1. **Training dataset**: Dataset used to fit the model
# (set the model parameters like weights). The *training error* can be
# easily calculated by applying the statistical learning method to the
# observations used in its training. But because of overfitting, the
# **training error rate can dramatically underestimate the error** that
# would be obtained on new samples.
# 2. **Validation dataset**: Dataset used to provide an unbiased evaluation
# of a model fit on the training dataset while
# **tuning model hyperparameters**, ie. **model selection**.
# The validation error is the average error that results from a learning
# method to predict the response on a new (validation) samples that is,
# on samples that were not used in training the method.
# 3. **Test dataset**: Dataset used to provide an unbiased
# **evaluation of a final model** fitted on the training dataset.
# It is only used once a model is completely trained (using the train and
# validation sets).
#
# What is the Difference Between Test and Validation Datasets? by
# [<NAME>](https://machinelearningmastery.com/difference-test-validation-datasets/)
#
# Thus the original dataset is generally split in a training, validation and a
# test data sets. Large training+validation set (80%) small test set (20%) might
# provide a poor estimation of the predictive performances (same argument
# stands for train vs validation samples). On the contrary, large test set and
# small training set might produce a poorly estimated learner.
# This is why, on situation where we cannot afford such split, cross-validation
# scheme can be use for model selection or/and for model evaluation.
#
# If sample size is limited, train/validation/test split may not be possible.
# **Cross Validation (CV)** (see below) can be used to replace:
#
# - Outer (train/test) split of model evaluation.
# - Inner train/validation split of model selection (more frequent situation).
# - Inner and outer splits, leading to two nested CV.
# %%
# Split dataset in train/test sets for model evaluation
# -----------------------------------------------------
#
# Outer split: 75% train / 25% test, shuffled with a fixed seed.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.25, shuffle=True, random_state=42)

# Fit a ridge regression on the training part only.
mod = lm.Ridge(alpha=10)
mod.fit(X_train, y_train)

# Evaluate once on the held-out test set.
y_pred_test = mod.predict(X_test)
print("Test R2: %.2f" % metrics.r2_score(y_test, y_pred_test))
# %%
# Train/validation/test splits: model selection and model evaluation
# ------------------------------------------------------------------
#
# The **Grid search procedure** (`GridSearchCV`) performs a
# model selection of the best **hyper-parameters** :math:`\alpha` over a grid of possible values.
# Train set is "splitted (inner split) into train/validation sets.
#
# **Model selection with grid search procedure:**
#
# 1. Fit the learner (\ie. estimate **parameters** :math:`\mathbf{\Omega}_k`)
# on training set: :math:`\mathbf{X}_{train}, \mathbf{y}_{train} \rightarrow f_{\alpha_k, \mathbf{\Omega}_k}(.)`
# 2. Evaluate the model on the validation set and keep the hyper-parameter(s) that
# minimises the error measure :math:`\alpha_* = \arg \min L(f_{\alpha_k, \mathbf{\Omega}_k}(\mathbf{X}_{val}), \mathbf{y}_{val})`
# 3. Refit the learner on all training + validation data,
# :math:`\mathbf{X}_{train \cup val}, \mathbf{y}_{train \cup val}`,
# using the best hyper parameters (:math:`\alpha_*`): :math:`\rightarrow f_{\alpha_*, \mathbf{\Omega}_*}(.)`
#
# **Model evaluation:** on the test set:
# :math:`L(f_{\alpha_*, \mathbf{\Omega}_*}(\mathbf{X}_{test}), \mathbf{y}_{test})`
# Inner split: indices of train vs. validation samples within the train set.
train_idx, validation_idx = train_test_split(np.arange(X_train.shape[0]),
                                             test_size=0.25, shuffle=True,
                                             random_state=42)
# Fix: PredefinedSplit expects one fold label PER SAMPLE (-1 = never in a
# test fold). The original passed the validation *indices* directly, which
# sklearn would read as per-sample fold labels for the wrong samples.
test_fold = np.full(X_train.shape[0], -1)
test_fold[validation_idx] = 0  # single validation fold
split_inner = PredefinedSplit(test_fold=test_fold)
print("Train set size: %i" % X_train[train_idx].shape[0])
print("Validation set size: %i" % X_train[validation_idx].shape[0])
print("Test set size: %i" % X_test.shape[0])
# Grid search over the ridge penalty, validated on the predefined split.
lm_cv = GridSearchCV(lm.Ridge(), {'alpha': 10. ** np.arange(-3, 3)},
                     cv=split_inner, n_jobs=5)
# Fit, including model selection with internal train/validation split
lm_cv.fit(X_train, y_train)
# Evaluate the refitted best model on the untouched test set.
y_pred_test = lm_cv.predict(X_test)
print("Test R2: %.2f" % metrics.r2_score(y_test, y_pred_test))
# %%
# Cross-Validation (CV)
# ---------------------
#
# If sample size is limited, train/validation/test split may not be possible.
# **Cross Validation (CV)** can be used to replace train/validation split
# and/or train+validation / test split.
#
# Cross-Validation scheme randomly divides the set of observations into
# *K* groups, or **folds**, of approximately equal size.
# The first fold is treated as a validation set, and the method
# :math:`f()` is fitted on the remaining union of *K - 1* folds:
# (:math:`f(\boldsymbol{X}_{-K}, \boldsymbol{y}_{-K})`).
# The measure of performance (the score function :math:`\mathcal{S}`),
# either a error measure or an correct prediction measure is an average
# of a loss error or correct prediction measure, noted :math:`\mathcal{L}`,
# between a true target value and the predicted target value.
# The score function is evaluated of the on the observations in the held-out
# fold. For each sample *i* we consider the model estimated
# :math:`f(\boldsymbol{X}_{-k(i)}, \boldsymbol{y}_{-k(i)}` on the data set
# without the group *k* that contains *i* noted *-k(i)*.
# This procedure is repeated *K* times; each time, a different group of
# observations is treated as a test set.
# Then we compare the predicted value
# (:math:`f_{-k(i)}(\boldsymbol{x}_i) = \hat{y_i})`
# with true value :math:`y_i` using a Error or Loss function
# :math:`\mathcal{L}(y, \hat{y})`.
#
# For 10-fold we can either average over 10 values (Macro measure) or
# concatenate the 10 experiments and compute the micro measures.
#
# Two strategies [micro vs macro estimates](https://stats.stackexchange.com/questions/34611/meanscores-vs-scoreconcatenation-in-cross-validation):
#
# - **Micro measure: average(individual scores)**: compute a score
# :math:`\mathcal{S}` for each sample and average over all samples.
#   It is similar to **average score(concatenation)**: an averaged score
# computed over all concatenated samples.
#
# .. raw:: latex
# \mathcal{S}(f) = \frac{1}{N} \sum_i^N \mathcal{L}\left(y_i, f(\boldsymbol{x}_{-k(i)}, \boldsymbol{y}_{-k(i)}) \right).
#
# - **Macro measure mean(CV scores)** (the most commonly used method):
#   compute a score :math:`\mathcal{S}` on each fold *k* and average
#   across folds:
#
# .. raw:: latex
# \begin{align*}
# \mathcal{S}(f) &= \frac{1}{K} \sum_k^K \mathcal{S}_k(f).\\
# \mathcal{S}(f) &= \frac{1}{K} \sum_k^K \frac{1}{N_k} \sum_{i \in k} \mathcal{L}\left(y_i, f(\boldsymbol{x}_{-k(i)}, \boldsymbol{y}_{-k(i)}) \right).
# \end{align*}
#
# These two measures (an average of average vs. a global average) are generaly
# similar. They may differ slightly is folds are of different sizes.
# This validation scheme is known as the **K-Fold CV**.
# Typical choices of *K* are 5 or 10, [Kohavi 1995].
# The extreme case where *K = N* is known as **leave-one-out cross-validation,
# LOO-CV**.
# %%
# CV for regression
# ~~~~~~~~~~~~~~~~~
#
# Usually the error function :math:`\mathcal{L}()` is the r-squared score.
# However other function (MAE, MSE) can be used.
#
# **CV with explicit loop:**
from sklearn.model_selection import KFold

estimator = lm.Ridge(alpha=10)
cv = KFold(n_splits=5, shuffle=True, random_state=42)

# Accumulate per-fold r-squared on both the fitted folds and the held-out fold.
r2_train, r2_test = [], []
for train, test in cv.split(X):
    X_tr, y_tr = X[train, :], y[train]
    X_te, y_te = X[test, :], y[test]
    estimator.fit(X_tr, y_tr)
    r2_train.append(metrics.r2_score(y_tr, estimator.predict(X_tr)))
    r2_test.append(metrics.r2_score(y_te, estimator.predict(X_te)))

# Macro measure: mean of the fold-level scores.
print("Train r2:%.2f" % np.mean(r2_train))
print("Test r2:%.2f" % np.mean(r2_test))
# %%
# Scikit-learn provides user-friendly function to perform CV:
#
# `cross_val_score()`: single metric
from sklearn.model_selection import cross_val_score

# Single-metric CV in one call (integer cv -> default KFold).
scores = cross_val_score(estimator=estimator, X=X, y=y, cv=5)
print("Test r2:%.2f" % scores.mean())

# Same, but with an explicit shuffled, seeded splitter.
cv = KFold(n_splits=5, shuffle=True, random_state=42)
scores = cross_val_score(estimator=estimator, X=X, y=y, cv=cv)
print("Test r2:%.2f" % scores.mean())
# %%
# `cross_validate()`: several metrics at once, plus fit/score timings.
from sklearn.model_selection import cross_validate

scores = cross_validate(estimator=mod, X=X, y=y, cv=cv,
                       scoring=['r2', 'neg_mean_absolute_error'])
print("Test R2:%.2f; MAE:%.2f" % (scores['test_r2'].mean(),
                                  -scores['test_neg_mean_absolute_error'].mean()))
# %%
# CV for classification: stratifiy for the target label
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# With classification problems it is essential to sample folds where each
# set contains approximately the same percentage of samples of each target
# class as the complete set. This is called **stratification**.
# In this case, we will use ``StratifiedKFold`` with is a variation of
# k-fold which returns stratified folds.
# Usually the error function :math:`L()` are, at least, the sensitivity
# and the specificity. However other function could be used.
#
# **CV with explicit loop**:
from sklearn.model_selection import StratifiedKFold

# Classification dataset; StratifiedKFold keeps class proportions per fold.
X, y = datasets.make_classification(n_samples=100, n_features=100, shuffle=True,
                            n_informative=10, random_state=42)
mod = lm.LogisticRegression(C=1, solver='lbfgs')
cv = StratifiedKFold(n_splits=5)
# Lists to store scores by folds (for macro measure only).
# Fix: the original appended ROC-AUC into `bacc` and balanced accuracy into
# `auc` (names swapped); each list now holds the metric it is named after.
# The printed values are unchanged.
bacc, auc = [], []
for train, test in cv.split(X, y):
    mod.fit(X[train, :], y[train])
    auc.append(metrics.roc_auc_score(y[test], mod.decision_function(X[test, :])))
    bacc.append(metrics.balanced_accuracy_score(y[test], mod.predict(X[test, :])))
print("Test AUC:%.2f; bACC:%.2f" % (np.mean(auc), np.mean(bacc)))
# %%
# `cross_val_score()`: single metric
scores = cross_val_score(estimator=mod, X=X, y=y, cv=5)
print("Test ACC:%.2f" % scores.mean())
# %%
# Provide your own CV and score
def balanced_acc(estimator, X, y, **kwargs):
    """Balanced accuracy scorer.

    Returns the mean of the per-class recalls of `estimator`'s predictions
    on (X, y), i.e. the balanced accuracy; usable as a `scoring` callable.
    (Fixes the "acuracy" typo in the original docstring.)
    """
    return metrics.recall_score(y, estimator.predict(X), average=None).mean()


scores = cross_val_score(estimator=mod, X=X, y=y, cv=cv,
                        scoring=balanced_acc)
print("Test bACC:%.2f" % scores.mean())
# %%
# `cross_validate()`: multi metric, + time, etc.
from sklearn.model_selection import cross_validate

# Multi-metric CV: returns per-fold scores plus fit/score timings.
scores = cross_validate(estimator=mod, X=X, y=y, cv=cv,
                       scoring=['balanced_accuracy', 'roc_auc'])
print("Test AUC:%.2f; bACC:%.2f" % (scores['test_roc_auc'].mean(),
                                    scores['test_balanced_accuracy'].mean()))
# %%
# Cross-validation for model selection
# ------------------------------------
#
# Combine CV and grid search:
# Re-split (inner split) train set into CV folds train/validation folds and
# build a `GridSearchCV` out of it:
# Outer split:
# Outer split for the final, unbiased evaluation.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.25, shuffle=True, random_state=42)

# Inner CV drives the hyper-parameter (C) selection.
cv_inner = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
lm_cv = GridSearchCV(lm.LogisticRegression(), {'C': 10. ** np.arange(-3, 3)},
                     cv=cv_inner, n_jobs=5)
# Fit, including model selection with the internal CV.
lm_cv.fit(X_train, y_train)
# Evaluate the selected (refitted) model on the held-out test set.
y_pred_test = lm_cv.predict(X_test)
print("Test bACC: %.2f" % metrics.balanced_accuracy_score(y_test, y_pred_test))
# %%
# Cross-validation for both model (outer) evaluation and model (inner) selection
# ------------------------------------------------------------------------------
cv_outer = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
cv_inner = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
# Cross-validation for model (inner) selection.
# Fix: this is a classification task, so tune a LogisticRegression over C
# (the original left a copy-pasted Ridge/alpha grid from the regression part).
lm_cv = GridSearchCV(lm.LogisticRegression(), {'C': 10. ** np.arange(-3, 3)},
                     cv=cv_inner, n_jobs=5)
# Cross-validation for model (outer) evaluation.
# Fix: evaluate the grid-searched model `lm_cv` (the original evaluated the
# plain `mod`, so the inner selection had no effect on the reported scores).
scores = cross_validate(estimator=lm_cv, X=X, y=y, cv=cv_outer,
                       scoring=['balanced_accuracy', 'roc_auc'])
print("Test AUC:%.2f; bACC:%.2f, Time: %.2fs" % (scores['test_roc_auc'].mean(),
                                                 scores['test_balanced_accuracy'].mean(),
                                                 scores['fit_time'].sum()))
# %%
# Models with built-in cross-validation
# --------------------------------------
#
# Let sklearn select the best parameters over a default grid.
#
# **Classification**
print("== Logistic Ridge (L2 penalty) ==")
mod_cv = lm.LogisticRegressionCV(class_weight='balanced', scoring='balanced_accuracy',
n_jobs=-1, cv=5)
scores = cross_val_score(estimator=mod_cv, X=X, y=y, cv=5)
print("Test ACC:%.2f" % scores.mean())
# %%
# **Regression**
# Regression with 2 informative features out of 100; coef=True also returns
# the ground-truth coefficients.
X, y, coef = datasets.make_regression(n_samples=50, n_features=100, noise=10,
                         n_informative=2, random_state=42, coef=True)

# Each *CV estimator tunes its own regularisation path with internal CV;
# the outer cross_val_score measures generalisation.
for title, model in (
        ("== Ridge (L2 penalty) ==", lm.RidgeCV(cv=3)),
        ("== Lasso (L1 penalty) ==", lm.LassoCV(n_jobs=-1, cv=3)),
        ("== ElasticNet (L1 penalty) ==",
         lm.ElasticNetCV(l1_ratio=[.1, .5, .9], n_jobs=-1, cv=3))):
    print(title)
    scores = cross_val_score(estimator=model, X=X, y=y, cv=5)
    print("Test r2:%.2f" % scores.mean())
# %%
# Random Permutations: sample the null distribution
# -------------------------------------------------
#
# A permutation test is a type of non-parametric randomization test in which the null distribution of a test statistic is estimated by randomly permuting the observations.
#
# Permutation tests are highly attractive because they make no assumptions other than that the observations are independent and identically distributed under the null hypothesis.
#
# 1. Compute a observed statistic :math:`t_{obs}` on the data.
# 2. Use randomization to compute the distribution of :math:`t` under the null hypothesis: Perform :math:`N` random permutation of the data. For each sample of permuted data, :math:`i` the data compute the statistic :math:`t_i`. This procedure provides the distribution of *t* under the null hypothesis :math:`H_0`: :math:`P(t \vert H_0)`
# 3. Compute the p-value = :math:`P(t>t_{obs} | H_0) \approx \frac{\left\vert\{t_i \geq t_{obs}\}\right\vert}{N}`, where the :math:`t_i`'s include :math:`t_{obs}`.
#
# Example Ridge regression
#
# Sample the distributions of r-squared and coefficients of ridge regression under the null hypothesis. Simulated dataset:
# Regression dataset where first 2 features are predictives
# Simulated data: only the first `n_features_info` features carry signal.
np.random.seed(0)
n_features = 5
n_features_info = 2
n_samples = 100
X = np.random.randn(n_samples, n_features)
beta = np.zeros(n_features)
beta[:n_features_info] = 1
# Linear signal plus unit-variance Gaussian noise.
Xbeta = X @ beta
eps = np.random.randn(n_samples)
y = Xbeta + eps
# %%
# Random permutations
# -------------------
# Fit model on all data (!! risk of overfit)
# Fit model on all data (!! risk of overfit)
model = lm.RidgeCV()
model.fit(X, y)
print("Coefficients on all data:")
print(model.coef_)
# Random permutation loop
nperm = 1000  # !! Should be at least 1000 (to assess a p-value at 1%)
scores_names = ["r2"]
# Row 0 holds the observed (unpermuted) statistics; rows 1..nperm the null.
scores_perm = np.zeros((nperm + 1, len(scores_names)))
coefs_perm = np.zeros((nperm + 1, X.shape[1]))
scores_perm[0, :] = metrics.r2_score(y, model.predict(X))
coefs_perm[0, :] = model.coef_
orig_all = np.arange(X.shape[0])
for perm_i in range(1, nperm + 1):
    # Refit on randomly shuffled y: breaks any X-y association under H0.
    model.fit(X, np.random.permutation(y))
    y_pred = model.predict(X).ravel()
    scores_perm[perm_i, :] = metrics.r2_score(y, y_pred)
    coefs_perm[perm_i, :] = model.coef_
# One-tailed empirical p-value (the observed row counts itself, so p > 0).
pval_pred_perm = np.sum(scores_perm >= scores_perm[0]) / scores_perm.shape[0]
pval_coef_perm = np.sum(coefs_perm >= coefs_perm[0, :], axis=0) / coefs_perm.shape[0]
print("R2 p-value: %.3f" % pval_pred_perm)
# Fix: corrected the "Coeficients" typo in the printed label.
print("Coefficients p-values:", np.round(pval_coef_perm, 3))
# %%
# Compute p-values corrected for multiple comparisons using FWER max-T
# (Westfall and Young, 1993) procedure.
# FWER max-T correction (Westfall & Young): compare each observed coefficient
# to the null distribution of the per-permutation maximum over coefficients.
max_per_perm = coefs_perm.max(axis=1)
pval_coef_perm_tmax = np.array(
    [np.sum(max_per_perm >= coefs_perm[0, j])
     for j in range(coefs_perm.shape[1])]) / coefs_perm.shape[0]
print("P-values with FWER (Westfall and Young) correction")
print(pval_coef_perm_tmax)
# %%
# Plot distribution of third coefficient under null-hypothesis
# Coefficients 0 and 1 are significantly different from 0.
#
def hist_pvalue(perms, ax, name):
    """Histogram of a permutation null distribution on axis *ax*.

    Parameters
    ----------
    perms : 1d array
        Statistics under the null hypothesis; ``perms[0]`` is the
        observed (true) statistic.
    ax : matplotlib axis
        Axis drawn onto and returned.
    name : str
        Y-axis label.
    """
    n = perms.shape[0]
    at_least_as_extreme = perms >= perms[0]
    # One-tailed empirical p-value.
    pval = np.sum(at_least_as_extreme) / n
    # Uniform weights turn the raw counts into an empirical distribution.
    weights = np.ones(n) / n
    ax.hist([perms[at_least_as_extreme], perms], histtype='stepfilled',
            bins=100, label="p-val<%.3f" % pval,
            weights=[weights[at_least_as_extreme], weights])
    # Vertical line marks the observed statistic.
    ax.axvline(x=perms[0], color="k", linewidth=2)
    ax.set_ylabel(name)
    ax.legend()
    return ax
# One histogram per coefficient, stacked vertically.
n_coef = coefs_perm.shape[1]
fig, axes = plt.subplots(n_coef, 1, figsize=(12, 9))
for coef_idx, sub_ax in enumerate(axes):
    hist_pvalue(coefs_perm[:, coef_idx], sub_ax, str(coef_idx))
_ = axes[-1].set_xlabel("Coefficient distribution under null hypothesis")
# %%
# Exercise
#
# Given the logistic regression presented above and its validation given a 5 folds CV.
#
# 1. Compute the p-value associated with the prediction accuracy measured with 5CV using a permutation test.
#
# 2. Compute the p-value associated with the prediction accuracy using a parametric test.
# %%
# Bootstrapping
# -------------
#
# Bootstrapping is a statistical technique which consists in generating sample (called bootstrap samples) from an initial dataset of size *N* by randomly drawing with replacement *N* observations. It provides sub-samples with the same distribution than the original dataset. It aims to:
#
# 1. Assess the variability (standard error, [confidence intervals.](https://sebastianraschka.com/blog/2016/model-evaluation-selection-part2.html#the-bootstrap-method-and-empirical-confidence-intervals)) of performances scores or estimated parameters (see Efron et al. 1986).
# 2. Regularize model by fitting several models on bootstrap samples and averaging their predictions (see Baging and random-forest).
#
# A great advantage of bootstrap is its simplicity. It is a straightforward way to derive estimates of standard errors and confidence intervals for complex estimators of complex parameters of the distribution, such as percentile points, proportions, odds ratio, and correlation coefficients.
#
# 1. Perform :math:`B` sampling, with replacement, of the dataset.
# 2. For each sample :math:`i` fit the model and compute the scores.
# 3. Assess standard errors and confidence intervals of scores using the scores obtained on the :math:`B` resampled dataset. Or, average models predictions.
#
# References:
#
# [<NAME>, <NAME>. Bootstrap methods for standard errors, confidence intervals, and other measures of statistical accuracy. Stat Sci 1986;1:54–75](https://projecteuclid.org/download/pdf_1/euclid.ss/1177013815)
# Bootstrap loop
nboot = 100  # !! Should be at least 1000
scores_names = ["r2"]
scores_boot = np.zeros((nboot, len(scores_names)))
coefs_boot = np.zeros((nboot, X.shape[1]))
orig_all = np.arange(X.shape[0])
for boot_i in range(nboot):
    # Draw a bootstrap sample (with replacement); the rows never drawn
    # form an out-of-bag test set.
    boot_tr = np.random.choice(orig_all, size=len(orig_all), replace=True)
    boot_te = np.setdiff1d(orig_all, boot_tr, assume_unique=False)
    model.fit(X[boot_tr, :], y[boot_tr])
    y_pred = model.predict(X[boot_te, :]).ravel()
    scores_boot[boot_i, :] = metrics.r2_score(y[boot_te], y_pred)
    coefs_boot[boot_i, :] = model.coef_
# %%
# Compute Mean, SE, CI
# Coefficients 0 and 1 are significantly different from 0.
# Bootstrap distribution of the r-squared score: mean, SE and 95% CI.
scores_boot = pd.DataFrame(scores_boot, columns=scores_names)
scores_stat = scores_boot.describe(percentiles=[.975, .5, .025])
print("r-squared: Mean=%.2f, SE=%.2f, CI=(%.2f %.2f)"
      % tuple(scores_stat.loc[["mean", "std", "2.5%", "97.5%"], "r2"]))

# Bootstrap distribution of each coefficient.
coefs_boot = pd.DataFrame(coefs_boot)
coefs_stat = coefs_boot.describe(percentiles=[.975, .5, .025])
print("Coefficients distribution")
print(coefs_stat)
# %%
# Plot coefficient distribution
df = pd.DataFrame(coefs_boot)
staked = pd.melt(df, var_name="Variable", value_name="Coef. distribution")
sns.set_theme(style="whitegrid")
ax = sns.violinplot(x="Variable", y="Coef. distribution", data=staked)
_ = ax.axhline(0, ls='--', lw=2, color="black")
# %%
# Parallel computation with joblib
# --------------------------------
#
# Dataset
# Re-import everything this standalone section needs.
import numpy as np
from sklearn import datasets
import sklearn.linear_model as lm
import sklearn.metrics as metrics
from sklearn.model_selection import StratifiedKFold

# Small classification dataset and a stratified 5-fold splitter.
X, y = datasets.make_classification(n_samples=20, n_features=5, n_informative=2, random_state=42)
cv = StratifiedKFold(n_splits=5)
# %%
# Use `cross_validate` function
from sklearn.model_selection import cross_validate

estimator = lm.LogisticRegression(C=1, solver='lbfgs')
cv_results = cross_validate(estimator, X, y, cv=cv, n_jobs=5)
print(np.mean(cv_results['test_score']), cv_results['test_score'])
# %%
# Sequential computation
#
# If we want full control of the operations performed within each fold (retrieve the model parameters, etc.), we would like to parallelize the following sequential code:
# In[22]:
estimator = lm.LogisticRegression(C=1, solver='lbfgs')

y_test_pred_seq = np.zeros(len(y))  # Store predictions in the original order
coefs_seq = list()
for train, test in cv.split(X, y):
    # Fit on the training fold, predict the held-out fold.
    X_train, X_test, y_train, y_test = X[train, :], X[test, :], y[train], y[test]
    estimator.fit(X_train, y_train)
    y_test_pred_seq[test] = estimator.predict(X_test)
    coefs_seq.append(estimator.coef_)

# Per-fold accuracies computed from the stored out-of-fold predictions.
test_accs = [metrics.accuracy_score(y[test], y_test_pred_seq[test])
             for train, test in cv.split(X, y)]
print(np.mean(test_accs), test_accs)

coefs_cv = np.array(coefs_seq)
print(coefs_cv)
print(coefs_cv.mean(axis=0))
print("Std Err of the coef")
print(coefs_cv.std(axis=0) / np.sqrt(coefs_cv.shape[0]))
# %%
# Parallel computation with joblib
# --------------------------------
from joblib import Parallel, delayed
from sklearn.base import is_classifier, clone
def _split_fit_predict(estimator, X, y, train, test):
X_train, X_test, y_train, y_test = X[train, :], X[test, :], y[train], y[test]
estimator.fit(X_train, y_train)
return [estimator.predict(X_test), estimator.coef_]
estimator = lm.LogisticRegression(C=1, solver='lbfgs')

# Run one fit/predict job per fold, in parallel, on cloned estimators.
parallel = Parallel(n_jobs=5)
cv_ret = parallel(
    delayed(_split_fit_predict)(
        clone(estimator), X, y, train, test)
    for train, test in cv.split(X, y))
y_test_pred_cv, coefs_cv = zip(*cv_ret)

# Retrieve predictions in the original order
y_test_pred = np.zeros(len(y))
for i, (train, test) in enumerate(cv.split(X, y)):
    y_test_pred[test] = y_test_pred_cv[i]
test_accs = [metrics.accuracy_score(y[test], y_test_pred[test])
             for train, test in cv.split(X, y)]
print(np.mean(test_accs), test_accs)
# %%
# Check the parallel run reproduces the sequential predictions/coefficients.
assert np.all(y_test_pred == y_test_pred_seq)
assert np.allclose(np.array(coefs_cv).squeeze(), np.array(coefs_seq).squeeze())
| [
"numpy.sqrt",
"sklearn.metrics.balanced_accuracy_score",
"sklearn.model_selection.StratifiedKFold",
"numpy.array",
"sklearn.linear_model.LogisticRegressionCV",
"sklearn.linear_model.RidgeCV",
"seaborn.violinplot",
"sklearn.model_selection.KFold",
"sklearn.metrics.r2_score",
"numpy.arange",
"nump... | [((389, 483), 'sklearn.datasets.make_regression', 'datasets.make_regression', ([], {'n_samples': '(100)', 'n_features': '(100)', 'n_informative': '(10)', 'random_state': '(42)'}), '(n_samples=100, n_features=100, n_informative=10,\n random_state=42)\n', (413, 483), False, 'from sklearn import datasets\n'), ((3002, 3071), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.25)', 'shuffle': '(True)', 'random_state': '(42)'}), '(X, y, test_size=0.25, shuffle=True, random_state=42)\n', (3018, 3071), False, 'from sklearn.model_selection import train_test_split, KFold, PredefinedSplit\n'), ((3079, 3097), 'sklearn.linear_model.Ridge', 'lm.Ridge', ([], {'alpha': '(10)'}), '(alpha=10)\n', (3087, 3097), True, 'import sklearn.linear_model as lm\n'), ((4654, 4695), 'sklearn.model_selection.PredefinedSplit', 'PredefinedSplit', ([], {'test_fold': 'validation_idx'}), '(test_fold=validation_idx)\n', (4669, 4695), False, 'from sklearn.model_selection import train_test_split, KFold, PredefinedSplit\n'), ((8344, 8362), 'sklearn.linear_model.Ridge', 'lm.Ridge', ([], {'alpha': '(10)'}), '(alpha=10)\n', (8352, 8362), True, 'import sklearn.linear_model as lm\n'), ((8369, 8417), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': '(5)', 'shuffle': '(True)', 'random_state': '(42)'}), '(n_splits=5, shuffle=True, random_state=42)\n', (8374, 8417), False, 'from sklearn.model_selection import KFold\n'), ((8940, 8992), 'sklearn.model_selection.cross_val_score', 'cross_val_score', ([], {'estimator': 'estimator', 'X': 'X', 'y': 'y', 'cv': '(5)'}), '(estimator=estimator, X=X, y=y, cv=5)\n', (8955, 8992), False, 'from sklearn.model_selection import cross_val_score\n'), ((9038, 9086), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': '(5)', 'shuffle': '(True)', 'random_state': '(42)'}), '(n_splits=5, shuffle=True, random_state=42)\n', (9043, 9086), False, 'from sklearn.model_selection import KFold\n'), ((9096, 9149), 
'sklearn.model_selection.cross_val_score', 'cross_val_score', ([], {'estimator': 'estimator', 'X': 'X', 'y': 'y', 'cv': 'cv'}), '(estimator=estimator, X=X, y=y, cv=cv)\n', (9111, 9149), False, 'from sklearn.model_selection import cross_val_score\n'), ((9307, 9400), 'sklearn.model_selection.cross_validate', 'cross_validate', ([], {'estimator': 'mod', 'X': 'X', 'y': 'y', 'cv': 'cv', 'scoring': "['r2', 'neg_mean_absolute_error']"}), "(estimator=mod, X=X, y=y, cv=cv, scoring=['r2',\n 'neg_mean_absolute_error'])\n", (9321, 9400), False, 'from sklearn.model_selection import cross_validate\n'), ((10236, 10348), 'sklearn.datasets.make_classification', 'datasets.make_classification', ([], {'n_samples': '(100)', 'n_features': '(100)', 'shuffle': '(True)', 'n_informative': '(10)', 'random_state': '(42)'}), '(n_samples=100, n_features=100, shuffle=True,\n n_informative=10, random_state=42)\n', (10264, 10348), False, 'from sklearn import datasets\n'), ((10388, 10430), 'sklearn.linear_model.LogisticRegression', 'lm.LogisticRegression', ([], {'C': '(1)', 'solver': '"""lbfgs"""'}), "(C=1, solver='lbfgs')\n", (10409, 10430), True, 'import sklearn.linear_model as lm\n'), ((10437, 10464), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': '(5)'}), '(n_splits=5)\n', (10452, 10464), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((10900, 10946), 'sklearn.model_selection.cross_val_score', 'cross_val_score', ([], {'estimator': 'mod', 'X': 'X', 'y': 'y', 'cv': '(5)'}), '(estimator=mod, X=X, y=y, cv=5)\n', (10915, 10946), False, 'from sklearn.model_selection import cross_val_score\n'), ((11195, 11264), 'sklearn.model_selection.cross_val_score', 'cross_val_score', ([], {'estimator': 'mod', 'X': 'X', 'y': 'y', 'cv': 'cv', 'scoring': 'balanced_acc'}), '(estimator=mod, X=X, y=y, cv=cv, scoring=balanced_acc)\n', (11210, 11264), False, 'from sklearn.model_selection import cross_val_score\n'), ((11449, 11541), 
'sklearn.model_selection.cross_validate', 'cross_validate', ([], {'estimator': 'mod', 'X': 'X', 'y': 'y', 'cv': 'cv', 'scoring': "['balanced_accuracy', 'roc_auc']"}), "(estimator=mod, X=X, y=y, cv=cv, scoring=['balanced_accuracy',\n 'roc_auc'])\n", (11463, 11541), False, 'from sklearn.model_selection import cross_validate\n'), ((11994, 12063), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.25)', 'shuffle': '(True)', 'random_state': '(42)'}), '(X, y, test_size=0.25, shuffle=True, random_state=42)\n', (12010, 12063), False, 'from sklearn.model_selection import train_test_split, KFold, PredefinedSplit\n'), ((12076, 12134), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': '(5)', 'shuffle': '(True)', 'random_state': '(42)'}), '(n_splits=5, shuffle=True, random_state=42)\n', (12091, 12134), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((12684, 12742), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': '(5)', 'shuffle': '(True)', 'random_state': '(42)'}), '(n_splits=5, shuffle=True, random_state=42)\n', (12699, 12742), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((12754, 12812), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': '(5)', 'shuffle': '(True)', 'random_state': '(42)'}), '(n_splits=5, shuffle=True, random_state=42)\n', (12769, 12812), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((13032, 13131), 'sklearn.model_selection.cross_validate', 'cross_validate', ([], {'estimator': 'mod', 'X': 'X', 'y': 'y', 'cv': 'cv_outer', 'scoring': "['balanced_accuracy', 'roc_auc']"}), "(estimator=mod, X=X, y=y, cv=cv_outer, scoring=[\n 'balanced_accuracy', 'roc_auc'])\n", (13046, 13131), False, 'from sklearn.model_selection import cross_validate\n'), ((13607, 13706), 'sklearn.linear_model.LogisticRegressionCV', 'lm.LogisticRegressionCV', ([], {'class_weight': '"""balanced"""', 
'scoring': '"""balanced_accuracy"""', 'n_jobs': '(-1)', 'cv': '(5)'}), "(class_weight='balanced', scoring=\n 'balanced_accuracy', n_jobs=-1, cv=5)\n", (13630, 13706), True, 'import sklearn.linear_model as lm\n'), ((13744, 13793), 'sklearn.model_selection.cross_val_score', 'cross_val_score', ([], {'estimator': 'mod_cv', 'X': 'X', 'y': 'y', 'cv': '(5)'}), '(estimator=mod_cv, X=X, y=y, cv=5)\n', (13759, 13793), False, 'from sklearn.model_selection import cross_val_score\n'), ((13871, 13984), 'sklearn.datasets.make_regression', 'datasets.make_regression', ([], {'n_samples': '(50)', 'n_features': '(100)', 'noise': '(10)', 'n_informative': '(2)', 'random_state': '(42)', 'coef': '(True)'}), '(n_samples=50, n_features=100, noise=10,\n n_informative=2, random_state=42, coef=True)\n', (13895, 13984), False, 'from sklearn import datasets\n'), ((14049, 14065), 'sklearn.linear_model.RidgeCV', 'lm.RidgeCV', ([], {'cv': '(3)'}), '(cv=3)\n', (14059, 14065), True, 'import sklearn.linear_model as lm\n'), ((14075, 14123), 'sklearn.model_selection.cross_val_score', 'cross_val_score', ([], {'estimator': 'model', 'X': 'X', 'y': 'y', 'cv': '(5)'}), '(estimator=model, X=X, y=y, cv=5)\n', (14090, 14123), False, 'from sklearn.model_selection import cross_val_score\n'), ((14206, 14233), 'sklearn.linear_model.LassoCV', 'lm.LassoCV', ([], {'n_jobs': '(-1)', 'cv': '(3)'}), '(n_jobs=-1, cv=3)\n', (14216, 14233), True, 'import sklearn.linear_model as lm\n'), ((14243, 14291), 'sklearn.model_selection.cross_val_score', 'cross_val_score', ([], {'estimator': 'model', 'X': 'X', 'y': 'y', 'cv': '(5)'}), '(estimator=model, X=X, y=y, cv=5)\n', (14258, 14291), False, 'from sklearn.model_selection import cross_val_score\n'), ((14379, 14437), 'sklearn.linear_model.ElasticNetCV', 'lm.ElasticNetCV', ([], {'l1_ratio': '[0.1, 0.5, 0.9]', 'n_jobs': '(-1)', 'cv': '(3)'}), '(l1_ratio=[0.1, 0.5, 0.9], n_jobs=-1, cv=3)\n', (14394, 14437), True, 'import sklearn.linear_model as lm\n'), ((14444, 14492), 
'sklearn.model_selection.cross_val_score', 'cross_val_score', ([], {'estimator': 'model', 'X': 'X', 'y': 'y', 'cv': '(5)'}), '(estimator=model, X=X, y=y, cv=5)\n', (14459, 14492), False, 'from sklearn.model_selection import cross_val_score\n'), ((15755, 15772), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (15769, 15772), True, 'import numpy as np\n'), ((15828, 15851), 'numpy.random.randn', 'np.random.randn', (['(100)', '(5)'], {}), '(100, 5)\n', (15843, 15851), True, 'import numpy as np\n'), ((15859, 15879), 'numpy.zeros', 'np.zeros', (['n_features'], {}), '(n_features)\n', (15867, 15879), True, 'import numpy as np\n'), ((15915, 15930), 'numpy.dot', 'np.dot', (['X', 'beta'], {}), '(X, beta)\n', (15921, 15930), True, 'import numpy as np\n'), ((15937, 15963), 'numpy.random.randn', 'np.random.randn', (['n_samples'], {}), '(n_samples)\n', (15952, 15963), True, 'import numpy as np\n'), ((16084, 16096), 'sklearn.linear_model.RidgeCV', 'lm.RidgeCV', ([], {}), '()\n', (16094, 16096), True, 'import sklearn.linear_model as lm\n'), ((16355, 16388), 'numpy.zeros', 'np.zeros', (['(nperm + 1, X.shape[1])'], {}), '((nperm + 1, X.shape[1]))\n', (16363, 16388), True, 'import numpy as np\n'), ((16491, 16512), 'numpy.arange', 'np.arange', (['X.shape[0]'], {}), '(X.shape[0])\n', (16500, 16512), True, 'import numpy as np\n'), ((18265, 18305), 'matplotlib.pyplot.subplots', 'plt.subplots', (['n_coef', '(1)'], {'figsize': '(12, 9)'}), '(n_coef, 1, figsize=(12, 9))\n', (18277, 18305), True, 'import matplotlib.pyplot as plt\n'), ((20469, 20498), 'numpy.zeros', 'np.zeros', (['(nboot, X.shape[1])'], {}), '((nboot, X.shape[1]))\n', (20477, 20498), True, 'import numpy as np\n'), ((20511, 20532), 'numpy.arange', 'np.arange', (['X.shape[0]'], {}), '(X.shape[0])\n', (20520, 20532), True, 'import numpy as np\n'), ((21051, 21098), 'pandas.DataFrame', 'pd.DataFrame', (['scores_boot'], {'columns': 'scores_names'}), '(scores_boot, columns=scores_names)\n', (21063, 21098), True, 
'import pandas as pd\n'), ((21304, 21328), 'pandas.DataFrame', 'pd.DataFrame', (['coefs_boot'], {}), '(coefs_boot)\n', (21316, 21328), True, 'import pandas as pd\n'), ((21489, 21513), 'pandas.DataFrame', 'pd.DataFrame', (['coefs_boot'], {}), '(coefs_boot)\n', (21501, 21513), True, 'import pandas as pd\n'), ((21523, 21588), 'pandas.melt', 'pd.melt', (['df'], {'var_name': '"""Variable"""', 'value_name': '"""Coef. distribution"""'}), "(df, var_name='Variable', value_name='Coef. distribution')\n", (21530, 21588), True, 'import pandas as pd\n'), ((21589, 21621), 'seaborn.set_theme', 'sns.set_theme', ([], {'style': '"""whitegrid"""'}), "(style='whitegrid')\n", (21602, 21621), True, 'import seaborn as sns\n'), ((21627, 21692), 'seaborn.violinplot', 'sns.violinplot', ([], {'x': '"""Variable"""', 'y': '"""Coef. distribution"""', 'data': 'staked'}), "(x='Variable', y='Coef. distribution', data=staked)\n", (21641, 21692), True, 'import seaborn as sns\n'), ((22005, 22099), 'sklearn.datasets.make_classification', 'datasets.make_classification', ([], {'n_samples': '(20)', 'n_features': '(5)', 'n_informative': '(2)', 'random_state': '(42)'}), '(n_samples=20, n_features=5, n_informative=2,\n random_state=42)\n', (22033, 22099), False, 'from sklearn import datasets\n'), ((22101, 22128), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': '(5)'}), '(n_splits=5)\n', (22116, 22128), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((22233, 22275), 'sklearn.linear_model.LogisticRegression', 'lm.LogisticRegression', ([], {'C': '(1)', 'solver': '"""lbfgs"""'}), "(C=1, solver='lbfgs')\n", (22254, 22275), True, 'import sklearn.linear_model as lm\n'), ((22289, 22337), 'sklearn.model_selection.cross_validate', 'cross_validate', (['estimator', 'X', 'y'], {'cv': 'cv', 'n_jobs': '(5)'}), '(estimator, X, y, cv=cv, n_jobs=5)\n', (22303, 22337), False, 'from sklearn.model_selection import cross_validate\n'), ((22638, 22680), 
'sklearn.linear_model.LogisticRegression', 'lm.LogisticRegression', ([], {'C': '(1)', 'solver': '"""lbfgs"""'}), "(C=1, solver='lbfgs')\n", (22659, 22680), True, 'import sklearn.linear_model as lm\n'), ((23174, 23193), 'numpy.array', 'np.array', (['coefs_seq'], {}), '(coefs_seq)\n', (23182, 23193), True, 'import numpy as np\n'), ((23730, 23772), 'sklearn.linear_model.LogisticRegression', 'lm.LogisticRegression', ([], {'C': '(1)', 'solver': '"""lbfgs"""'}), "(C=1, solver='lbfgs')\n", (23751, 23772), True, 'import sklearn.linear_model as lm\n'), ((23785, 23803), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': '(5)'}), '(n_jobs=5)\n', (23793, 23803), False, 'from joblib import Parallel, delayed\n'), ((24348, 24386), 'numpy.all', 'np.all', (['(y_test_pred == y_test_pred_seq)'], {}), '(y_test_pred == y_test_pred_seq)\n', (24354, 24386), True, 'import numpy as np\n'), ((4473, 4500), 'numpy.arange', 'np.arange', (['X_train.shape[0]'], {}), '(X_train.shape[0])\n', (4482, 4500), True, 'import numpy as np\n'), ((4889, 4899), 'sklearn.linear_model.Ridge', 'lm.Ridge', ([], {}), '()\n', (4897, 4899), True, 'import sklearn.linear_model as lm\n'), ((12196, 12219), 'sklearn.linear_model.LogisticRegression', 'lm.LogisticRegression', ([], {}), '()\n', (12217, 12219), True, 'import sklearn.linear_model as lm\n'), ((12882, 12892), 'sklearn.linear_model.Ridge', 'lm.Ridge', ([], {}), '()\n', (12890, 12892), True, 'import sklearn.linear_model as lm\n'), ((16658, 16685), 'sklearn.metrics.r2_score', 'metrics.r2_score', (['y', 'y_pred'], {}), '(y, y_pred)\n', (16674, 16685), True, 'import sklearn.metrics as metrics\n'), ((16775, 16812), 'numpy.sum', 'np.sum', (['(scores_perm >= scores_perm[0])'], {}), '(scores_perm >= scores_perm[0])\n', (16781, 16812), True, 'import numpy as np\n'), ((16853, 16899), 'numpy.sum', 'np.sum', (['(coefs_perm >= coefs_perm[0, :])'], {'axis': '(0)'}), '(coefs_perm >= coefs_perm[0, :], axis=0)\n', (16859, 16899), True, 'import numpy as np\n'), ((16997, 17024), 
'numpy.round', 'np.round', (['pval_coef_perm', '(3)'], {}), '(pval_coef_perm, 3)\n', (17005, 17024), True, 'import numpy as np\n'), ((20650, 20702), 'numpy.setdiff1d', 'np.setdiff1d', (['orig_all', 'boot_tr'], {'assume_unique': '(False)'}), '(orig_all, boot_tr, assume_unique=False)\n', (20662, 20702), True, 'import numpy as np\n'), ((20878, 20907), 'sklearn.metrics.r2_score', 'metrics.r2_score', (['yte', 'y_pred'], {}), '(yte, y_pred)\n', (20894, 20907), True, 'import sklearn.metrics as metrics\n'), ((22344, 22377), 'numpy.mean', 'np.mean', (["cv_results['test_score']"], {}), "(cv_results['test_score'])\n", (22351, 22377), True, 'import numpy as np\n'), ((23036, 23090), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['y[test]', 'y_test_pred_seq[test]'], {}), '(y[test], y_test_pred_seq[test])\n', (23058, 23090), True, 'import sklearn.metrics as metrics\n'), ((23132, 23150), 'numpy.mean', 'np.mean', (['test_accs'], {}), '(test_accs)\n', (23139, 23150), True, 'import numpy as np\n'), ((24165, 24215), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['y[test]', 'y_test_pred[test]'], {}), '(y[test], y_test_pred[test])\n', (24187, 24215), True, 'import sklearn.metrics as metrics\n'), ((24257, 24275), 'numpy.mean', 'np.mean', (['test_accs'], {}), '(test_accs)\n', (24264, 24275), True, 'import numpy as np\n'), ((3184, 3221), 'sklearn.metrics.r2_score', 'metrics.r2_score', (['y_test', 'y_pred_test'], {}), '(y_test, y_pred_test)\n', (3200, 3221), True, 'import sklearn.metrics as metrics\n'), ((5154, 5191), 'sklearn.metrics.r2_score', 'metrics.r2_score', (['y_test', 'y_pred_test'], {}), '(y_test, y_pred_test)\n', (5170, 5191), True, 'import sklearn.metrics as metrics\n'), ((8709, 8726), 'numpy.mean', 'np.mean', (['r2_train'], {}), '(r2_train)\n', (8716, 8726), True, 'import numpy as np\n'), ((8752, 8768), 'numpy.mean', 'np.mean', (['r2_test'], {}), '(r2_test)\n', (8759, 8768), True, 'import numpy as np\n'), ((12449, 12501), 
'sklearn.metrics.balanced_accuracy_score', 'metrics.balanced_accuracy_score', (['y_test', 'y_pred_test'], {}), '(y_test, y_pred_test)\n', (12480, 12501), True, 'import sklearn.metrics as metrics\n'), ((16565, 16589), 'numpy.random.permutation', 'np.random.permutation', (['y'], {}), '(y)\n', (16586, 16589), True, 'import numpy as np\n'), ((17810, 17835), 'numpy.sum', 'np.sum', (['(perms >= perms[0])'], {}), '(perms >= perms[0])\n', (17816, 17835), True, 'import numpy as np\n'), ((17867, 17890), 'numpy.ones', 'np.ones', (['perms.shape[0]'], {}), '(perms.shape[0])\n', (17874, 17890), True, 'import numpy as np\n'), ((23298, 23324), 'numpy.sqrt', 'np.sqrt', (['coefs_cv.shape[0]'], {}), '(coefs_cv.shape[0])\n', (23305, 23324), True, 'import numpy as np\n'), ((4918, 4934), 'numpy.arange', 'np.arange', (['(-3)', '(3)'], {}), '(-3, 3)\n', (4927, 4934), True, 'import numpy as np\n'), ((10816, 10829), 'numpy.mean', 'np.mean', (['bacc'], {}), '(bacc)\n', (10823, 10829), True, 'import numpy as np\n'), ((10831, 10843), 'numpy.mean', 'np.mean', (['auc'], {}), '(auc)\n', (10838, 10843), True, 'import numpy as np\n'), ((12234, 12250), 'numpy.arange', 'np.arange', (['(-3)', '(3)'], {}), '(-3, 3)\n', (12243, 12250), True, 'import numpy as np\n'), ((12911, 12927), 'numpy.arange', 'np.arange', (['(-3)', '(3)'], {}), '(-3, 3)\n', (12920, 12927), True, 'import numpy as np\n'), ((23827, 23854), 'joblib.delayed', 'delayed', (['_split_fit_predict'], {}), '(_split_fit_predict)\n', (23834, 23854), False, 'from joblib import Parallel, delayed\n'), ((23864, 23880), 'sklearn.base.clone', 'clone', (['estimator'], {}), '(estimator)\n', (23869, 23880), False, 'from sklearn.base import is_classifier, clone\n'), ((24406, 24424), 'numpy.array', 'np.array', (['coefs_cv'], {}), '(coefs_cv)\n', (24414, 24424), True, 'import numpy as np\n'), ((24436, 24455), 'numpy.array', 'np.array', (['coefs_seq'], {}), '(coefs_seq)\n', (24444, 24455), True, 'import numpy as np\n')] |
from ...pipeline.BPtPipeline import BPtPipeline
from ...pipeline.BPtSearchCV import NevergradSearchCV
from ...pipeline.ScopeObjs import ScopeTransformer
from ...pipeline.BPtModel import BPtModel
from ..input import (Model, ModelPipeline, Pipeline, CV, Scaler,
ProblemSpec, ParamSearch, Imputer, Transformer)
from ..funcs import (pipeline_check, problem_spec_check, get_estimator,
_preproc_pipeline, _preproc_param_search)
from ..CV import BPtCV
from ...dataset.dataset import Dataset
import pytest
from sklearn.linear_model import Ridge
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.preprocessing import RobustScaler
import numpy as np
from ..compare import CompareDict, Options, Compare, Option
def test_no_overlap_param_names():
    """ProblemSpec and ModelPipeline must not share any parameter names."""
    spec_names = set(ProblemSpec._get_param_names())
    pipeline_names = set(ModelPipeline._get_param_names())
    assert not (spec_names & pipeline_names)
def test_pipeline_check():
    """pipeline_check should normalize several input forms.

    ModelPipeline inputs stay ModelPipeline; bare Model or positional
    inputs are wrapped as (at least) a Pipeline.
    """
    checked = pipeline_check(
        ModelPipeline(imputers=None, model=Model('ridge')))
    assert isinstance(checked, ModelPipeline)

    checked = pipeline_check(
        ModelPipeline(imputers=None, model='ridge'))
    assert isinstance(checked, ModelPipeline)

    checked = pipeline_check(Model('ridge'))
    assert isinstance(checked, Pipeline)

    checked = pipeline_check(ModelPipeline('ridge'))
    assert isinstance(checked, Pipeline)
def test_get_default_pipeline_str():
    """A default pipeline name string should resolve to a Pipeline."""
    assert isinstance(pipeline_check('elastic_pipe'), Pipeline)
def test_pipeline_check_compare():
    """Compare inputs expand into a CompareDict of checked pipelines."""
    unnamed = Compare([Model('ridge'), Model('elastic')])
    checked = pipeline_check(unnamed, error_if_compare=False)
    assert isinstance(checked, CompareDict)
    assert len(checked) == 2

    # With explicit Option names, entries are addressable by key.
    named = Compare([Option(Model('ridge'), 'ridge'),
                     Option(Model('elastic'), 'elastic')])
    checked = pipeline_check(named, error_if_compare=False)
    assert isinstance(checked, CompareDict)
    assert len(checked) == 2
    assert isinstance(checked["pipeline=ridge"], Pipeline)
    assert isinstance(checked["pipeline=elastic"], Pipeline)
def test_pipeline_check_extra_args():
    """Extra keyword args to pipeline_check override pipeline pieces.

    Unknown keywords (e.g. ``ignore``) are silently ignored.
    """
    params = ModelPipeline(imputers=None, model=Model('ridge'))
    checked = pipeline_check(params)
    assert isinstance(checked, ModelPipeline)
    assert checked.imputers is None

    checked = pipeline_check(params, imputers=Imputer('mean'),
                             ignore='ignore')
    assert isinstance(checked, ModelPipeline)
    assert checked.imputers is not None
def get_fake_dataset():
    """Build a minimal 3-column Dataset with column '3' as the target."""
    ds = Dataset()
    for name, values in (('1', [1, 2, 3]),
                         ('2', [4, 5, 6]),
                         ('3', [7, 8, 9])):
        ds[name] = values
    ds = ds.set_role('3', 'target')
    ds._check_sr()
    return ds
def get_test_ps():
    """Return an unchecked ProblemSpec with mostly-default settings."""
    spec_kwargs = dict(problem_type='default',
                       target='3',
                       scorer='default',
                       scope='all',
                       subjects='all',
                       n_jobs=2,
                       random_state=1)
    return ProblemSpec(**spec_kwargs)
def get_checked_ps():
    """Return get_test_ps() validated against the fake dataset."""
    return problem_spec_check(get_test_ps(), get_fake_dataset())
def test_problem_spec_compare_fail():
    """error_if_compare=True must reject Compare-valued specs."""
    dataset = get_fake_dataset()
    spec = ProblemSpec(scope=Compare(['all', 'float']))
    with pytest.raises(RuntimeError):
        problem_spec_check(spec, dataset, error_if_compare=True)
    # With the flag off, the same spec is accepted without raising.
    problem_spec_check(spec, dataset, error_if_compare=False)
def test_problem_spec_compare():
    """A Compare-valued scope expands into a CompareDict of specs.

    Entries are addressable both by repr-style string keys and by
    Options objects; Option names override the default keys.
    """
    dataset = get_fake_dataset()
    dataset._check_sr()
    # Default names
    p = ProblemSpec(scope=Compare(['all', 'float']))
    ps = problem_spec_check(p, dataset, error_if_compare=False)
    assert isinstance(ps, CompareDict)
    assert len(ps) == 2
    assert ps["scope='all'"].scope == 'all'
    assert ps["scope='float'"].scope == 'float'
    assert ps[Options(scope='all')].scope == 'all'
    assert ps[Options(scope='float')].scope == 'float'
    # Custom names
    compare_scopes = Compare([Option('all', 'all'),
                             Option('float', '2')])
    p = ProblemSpec(scope=compare_scopes)
    ps = problem_spec_check(p, dataset, error_if_compare=False)
    assert ps["scope=all"].scope == 'all'
    assert ps["scope=2"].scope == 'float'
def test_problem_spec_multiple_compares():
    """Two Compare-valued fields multiply out into all combinations.

    scope has 2 options and subjects has 3, so 6 specs result; partial
    keys (e.g. just the scope) select a sub CompareDict.
    """
    dataset = get_fake_dataset()
    dataset._check_sr()
    # Default names
    p = ProblemSpec(scope=Compare(['all', 'float']),
                    subjects=Compare(['all', 'train', 'test']))
    ps = problem_spec_check(p, dataset, error_if_compare=False)
    assert isinstance(ps, dict)
    assert len(ps) == 6
    assert ps["scope='all', subjects='all'"].scope == 'all'
    assert ps["scope='all', subjects='all'"].subjects == 'all'
    assert ps["scope='all', subjects='train'"].scope == 'all'
    assert ps["scope='all', subjects='train'"].subjects == 'train'
    assert ps["scope='float', subjects='test'"].scope == 'float'
    assert ps["scope='float', subjects='train'"].subjects == 'train'
    ft = ps[Options(scope='float', subjects='train')]
    assert ft.scope == 'float'
    assert ft.subjects == 'train'
    # Indexing by only one of the compared fields returns the subset.
    subset = ps["scope='float'"]
    assert isinstance(subset, CompareDict)
    assert len(subset) == 3
    # An unknown key should raise, not return something arbitrary.
    with pytest.raises(KeyError):
        ps['nonsense']
def test_problem_spec_check():
    """problem_spec_check resolves defaults and validates the target.

    The checked spec is independent of the input object (mutating ``p``
    afterwards does not affect ``ps``), and invalid targets raise
    IndexError.
    """
    dataset = get_fake_dataset()
    dataset._check_sr()
    # Test some cases
    p = get_test_ps()
    ps = problem_spec_check(p, dataset)
    assert ps.problem_type == 'regression'
    assert ps.target == '3'
    assert ps.scorer != 'default'
    # Mutating the input spec must not leak into the checked copy.
    p.target = 9
    assert ps.target != 9
    assert ps.n_jobs == 2
    assert ps.random_state == 1
    # Test default case
    ps = problem_spec_check('default', dataset)
    assert ps.problem_type == 'regression'
    assert ps.target == '3'
    assert ps.scorer != 'default'
    # '1' exists but is not a target column.
    p.target = '1'
    with pytest.raises(IndexError):
        ps = problem_spec_check(p, dataset)
    # '4' does not exist at all.
    p.target = '4'
    with pytest.raises(IndexError):
        ps = problem_spec_check(p, dataset)
    # An out-of-range integer index is also rejected.
    p.target = 8
    with pytest.raises(IndexError):
        ps = problem_spec_check(p, dataset)
def test_preproc_preproc_param_search():
    """_preproc_param_search converts a ParamSearch into a plain dict.

    'default' fields are filled in from the problem spec (n_jobs,
    random_state, scorer); explicit non-default values are kept as-is.
    The function returns whether the pipeline has a search at all.
    """
    ps = get_checked_ps()
    search = ParamSearch(search_type='RandomSearch',
                    cv='default',
                    n_iter=10,
                    scorer='default',
                    mp_context='loky',
                    n_jobs='default',
                    weight_scorer=True,
                    random_state='default',
                    dask_ip=None,
                    memmap_X=False,
                    search_only_params=None,
                    progress_loc=None)
    pipe = ModelPipeline(param_search=search)
    has_search = _preproc_param_search(pipe, ps)
    assert has_search is True
    search_d = pipe.param_search
    assert isinstance(search_d, dict)
    assert search_d['n_iter'] == 10
    assert search_d['search_type'] == 'RandomSearch'
    # These should be proc'ed since default
    assert search_d['n_jobs'] == 2
    assert search_d['random_state'] == 1
    assert search_d['scorer'] != 'default'
    assert search_d['weight_scorer'] is True
    # search_only_params=None becomes an empty dict.
    assert isinstance(search_d['search_only_params'], dict)
    assert len(search_d['search_only_params']) == 0
    # No search requested: flag is False and param_search stays None.
    pipe = ModelPipeline(param_search=None)
    has_search = _preproc_param_search(pipe, ps)
    assert has_search is False
    assert pipe.param_search is None
    # Try non-default case
    search = ParamSearch(scorer='r2',
                    n_jobs=10,
                    random_state=9)
    pipe = ModelPipeline(param_search=search)
    has_search = _preproc_param_search(pipe, ps)
    search_d = pipe.param_search
    assert has_search is True
    assert search_d['n_jobs'] == 10
    assert search_d['random_state'] == 9
    # 'r2' is resolved to an actual scorer callable.
    assert callable(search_d['scorer'])
def test_preproc_pipeline():
    """_preproc_pipeline resolves imputers and CV against the dataset.

    Default imputers are dropped when the data has no NaNs and added
    once a NaN is present; a ParamSearch becomes a dict whose 'cv' is a
    BPtCV; CV(splits=...) columns are validated (must exist, be a
    non-input role, and be categorical).
    """
    ps = get_checked_ps()
    data = get_fake_dataset()
    data._check_sr()
    # Test imputers first
    pipe = ModelPipeline(model='ridge', imputers='default')
    proc_pipe = _preproc_pipeline(pipe, ps, dataset=data)
    assert proc_pipe.imputers is None
    # Introduce a missing value so default imputers are kept.
    data.loc[0, '1'] = np.nan
    pipe = ModelPipeline(model='ridge', imputers='default')
    proc_pipe = _preproc_pipeline(pipe, ps, dataset=data)
    assert isinstance(proc_pipe.imputers[0], Imputer)
    assert isinstance(proc_pipe.imputers[1], Imputer)
    # Check CV case
    pipe = ModelPipeline(model='ridge', imputers='default', scalers=None,
                      param_search=ParamSearch(
                          search_type='DiscreteOnePlusOne'))
    proc_pipe = _preproc_pipeline(pipe, ps, dataset=data)
    assert isinstance(proc_pipe.param_search, dict)
    assert proc_pipe.param_search['search_type'] == 'DiscreteOnePlusOne'
    cv_obj = proc_pipe.param_search['cv']
    assert isinstance(cv_obj, BPtCV)
    assert cv_obj.cv_strategy.groups is None
    assert cv_obj.cv_strategy.stratify is None
    assert cv_obj.cv_strategy.train_only is None
    # Check another non default CV case - splits
    data = get_fake_dataset()
    data.copy_as_non_input('1', '4', inplace=True)
    data.copy_as_non_input('1', '5', inplace=True)
    # Remove category, and make sure dtype changes
    data = data.remove_scope('5', 'category')
    assert data['5'].dtype.name != 'category'
    pipe = ModelPipeline(param_search=ParamSearch(cv=CV(splits='4')))
    proc_pipe = _preproc_pipeline(pipe, ps, dataset=data)
    cv_obj = proc_pipe.param_search['cv']
    assert len(cv_obj.splits_vals) == 3
    assert cv_obj.splits_vals.nunique() == 3
    # Not a valid column error
    pipe = ModelPipeline(param_search=ParamSearch(cv=CV(splits='6')))
    with pytest.raises(KeyError):
        proc_pipe = _preproc_pipeline(pipe, ps, dataset=data)
    # Trigger role failure
    pipe = ModelPipeline(param_search=ParamSearch(cv=CV(splits='3')))
    with pytest.raises(RuntimeError):
        proc_pipe = _preproc_pipeline(pipe, ps, dataset=data)
    # Not category failure
    pipe = ModelPipeline(param_search=ParamSearch(cv=CV(splits='5')))
    with pytest.raises(RuntimeError):
        proc_pipe = _preproc_pipeline(pipe, ps, dataset=data)
def test_get_estimator_simple_case():
    """get_estimator builds a one-step BPtPipeline from a bare model.

    With imputers dropping out (no NaNs) and scalers=None, only the
    model step remains, wrapped in a BPtModel around a regression Ridge.
    """
    ps = get_checked_ps()
    data = get_fake_dataset()
    pipe = ModelPipeline(model='ridge', imputers='default',
                         scalers=None, verbose=1)
    est = get_estimator(pipeline=pipe, dataset=data, problem_spec=ps)
    # Should be BPt pipeline output
    assert isinstance(est, BPtPipeline)
    # Make sure verbose arg propagates
    assert est.verbose == 1
    # Should just be model
    assert len(est.steps) == 1
    model_name = est.steps[0][0]
    assert isinstance(model_name, str)
    # Should be regression ridge, so make sure
    # this tests default ps steps too
    scope_model = est.steps[0][1]
    assert isinstance(scope_model, BPtModel)
    model = scope_model.estimator
    assert isinstance(model, Ridge)
def test_get_estimator_from_build_simple_case():
    """ModelPipeline.build should mirror get_estimator for the same input.

    Same checks as test_get_estimator_simple_case, but going through the
    pipeline's own build() method.
    """
    ps = get_checked_ps()
    data = get_fake_dataset()
    pipe = ModelPipeline(model='ridge', imputers='default',
                         scalers=None, verbose=1)
    est = pipe.build(dataset=data, problem_spec=ps)
    # Should be BPt pipeline output
    assert isinstance(est, BPtPipeline)
    # Make sure verbose arg propagates
    assert est.verbose == 1
    # Should just be model
    assert len(est.steps) == 1
    model_name = est.steps[0][0]
    assert isinstance(model_name, str)
    # Should be regression ridge, so make sure
    # this tests default ps steps too
    scope_model = est.steps[0][1]
    assert isinstance(scope_model, BPtModel)
    model = scope_model.estimator
    assert isinstance(model, Ridge)
def test_get_estimator_with_ng_search():
    """A pipeline-level param_search wraps the whole pipeline in a search.

    The result is a NevergradSearchCV whose inner estimator is the
    BPtPipeline, with non-empty parameter distributions from params=1.
    """
    ps = get_checked_ps()
    data = get_fake_dataset()
    pipe = ModelPipeline(model=Model('ridge', params=1), scalers=None,
                         param_search=ParamSearch('RandomSearch'))
    search_est = get_estimator(pipeline=pipe, dataset=data,
                              problem_spec=ps)
    # Nevergrad cv
    assert isinstance(search_est, NevergradSearchCV)
    # Estimator should be pipeline, w/ ridge at last step
    est = search_est.estimator
    assert isinstance(est, BPtPipeline)
    scope_model = est.steps[-1][1]
    ridge = scope_model.estimator
    assert isinstance(ridge, Ridge)
    param_search = search_est.ps
    assert isinstance(param_search['cv'], BPtCV)
    assert param_search['search_type'] == 'RandomSearch'
    param_dists = search_est.param_distributions
    assert isinstance(param_dists, dict)
    assert len(param_dists) > 0
def test_get_estimator_n_jobs():
    """n_jobs from the problem spec should propagate into the model."""
    pipe = ModelPipeline(model=Model('random forest'), scalers=None)
    est = get_estimator(pipeline=pipe,
                        dataset=get_fake_dataset(),
                        problem_spec=get_checked_ps())
    assert isinstance(est, BPtPipeline)
    wrapped = est.steps[0][1]
    assert isinstance(wrapped, BPtModel)
    assert isinstance(wrapped.estimator, RandomForestRegressor)
    # The spec requests n_jobs=2.
    assert wrapped.estimator.n_jobs == 2
def test_get_estimator_extra_params():
    """Keyword overrides to get_estimator replace pipeline pieces.

    model= and problem_type= passed directly should win over the
    ModelPipeline's own model, yielding a classifier.
    """
    pipe = ModelPipeline(model=Model('ridge'), scalers=None)
    est = get_estimator(pipeline=pipe,
                        dataset=get_fake_dataset(),
                        problem_spec=get_test_ps(),
                        model=Model('random forest'),
                        problem_type='binary')
    assert isinstance(est, BPtPipeline)
    wrapped = est.steps[0][1]
    assert isinstance(wrapped, BPtModel)
    assert isinstance(wrapped.estimator, RandomForestClassifier)
def test_get_estimator_extra_params_pipeline():
    """A plain Pipeline ignores a model= keyword override.

    Unlike ModelPipeline, Pipeline has no dedicated model slot, so the
    ridge step is kept as-is.
    """
    pipe = Pipeline([Model('ridge')])
    est = get_estimator(pipeline=pipe,
                        dataset=get_fake_dataset(),
                        problem_spec=get_test_ps(),
                        model=Model('random forest'),
                        problem_type='regression')
    assert isinstance(est, BPtPipeline)
    wrapped = est.steps[0][1]
    assert isinstance(wrapped, BPtModel)
    assert isinstance(wrapped.estimator, Ridge)
def test_get_estimator_n_jobs_ng():
    """With a search, n_jobs goes to the search, not the inner model.

    The underlying random forest gets n_jobs=1 while the wrapping
    NevergradSearchCV gets the spec's n_jobs=2.
    """
    ps = get_checked_ps()
    data = get_fake_dataset()
    pipe = ModelPipeline(model=Model('random forest', params=1), scalers=None,
                         param_search=ParamSearch('RandomSearch'))
    search_est = get_estimator(pipeline=pipe, dataset=data,
                              problem_spec=ps)
    assert isinstance(search_est, NevergradSearchCV)
    est = search_est.estimator
    assert isinstance(est, BPtPipeline)
    scope_model = est.steps[0][1]
    assert isinstance(scope_model, BPtModel)
    model = scope_model.estimator
    assert isinstance(model, RandomForestRegressor)
    # Should be n_jobs 1 in model
    assert model.n_jobs == 1
    # and n_jobs 2 in nevergrad search cv
    assert search_est.n_jobs == 2
def test_get_estimator_n_jobs_ng_pipeline():
    """Same n_jobs routing as above, but through a plain Pipeline.

    Mirrors test_get_estimator_n_jobs_ng using Pipeline instead of
    ModelPipeline.
    """
    ps = get_checked_ps()
    data = get_fake_dataset()
    pipe = Pipeline(steps=[Model('random forest', params=1)],
                    param_search=ParamSearch('RandomSearch'))
    search_est = get_estimator(pipeline=pipe, dataset=data,
                              problem_spec=ps)
    assert isinstance(search_est, NevergradSearchCV)
    est = search_est.estimator
    assert isinstance(est, BPtPipeline)
    scope_model = est.steps[0][1]
    assert isinstance(scope_model, BPtModel)
    model = scope_model.estimator
    assert isinstance(model, RandomForestRegressor)
    # Should be n_jobs 1 in model
    assert model.n_jobs == 1
    # and n_jobs 2 in nevergrad search cv
    assert search_est.n_jobs == 2
def test_get_estimator_with_scope():
    """Scopes resolve to column indices on the built steps.

    The float-scoped scaler covers everything (inds is Ellipsis) while
    the model scoped to column '1' gets inds == [0].
    """
    ps = get_checked_ps()
    data = get_fake_dataset()
    pipe = ModelPipeline(model=Model('ridge', scope='1'),
                         scalers=Scaler('robust', scope='float'))
    est = get_estimator(pipeline=pipe, dataset=data,
                        problem_spec=ps)
    assert len(est.steps) == 2
    scaler = est.steps[0][1]
    assert isinstance(scaler, ScopeTransformer)
    assert isinstance(scaler.estimator, RobustScaler)
    assert scaler.inds is Ellipsis
    model = est.steps[1][1]
    assert isinstance(model, BPtModel)
    assert isinstance(model.estimator, Ridge)
    assert model.inds == [0]
def test_get_binary_estimator_default_ps():
    """With a binarized target and default spec, a classifier is built."""
    data = get_fake_dataset().binarize('3', threshold=8)
    est = get_estimator(pipeline=Model('random forest'), dataset=data)
    assert isinstance(est, BPtPipeline)
    wrapped = est.steps[0][1]
    assert isinstance(wrapped, BPtModel)
    assert isinstance(wrapped.estimator, RandomForestClassifier)
def test_get_param_wrapped_model():
    """A model-level param_search wraps only that model in a search.

    The BPtPipeline keeps a single step; inside it the BPtModel holds a
    NevergradSearchCV around the random forest (random_state forwarded
    from the spec, non-empty param distributions from params=1).
    """
    ps = get_checked_ps()
    data = get_fake_dataset()
    pipe = Model('random forest', params=1,
                param_search=ParamSearch('RandomSearch'))
    est = get_estimator(pipeline=pipe, dataset=data,
                        problem_spec=ps)
    assert isinstance(est, BPtPipeline)
    assert len(est.steps) == 1
    search_scope_est = est.steps[0][1]
    assert isinstance(search_scope_est, BPtModel)
    search_est = search_scope_est.estimator
    assert isinstance(search_est, NevergradSearchCV)
    param_search = search_est.ps
    assert isinstance(param_search['cv'], BPtCV)
    assert param_search['search_type'] == 'RandomSearch'
    e = search_est.estimator
    assert isinstance(e, RandomForestRegressor)
    assert e.random_state == 1
    param_dists = search_est.param_distributions
    assert isinstance(param_dists, dict)
    assert len(param_dists) > 0
def test_get_estimator_compare1():
    """A Compare over model objects yields a CompareDict of pipelines."""
    pipe = Pipeline([
        Model(obj=Compare(['random forest',
                            'ridge']))])
    est = get_estimator(pipeline=pipe,
                        dataset=get_fake_dataset(),
                        problem_spec=get_checked_ps())
    assert isinstance(est, CompareDict)
    # Quoted and unquoted keys resolve to the same option.
    assert isinstance(est["'random forest'"], BPtPipeline)
    assert isinstance(est["random forest"], BPtPipeline)
def test_get_estimator_compare_fail():
    """A Compare left inside the problem spec must raise at build time."""
    spec = ProblemSpec(scope=Compare(['all', 'float']))
    pipe = Pipeline([
        Model(obj=Compare(['random forest',
                            'ridge']))])
    with pytest.raises(RuntimeError):
        get_estimator(pipeline=pipe, dataset=get_fake_dataset(),
                      problem_spec=spec)
def test_get_estimator_compare_merge():
    """Nested Compares (over pipelines and over models) multiply out.

    Two candidate pipelines, each comparing two models, yield a
    CompareDict of 4 built estimators, addressable by Options objects
    or composite string keys.
    """
    ps = get_checked_ps()
    data = get_fake_dataset()
    pipe1 = Pipeline([
        Model(obj=Compare(['random forest',
                            'ridge']))])
    pipe2 = Pipeline([
        Model(obj=Compare(['elastic',
                            'ridge']))])
    pipe = Compare([Option(pipe1, 'pipe1'),
                 Option(pipe2, 'pipe2')])
    est = get_estimator(pipeline=pipe, dataset=data,
                        problem_spec=ps)
    assert isinstance(est, CompareDict)
    # 2 pipelines x 2 models each = 4 built estimators.
    assert len(est) == 4
    # Make sure smart index work
    assert len(est['pipe1']) == 2
    assert len(est['pipe2']) == 2
    p1 = est[Options(pipeline='pipe1',
                  steps__0__obj='random forest')]
    assert isinstance(p1, BPtPipeline)
    assert isinstance(p1.steps[-1][1].estimator, RandomForestRegressor)
    # n_jobs / random_state propagate from the problem spec.
    assert p1.steps[-1][1].estimator.n_jobs == 2
    assert p1.steps[-1][1].estimator.random_state == 1
    p2 = est["pipeline=pipe1, steps__0__obj=ridge"]
    assert isinstance(p2, BPtPipeline)
    assert isinstance(p2.steps[-1][1].estimator, Ridge)
    assert p2.steps[-1][1].estimator.random_state == 1
def test_get_estimator_pipeline_with_custom_steps_base():
    """Raw sklearn objects are accepted directly as Pipeline steps."""
    encoder = Transformer('one hot encoder', scope='all')
    est = get_estimator(pipeline=Pipeline(steps=[encoder, Ridge()]),
                        dataset=get_fake_dataset(),
                        problem_spec=get_checked_ps())
    assert isinstance(est, BPtPipeline)
    # The raw Ridge is kept as the second step, unwrapped.
    assert isinstance(est.steps[1][1], Ridge)
def test_get_estimator_pipeline_with_custom_steps_naming():
    """Duplicate raw sklearn steps must receive unique step names."""
    steps = [RobustScaler(), RobustScaler(), ('rs', RobustScaler()),
             Ridge()]
    est = get_estimator(pipeline=Pipeline(steps=steps),
                        dataset=get_fake_dataset(),
                        problem_spec=get_checked_ps())
    assert isinstance(est, BPtPipeline)
    assert isinstance(est.steps[-1][1], Ridge)
    # All three scaler step names are distinct.
    scaler_names = [est.steps[i][0] for i in range(3)]
    assert len(set(scaler_names)) == 3
def test_get_estimator_stacking_default():
    """The bundled default stacking pipeline builds without error.

    Smoke test only: construction for both regression and binary
    (which exercises de-duplicated / uniquified step names).
    """
    ps = get_checked_ps()
    data = get_fake_dataset()
    from ...default.pipelines import stacking_pipe
    # Just want to make sure it doesn't break during construction
    est = get_estimator(pipeline=stacking_pipe, dataset=data,
                        problem_spec=ps)
    assert len(est.steps) == 5
    # Test for breaking behavior because of duplicates, i.e. does
    # uniquify work.
    est = get_estimator(pipeline=stacking_pipe, dataset=data,
                        problem_spec=ps, problem_type='binary')
    assert len(est.steps) == 5
def test_nested_pipelines():
    """A Pipeline nested inside a Pipeline builds and fits.

    Without model params, the inner pipeline stays a BPtPipeline step.
    """
    ps = get_checked_ps()
    data = get_fake_dataset()
    pipe1 = Pipeline(steps=[Model('linear')])
    pipe2 = Pipeline(steps=[pipe1])
    est = get_estimator(pipeline=pipe2, dataset=data, problem_spec=ps)
    assert isinstance(est, BPtPipeline)
    # Make sure doesn't break on fit
    X = np.ones((20, 20))
    y = np.ones(20)
    est.fit(X, y)
    assert isinstance(est.steps[0][1], BPtPipeline)
def test_nested_pipelines_params():
    """A nested pipeline with params gets wrapped in a BPtModel.

    Unlike the no-params case, the first step is a BPtModel whose
    estimator is the inner BPtPipeline.
    """
    ps = get_checked_ps()
    data = get_fake_dataset()
    pipe1 = Pipeline(steps=[Model('ridge', params=1)])
    pipe2 = Pipeline(steps=[pipe1])
    est = get_estimator(pipeline=pipe2, dataset=data, problem_spec=ps)
    assert isinstance(est, BPtPipeline)
    # Make sure doesn't break on fit
    X = np.ones((20, 20))
    y = np.ones(20)
    est.fit(X, y)
    assert not isinstance(est.steps[0][1], BPtPipeline)
    assert isinstance(est.steps[0][1], BPtModel)
    assert isinstance(est.steps[0][1].estimator, BPtPipeline)
| [
"sklearn.preprocessing.RobustScaler",
"sklearn.linear_model.Ridge",
"numpy.ones",
"pytest.raises"
] | [((20233, 20240), 'sklearn.linear_model.Ridge', 'Ridge', ([], {}), '()\n', (20238, 20240), False, 'from sklearn.linear_model import Ridge\n'), ((20644, 20651), 'sklearn.linear_model.Ridge', 'Ridge', ([], {}), '()\n', (20649, 20651), False, 'from sklearn.linear_model import Ridge\n'), ((21922, 21939), 'numpy.ones', 'np.ones', (['(20, 20)'], {}), '((20, 20))\n', (21929, 21939), True, 'import numpy as np\n'), ((21948, 21959), 'numpy.ones', 'np.ones', (['(20)'], {}), '(20)\n', (21955, 21959), True, 'import numpy as np\n'), ((22377, 22394), 'numpy.ones', 'np.ones', (['(20, 20)'], {}), '((20, 20))\n', (22384, 22394), True, 'import numpy as np\n'), ((22403, 22414), 'numpy.ones', 'np.ones', (['(20)'], {}), '(20)\n', (22410, 22414), True, 'import numpy as np\n'), ((3487, 3514), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (3500, 3514), False, 'import pytest\n'), ((5431, 5454), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (5444, 5454), False, 'import pytest\n'), ((6071, 6096), 'pytest.raises', 'pytest.raises', (['IndexError'], {}), '(IndexError)\n', (6084, 6096), False, 'import pytest\n'), ((6171, 6196), 'pytest.raises', 'pytest.raises', (['IndexError'], {}), '(IndexError)\n', (6184, 6196), False, 'import pytest\n'), ((6269, 6294), 'pytest.raises', 'pytest.raises', (['IndexError'], {}), '(IndexError)\n', (6282, 6294), False, 'import pytest\n'), ((9980, 10003), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (9993, 10003), False, 'import pytest\n'), ((10174, 10201), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (10187, 10201), False, 'import pytest\n'), ((10372, 10399), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (10385, 10399), False, 'import pytest\n'), ((18740, 18767), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (18753, 18767), False, 'import pytest\n'), ((20576, 20590), 
'sklearn.preprocessing.RobustScaler', 'RobustScaler', ([], {}), '()\n', (20588, 20590), False, 'from sklearn.preprocessing import RobustScaler\n'), ((20592, 20606), 'sklearn.preprocessing.RobustScaler', 'RobustScaler', ([], {}), '()\n', (20604, 20606), False, 'from sklearn.preprocessing import RobustScaler\n'), ((20615, 20629), 'sklearn.preprocessing.RobustScaler', 'RobustScaler', ([], {}), '()\n', (20627, 20629), False, 'from sklearn.preprocessing import RobustScaler\n')] |
import numpy as np
import pandas as pd
def autocorr_single_tp(a: np.ndarray, t: int) -> float:
    """Do autocorrelation for a single time point.

    Computes the real part of the inner product between the first
    row/element and the one at lag ``t``.

    Parameters
    ----------
    a : np.ndarray
        The array to correlate (complex or real valued). Note:
        ``np.ndarray`` is the array *type*; ``np.array`` (used before)
        is only the constructor function and is not a valid annotation.
    t : int
        The distance (in the index)

    Returns
    -------
    float
        The autocorrelation as a plain Python float.
    """
    # float(...) guarantees the documented return type instead of a
    # numpy scalar (np.float64 subclasses float, so this is compatible).
    return float(np.real(np.sum(a[0] * np.conj(a[t]))))
def autocorr(df: pd.DataFrame) -> pd.DataFrame:
    """Do autocorrelation for all possible time steps over all columns.

    Parameters
    ----------
    df : pd.DataFrame
        The data frame to correlate

    Returns
    -------
    pd.DataFrame
        The resulting dataframe with timestep as index and one column
        named autocorr
    """
    values = df.values
    correlations = [autocorr_single_tp(values, lag)
                    for lag in range(df.shape[0])]
    result = pd.DataFrame({'autocorr': correlations})
    result.index.name = 'timestep'
    return result
def fourier_transform(df: pd.DataFrame) -> pd.DataFrame:
    """Fourier transform a dataframe column-wise.

    The shape of the dataframe is unchanged; columns are renamed with a
    ``_ft`` suffix and the index is named ``frequency``.

    Parameters
    ----------
    df : pd.DataFrame
        The dataframe to transform

    Returns
    -------
    pd.DataFrame
        The dataframe with the fourier transform of all columns
        (they are named {}_ft)
    """
    transformed = df.apply(np.fft.fft)
    transformed.columns = ['{}_ft'.format(name)
                           for name in transformed.columns]
    transformed.index.name = 'frequency'
    return transformed
| [
"pandas.DataFrame",
"numpy.conj"
] | [((798, 812), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (810, 812), True, 'import pandas as pd\n'), ((420, 433), 'numpy.conj', 'np.conj', (['a[t]'], {}), '(a[t])\n', (427, 433), True, 'import numpy as np\n')] |
import os
import numpy as np
import scipy.io
from sklearn.manifold import TSNE
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, classification_report
import matplotlib.pyplot as plt
from matplotlib import style
style.use('fivethirtyeight')
import xgboost as xgb
import pickle
## Training
# Load precomputed SigNet-F feature maps for genuine and forged signatures.
reals1 = np.load('realChinesesignetf95_features.npy')
forges1 = np.load('forgeChinesesignetf95_features.npy')
# Flatten every feature map into a single 1-D vector per sample.
genuines1 = np.array([sample.flatten() for sample in reals1])
fakes1 = np.array([sample.flatten() for sample in forges1])
# Stack samples: genuine signatures are labelled 1, forgeries 0.
X = np.vstack([genuines1, fakes1])
y = np.hstack([np.ones((genuines1.shape[0],)), np.zeros((fakes1.shape[0],))])
# Hold out a third of the data for evaluation (fixed seed for reproducibility).
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
clf = xgb.XGBClassifier()
clf.fit(X_train, y_train)
predictions = clf.predict(X_test)
# Persist the split labels and class probabilities for later analysis.
np.save('Dutchtrainy67.npy', y_train)
np.save('Dutchtesty33.npy', y_test)
trainprobabilities = clf.predict_proba(X_train)
testprobabilities = clf.predict_proba(X_test)
np.save('signetf95Dutchprobstrain67.npy', trainprobabilities)
np.save('signetf95Dutchprobstest33.npy', testprobabilities)
print(confusion_matrix(y_test, predictions))
print(classification_report(y_test, predictions))
| [
"numpy.ones",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.classification_report",
"numpy.array",
"numpy.zeros",
"matplotlib.style.use",
"numpy.vstack",
"numpy.save",
"numpy.load",
"xgboost.XGBClassifier",
"sklearn.metrics.confusion_matrix"
] | [((289, 317), 'matplotlib.style.use', 'style.use', (['"""fivethirtyeight"""'], {}), "('fivethirtyeight')\n", (298, 317), False, 'from matplotlib import style\n'), ((377, 421), 'numpy.load', 'np.load', (['"""realChinesesignetf95_features.npy"""'], {}), "('realChinesesignetf95_features.npy')\n", (384, 421), True, 'import numpy as np\n'), ((432, 477), 'numpy.load', 'np.load', (['"""forgeChinesesignetf95_features.npy"""'], {}), "('forgeChinesesignetf95_features.npy')\n", (439, 477), True, 'import numpy as np\n'), ((662, 681), 'numpy.array', 'np.array', (['genuines1'], {}), '(genuines1)\n', (670, 681), True, 'import numpy as np\n'), ((691, 707), 'numpy.array', 'np.array', (['fakes1'], {}), '(fakes1)\n', (699, 707), True, 'import numpy as np\n'), ((713, 743), 'numpy.vstack', 'np.vstack', (['[genuines1, fakes1]'], {}), '([genuines1, fakes1])\n', (722, 743), True, 'import numpy as np\n'), ((859, 914), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.33)', 'random_state': '(42)'}), '(X, y, test_size=0.33, random_state=42)\n', (875, 914), False, 'from sklearn.model_selection import train_test_split\n'), ((922, 941), 'xgboost.XGBClassifier', 'xgb.XGBClassifier', ([], {}), '()\n', (939, 941), True, 'import xgboost as xgb\n'), ((1005, 1042), 'numpy.save', 'np.save', (['"""Dutchtrainy67.npy"""', 'y_train'], {}), "('Dutchtrainy67.npy', y_train)\n", (1012, 1042), True, 'import numpy as np\n'), ((1043, 1078), 'numpy.save', 'np.save', (['"""Dutchtesty33.npy"""', 'y_test'], {}), "('Dutchtesty33.npy', y_test)\n", (1050, 1078), True, 'import numpy as np\n'), ((1174, 1235), 'numpy.save', 'np.save', (['"""signetf95Dutchprobstrain67.npy"""', 'trainprobabilities'], {}), "('signetf95Dutchprobstrain67.npy', trainprobabilities)\n", (1181, 1235), True, 'import numpy as np\n'), ((1236, 1295), 'numpy.save', 'np.save', (['"""signetf95Dutchprobstest33.npy"""', 'testprobabilities'], {}), "('signetf95Dutchprobstest33.npy', testprobabilities)\n", 
(1243, 1295), True, 'import numpy as np\n'), ((1303, 1340), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'predictions'], {}), '(y_test, predictions)\n', (1319, 1340), False, 'from sklearn.metrics import confusion_matrix, classification_report\n'), ((1348, 1390), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'predictions'], {}), '(y_test, predictions)\n', (1369, 1390), False, 'from sklearn.metrics import confusion_matrix, classification_report\n'), ((760, 790), 'numpy.ones', 'np.ones', (['(genuines1.shape[0],)'], {}), '((genuines1.shape[0],))\n', (767, 790), True, 'import numpy as np\n'), ((792, 820), 'numpy.zeros', 'np.zeros', (['(fakes1.shape[0],)'], {}), '((fakes1.shape[0],))\n', (800, 820), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
from enrest.functions import run_test, get_threshold, get_deg_gene_ids, get_other_gene_ids_for_deg_case, split_scores_by_gene_ids
from enrest.parsers import matrices_parser, promoters_parser, read_set_of_genes
import enrest.speedup as sup
def work_with_matrix(name, pwm, pfm, matrix_length, all_ids, deg_table, promoters, parameter,
                     padj_thr, log2fc_thr_deg, log2fc_thr_background):
    """Score promoters with one PWM and run the enrichment test per DEG condition.

    Scans all promoters with *pwm*, derives score thresholds at three fixed
    false-positive rates, and runs the configured test for the 'ALL', 'UP'
    and 'DOWN' DEG conditions.

    Parameters
    ----------
    name : matrix identifier, stored under the ('', 'ID') key of each row.
    pwm, pfm, matrix_length : matrix data as yielded by ``matrices_parser``
        (``pfm`` and ``matrix_length`` are currently unused here).
    all_ids : gene ids aligned with the promoter scores.
    deg_table : DEG table as read by ``deg_case``.
    promoters : promoter sequences to scan.
    parameter : 'enrichment' (use all per-position scores) or
        'fraction' (use only the best score per promoter).
    padj_thr, log2fc_thr_deg, log2fc_thr_background : DEG selection cutoffs.

    Returns
    -------
    dict
        Maps each condition ('ALL', 'UP', 'DOWN') to its result row.

    Raises
    ------
    ValueError
        If *parameter* is not 'enrichment' or 'fraction'.
    """
    container = {'ALL': [], 'UP': [], 'DOWN': []}
    all_scores = sup.scaner(promoters, pwm)
    best_scores = np.max(all_scores, axis=1)
    flatten_scores = all_scores.ravel()
    flatten_scores = sup.sort(flatten_scores)
    threshold_table = get_threshold(flatten_scores)
    threshold_table = np.array(threshold_table)
    fprs_table = threshold_table[:,1]
    fprs_choosen = np.array([0.0005, 0.00015, 0.00005])  # LOW, MIDDLE, HIGH
    indexes = np.searchsorted(fprs_table, fprs_choosen)
    threshold_table = threshold_table[indexes]
    for index, condition in enumerate(['ALL', 'UP', 'DOWN'], 1):
        line = {('', 'ID'): name}
        deg_ids = get_deg_gene_ids(deg_table, condition, padj_thr=padj_thr, log2fc_thr=log2fc_thr_deg)
        other_ids = get_other_gene_ids_for_deg_case(deg_table, padj_thr=padj_thr, log2fc_thr=log2fc_thr_background)
        if parameter == "enrichment":
            deg_scores, other_scores, genes = split_scores_by_gene_ids(all_scores, all_ids, deg_ids, other_ids)
        elif parameter == "fraction":
            deg_scores, other_scores, genes = split_scores_by_gene_ids(best_scores, all_ids, deg_ids, other_ids)
        else:
            # Previously an unknown value fell through to an opaque NameError
            # on ``deg_scores``; fail fast with a clear message instead.
            raise ValueError(f"unknown parameter {parameter!r}; expected 'enrichment' or 'fraction'")
        results = run_test(genes, deg_scores, other_scores, threshold_table, parameter)
        line.update(results)
        container[condition] = line
    return container
def deg_case(path_to_deg, path_to_db, output_dir, path_to_promoters,
             file_format='meme', parameter='enrichment', padj_thr=0.05,
             log2fc_thr_deg=1, log2fc_thr_background=np.log2(5/4)):
    """Run the full DEG enrichment pipeline over a matrix database.

    Reads the DEG table, promoters and matrices, scores every matrix with
    :func:`work_with_matrix`, and writes one TSV per condition
    (``all.tsv``, ``up.tsv``, ``down.tsv``) into *output_dir*.
    """
    print('-'*30)
    print('Read DEG table')
    deg_table = pd.read_csv(path_to_deg, sep=',', comment='#')
    # Drop rows without a usable adjusted p-value (NaN fails the comparison).
    deg_table = deg_table[deg_table['padj'] <= 1]
    print('-'*30)
    print('Read promoters')
    promoters, all_ids = promoters_parser(path_to_promoters)
    print('-'*30)
    print('Read matrices')
    matrices = matrices_parser(path_to_db, f=file_format)
    number_of_matrices = len(matrices)
    print(f'Number of matrices = {number_of_matrices}')
    print('-'*30)
    results = []
    for index, matrix_data in enumerate(matrices, start=1):
        name, pwm, pfm, matrix_length = matrix_data
        print(f'{index}. {name}')
        results.append(work_with_matrix(name, pwm, pfm, matrix_length,
                                        all_ids, deg_table,
                                        promoters, parameter, padj_thr,
                                        log2fc_thr_deg, log2fc_thr_background))
    # One output table per DEG condition.
    for index, condition in enumerate(['ALL', 'UP', 'DOWN'], 1):
        rows = [entry[condition] for entry in results]
        frame = pd.DataFrame(rows, columns=rows[0].keys())
        condition = condition.lower()
        output_path = f"{output_dir}/{condition}.tsv"
        frame.to_csv(output_path, sep='\t', index=False)
    print('-'*30)
    print('All done. Exit')
    return None
| [
"enrest.functions.split_scores_by_gene_ids",
"pandas.read_csv",
"numpy.searchsorted",
"enrest.parsers.promoters_parser",
"enrest.functions.get_other_gene_ids_for_deg_case",
"enrest.functions.get_threshold",
"enrest.functions.get_deg_gene_ids",
"enrest.functions.run_test",
"numpy.max",
"numpy.array... | [((496, 522), 'enrest.speedup.scaner', 'sup.scaner', (['promoters', 'pwm'], {}), '(promoters, pwm)\n', (506, 522), True, 'import enrest.speedup as sup\n'), ((541, 567), 'numpy.max', 'np.max', (['all_scores'], {'axis': '(1)'}), '(all_scores, axis=1)\n', (547, 567), True, 'import numpy as np\n'), ((629, 653), 'enrest.speedup.sort', 'sup.sort', (['flatten_scores'], {}), '(flatten_scores)\n', (637, 653), True, 'import enrest.speedup as sup\n'), ((676, 705), 'enrest.functions.get_threshold', 'get_threshold', (['flatten_scores'], {}), '(flatten_scores)\n', (689, 705), False, 'from enrest.functions import run_test, get_threshold, get_deg_gene_ids, get_other_gene_ids_for_deg_case, split_scores_by_gene_ids\n'), ((728, 753), 'numpy.array', 'np.array', (['threshold_table'], {}), '(threshold_table)\n', (736, 753), True, 'import numpy as np\n'), ((811, 845), 'numpy.array', 'np.array', (['[0.0005, 0.00015, 5e-05]'], {}), '([0.0005, 0.00015, 5e-05])\n', (819, 845), True, 'import numpy as np\n'), ((882, 923), 'numpy.searchsorted', 'np.searchsorted', (['fprs_table', 'fprs_choosen'], {}), '(fprs_table, fprs_choosen)\n', (897, 923), True, 'import numpy as np\n'), ((1961, 1975), 'numpy.log2', 'np.log2', (['(5 / 4)'], {}), '(5 / 4)\n', (1968, 1975), True, 'import numpy as np\n'), ((2042, 2088), 'pandas.read_csv', 'pd.read_csv', (['path_to_deg'], {'sep': '""","""', 'comment': '"""#"""'}), "(path_to_deg, sep=',', comment='#')\n", (2053, 2088), True, 'import pandas as pd\n'), ((2210, 2245), 'enrest.parsers.promoters_parser', 'promoters_parser', (['path_to_promoters'], {}), '(path_to_promoters)\n', (2226, 2245), False, 'from enrest.parsers import matrices_parser, promoters_parser, read_set_of_genes\n'), ((2306, 2348), 'enrest.parsers.matrices_parser', 'matrices_parser', (['path_to_db'], {'f': 'file_format'}), '(path_to_db, f=file_format)\n', (2321, 2348), False, 'from enrest.parsers import matrices_parser, promoters_parser, read_set_of_genes\n'), ((1088, 1177), 
'enrest.functions.get_deg_gene_ids', 'get_deg_gene_ids', (['deg_table', 'condition'], {'padj_thr': 'padj_thr', 'log2fc_thr': 'log2fc_thr_deg'}), '(deg_table, condition, padj_thr=padj_thr, log2fc_thr=\n log2fc_thr_deg)\n', (1104, 1177), False, 'from enrest.functions import run_test, get_threshold, get_deg_gene_ids, get_other_gene_ids_for_deg_case, split_scores_by_gene_ids\n'), ((1193, 1293), 'enrest.functions.get_other_gene_ids_for_deg_case', 'get_other_gene_ids_for_deg_case', (['deg_table'], {'padj_thr': 'padj_thr', 'log2fc_thr': 'log2fc_thr_background'}), '(deg_table, padj_thr=padj_thr, log2fc_thr=\n log2fc_thr_background)\n', (1224, 1293), False, 'from enrest.functions import run_test, get_threshold, get_deg_gene_ids, get_other_gene_ids_for_deg_case, split_scores_by_gene_ids\n'), ((1608, 1677), 'enrest.functions.run_test', 'run_test', (['genes', 'deg_scores', 'other_scores', 'threshold_table', 'parameter'], {}), '(genes, deg_scores, other_scores, threshold_table, parameter)\n', (1616, 1677), False, 'from enrest.functions import run_test, get_threshold, get_deg_gene_ids, get_other_gene_ids_for_deg_case, split_scores_by_gene_ids\n'), ((1373, 1438), 'enrest.functions.split_scores_by_gene_ids', 'split_scores_by_gene_ids', (['all_scores', 'all_ids', 'deg_ids', 'other_ids'], {}), '(all_scores, all_ids, deg_ids, other_ids)\n', (1397, 1438), False, 'from enrest.functions import run_test, get_threshold, get_deg_gene_ids, get_other_gene_ids_for_deg_case, split_scores_by_gene_ids\n'), ((1523, 1589), 'enrest.functions.split_scores_by_gene_ids', 'split_scores_by_gene_ids', (['best_scores', 'all_ids', 'deg_ids', 'other_ids'], {}), '(best_scores, all_ids, deg_ids, other_ids)\n', (1547, 1589), False, 'from enrest.functions import run_test, get_threshold, get_deg_gene_ids, get_other_gene_ids_for_deg_case, split_scores_by_gene_ids\n')] |
# ----------------------------------------------------------------- #
# This code was taken from github repo: #
# "Connectionist Temporal Classification (CTC) decoding algorithms" #
# developed by <NAME> #
# https://github.com/githubharald/CTCDecoder/ #
# Copyright (c) 2018 <NAME> #
# ----------------------------------------------------------------- #
from __future__ import division
from __future__ import print_function
from itertools import groupby
import numpy as np
def ctcBestPath(mat, classes):
    """Greedy (best-path) CTC decoding, as shown by Graves (Dissertation, p63).

    Takes the argmax label at every time step, collapses consecutive
    repeats, removes blanks, and maps the remaining labels to characters.
    """
    BLANK_IDX = 0  # index of the CTC BLANK character (original repo used len(classes))
    winners = np.argmax(mat, axis=1)  # most likely label per time step
    decoded = []
    for label, _ in groupby(winners):  # collapse runs of equal labels
        if label != BLANK_IDX:
            decoded.append(classes[label])
    return ''.join(decoded)
"itertools.groupby",
"numpy.argmax"
] | [((758, 780), 'numpy.argmax', 'np.argmax', (['mat'], {'axis': '(1)'}), '(mat, axis=1)\n', (767, 780), True, 'import numpy as np\n'), ((1022, 1040), 'itertools.groupby', 'groupby', (['best_path'], {}), '(best_path)\n', (1029, 1040), False, 'from itertools import groupby\n')] |
import argparse
import gc
import glob
import logging
import math
import os
import sys
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import data
import model_search_rnn as model
from architect_rnn import Architect
from utils_rnn import batchify, get_batch, repackage_hidden, save_checkpoint
import utils
parser = argparse.ArgumentParser(description='PyTorch PennTreeBank/WikiText2 Language Model')
parser.add_argument('--data', type=str, default='dataset/penn/',
help='location of the data corpus')
parser.add_argument('--emsize', type=int, default=300,
help='size of word embeddings')
parser.add_argument('--nhid', type=int, default=300,
help='number of hidden units per layer')
parser.add_argument('--nhidlast', type=int, default=300,
help='number of hidden units for the last rnn layer')
parser.add_argument('--lr', type=float, default=20,
help='initial learning rate')
parser.add_argument('--clip', type=float, default=0.25,
help='gradient clipping')
parser.add_argument('--epochs', type=int, default=50,
help='upper epoch limit')
parser.add_argument('--batch_size', type=int, default=256, metavar='N',
help='batch size')
parser.add_argument('--bptt', type=int, default=35,
help='sequence length')
parser.add_argument('--dropout', type=float, default=0.75,
help='dropout applied to layers (0 = no dropout)')
parser.add_argument('--dropouth', type=float, default=0.25,
help='dropout for hidden nodes in rnn layers (0 = no dropout)')
parser.add_argument('--dropoutx', type=float, default=0.75,
help='dropout for input nodes in rnn layers (0 = no dropout)')
parser.add_argument('--dropouti', type=float, default=0.2,
help='dropout for input embedding layers (0 = no dropout)')
parser.add_argument('--dropoute', type=float, default=0,
help='dropout to remove words from embedding layer (0 = no dropout)')
parser.add_argument('--seed', type=int, default=3,
help='random seed')
parser.add_argument('--nonmono', type=int, default=5,
help='random seed')
parser.add_argument('--cuda', action='store_false',
help='use CUDA')
parser.add_argument('--log-interval', type=int, default=50, metavar='N',
help='report interval')
parser.add_argument('--save', type=str, default='', help='experiment name')
parser.add_argument('--alpha', type=float, default=0,
help='alpha L2 regularization on RNN activation (alpha = 0 means no regularization)')
parser.add_argument('--beta', type=float, default=1e-3,
help='beta slowness regularization applied on RNN activiation (beta = 0 means no regularization)')
parser.add_argument('--wdecay', type=float, default=5e-7,
help='weight decay applied to all weights')
parser.add_argument('--continue_train', action='store_true',
help='continue train from a checkpoint')
parser.add_argument('--small_batch_size', type=int, default=-1,
help='the batch size for computation. batch_size should be divisible by small_batch_size.\
In our implementation, we compute gradients with small_batch_size multiple times, and accumulate the gradients\
until batch_size is reached. An update step is then performed.')
parser.add_argument('--max_seq_len_delta', type=int, default=20,
help='max sequence length')
parser.add_argument('--single_gpu', default=True, action='store_false',
help='use single GPU')
parser.add_argument('--gpu', type=int, default=0, help='GPU device to use')
parser.add_argument('--unrolled', action='store_true', default=False, help='use one-step unrolled validation loss')
parser.add_argument('--arch_wdecay', type=float, default=1e-3,
help='weight decay for the architecture encoding alpha')
parser.add_argument('--arch_lr', type=float, default=3e-3,
help='learning rate for the architecture encoding alpha')
parser.add_argument('--crb', action='store_true', default=False, help='use CRB activation instead of softmax')
parser.add_argument('--rho', type=float, default=1e-1, help='admm/prox relative weight')
parser.add_argument('--init_alpha_threshold', type=float, default=1.0, help='initial alpha threshold')
parser.add_argument('--threshold_multiplier', type=float, default=1.05, help='threshold multiplier')
parser.add_argument('--schedfreq', type=float, default=1.0, help='w steps per each alpha step')
parser.add_argument('--ewma', type=float, default=1.0, help='weight for exp weighted moving average (1.0 for no ewma)')
parser.add_argument('--dyno_split', action='store_true', default=False,
help='use train/val split based on dynamic schedule')
parser.add_argument('--dyno_schedule', action='store_true', default=False, help='use dynamic schedule')
parser.add_argument('--reg', type=str, default='darts', help='reg/opt to use')
args = parser.parse_args()
# Post-parse fix-ups: negative values are sentinels for "inherit default".
if args.nhidlast < 0:
    args.nhidlast = args.emsize
if args.small_batch_size < 0:
    args.small_batch_size = args.batch_size
# Experiment directory: auto-generated from SLURM job id + timestamp unless --save is given.
if len(args.save) == 0:
    args.save = os.path.join(utils.get_dir(),
                                 'exp/rnn-{}-{}'.format(os.getenv('SLURM_JOB_ID'), time.strftime("%Y%m%d-%H%M%S")))
else:
    args.save = os.path.join(utils.get_dir(), 'exp', args.save)
utils.create_exp_dir(args.save, scripts_to_save=glob.glob('src/*.py'))
# Log to both stdout and <save>/log.txt with the same format.
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
                    format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
# Set the random seed manually for reproducibility.
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
    if not args.cuda:
        print("WARNING: You have a CUDA device, so you should probably run with --cuda")
    else:
        torch.cuda.set_device(args.gpu)
        cudnn.benchmark = True
        cudnn.enabled = True
        torch.cuda.manual_seed_all(args.seed)
# Dynamic schedule: threshold_divider == threshold_multiplier ** (-schedfreq).
if args.dyno_schedule:
    args.threshold_divider = np.exp(-np.log(args.threshold_multiplier) * args.schedfreq)
if args.dyno_split:
    args.train_portion = 1 - 1 / (1 + args.schedfreq)
alpha_threshold = args.init_alpha_threshold
# Load the corpus and build batched train/search/val/test tensors.
corpus = data.Corpus(os.path.join(utils.get_dir(), args.data))
eval_batch_size = 10
test_batch_size = 1
train_data = batchify(corpus.train, args.batch_size, args)
search_data = batchify(corpus.valid, args.batch_size, args)  # batches used for architecture steps
val_data = batchify(corpus.valid, eval_batch_size, args)
test_data = batchify(corpus.test, test_batch_size, args)
ntokens = len(corpus.dictionary)
# Build the search model, or resume a previously saved one.
if args.continue_train:
    model = torch.load(os.path.join(args.save, 'model.pt'))
else:
    model = model.RNNModelSearch(args.crb, args.rho, args.ewma, args.reg, args.epochs, ntokens, args.emsize, args.nhid,
                                 args.nhidlast,
                                 args.dropout, args.dropouth, args.dropoutx, args.dropouti, args.dropoute)
# Count and log the total number of model parameters.
size = 0
for p in model.parameters():
    size += p.nelement()
logging.info('param size: {}'.format(size))
logging.info('initial genotype:')
logging.info(model.genotype())
# Move the model to GPU(s).  DataParallel splits along dim=1 —
# presumably the batch dimension of seq-first LM tensors; confirm against batchify.
if args.cuda:
    if args.single_gpu:
        parallel_model = model.cuda()
    else:
        parallel_model = nn.DataParallel(model, dim=1).cuda()
else:
    parallel_model = model
architect = Architect(parallel_model, args)  # handles architecture-weight updates
total_params = sum(x.data.nelement() for x in model.parameters())
logging.info('Args: {}'.format(args))
logging.info('Model total parameters: {}'.format(total_params))
def evaluate(data_source, batch_size=10):
    """Return the average NLL per token of ``data_source``.

    Relies on the module-level ``model``, ``parallel_model``, ``corpus``
    and ``args``.  Runs in eval mode (dropout disabled); the hidden state
    is carried across batches but detached between them.
    """
    model.eval()  # disable dropout
    total_loss = 0
    ntokens = len(corpus.dictionary)
    hidden = model.init_hidden(batch_size)
    for offset in range(0, data_source.size(0) - 1, args.bptt):
        batch_x, batch_y = get_batch(data_source, offset, args, evaluation=True)
        batch_y = batch_y.view(-1)
        log_prob, hidden = parallel_model(batch_x, hidden)
        step_loss = nn.functional.nll_loss(log_prob.view(-1, log_prob.size(2)), batch_y).data
        # Weight each batch's loss by its sequence length.
        total_loss += step_loss * len(batch_x)
        hidden = repackage_hidden(hidden)
    return total_loss.item() / len(data_source)
def train(alpha_threshold):
    """Run one epoch of alternating weight/architecture training.

    Iterates over ``train_data`` in BPTT windows, accumulating gradients in
    ``small_batch_size`` chunks.  On scheduled batches an architecture step
    is taken on a batch from ``search_data`` (via the module-level
    ``architect``); with ``--dyno_schedule`` the decision is driven by the
    model's Fisher-information EWMA against ``alpha_threshold``, which is
    rescaled and returned for the next epoch.
    """
    assert args.batch_size % args.small_batch_size == 0, 'batch_size must be divisible by small_batch_size'
    # Turn on training mode which enables dropout.
    total_loss = 0
    start_time = time.time()
    ntokens = len(corpus.dictionary)
    # One hidden state per gradient-accumulation chunk, for train and search data.
    hidden = [model.init_hidden(args.small_batch_size) for _ in range(args.batch_size // args.small_batch_size)]
    hidden_valid = [model.init_hidden(args.small_batch_size) for _ in range(args.batch_size // args.small_batch_size)]
    batch, i = 0, 0
    arch_step = False
    # NOTE(review): arch_step is never reset to False after an architecture
    # step, so once taken, later batches reuse the stale data_valid batch —
    # confirm whether this is intentional.
    while i < train_data.size(0) - 1 - 1:
        # With prob. 0.95 use the full BPTT window, otherwise half of it.
        bptt = args.bptt if np.random.random() < 0.95 else args.bptt / 2.
        # Prevent excessively small or negative sequence lengths
        # seq_len = max(5, int(np.random.normal(bptt, 5)))
        # # There's a very small chance that it could select a very long sequence length resulting in OOM
        # seq_len = min(seq_len, args.bptt + args.max_seq_len_delta)
        seq_len = int(bptt)
        # Scale the learning rate proportionally to the sampled sequence length.
        lr2 = optimizer.param_groups[0]['lr']
        optimizer.param_groups[0]['lr'] = lr2 * seq_len / args.bptt
        model.train()
        print("alpha step: ", model.FI_ewma, alpha_threshold, end=" ")
        # Decide whether this batch also takes an architecture step:
        # fixed cadence (every schedfreq batches) or dynamic (FI_ewma below threshold).
        if (not args.dyno_schedule and (batch + 1) % int(args.schedfreq) == 0) or (
                args.dyno_schedule and 0.0 < model.FI_ewma < alpha_threshold):
            data_valid, targets_valid = get_batch(search_data, i % (search_data.size(0) - 1), args)
            arch_step = True
            if args.dyno_schedule:
                alpha_threshold *= args.threshold_divider
        elif args.dyno_schedule:
            alpha_threshold *= args.threshold_multiplier
        print(arch_step)
        data, targets = get_batch(train_data, i, args, seq_len=seq_len)
        optimizer.zero_grad()
        # Gradient accumulation over small_batch_size-sized column slices.
        start, end, s_id = 0, args.small_batch_size, 0
        while start < args.batch_size:
            model.tick(1 / (len(train_data) // args.bptt))
            cur_data, cur_targets = data[:, start: end], targets[:, start: end].contiguous().view(-1)
            if arch_step:
                cur_data_valid, cur_targets_valid = data_valid[:, start: end], targets_valid[:,
                                                                              start: end].contiguous().view(-1)
            # Starting each batch, we detach the hidden state from how it was previously produced.
            # If we didn't, the model would try backpropagating all the way to start of the dataset.
            hidden[s_id] = repackage_hidden(hidden[s_id])
            if arch_step:
                hidden_valid[s_id] = repackage_hidden(hidden_valid[s_id])
                hidden_valid[s_id], grad_norm = architect.step(
                    hidden[s_id], cur_data, cur_targets,
                    hidden_valid[s_id], cur_data_valid, cur_targets_valid,
                    optimizer,
                    args.unrolled)
                # assuming small_batch_size = batch_size so we don't accumulate gradients
                optimizer.zero_grad()
                hidden[s_id] = repackage_hidden(hidden[s_id])
            log_prob, hidden[s_id], rnn_hs, dropped_rnn_hs = parallel_model(cur_data, hidden[s_id], return_h=True)
            raw_loss = nn.functional.nll_loss(log_prob.view(-1, log_prob.size(2)), cur_targets)
            loss = raw_loss
            # Activation Regularization (L2 on dropped last-layer activations)
            if args.alpha > 0:
                loss = loss + sum(args.alpha * dropped_rnn_h.pow(2).mean() for dropped_rnn_h in dropped_rnn_hs[-1:])
            # Temporal Activation Regularization (slowness)
            loss = loss + sum(args.beta * (rnn_h[1:] - rnn_h[:-1]).pow(2).mean() for rnn_h in rnn_hs[-1:])
            # Scale so accumulated chunk gradients sum to the full-batch gradient.
            loss *= args.small_batch_size / args.batch_size
            total_loss += raw_loss.data * args.small_batch_size / args.batch_size
            loss.backward()
            model.track_FI()
            s_id += 1
            start = end
            end = start + args.small_batch_size
            gc.collect()
        # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs.
        # NOTE(review): torch.nn.utils.clip_grad_norm is the deprecated alias of
        # clip_grad_norm_ in modern PyTorch.
        torch.nn.utils.clip_grad_norm(model.parameters(), args.clip)
        optimizer.step()
        # total_loss += raw_loss.data
        # Restore the unscaled learning rate.
        optimizer.param_groups[0]['lr'] = lr2
        if batch % args.log_interval == 0 and batch > 0:
            logging.info(parallel_model.genotype())
            print(parallel_model.activate(parallel_model.weights))
            cur_loss = total_loss.item() / args.log_interval
            elapsed = time.time() - start_time
            logging.info('| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.2f} | ms/batch {:5.2f} | '
                         'loss {:5.2f} | ppl {:8.2f}'.format(
                epoch, batch, len(train_data) // args.bptt, optimizer.param_groups[0]['lr'],
                elapsed * 1000 / args.log_interval, cur_loss, math.exp(cur_loss)))
            total_loss = 0
            start_time = time.time()
        batch += 1
        i += seq_len
    return alpha_threshold
# Loop over epochs.
lr = args.lr
best_val_loss = []
stored_loss = 100000000
# When resuming, restore the matching optimizer (ASGD if the saved state has
# its 't0' key, otherwise plain SGD); otherwise start fresh with SGD.
if args.continue_train:
    optimizer_state = torch.load(os.path.join(args.save, 'optimizer.pt'))
    if 't0' in optimizer_state['param_groups'][0]:
        optimizer = torch.optim.ASGD(model.parameters(), lr=args.lr, t0=0, lambd=0., weight_decay=args.wdecay)
    else:
        optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.wdecay)
    optimizer.load_state_dict(optimizer_state)
else:
    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.wdecay)
for epoch in range(1, args.epochs + 1):
    epoch_start_time = time.time()
    # train() returns the (possibly rescaled) dynamic-schedule threshold.
    alpha_threshold = train(alpha_threshold)
    val_loss = evaluate(val_data, eval_batch_size)
    logging.info('-' * 89)
    logging.info('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '
                 'valid ppl {:8.2f}'.format(epoch, (time.time() - epoch_start_time),
                                            val_loss, math.exp(val_loss)))
    logging.info('-' * 89)
    # Checkpoint whenever validation improves.
    if val_loss < stored_loss:
        save_checkpoint(model, optimizer, epoch, args.save)
        logging.info('Saving Normal!')
        stored_loss = val_loss
    # Always record the current genotype for this epoch.
    genotype = model.genotype()
    logging.info('genotype = %s', genotype)
    f = open(os.path.join(args.save, 'genotype.txt'), "w")
    f.write(str(genotype))
    f.close()
    best_val_loss.append(val_loss)
| [
"logging.getLogger",
"architect_rnn.Architect",
"numpy.log",
"utils_rnn.get_batch",
"torch.cuda.is_available",
"model_search_rnn.cuda",
"math.exp",
"logging.info",
"argparse.ArgumentParser",
"numpy.random.random",
"utils_rnn.save_checkpoint",
"model_search_rnn.RNNModelSearch",
"utils.get_dir... | [((404, 493), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PyTorch PennTreeBank/WikiText2 Language Model"""'}), "(description=\n 'PyTorch PennTreeBank/WikiText2 Language Model')\n", (427, 493), False, 'import argparse\n'), ((5783, 5894), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO', 'format': 'log_format', 'datefmt': '"""%m/%d %I:%M:%S %p"""'}), "(stream=sys.stdout, level=logging.INFO, format=\n log_format, datefmt='%m/%d %I:%M:%S %p')\n", (5802, 5894), False, 'import logging\n'), ((6106, 6131), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (6120, 6131), True, 'import numpy as np\n'), ((6132, 6160), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (6149, 6160), False, 'import torch\n'), ((6164, 6189), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6187, 6189), False, 'import torch\n'), ((6809, 6854), 'utils_rnn.batchify', 'batchify', (['corpus.train', 'args.batch_size', 'args'], {}), '(corpus.train, args.batch_size, args)\n', (6817, 6854), False, 'from utils_rnn import batchify, get_batch, repackage_hidden, save_checkpoint\n'), ((6869, 6914), 'utils_rnn.batchify', 'batchify', (['corpus.valid', 'args.batch_size', 'args'], {}), '(corpus.valid, args.batch_size, args)\n', (6877, 6914), False, 'from utils_rnn import batchify, get_batch, repackage_hidden, save_checkpoint\n'), ((6926, 6971), 'utils_rnn.batchify', 'batchify', (['corpus.valid', 'eval_batch_size', 'args'], {}), '(corpus.valid, eval_batch_size, args)\n', (6934, 6971), False, 'from utils_rnn import batchify, get_batch, repackage_hidden, save_checkpoint\n'), ((6984, 7028), 'utils_rnn.batchify', 'batchify', (['corpus.test', 'test_batch_size', 'args'], {}), '(corpus.test, test_batch_size, args)\n', (6992, 7028), False, 'from utils_rnn import batchify, get_batch, repackage_hidden, save_checkpoint\n'), ((7447, 7465), 
'model_search_rnn.parameters', 'model.parameters', ([], {}), '()\n', (7463, 7465), True, 'import model_search_rnn as model\n'), ((7536, 7569), 'logging.info', 'logging.info', (['"""initial genotype:"""'], {}), "('initial genotype:')\n", (7548, 7569), False, 'import logging\n'), ((7795, 7826), 'architect_rnn.Architect', 'Architect', (['parallel_model', 'args'], {}), '(parallel_model, args)\n', (7804, 7826), False, 'from architect_rnn import Architect\n'), ((5935, 5969), 'os.path.join', 'os.path.join', (['args.save', '"""log.txt"""'], {}), "(args.save, 'log.txt')\n", (5947, 5969), False, 'import os\n'), ((5987, 6016), 'logging.Formatter', 'logging.Formatter', (['log_format'], {}), '(log_format)\n', (6004, 6016), False, 'import logging\n'), ((7165, 7370), 'model_search_rnn.RNNModelSearch', 'model.RNNModelSearch', (['args.crb', 'args.rho', 'args.ewma', 'args.reg', 'args.epochs', 'ntokens', 'args.emsize', 'args.nhid', 'args.nhidlast', 'args.dropout', 'args.dropouth', 'args.dropoutx', 'args.dropouti', 'args.dropoute'], {}), '(args.crb, args.rho, args.ewma, args.reg, args.epochs,\n ntokens, args.emsize, args.nhid, args.nhidlast, args.dropout, args.\n dropouth, args.dropoutx, args.dropouti, args.dropoute)\n', (7185, 7370), True, 'import model_search_rnn as model\n'), ((7583, 7599), 'model_search_rnn.genotype', 'model.genotype', ([], {}), '()\n', (7597, 7599), True, 'import model_search_rnn as model\n'), ((8098, 8110), 'model_search_rnn.eval', 'model.eval', ([], {}), '()\n', (8108, 8110), True, 'import model_search_rnn as model\n'), ((8180, 8209), 'model_search_rnn.init_hidden', 'model.init_hidden', (['batch_size'], {}), '(batch_size)\n', (8197, 8209), True, 'import model_search_rnn as model\n'), ((8880, 8891), 'time.time', 'time.time', ([], {}), '()\n', (8889, 8891), False, 'import time\n'), ((14345, 14356), 'time.time', 'time.time', ([], {}), '()\n', (14354, 14356), False, 'import time\n'), ((14458, 14480), 'logging.info', 'logging.info', (["('-' * 89)"], {}), "('-' * 
89)\n", (14470, 14480), False, 'import logging\n'), ((14726, 14748), 'logging.info', 'logging.info', (["('-' * 89)"], {}), "('-' * 89)\n", (14738, 14748), False, 'import logging\n'), ((5473, 5488), 'utils.get_dir', 'utils.get_dir', ([], {}), '()\n', (5486, 5488), False, 'import utils\n'), ((5637, 5652), 'utils.get_dir', 'utils.get_dir', ([], {}), '()\n', (5650, 5652), False, 'import utils\n'), ((5720, 5741), 'glob.glob', 'glob.glob', (['"""src/*.py"""'], {}), "('src/*.py')\n", (5729, 5741), False, 'import glob\n'), ((6018, 6037), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (6035, 6037), False, 'import logging\n'), ((6320, 6351), 'torch.cuda.set_device', 'torch.cuda.set_device', (['args.gpu'], {}), '(args.gpu)\n', (6341, 6351), False, 'import torch\n'), ((6420, 6457), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['args.seed'], {}), '(args.seed)\n', (6446, 6457), False, 'import torch\n'), ((6724, 6739), 'utils.get_dir', 'utils.get_dir', ([], {}), '()\n', (6737, 6739), False, 'import utils\n'), ((7110, 7145), 'os.path.join', 'os.path.join', (['args.save', '"""model.pt"""'], {}), "(args.save, 'model.pt')\n", (7122, 7145), False, 'import os\n'), ((7665, 7677), 'model_search_rnn.cuda', 'model.cuda', ([], {}), '()\n', (7675, 7677), True, 'import model_search_rnn as model\n'), ((8293, 8341), 'utils_rnn.get_batch', 'get_batch', (['data_source', 'i', 'args'], {'evaluation': '(True)'}), '(data_source, i, args, evaluation=True)\n', (8302, 8341), False, 'from utils_rnn import batchify, get_batch, repackage_hidden, save_checkpoint\n'), ((8581, 8605), 'utils_rnn.repackage_hidden', 'repackage_hidden', (['hidden'], {}), '(hidden)\n', (8597, 8605), False, 'from utils_rnn import batchify, get_batch, repackage_hidden, save_checkpoint\n'), ((8943, 8983), 'model_search_rnn.init_hidden', 'model.init_hidden', (['args.small_batch_size'], {}), '(args.small_batch_size)\n', (8960, 8983), True, 'import model_search_rnn as model\n'), ((9062, 9102), 
'model_search_rnn.init_hidden', 'model.init_hidden', (['args.small_batch_size'], {}), '(args.small_batch_size)\n', (9079, 9102), True, 'import model_search_rnn as model\n'), ((9769, 9782), 'model_search_rnn.train', 'model.train', ([], {}), '()\n', (9780, 9782), True, 'import model_search_rnn as model\n'), ((10380, 10427), 'utils_rnn.get_batch', 'get_batch', (['train_data', 'i', 'args'], {'seq_len': 'seq_len'}), '(train_data, i, args, seq_len=seq_len)\n', (10389, 10427), False, 'from utils_rnn import batchify, get_batch, repackage_hidden, save_checkpoint\n'), ((13831, 13870), 'os.path.join', 'os.path.join', (['args.save', '"""optimizer.pt"""'], {}), "(args.save, 'optimizer.pt')\n", (13843, 13870), False, 'import os\n'), ((14223, 14241), 'model_search_rnn.parameters', 'model.parameters', ([], {}), '()\n', (14239, 14241), True, 'import model_search_rnn as model\n'), ((14789, 14840), 'utils_rnn.save_checkpoint', 'save_checkpoint', (['model', 'optimizer', 'epoch', 'args.save'], {}), '(model, optimizer, epoch, args.save)\n', (14804, 14840), False, 'from utils_rnn import batchify, get_batch, repackage_hidden, save_checkpoint\n'), ((14849, 14879), 'logging.info', 'logging.info', (['"""Saving Normal!"""'], {}), "('Saving Normal!')\n", (14861, 14879), False, 'import logging\n'), ((14931, 14947), 'model_search_rnn.genotype', 'model.genotype', ([], {}), '()\n', (14945, 14947), True, 'import model_search_rnn as model\n'), ((14956, 14995), 'logging.info', 'logging.info', (['"""genotype = %s"""', 'genotype'], {}), "('genotype = %s', genotype)\n", (14968, 14995), False, 'import logging\n'), ((5542, 5567), 'os.getenv', 'os.getenv', (['"""SLURM_JOB_ID"""'], {}), "('SLURM_JOB_ID')\n", (5551, 5567), False, 'import os\n'), ((5569, 5599), 'time.strftime', 'time.strftime', (['"""%Y%m%d-%H%M%S"""'], {}), "('%Y%m%d-%H%M%S')\n", (5582, 5599), False, 'import time\n'), ((7874, 7892), 'model_search_rnn.parameters', 'model.parameters', ([], {}), '()\n', (7890, 7892), True, 'import 
model_search_rnn as model\n'), ((11179, 11209), 'utils_rnn.repackage_hidden', 'repackage_hidden', (['hidden[s_id]'], {}), '(hidden[s_id])\n', (11195, 11209), False, 'from utils_rnn import batchify, get_batch, repackage_hidden, save_checkpoint\n'), ((11721, 11751), 'utils_rnn.repackage_hidden', 'repackage_hidden', (['hidden[s_id]'], {}), '(hidden[s_id])\n', (11737, 11751), False, 'from utils_rnn import batchify, get_batch, repackage_hidden, save_checkpoint\n'), ((12531, 12547), 'model_search_rnn.track_FI', 'model.track_FI', ([], {}), '()\n', (12545, 12547), True, 'import model_search_rnn as model\n'), ((12656, 12668), 'gc.collect', 'gc.collect', ([], {}), '()\n', (12666, 12668), False, 'import gc\n'), ((12789, 12807), 'model_search_rnn.parameters', 'model.parameters', ([], {}), '()\n', (12805, 12807), True, 'import model_search_rnn as model\n'), ((13617, 13628), 'time.time', 'time.time', ([], {}), '()\n', (13626, 13628), False, 'import time\n'), ((13960, 13978), 'model_search_rnn.parameters', 'model.parameters', ([], {}), '()\n', (13976, 13978), True, 'import model_search_rnn as model\n'), ((14080, 14098), 'model_search_rnn.parameters', 'model.parameters', ([], {}), '()\n', (14096, 14098), True, 'import model_search_rnn as model\n'), ((14701, 14719), 'math.exp', 'math.exp', (['val_loss'], {}), '(val_loss)\n', (14709, 14719), False, 'import math\n'), ((15014, 15053), 'os.path.join', 'os.path.join', (['args.save', '"""genotype.txt"""'], {}), "(args.save, 'genotype.txt')\n", (15026, 15053), False, 'import os\n'), ((6519, 6552), 'numpy.log', 'np.log', (['args.threshold_multiplier'], {}), '(args.threshold_multiplier)\n', (6525, 6552), True, 'import numpy as np\n'), ((7713, 7742), 'torch.nn.DataParallel', 'nn.DataParallel', (['model'], {'dim': '(1)'}), '(model, dim=1)\n', (7728, 7742), True, 'import torch.nn as nn\n'), ((9273, 9291), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (9289, 9291), True, 'import numpy as np\n'), ((11273, 11309), 
'utils_rnn.repackage_hidden', 'repackage_hidden', (['hidden_valid[s_id]'], {}), '(hidden_valid[s_id])\n', (11289, 11309), False, 'from utils_rnn import batchify, get_batch, repackage_hidden, save_checkpoint\n'), ((13189, 13200), 'time.time', 'time.time', ([], {}), '()\n', (13198, 13200), False, 'import time\n'), ((14614, 14625), 'time.time', 'time.time', ([], {}), '()\n', (14623, 14625), False, 'import time\n'), ((13544, 13562), 'math.exp', 'math.exp', (['cur_loss'], {}), '(cur_loss)\n', (13552, 13562), False, 'import math\n')] |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
import joblib
from utils import normalize_MPU9250_data, split_df, get_intervals_from_moments, EventIntervals
from GeneralAnalyser import GeneralAnalyser, plot_measurements
# plt.interactive(True)
# Pandas display tweak and output folder for generated figures.
pd.options.display.max_columns = 15
pic_prefix = 'pic/'
# data_path = 'data/CSV'
# data_path = 'Anonimised Data/Data'
# sessions_dict = joblib.load('data/sessions_dict')
# sessions_dict maps session_id -> {sensor_name: DataFrame with a 'time' column};
# gamedata_dict maps session_id -> dict of in-game event timestamps.
sessions_dict = joblib.load('data/sessions_dict')
gamedata_dict = joblib.load('data/gamedata_dict')
# Columns contributed by each sensor device.
sensors_columns_dict = {
    'hrm': ['hrm'],
    'envibox': ['als', 'mic', 'humidity', 'temperature', 'co2'],
    'datalog': ['hrm2', 'resistance', 'muscle_activity']
}
sensors_list = list(sensors_columns_dict.keys())
sensors_columns_list = []
# For every session that has all sensors AND game data: resample each sensor
# stream onto a common 100 ms grid and wrap the game events (shootouts, kills,
# deaths) into EventIntervals objects.
# NOTE(review): this loop leaks its locals (df, df_discretized_list,
# event_intervals_*, ...) to module scope; the code below the loop operates on
# the values from the LAST processed session.
for session_id, session_data_dict in sessions_dict.items():
    df_dict = {}
    # Skip sessions missing any required sensor.
    if not set(sensors_list).issubset(set(session_data_dict.keys())):
        continue
    # Skip sessions without matching in-game event data.
    if session_id not in gamedata_dict:
        continue
    df_discretized_list = []
    for sensor_name in sensors_columns_dict:
        df = session_data_dict[sensor_name]
        # Index by wall-clock time; the 'time' column is interpreted as epoch seconds.
        df = df.set_index(pd.DatetimeIndex(pd.to_datetime(df['time'], unit='s')))
        df_discretized = df.resample('100ms').mean().ffill() # Forward fill is better
        df_discretized_list.append(df_discretized)
    moments_kills = gamedata_dict[session_id]['times_kills']
    moments_death = gamedata_dict[session_id]['times_is_killed']
    # Each kill/death moment becomes a +/- 1 second window around the event.
    duration = 1
    intervals_shootout = gamedata_dict[session_id]['shootout_times_start_end']
    intervals_kills = get_intervals_from_moments(moments_kills, interval_start=-duration, interval_end=duration)
    intervals_death = get_intervals_from_moments(moments_death, interval_start=-duration, interval_end=duration)
    event_intervals_shootout = EventIntervals(intervals_list=intervals_shootout, label='shootouts', color='blue')
    event_intervals_kills = EventIntervals(intervals_list=intervals_kills, label='kills', color='green')
    event_intervals_death = EventIntervals(intervals_list=intervals_death, label='deaths', color='red')
def discretize_time_column(time_column, discretization=0.1):
    """Snap every value of *time_column* (seconds) down to the nearest
    multiple of *discretization*.

    Works elementwise on pandas Series and numpy arrays.
    """
    return time_column - time_column % discretization


def auxilary_discretization_table(time_column, discretization):
    """Discretize *time_column* and build the full timestep grid covering it.

    BUGFIX: the original body ignored the *time_column* parameter and both
    read and reassigned the module-level ``df``, which made ``df`` local and
    raised UnboundLocalError on the first line; it also called
    ``df.set_index()`` without the required keys argument and discarded the
    results of several expression statements.  Those broken dead statements
    were removed and the function now operates on its parameter.

    TODO(review): the de-duplication policy described in the original comment
    was never implemented:
      - several records mapping to one timestep => keep the earliest;
      - no record for a timestep => carry the latest available one forward.

    Returns
    -------
    tuple
        ``(time_column_discretized, timesteps)`` — the discretized input and
        the complete ``np.arange`` grid from 0 up to its maximum, both with
        step *discretization*.
    """
    time_column_discretized = discretize_time_column(time_column, discretization)
    timesteps = np.arange(0, time_column_discretized.max() + discretization, discretization)
    return time_column_discretized, timesteps
# --- Exploratory modelling on the LAST session processed by the loop above ---
# NOTE(review): everything below consumes loop-leaked globals
# (event_intervals_shootout, df, df_discretized_list).
event_intervals_shootout.intervals_list
# no-op expression statement above (inspection leftover); result discarded.
time_column_discretized = df['time'] - df['time'] % 0.1
# Immediately overwritten with a fixed 0..180 s grid at 100 ms resolution.
time_column_discretized = pd.Series(np.arange(0, 180, 0.1))
# Binary target: 1 where the timestep falls inside any shootout interval.
game_mask = event_intervals_shootout.get_mask_intervals_union(time_column_discretized)
game_mask = 1 * game_mask
# Feature matrix: all discretized sensor streams side by side, gaps filled
# both forward and backward, then the (duplicated) 'time' columns dropped.
df_merged = pd.concat(df_discretized_list, axis=1)
df_merged = df_merged.ffill().bfill()
df_merged = df_merged.drop(['time'], axis=1)
df_merged = df_merged.reset_index(drop=True)
from sklearn.preprocessing import StandardScaler
ss = StandardScaler()
train = ss.fit_transform(df_merged)
import torch
import torch.nn as nn
from torch.optim import Adam
from torch.nn.utils.rnn import pack_padded_sequence, pack_sequence
# A single-layer LSTM with hidden size equal to the number of sensor features.
input_size = train.shape[1]
hidden_size = input_size
lstm = nn.LSTM(input_size, hidden_size)
opt = Adam(lstm.parameters())
from torch.utils.data import TensorDataset, DataLoader
train = torch.Tensor(train)
target = torch.Tensor(game_mask).long()
dataset = TensorDataset(train, target)
data_loader = DataLoader(dataset, batch_size=8)
# NOTE(review): the two pack_* calls below discard their results, and
# pack_padded_sequence is called without lengths — exploration leftovers.
pack_padded_sequence(train)
list(pack_sequence(train))[0].shape
# Forward pass only; no loss or backward step — a smoke test of the pipeline.
for x_batch, y_batch in data_loader:
    lstm(x_batch)
| [
"torch.utils.data.DataLoader",
"torch.nn.LSTM",
"joblib.load",
"utils.get_intervals_from_moments",
"torch.nn.utils.rnn.pack_sequence",
"torch.Tensor",
"pandas.to_datetime",
"torch.utils.data.TensorDataset",
"sklearn.preprocessing.StandardScaler",
"torch.nn.utils.rnn.pack_padded_sequence",
"panda... | [((465, 498), 'joblib.load', 'joblib.load', (['"""data/sessions_dict"""'], {}), "('data/sessions_dict')\n", (476, 498), False, 'import joblib\n'), ((515, 548), 'joblib.load', 'joblib.load', (['"""data/gamedata_dict"""'], {}), "('data/gamedata_dict')\n", (526, 548), False, 'import joblib\n'), ((2745, 2771), 'pandas.PeriodIndex', 'pd.PeriodIndex', (["df['time']"], {}), "(df['time'])\n", (2759, 2771), True, 'import pandas as pd\n'), ((2772, 2801), 'pandas.TimedeltaIndex', 'pd.TimedeltaIndex', (["df['time']"], {}), "(df['time'])\n", (2789, 2801), True, 'import pandas as pd\n'), ((3148, 3186), 'pandas.concat', 'pd.concat', (['df_discretized_list'], {'axis': '(1)'}), '(df_discretized_list, axis=1)\n', (3157, 3186), True, 'import pandas as pd\n'), ((3370, 3386), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (3384, 3386), False, 'from sklearn.preprocessing import StandardScaler\n'), ((3626, 3658), 'torch.nn.LSTM', 'nn.LSTM', (['input_size', 'hidden_size'], {}), '(input_size, hidden_size)\n', (3633, 3658), True, 'import torch.nn as nn\n'), ((3754, 3773), 'torch.Tensor', 'torch.Tensor', (['train'], {}), '(train)\n', (3766, 3773), False, 'import torch\n'), ((3825, 3853), 'torch.utils.data.TensorDataset', 'TensorDataset', (['train', 'target'], {}), '(train, target)\n', (3838, 3853), False, 'from torch.utils.data import TensorDataset, DataLoader\n'), ((3868, 3901), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': '(8)'}), '(dataset, batch_size=8)\n', (3878, 3901), False, 'from torch.utils.data import TensorDataset, DataLoader\n'), ((3903, 3930), 'torch.nn.utils.rnn.pack_padded_sequence', 'pack_padded_sequence', (['train'], {}), '(train)\n', (3923, 3930), False, 'from torch.nn.utils.rnn import pack_padded_sequence, pack_sequence\n'), ((2995, 3017), 'numpy.arange', 'np.arange', (['(0)', '(180)', '(0.1)'], {}), '(0, 180, 0.1)\n', (3004, 3017), True, 'import numpy as np\n'), ((1624, 1718), 
'utils.get_intervals_from_moments', 'get_intervals_from_moments', (['moments_kills'], {'interval_start': '(-duration)', 'interval_end': 'duration'}), '(moments_kills, interval_start=-duration,\n interval_end=duration)\n', (1650, 1718), False, 'from utils import normalize_MPU9250_data, split_df, get_intervals_from_moments, EventIntervals\n'), ((1741, 1835), 'utils.get_intervals_from_moments', 'get_intervals_from_moments', (['moments_death'], {'interval_start': '(-duration)', 'interval_end': 'duration'}), '(moments_death, interval_start=-duration,\n interval_end=duration)\n', (1767, 1835), False, 'from utils import normalize_MPU9250_data, split_df, get_intervals_from_moments, EventIntervals\n'), ((1868, 1955), 'utils.EventIntervals', 'EventIntervals', ([], {'intervals_list': 'intervals_shootout', 'label': '"""shootouts"""', 'color': '"""blue"""'}), "(intervals_list=intervals_shootout, label='shootouts', color=\n 'blue')\n", (1882, 1955), False, 'from utils import normalize_MPU9250_data, split_df, get_intervals_from_moments, EventIntervals\n'), ((1983, 2059), 'utils.EventIntervals', 'EventIntervals', ([], {'intervals_list': 'intervals_kills', 'label': '"""kills"""', 'color': '"""green"""'}), "(intervals_list=intervals_kills, label='kills', color='green')\n", (1997, 2059), False, 'from utils import normalize_MPU9250_data, split_df, get_intervals_from_moments, EventIntervals\n'), ((2092, 2167), 'utils.EventIntervals', 'EventIntervals', ([], {'intervals_list': 'intervals_death', 'label': '"""deaths"""', 'color': '"""red"""'}), "(intervals_list=intervals_death, label='deaths', color='red')\n", (2106, 2167), False, 'from utils import normalize_MPU9250_data, split_df, get_intervals_from_moments, EventIntervals\n'), ((3783, 3806), 'torch.Tensor', 'torch.Tensor', (['game_mask'], {}), '(game_mask)\n', (3795, 3806), False, 'import torch\n'), ((3936, 3956), 'torch.nn.utils.rnn.pack_sequence', 'pack_sequence', (['train'], {}), '(train)\n', (3949, 3956), False, 'from 
torch.nn.utils.rnn import pack_padded_sequence, pack_sequence\n'), ((1182, 1218), 'pandas.to_datetime', 'pd.to_datetime', (["df['time']"], {'unit': '"""s"""'}), "(df['time'], unit='s')\n", (1196, 1218), True, 'import pandas as pd\n')] |
import pandas as pd
import numpy as np
import sys
import glob
import os
import re
import Bio.PDB.PDBParser
import warnings
import math
# Biopython warns about unrecognized atom elements for every atom; silence it.
warnings.filterwarnings("ignore", message="Used element '.' for Atom")
# CATH hierarchy levels, ordered from coarsest to finest.
levels = ["class", "arch", "topo", "superfam"]
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--cath-filename",
                    help="CATH domain list input file")
parser.add_argument("--pdb-dir",
                    help="PDB directory")
parser.add_argument("--n-splits", default=10, type=int,
                    help="Number of splits (default: %(default)s)")
parser.add_argument("--atom-selector-regexp", default="CA",
                    help="Atom selector (default: %(default)s)")
# BUGFIX: type=float was missing here, so a value given on the command line
# stayed a string and later comparisons (group['dist'] < args.max_distance)
# would raise TypeError. Now consistent with --min-resolution.
parser.add_argument("--max-distance", default=50.0, type=float,
                    help="Maximum distance from atom to center of mass (default: %(default)s)")
parser.add_argument("--extract-at-level", default="arch",
                    help="Which CATH-level to use, i.e. class, arch, topo, or superfam (default: %(default)s)")
parser.add_argument("--sub-category-level", default="superfam",
                    help="Which CATH-level to use, i.e. class, arch, topo, or superfam (default: %(default)s)")
parser.add_argument("--min-size", default=500, type=int,
                    help="Minimum number of elements in category in order for it to be included (default: %(default)s)")
parser.add_argument("--min-resolution", default=3.5, type=float,
                    help="The minimum resolution for entries to be included (note that the resolution grows when this number drops) (default: %(default)s)")
parser.add_argument("--print-group-sizes-only", default=False, action="store_true",
                    help="Just print out the unfiltered group sizes - useful for deciding on a min-size value (default: %(default)s)")
args = parser.parse_args()
# Echo the effective configuration for reproducibility.
print("# Arguments")
for key, value in sorted(vars(args).items()):
    print(key, "=", value)
# Convert the selected level names into column counts: the label uses CATH
# columns 1..extract_at_level; sub-categories are identified by columns in
# sub_category_col_range.
extract_at_level = levels.index(args.extract_at_level)+1
sub_category_col_range = list(range(1, levels.index(args.sub_category_level)+1+1)) # add one offset and one because end is excluded
# Read data into pandas dataframe
# (columns 1-4 are the CATH classification, column 11 is the resolution).
# BUGFIX: the separator is a raw string now; '\s+' in a plain string literal
# is an invalid escape sequence (SyntaxWarning on modern Python).
data = pd.read_csv(args.cath_filename, sep=r'\s+', header=None, usecols=[0,1,2,3,4,11], comment="#")
# Iterate over PDB files, and use IDs to filter dataframe:
# keep only rows whose domain ID has a corresponding file in --pdb-dir.
pdb_filenames = glob.glob(os.path.join(args.pdb_dir, "*"))
pdb_ids = [os.path.basename(name) for name in pdb_filenames]
data = data[data[0].isin(pdb_ids)]
data = data.reset_index(drop=True)
print ("Processing PDB files for distance to CM")
# Create Bio.PDB parser object
# NOTE(review): the file does `import Bio.PDB.PDBParser`; whether
# Bio.PDB.PDBParser then resolves to the parser class or the submodule of the
# same name depends on Biopython's package re-exports — confirm this call works.
pdb_parser = Bio.PDB.PDBParser()
# Iterate over PDB files, and calculate distance from center of mass. Add as additional column
data['dist'] = 0
max_distance_to_cm = np.zeros(len(data))
# import pickle
# max_distance_to_cm = pickle.load(open('max_distance_to_cm.pickle', 'rb'))
for index, row in data.iterrows():
    # Extract CATH classification and PDB ID
    cath_id = row[0]
    pdb_filename = os.path.join(args.pdb_dir, cath_id)
    print("%d/%d:" % (index,len(data)), pdb_filename)
    # Parse structure
    structure = pdb_parser.get_structure(pdb_filename, pdb_filename)
    positions_all_atoms = []
    # Retrieve all atom coordinates
    for atom in structure.get_atoms():
        # The all-atom selection is used to calculate distance from center of mass
        positions_all_atoms.append(atom.get_coord())
    # Translate to center of mass
    positions_all_atoms = np.array(positions_all_atoms)
    positions_all_atoms = positions_all_atoms - np.mean(positions_all_atoms, axis=0)
    # Largest distance of any atom from the domain's center of mass.
    max_distance_to_cm[index] = np.max(np.linalg.norm(positions_all_atoms, axis=1))
# Overwrite the placeholder 'dist' column with the computed distances.
data = data.assign(dist=max_distance_to_cm)
# Group dataframe by [class, architecture] levels
# First pass: find the size of the SMALLEST qualifying category, so that all
# categories can later be reduced to the same number of entries.
minimum_group_len = None
for name, group in data.groupby(list(range(1,extract_at_level+1))):
    # Select elements with at least min_arch_size structures with resolution at least min_resolution (note higher resolution is smaller number)
    # and with specified maximum distance to CM.
    if len(group[np.logical_and(group[11] < args.min_resolution, group['dist'] < args.max_distance)]) > args.min_size:
        # Print entry and size
        if args.print_group_sizes_only:
            print(name, len(group))
            continue
        # Group by all subcategories (i.e., to superfamily level)
        sub_category_groups = group[np.logical_and(group[11] < args.min_resolution, group['dist'] < args.max_distance)].groupby(sub_category_col_range)
        # Calculate group length by summing subgroup lengths
        group_len = np.sum([len(g[1]) for g in sub_category_groups])
        # Update minimum group length
        minimum_group_len = group_len if minimum_group_len is None else min(minimum_group_len, group_len)
# Second pass: reduce every qualifying category to minimum_group_len entries,
# sampling as evenly as possible across its sub-categories.
groups_reduced = []
for name, group in data.groupby(list(range(1,extract_at_level+1))):
    # Select elements with at least min_arch_size structures with resolution at least min_resolution (note higher resolution is smaller number)
    # and with specified maximum distance to CM.
    if len(group[np.logical_and(group[11] < args.min_resolution, group['dist'] < args.max_distance)]) > args.min_size:
        # Reduce to the minimum number
        n_entries = minimum_group_len
        # Reduce to minimum number of elements, but attempting to select evenly from superfamilies
        group_reduced = []
        # Group by all subcategories (i.e., to superfamily level)
        sub_category_groups = group[np.logical_and(group[11] < args.min_resolution, group['dist'] < args.max_distance)].groupby(sub_category_col_range)
        # Skip if there is only one subcategory (this violates the constraint that all splits should have all groups represented)
        if len(sub_category_groups) < args.n_splits:
            continue
        # Keep track of numbers of entries added so far
        n_added_entries = 0
        # print(name)
        # We now reduce to the limit. Rather than taking arbitrary members, we try to sample as uniformly
        # as possible among the members of the subcategory level
        # Iterate over sub_category groups - sorted by length - smallest groups first
        for i, group_inner_pair in enumerate(sorted(sub_category_groups, key=lambda k:len(k[1]))):
            name_inner, group_inner = group_inner_pair
            # Calculate how much we are allowed to include. This is simply a matter of spreading
            # what remains evenly over the remaining iterations
            inclusion_size = int(math.ceil((n_entries - n_added_entries) / (len(sub_category_groups) - i)))
            # Prefer the best-resolution entries within the subcategory
            # (column 11 ascending; lower number = better resolution).
            included_entries = group_inner.sort_values([11])[:inclusion_size]
            group_reduced.append(included_entries)
            n_added_entries += len(included_entries)
            # print(name_inner, inclusion_size, len(group_inner.sort_values([11])))
            # print("\t", name_inner)
            # print("\t", len(included_entries), n_added_entries, inclusion_size, len(group_inner))
        # Finally, append added entries to form new dataframe
        groups_reduced.append(pd.concat(group_reduced))
        # print(n_added_entries, n_entries, minimum_group_len)
        assert(n_added_entries == n_entries)
# In print-only mode we are done after listing the group sizes above.
if args.print_group_sizes_only:
    sys.exit()
# Merge list of groups into single data frame
data_reduced = pd.concat(groups_reduced)
# Reset index
data_reduced = data_reduced.reset_index(drop=True)
# Create splits by iterating over all sub-categories in sorted order (largest
# first), and adding each sub-category to the split which currently has fewest
# elements. In addition, we keep track of balancing the corresponding main
# categories, so that a split can only receive a sub-category if it does not
# already contain another sub-category from the same category. This counter is
# reset every time all splits have sub-categories from all categories.
# This is the reason that the total number of elements in each split is not the
# same over all splits
# Create splits
# Each split is a pair [row_indices, {category_id: True}].
splits = [[[],{}] for i in range(args.n_splits)]
# Collect sub_categories across all categories
sub_categories = []
n_categories = len(list(data_reduced.groupby(list(range(1,extract_at_level+1)))))
for name, group in data_reduced.groupby(list(range(1,extract_at_level+1))):
    for name_inner, group_inner in group.groupby(sub_category_col_range):
        sub_categories.append((name, group_inner))
# Sort them by number of elements
sub_categories.sort(key=lambda k: len(k[1]), reverse=True)
print("n_categories: ", n_categories)
# Iterate over sorted sub_categories list
for j, pair in enumerate(sub_categories):
    category_id, sub_category = pair
    # Iterate over splits until split is found in which the
    # category corresponding to this sub-category is not yet present
    # if no suitable entry is found, use the first (smallest)
    split_index = 0
    for i, split_pair in enumerate(splits):
        split, split_category_ids = split_pair
        if category_id not in split_category_ids:
            split_index = i
            break
    # Add indices as first element to the splits array
    split, split_category_ids = splits[split_index]
    split += sub_category.index.values.tolist()
    # Register category in split
    split_category_ids[category_id] = True
    # Reset if all splits have seen all categories
    splits_all_complete = True
    for split, split_category_ids in splits:
        splits_all_complete = splits_all_complete and len(split_category_ids) == n_categories
    if splits_all_complete:
        for i, _ in enumerate(splits):
            splits[i][1] = {}
# Sort array so that smallest entry appears first
# NOTE(review): this sort runs AFTER the assignment loop, so the "use the
# first (smallest)" fallback mentioned above is not maintained while
# assigning — confirm whether the sort was meant to be inside the loop.
splits.sort(key=lambda k:len(k[0]))
# print([len(v[0]) for v in splits])
# print([(len(v[0]),list(v[1].keys())) for v in splits])
# Throw out category id from splits
splits = [v[0] for v in splits]
print("Split distribution: ", [len(split) for split in splits])
# Reorder entries in data frame so that they follow the split division
# (this allows us to keep track of each split using only a start index)
data_reduced_reordered = []
split_start_indices = []
for i, split in enumerate(splits):
    data_reduced_reordered.append(data_reduced.iloc[split])
    if i==0:
        split_start_indices.append(0)
    else:
        split_start_indices.append(split_start_indices[-1]+len(splits[i-1]))
data_reduced = pd.concat(data_reduced_reordered)
data_reduced = data_reduced.reset_index(drop=True)
# Check that members of a sub_category always end in the same split and that all splits contain all main categories
split_ids = {}
print("Split start indices: ", split_start_indices)
for split_index, split_pair in enumerate(zip(split_start_indices[0:], split_start_indices[1:]+[None])):
    sub_category_ids = {}
    # Check that sub_categories always end up in the same split
    split_start, split_end = split_pair
    for index, row in data_reduced.iloc[split_start:split_end].iterrows():
        # A sub-category is identified by all four CATH columns.
        cath_id = row[1], row[2], row[3], row[4]
        if cath_id not in split_ids:
            split_ids[cath_id] = split_index
        else:
            assert split_ids[cath_id] == split_index
    # Check that each split contains all main categories
    assert len(data_reduced.iloc[split_start:split_end].groupby(list(range(1,extract_at_level+1)))) == n_categories
print ("Processing PDB files...")
# Per-domain feature lists, one entry per row of data_reduced.
positions = []
n_atoms = []
atom_types = []
res_indices = []
labels = []
# Compiled once and reused for every atom (hot loop).
atom_selector_regexp = re.compile(args.atom_selector_regexp)
for index, row in data_reduced.iterrows():
    # Extract CATH classification and PDB ID
    cath_id = row[0]
    cath_classification = row[1], row[2], row[3], row[4]
    pdb_filename = os.path.join(args.pdb_dir, cath_id)
    print(index, pdb_filename)
    # Parse structure
    structure = pdb_parser.get_structure(pdb_filename, pdb_filename)
    positions_all_atoms = []
    positions_tmp = []
    atom_types_tmp = []
    res_indices_tmp = []
    # Retrieve all atom coordinates
    for atom in structure.get_atoms():
        # The all-atom selection is used to calculate distance from center of mass
        positions_all_atoms.append(atom.get_coord())
        # filter with atom selection: the regexp must match the FULL atom id.
        match = atom_selector_regexp.match(atom.id)
        if match and len(match.group(0)) == len(atom.id):
            positions_tmp.append(atom.get_coord())
            atom_types_tmp.append(match.group(0))
            # assert(match.group(0) == "CA")
            res_indices_tmp.append(int(atom.get_parent().id[1]))
    # Translate to center of mass
    positions_tmp = np.array(positions_tmp)
    positions_tmp = positions_tmp - np.mean(positions_tmp, axis=0)
    positions_all_atoms = np.array(positions_all_atoms)
    positions_all_atoms = positions_all_atoms - np.mean(positions_all_atoms, axis=0)
    # Check that all PDBs have max distance to center of mass within limit
    assert np.max(np.linalg.norm(positions_all_atoms, axis=1)) < args.max_distance
    positions.append(positions_tmp)
    n_atoms.append(len(positions_tmp))
    atom_types.append(atom_types_tmp)
    res_indices.append(res_indices_tmp)
    labels.append(cath_classification[:extract_at_level])
# Translate positions to numpy array, by finding maximum number of elements
# Padding conventions: positions padded with 0, atom types with empty bytes,
# residue indices with -1; n_atoms gives the valid length per row.
max_n_atoms = max([len(pos) for pos in positions])
positions_array = np.zeros([len(positions), max_n_atoms, 3])
atom_types_array = np.empty([len(positions), max_n_atoms], dtype='S10')
atom_types_array[:] = ""
res_indices_array = np.zeros([len(positions), max_n_atoms])-1
for i, pos in enumerate(positions):
    positions_array[i, :n_atoms[i]] = pos
    atom_types_array[i, :n_atoms[i]] = atom_types[i]
    res_indices_array[i, :n_atoms[i]] = res_indices[i]
# Save features
print(len(set(labels)))
np.savez_compressed("cath_%d%s_%s"%(len(set(labels)), args.extract_at_level, args.atom_selector_regexp.replace("|","")),
                    n_atoms=np.array(n_atoms),
                    atom_types=atom_types_array,
                    res_indices=res_indices_array,
                    positions=positions_array,
                    labels=np.array(labels),
                    split_start_indices=np.array(split_start_indices))
# print(len(data_reduced))
| [
"numpy.mean",
"argparse.ArgumentParser",
"re.compile",
"pandas.read_csv",
"numpy.logical_and",
"os.path.join",
"numpy.linalg.norm",
"numpy.array",
"os.path.basename",
"sys.exit",
"pandas.concat",
"warnings.filterwarnings"
] | [((135, 205), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'message': '"""Used element \'.\' for Atom"""'}), '(\'ignore\', message="Used element \'.\' for Atom")\n', (158, 205), False, 'import warnings\n'), ((280, 305), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (303, 305), False, 'import argparse\n'), ((2197, 2300), 'pandas.read_csv', 'pd.read_csv', (['args.cath_filename'], {'sep': '"""\\\\s+"""', 'header': 'None', 'usecols': '[0, 1, 2, 3, 4, 11]', 'comment': '"""#"""'}), "(args.cath_filename, sep='\\\\s+', header=None, usecols=[0, 1, 2, \n 3, 4, 11], comment='#')\n", (2208, 2300), True, 'import pandas as pd\n'), ((7474, 7499), 'pandas.concat', 'pd.concat', (['groups_reduced'], {}), '(groups_reduced)\n', (7483, 7499), True, 'import pandas as pd\n'), ((10561, 10594), 'pandas.concat', 'pd.concat', (['data_reduced_reordered'], {}), '(data_reduced_reordered)\n', (10570, 10594), True, 'import pandas as pd\n'), ((11665, 11702), 're.compile', 're.compile', (['args.atom_selector_regexp'], {}), '(args.atom_selector_regexp)\n', (11675, 11702), False, 'import re\n'), ((2375, 2406), 'os.path.join', 'os.path.join', (['args.pdb_dir', '"""*"""'], {}), "(args.pdb_dir, '*')\n", (2387, 2406), False, 'import os\n'), ((2419, 2441), 'os.path.basename', 'os.path.basename', (['name'], {}), '(name)\n', (2435, 2441), False, 'import os\n'), ((3021, 3056), 'os.path.join', 'os.path.join', (['args.pdb_dir', 'cath_id'], {}), '(args.pdb_dir, cath_id)\n', (3033, 3056), False, 'import os\n'), ((3506, 3535), 'numpy.array', 'np.array', (['positions_all_atoms'], {}), '(positions_all_atoms)\n', (3514, 3535), True, 'import numpy as np\n'), ((7397, 7407), 'sys.exit', 'sys.exit', ([], {}), '()\n', (7405, 7407), False, 'import sys\n'), ((11889, 11924), 'os.path.join', 'os.path.join', (['args.pdb_dir', 'cath_id'], {}), '(args.pdb_dir, cath_id)\n', (11901, 11924), False, 'import os\n'), ((12776, 12799), 'numpy.array', 'np.array', 
(['positions_tmp'], {}), '(positions_tmp)\n', (12784, 12799), True, 'import numpy as np\n'), ((12893, 12922), 'numpy.array', 'np.array', (['positions_all_atoms'], {}), '(positions_all_atoms)\n', (12901, 12922), True, 'import numpy as np\n'), ((3584, 3620), 'numpy.mean', 'np.mean', (['positions_all_atoms'], {'axis': '(0)'}), '(positions_all_atoms, axis=0)\n', (3591, 3620), True, 'import numpy as np\n'), ((3661, 3704), 'numpy.linalg.norm', 'np.linalg.norm', (['positions_all_atoms'], {'axis': '(1)'}), '(positions_all_atoms, axis=1)\n', (3675, 3704), True, 'import numpy as np\n'), ((12836, 12866), 'numpy.mean', 'np.mean', (['positions_tmp'], {'axis': '(0)'}), '(positions_tmp, axis=0)\n', (12843, 12866), True, 'import numpy as np\n'), ((12971, 13007), 'numpy.mean', 'np.mean', (['positions_all_atoms'], {'axis': '(0)'}), '(positions_all_atoms, axis=0)\n', (12978, 13007), True, 'import numpy as np\n'), ((14107, 14124), 'numpy.array', 'np.array', (['n_atoms'], {}), '(n_atoms)\n', (14115, 14124), True, 'import numpy as np\n'), ((14300, 14316), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (14308, 14316), True, 'import numpy as np\n'), ((14358, 14387), 'numpy.array', 'np.array', (['split_start_indices'], {}), '(split_start_indices)\n', (14366, 14387), True, 'import numpy as np\n'), ((7225, 7249), 'pandas.concat', 'pd.concat', (['group_reduced'], {}), '(group_reduced)\n', (7234, 7249), True, 'import pandas as pd\n'), ((13102, 13145), 'numpy.linalg.norm', 'np.linalg.norm', (['positions_all_atoms'], {'axis': '(1)'}), '(positions_all_atoms, axis=1)\n', (13116, 13145), True, 'import numpy as np\n'), ((4110, 4197), 'numpy.logical_and', 'np.logical_and', (['(group[11] < args.min_resolution)', "(group['dist'] < args.max_distance)"], {}), "(group[11] < args.min_resolution, group['dist'] < args.\n max_distance)\n", (4124, 4197), True, 'import numpy as np\n'), ((5168, 5255), 'numpy.logical_and', 'np.logical_and', (['(group[11] < args.min_resolution)', "(group['dist'] < 
args.max_distance)"], {}), "(group[11] < args.min_resolution, group['dist'] < args.\n max_distance)\n", (5182, 5255), True, 'import numpy as np\n'), ((4444, 4531), 'numpy.logical_and', 'np.logical_and', (['(group[11] < args.min_resolution)', "(group['dist'] < args.max_distance)"], {}), "(group[11] < args.min_resolution, group['dist'] < args.\n max_distance)\n", (4458, 4531), True, 'import numpy as np\n'), ((5586, 5673), 'numpy.logical_and', 'np.logical_and', (['(group[11] < args.min_resolution)', "(group['dist'] < args.max_distance)"], {}), "(group[11] < args.min_resolution, group['dist'] < args.\n max_distance)\n", (5600, 5673), True, 'import numpy as np\n')] |
import numpy as np
def get_user_purchase_matrix(shoppers,numshoppers, brands,num_brands):
    """Build a dense (numshoppers x num_brands) purchase-count matrix.

    Entry [s, b] counts how many times shopper ``s`` bought brand ``b``.
    ``shoppers`` and ``brands`` are parallel sequences of equal length.
    Counts are stored as int8 to keep the matrix small.
    """
    counts = np.zeros((numshoppers, num_brands), dtype=np.int8)
    for event, shopper in enumerate(shoppers):
        counts[shopper, brands[event]] += 1
    return counts
if __name__ == "__main__":
    # Smoke test: 3 shoppers, 6 brands, 9 purchase events.
    customers = [0,0,1,1,1,2,2,2,2]
    purchases = [0,3,5,1,2,4,1,3,5]
    print(get_user_purchase_matrix(customers, 3, purchases, 6))
"numpy.zeros"
] | [((162, 212), 'numpy.zeros', 'np.zeros', (['(numshoppers, num_brands)'], {'dtype': 'np.int8'}), '((numshoppers, num_brands), dtype=np.int8)\n', (170, 212), True, 'import numpy as np\n')] |
import unittest
import numpy as np
from spn.algorithms.Inference import log_likelihood
from spn.algorithms.MPE import mpe
from spn.io.CPP import get_cpp_function, setup_cpp_bridge, get_cpp_mpe_function
from spn.io.Graphics import plot_spn
from spn.structure.Base import get_nodes_by_type
from spn.structure.leaves.parametric.Inference import add_parametric_inference_support
from spn.structure.leaves.parametric.Parametric import Gaussian, Bernoulli
class TestCPP(unittest.TestCase):
    """Check that the generated C++ code agrees with the Python reference
    implementation for both log-likelihood evaluation and MPE completion."""

    def setUp(self):
        # Register the likelihood handlers for parametric leaf nodes.
        add_parametric_inference_support()

    def test_binary(self):
        # A small mixture SPN over three binary variables.
        left_branch = Bernoulli(p=0.8, scope=0) * (
            0.3 * (Bernoulli(p=0.7, scope=1) * Bernoulli(p=0.6, scope=2))
            + 0.7 * (Bernoulli(p=0.5, scope=1) * Bernoulli(p=0.4, scope=2))
        )
        right_branch = Bernoulli(p=0.8, scope=0) * Bernoulli(p=0.7, scope=1) * Bernoulli(p=0.6, scope=2)
        spn = 0.4 * left_branch + 0.6 * right_branch

        setup_cpp_bridge(spn)
        cpp_eval = get_cpp_function(spn)

        # Draw three independent Bernoulli(0.3) columns and stack them row-wise.
        num_data = 200000
        columns = [
            np.random.binomial(1, 0.3, size=num_data).astype("float32").tolist()
            for _ in range(3)
        ]
        data = np.array(columns[0] + columns[1] + columns[2]).reshape((-1, 3))

        # Python reference log-likelihoods, one column per SPN node.
        num_nodes = len(get_nodes_by_type(spn))
        lls_matrix = np.zeros((num_data, num_nodes))
        log_likelihood(spn, data, lls_matrix=lls_matrix)

        # The compiled evaluator must reproduce every node's log-likelihood.
        cpp_ll = cpp_eval(data)
        self.assertTrue(np.allclose(lls_matrix, cpp_ll))

        ### Testing for MPE: blank one random entry per row and let both
        ### implementations fill it back in.
        cpp_mpe = get_cpp_mpe_function(spn)
        for row_index in range(data.shape[0]):
            missing_col = np.random.binomial(data.shape[1] - 1, 0.5)
            data[row_index, missing_col] = np.nan
        cpp_completion = cpp_mpe(data)
        py_completion = mpe(spn, data)
        self.assertTrue(np.allclose(py_completion, cpp_completion))
if __name__ == "__main__":
    # Run the test suite when executed directly.
    unittest.main()
| [
"numpy.allclose",
"spn.algorithms.Inference.log_likelihood",
"spn.algorithms.MPE.mpe",
"spn.io.CPP.get_cpp_mpe_function",
"spn.io.CPP.setup_cpp_bridge",
"spn.io.CPP.get_cpp_function",
"spn.structure.leaves.parametric.Inference.add_parametric_inference_support",
"numpy.zeros",
"numpy.array",
"spn.s... | [((2146, 2161), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2159, 2161), False, 'import unittest\n'), ((517, 551), 'spn.structure.leaves.parametric.Inference.add_parametric_inference_support', 'add_parametric_inference_support', ([], {}), '()\n', (549, 551), False, 'from spn.structure.leaves.parametric.Inference import add_parametric_inference_support\n'), ((938, 957), 'spn.io.CPP.setup_cpp_bridge', 'setup_cpp_bridge', (['A'], {}), '(A)\n', (954, 957), False, 'from spn.io.CPP import get_cpp_function, setup_cpp_bridge, get_cpp_mpe_function\n'), ((996, 1015), 'spn.io.CPP.get_cpp_function', 'get_cpp_function', (['A'], {}), '(A)\n', (1012, 1015), False, 'from spn.io.CPP import get_cpp_function, setup_cpp_bridge, get_cpp_mpe_function\n'), ((1440, 1471), 'numpy.zeros', 'np.zeros', (['(num_data, num_nodes)'], {}), '((num_data, num_nodes))\n', (1448, 1471), True, 'import numpy as np\n'), ((1538, 1584), 'spn.algorithms.Inference.log_likelihood', 'log_likelihood', (['A', 'data'], {'lls_matrix': 'lls_matrix'}), '(A, data, lls_matrix=lls_matrix)\n', (1552, 1584), False, 'from spn.algorithms.Inference import log_likelihood\n'), ((1754, 1777), 'spn.io.CPP.get_cpp_mpe_function', 'get_cpp_mpe_function', (['A'], {}), '(A)\n', (1774, 1777), False, 'from spn.io.CPP import get_cpp_function, setup_cpp_bridge, get_cpp_mpe_function\n'), ((2032, 2044), 'spn.algorithms.MPE.mpe', 'mpe', (['A', 'data'], {}), '(A, data)\n', (2035, 2044), False, 'from spn.algorithms.MPE import mpe\n'), ((1396, 1416), 'spn.structure.Base.get_nodes_by_type', 'get_nodes_by_type', (['A'], {}), '(A)\n', (1413, 1416), False, 'from spn.structure.Base import get_nodes_by_type\n'), ((1657, 1686), 'numpy.allclose', 'np.allclose', (['lls_matrix', 'c_ll'], {}), '(lls_matrix, c_ll)\n', (1668, 1686), True, 'import numpy as np\n'), ((1868, 1910), 'numpy.random.binomial', 'np.random.binomial', (['(data.shape[1] - 1)', '(0.5)'], {}), '(data.shape[1] - 1, 0.5)\n', (1886, 1910), True, 'import numpy as 
np\n'), ((2069, 2110), 'numpy.allclose', 'np.allclose', (['py_completion', 'cc_completion'], {}), '(py_completion, cc_completion)\n', (2080, 2110), True, 'import numpy as np\n'), ((1339, 1353), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (1347, 1353), True, 'import numpy as np\n'), ((613, 638), 'spn.structure.leaves.parametric.Parametric.Bernoulli', 'Bernoulli', ([], {'p': '(0.8)', 'scope': '(0)'}), '(p=0.8, scope=0)\n', (622, 638), False, 'from spn.structure.leaves.parametric.Parametric import Gaussian, Bernoulli\n'), ((902, 927), 'spn.structure.leaves.parametric.Parametric.Bernoulli', 'Bernoulli', ([], {'p': '(0.6)', 'scope': '(2)'}), '(p=0.6, scope=2)\n', (911, 927), False, 'from spn.structure.leaves.parametric.Parametric import Gaussian, Bernoulli\n'), ((846, 871), 'spn.structure.leaves.parametric.Parametric.Bernoulli', 'Bernoulli', ([], {'p': '(0.8)', 'scope': '(0)'}), '(p=0.8, scope=0)\n', (855, 871), False, 'from spn.structure.leaves.parametric.Parametric import Gaussian, Bernoulli\n'), ((874, 899), 'spn.structure.leaves.parametric.Parametric.Bernoulli', 'Bernoulli', ([], {'p': '(0.7)', 'scope': '(1)'}), '(p=0.7, scope=1)\n', (883, 899), False, 'from spn.structure.leaves.parametric.Parametric import Gaussian, Bernoulli\n'), ((1242, 1283), 'numpy.random.binomial', 'np.random.binomial', (['(1)', '(0.3)'], {'size': 'num_data'}), '(1, 0.3, size=num_data)\n', (1260, 1283), True, 'import numpy as np\n'), ((678, 703), 'spn.structure.leaves.parametric.Parametric.Bernoulli', 'Bernoulli', ([], {'p': '(0.7)', 'scope': '(1)'}), '(p=0.7, scope=1)\n', (687, 703), False, 'from spn.structure.leaves.parametric.Parametric import Gaussian, Bernoulli\n'), ((706, 731), 'spn.structure.leaves.parametric.Parametric.Bernoulli', 'Bernoulli', ([], {'p': '(0.6)', 'scope': '(2)'}), '(p=0.6, scope=2)\n', (715, 731), False, 'from spn.structure.leaves.parametric.Parametric import Gaussian, Bernoulli\n'), ((758, 783), 'spn.structure.leaves.parametric.Parametric.Bernoulli', 
'Bernoulli', ([], {'p': '(0.5)', 'scope': '(1)'}), '(p=0.5, scope=1)\n', (767, 783), False, 'from spn.structure.leaves.parametric.Parametric import Gaussian, Bernoulli\n'), ((786, 811), 'spn.structure.leaves.parametric.Parametric.Bernoulli', 'Bernoulli', ([], {'p': '(0.4)', 'scope': '(2)'}), '(p=0.4, scope=2)\n', (795, 811), False, 'from spn.structure.leaves.parametric.Parametric import Gaussian, Bernoulli\n'), ((1072, 1113), 'numpy.random.binomial', 'np.random.binomial', (['(1)', '(0.3)'], {'size': 'num_data'}), '(1, 0.3, size=num_data)\n', (1090, 1113), True, 'import numpy as np\n'), ((1157, 1198), 'numpy.random.binomial', 'np.random.binomial', (['(1)', '(0.3)'], {'size': 'num_data'}), '(1, 0.3, size=num_data)\n', (1175, 1198), True, 'import numpy as np\n')] |
#!/usr/bin/python3
"""Print the eigenvalues of a symmetric tridiagonal matrix.

The main diagonal and the (shared) off-diagonal are passed on the command
line as Python list literals, e.g. "[2, 2, 2]" "[1, 1]".  The off-diagonal
must have exactly one element fewer than the main diagonal.
"""
from numpy import array
from numpy.linalg import eig
from scipy.sparse import diags
import numpy as np
import sys
import ast


def tridiagonal_eigenvalues(main_diagonal, off_diagonal):
    """Return the eigenvalues of the tridiagonal matrix defined by the bands.

    :param main_diagonal: sequence of length n (main diagonal)
    :param off_diagonal: sequence of length n - 1 (sub- and super-diagonal)
    :return: 1-D array of eigenvalues (complex only if necessary)
    """
    # Pass the bands as a plain list: wrapping the ragged sequences
    # (length n and n - 1) in np.array() raises ValueError on numpy >= 1.24.
    bands = [off_diagonal, main_diagonal, off_diagonal]
    matrix = diags(bands, [-1, 0, 1]).toarray()
    values, _vectors = eig(matrix)
    return values


def main(argv):
    """Parse the diagonals from argv[3] / argv[4] and print each eigenvalue."""
    # NOTE(review): argv[1] and argv[2] are not used here — presumably
    # consumed by the calling harness; confirm before changing the indices.
    # literal_eval safely parses the list literals (no arbitrary code).
    main_diagonal = ast.literal_eval(argv[3])
    off_diagonal = ast.literal_eval(argv[4])
    for value in tridiagonal_eigenvalues(main_diagonal, off_diagonal):
        print(value)


if __name__ == "__main__":
    main(sys.argv)
| [
"ast.literal_eval",
"numpy.array",
"numpy.linalg.eig",
"scipy.sparse.diags"
] | [((185, 210), 'ast.literal_eval', 'ast.literal_eval', (['args[3]'], {}), '(args[3])\n', (201, 210), False, 'import ast\n'), ((220, 245), 'ast.literal_eval', 'ast.literal_eval', (['args[4]'], {}), '(args[4])\n', (236, 245), False, 'import ast\n'), ((250, 280), 'numpy.array', 'array', (['[second, maind, second]'], {}), '([second, maind, second])\n', (255, 280), False, 'from numpy import array\n'), ((356, 362), 'numpy.linalg.eig', 'eig', (['A'], {}), '(A)\n', (359, 362), False, 'from numpy.linalg import eig\n'), ((311, 327), 'scipy.sparse.diags', 'diags', (['k', 'offset'], {}), '(k, offset)\n', (316, 327), False, 'from scipy.sparse import diags\n')] |
"""Conversion data fixtures
"""
import numpy as np
from pytest import fixture
@fixture(scope='module')
def year_to_month_coefficients():
    """From one year to 12 months (apportions).

    Each month receives days_in_month / 365 of the annual value.
    """
    # np.float was removed from numpy (deprecated 1.20, removed 1.24);
    # the builtin float is the documented replacement.
    # NOTE(review): the per-month day counts here (two 31s mid-year, two 30s)
    # differ from the real calendar but still sum to 365 — confirm intended.
    days = np.array([[31, 28, 31, 30, 31, 31, 30, 30, 31, 31, 30, 31]], dtype=float)
    return days.T / 365
@fixture(scope='module')
def month_to_year_coefficients():
    """From 12 months to one year (sums the monthly values)."""
    # np.float was removed from numpy (deprecated 1.20, removed 1.24);
    # the builtin float is the documented replacement.
    return np.ones((1, 12), dtype=float)
@fixture(scope='module')
def month_to_season_coefficients():
    """
    12 months to four seasons (winter is December, January, Feb).

    Returns a 12x4 matrix of 0/1 entries: summing monthly values weighted
    by a column aggregates them into that season.
    """
    # Month indices (0 = January) belonging to each season, in season order
    season_months = [
        [0, 1, 11],   # winter
        [2, 3, 4],    # spring
        [5, 6, 7],    # summer
        [8, 9, 10],   # autumn
    ]
    coef = np.zeros((4, 12))
    for season_index, month_indices in enumerate(season_months):
        coef[season_index, month_indices] = 1
    return coef.T
@fixture(scope='module')
def season_to_month_coefficients():
    """
    Four seasons to 12 months (winter is December, January, Feb).

    Each (season, month) entry is the share of the season's duration that
    falls into the month, i.e. days_in_month / days_in_season.
    """
    # (days in month, season index) for Jan..Dec;
    # seasons: 0 = winter, 1 = spring, 2 = summer, 3 = autumn
    month_info = [
        (31, 0), (28, 0), (31, 1), (30, 1), (31, 1), (30, 2),
        (31, 2), (31, 2), (30, 3), (31, 3), (30, 3), (31, 0),
    ]
    coef = np.zeros((12, 4))
    for month_index, (days, season_index) in enumerate(month_info):
        coef[month_index, season_index] = days
    season_lengths = np.array(
        [31 + 31 + 28, 31 + 30 + 31, 30 + 31 + 31, 30 + 31 + 30], dtype=float)
    return np.transpose(coef / season_lengths)
@fixture(scope='function')
def months():
    """Twelve monthly intervals expressed as ISO 8601 month durations."""
    names = ['jan', 'feb', 'mar', 'apr', 'may', 'jun',
             'jul', 'aug', 'sep', 'oct', 'nov', 'dec']
    return [
        {'name': name, 'interval': [['P%dM' % index, 'P%dM' % (index + 1)]]}
        for index, name in enumerate(names)
    ]
@fixture
def seasons():
    """Four seasonal intervals.

    Winter straddles the year end, so it is represented by two pieces
    (Dec and Jan-Feb).
    """
    spans = [
        ('winter', [['P0M', 'P2M'], ['P11M', 'P12M']]),
        ('spring', [['P2M', 'P5M']]),
        ('summer', [['P5M', 'P8M']]),
        ('autumn', [['P8M', 'P11M']]),
    ]
    return [{'name': name, 'interval': interval} for name, interval in spans]
@fixture(scope='function')
def twenty_four_hours():
    """Twenty-four hourly intervals as ISO 8601 durations (PT0H..PT24H)."""
    return [
        {'name': '1_%d' % hour,
         'interval': [['PT%dH' % hour, 'PT%dH' % (hour + 1)]]}
        for hour in range(24)
    ]
@fixture(scope='function')
def one_day():
    """A single interval covering one day."""
    return [{'name': 'one_day', 'interval': [['P0D', 'P1D']]}]
@fixture(scope='function')
def one_year():
    """A single interval covering one year."""
    return [{'name': 'one_year', 'interval': [['P0Y', 'P1Y']]}]
@fixture(scope='function')
def monthly_data():
    """Days per calendar month of a non-leap year, one value per month."""
    return np.array([31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31])
@fixture(scope='function')
def monthly_data_as_seasons():
    """Days per season (winter, spring, summer, autumn) of a non-leap year."""
    season_days = [31 + 31 + 28, 31 + 30 + 31, 30 + 31 + 31, 30 + 31 + 30]
    return np.array(season_days, dtype=float)
@fixture(scope='function')
def remap_month_data():
    """Average days per month within each season (season total / 3)."""
    season_days = np.array([
        31 + 31 + 28,  # Dec, Jan, Feb
        31 + 30 + 31,  # Mar, Apr, May
        30 + 31 + 31,  # Jun, Jul, Aug
        30 + 31 + 30,  # Sep, Oct, Nov
    ], dtype=float)
    return season_days / 3
@fixture(scope='function')
def remap_month_data_as_months():
    """Seasonal averages remapped back onto the twelve months."""
    # Jan, then Feb-Apr, then May-Dec (same values as the literal list)
    return np.array([30.666666666] + [29.666666666] * 3 + [30.666666666] * 8)
@fixture(scope='function')
def regions_rect():
    """Return single region covering 2x1 area::

        |```````|
        |   0   |
        |.......|
    """
    def polygon_feature(name, ring):
        # GeoJSON-style Feature wrapping a single polygon ring
        return {
            'name': name,
            'feature': {
                'type': 'Feature',
                'properties': {'name': name},
                'geometry': {'type': 'Polygon', 'coordinates': [ring]}
            }
        }
    return [polygon_feature('zero', [[0, 0], [0, 2], [1, 2], [1, 0]])]
@fixture(scope='function')
def regions_half_squares():
    """Return two adjacent square regions::

        |```|```|
        | A | B |
        |...|...|
    """
    def polygon_feature(name, ring):
        # GeoJSON-style Feature wrapping a single polygon ring
        return {
            'name': name,
            'feature': {
                'type': 'Feature',
                'properties': {'name': name},
                'geometry': {'type': 'Polygon', 'coordinates': [ring]}
            }
        }
    return [
        polygon_feature('a', [[0, 0], [0, 1], [1, 1], [1, 0]]),
        polygon_feature('b', [[0, 1], [0, 2], [1, 2], [1, 1]]),
    ]
@fixture(scope='function')
def regions():
    """Return data structure for test regions/shapes."""
    def polygon_feature(name, ring):
        # GeoJSON-style Feature wrapping a single polygon ring
        return {
            'name': name,
            'feature': {
                'type': 'Feature',
                'properties': {'name': name},
                'geometry': {'type': 'Polygon', 'coordinates': [ring]}
            }
        }
    return [
        polygon_feature('unit', [[0, 0], [0, 1], [1, 1], [1, 0]]),
        polygon_feature('half', [[0, 0], [0, 0.5], [1, 0.5], [1, 0]]),
        polygon_feature('two', [[0, 0], [0, 2], [1, 2], [1, 0]]),
    ]
@fixture(scope='function')
def regions_single_half_square():
    """Return single half-size square region::

        |```|
        | A |
        |...|
    """
    def polygon_feature(name, ring):
        # GeoJSON-style Feature wrapping a single polygon ring
        return {
            'name': name,
            'feature': {
                'type': 'Feature',
                'properties': {'name': name},
                'geometry': {'type': 'Polygon', 'coordinates': [ring]}
            }
        }
    return [polygon_feature('a', [[0, 0], [0, 1], [1, 1], [1, 0]])]
@fixture(scope='function')
def regions_half_triangles():
    """Return regions split diagonally::

        |``````/|
        | 0 / 1 |
        |/......|
    """
    def polygon_feature(name, ring):
        # GeoJSON-style Feature wrapping a single polygon ring
        return {
            'name': name,
            'feature': {
                'type': 'Feature',
                'properties': {'name': name},
                'geometry': {'type': 'Polygon', 'coordinates': [ring]}
            }
        }
    return [
        polygon_feature('zero', [[0, 0], [0, 2], [1, 0]]),
        polygon_feature('one', [[0, 2], [1, 2], [1, 0]]),
    ]
| [
"pytest.fixture",
"numpy.array",
"numpy.transpose",
"numpy.ones"
] | [((81, 104), 'pytest.fixture', 'fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (88, 104), False, 'from pytest import fixture\n'), ((298, 321), 'pytest.fixture', 'fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (305, 321), False, 'from pytest import fixture\n'), ((445, 468), 'pytest.fixture', 'fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (452, 468), False, 'from pytest import fixture\n'), ((901, 924), 'pytest.fixture', 'fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (908, 924), False, 'from pytest import fixture\n'), ((1976, 2001), 'pytest.fixture', 'fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (1983, 2001), False, 'from pytest import fixture\n'), ((3094, 3119), 'pytest.fixture', 'fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (3101, 3119), False, 'from pytest import fixture\n'), ((4594, 4619), 'pytest.fixture', 'fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (4601, 4619), False, 'from pytest import fixture\n'), ((4732, 4757), 'pytest.fixture', 'fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (4739, 4757), False, 'from pytest import fixture\n'), ((4872, 4897), 'pytest.fixture', 'fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (4879, 4897), False, 'from pytest import fixture\n'), ((5174, 5199), 'pytest.fixture', 'fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (5181, 5199), False, 'from pytest import fixture\n'), ((5347, 5372), 'pytest.fixture', 'fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (5354, 5372), False, 'from pytest import fixture\n'), ((5602, 5627), 'pytest.fixture', 'fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (5609, 5627), False, 'from pytest import fixture\n'), ((5973, 5998), 'pytest.fixture', 'fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (5980, 5998), False, 'from pytest import 
fixture\n'), ((6480, 6505), 'pytest.fixture', 'fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (6487, 6505), False, 'from pytest import fixture\n'), ((7307, 7332), 'pytest.fixture', 'fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (7314, 7332), False, 'from pytest import fixture\n'), ((8416, 8441), 'pytest.fixture', 'fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (8423, 8441), False, 'from pytest import fixture\n'), ((8918, 8943), 'pytest.fixture', 'fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (8925, 8943), False, 'from pytest import fixture\n'), ((409, 441), 'numpy.ones', 'np.ones', (['(1, 12)'], {'dtype': 'np.float'}), '((1, 12), dtype=np.float)\n', (416, 441), True, 'import numpy as np\n'), ((639, 809), 'numpy.array', 'np.array', (['[[1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0]]'], {}), '([[1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [0, 0, 1, 1, 1, 0, 0, 0, 0,\n 0, 0, 0], [0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0,\n 1, 1, 1, 0]])\n', (647, 809), True, 'import numpy as np\n'), ((1248, 1447), 'numpy.array', 'np.array', (['[[31, 0, 0, 0], [28, 0, 0, 0], [0, 31, 0, 0], [0, 30, 0, 0], [0, 31, 0, 0],\n [0, 0, 30, 0], [0, 0, 31, 0], [0, 0, 31, 0], [0, 0, 0, 30], [0, 0, 0, \n 31], [0, 0, 0, 30], [31, 0, 0, 0]]'], {}), '([[31, 0, 0, 0], [28, 0, 0, 0], [0, 31, 0, 0], [0, 30, 0, 0], [0, \n 31, 0, 0], [0, 0, 30, 0], [0, 0, 31, 0], [0, 0, 31, 0], [0, 0, 0, 30],\n [0, 0, 0, 31], [0, 0, 0, 30], [31, 0, 0, 0]])\n', (1256, 1447), True, 'import numpy as np\n'), ((1782, 1861), 'numpy.array', 'np.array', (['[31 + 31 + 28, 31 + 30 + 31, 30 + 31 + 31, 30 + 31 + 30]'], {'dtype': 'float'}), '([31 + 31 + 28, 31 + 30 + 31, 30 + 31 + 31, 30 + 31 + 30], dtype=float)\n', (1790, 1861), True, 'import numpy as np\n'), ((1936, 1972), 'numpy.transpose', 'np.transpose', (['(coef / 
days_in_seasons)'], {}), '(coef / days_in_seasons)\n', (1948, 1972), True, 'import numpy as np\n'), ((4993, 5051), 'numpy.array', 'np.array', (['[31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]'], {}), '([31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31])\n', (5001, 5051), True, 'import numpy as np\n'), ((5242, 5321), 'numpy.array', 'np.array', (['[31 + 31 + 28, 31 + 30 + 31, 30 + 31 + 31, 30 + 31 + 30]'], {'dtype': 'float'}), '([31 + 31 + 28, 31 + 30 + 31, 30 + 31 + 31, 30 + 31 + 30], dtype=float)\n', (5250, 5321), True, 'import numpy as np\n'), ((5673, 5861), 'numpy.array', 'np.array', (['[30.666666666, 29.666666666, 29.666666666, 29.666666666, 30.666666666, \n 30.666666666, 30.666666666, 30.666666666, 30.666666666, 30.666666666, \n 30.666666666, 30.666666666]'], {}), '([30.666666666, 29.666666666, 29.666666666, 29.666666666, \n 30.666666666, 30.666666666, 30.666666666, 30.666666666, 30.666666666, \n 30.666666666, 30.666666666, 30.666666666])\n', (5681, 5861), True, 'import numpy as np\n'), ((5408, 5487), 'numpy.array', 'np.array', (['[31 + 31 + 28, 31 + 30 + 31, 30 + 31 + 31, 30 + 31 + 30]'], {'dtype': 'float'}), '([31 + 31 + 28, 31 + 30 + 31, 30 + 31 + 31, 30 + 31 + 30], dtype=float)\n', (5416, 5487), True, 'import numpy as np\n'), ((210, 286), 'numpy.array', 'np.array', (['[[31, 28, 31, 30, 31, 31, 30, 30, 31, 31, 30, 31]]'], {'dtype': 'np.float'}), '([[31, 28, 31, 30, 31, 31, 30, 30, 31, 31, 30, 31]], dtype=np.float)\n', (218, 286), True, 'import numpy as np\n')] |
"""
Class that plays the Reinforcement Learning agent
"""
# !/usr/bin/python
import csv
import pprint
import threading
import numpy as np
import json
import random
import pathlib
from datetime import datetime
import time
import copy
from time import sleep
import logging
import sys
from formatter_for_output import format_console_output
from plotter.plot_output_data import PlotOutputData
from learning.run_output_Q_parameters import RunOutputQParameters
from request_builder.builder import build_command
from device_communication.client import operate_on_bulb, operate_on_bulb_json
from state_machine.state_machine_yeelight import compute_reward_from_states, compute_next_state_from_props, get_states, \
get_optimal_policy, get_optimal_path
from config import FrameworkConfiguration
class ReinforcementLearningAlgorithm(object):
def __init__(self, discovery_report, thread_id):
self.discovery_report = discovery_report
self.total_episodes = FrameworkConfiguration.total_episodes
self.max_steps = FrameworkConfiguration.max_steps
self.epsilon = FrameworkConfiguration.epsilon
self.alpha = FrameworkConfiguration.alpha
self.gamma = FrameworkConfiguration.gamma
self.decay_episode = FrameworkConfiguration.decay_episode
self.decay_value = FrameworkConfiguration.decay_value
self.show_graphs = FrameworkConfiguration.show_graphs
self.follow_policy = FrameworkConfiguration.follow_policy
self.seconds_to_wait = FrameworkConfiguration.seconds_to_wait
self.follow_partial_policy = FrameworkConfiguration.follow_partial_policy
self.follow_policy_every_tot_episodes = FrameworkConfiguration.follow_policy_every_tot_episodes
self.num_actions_to_use = FrameworkConfiguration.num_actions_to_use
self.algorithm = FrameworkConfiguration.algorithm
# lambda is needed only in case of sarsa(lambda) or Q(lambda) algorithms
if self.algorithm == 'sarsa_lambda' or self.algorithm == 'qlearning_lambda':
self.lam = FrameworkConfiguration.lam
if FrameworkConfiguration.date_old_matrix != 'YY_mm_dd_HH_MM_SS':
self.use_old_matrix = FrameworkConfiguration.use_old_matrix # in sarsa lambda also E is needed
self.date_old_matrix = FrameworkConfiguration.date_old_matrix # I should check it is in a correct format
else:
self.use_old_matrix = False
self.current_date = datetime.now()
if thread_id:
self.thread_id = thread_id
self.id_for_output = '%Y_%m_%d_%H_%M_%S' + '_' + str(self.thread_id)
self.storage_reward = 0 # temporary storage variable
def choose_action(self, state, q_matrix):
"""
Function to choose the next action, same for all algorithms
"""
# Here I should choose the method
if np.random.uniform(0, 1) < self.epsilon:
# print("\t\tSelect the action randomly")
action = random.randint(0, self.num_actions_to_use - 1) # don't use the first one
else:
# Select maximum, if multiple values select randomly
# print("\t\tSelect maximum")
# choose random action between the max ones
action = np.random.choice(np.where(q_matrix[state, :] == q_matrix[state, :].max())[0])
# The action then should be converted when used into a json_string returned by builder_yeelight
# action is an index
return action
def update_sarsa(self, state, state_2, reward, action, action_2, q_matrix):
"""
SARSA function to learn the Q-value
"""
predict = q_matrix[state, action]
target = reward + self.gamma * q_matrix[state_2, action_2]
q_matrix[state, action] = q_matrix[state, action] + self.alpha * (target - predict)
def update_sarsa_lambda(self, state, state_2, reward, action, action_2, len_states, len_actions, q_matrix,
e_matrix):
"""
SARSA(lambda) function to update the Q-value matrix and the Eligibility matrix
"""
predict = q_matrix[state, action]
target = reward + self.gamma * q_matrix[state_2, action_2]
delta = target - predict
e_matrix[state, action] = e_matrix[state, action] + 1
# For all s, a
for s in range(len_states):
for a in range(len_actions):
q_matrix[s, a] = q_matrix[s, a] + self.alpha * delta * e_matrix[s, a]
e_matrix[s, a] = self.gamma * self.lam * e_matrix[s, a]
def update_qlearning_lambda(self, state, state_2, reward, action, action_2, len_states, len_actions, q_matrix,
e_matrix):
"""
Q-learning(lambda) (Watkins's Q(lambda) algorithm) function to update the Q-value matrix and the Eligibility matrix
"""
predict = q_matrix[state, action]
maxQ = np.amax(q_matrix[state_2, :]) # Find maximum value for the new state Q(s', a*)
maxIndex = np.argmax(q_matrix[state_2, :]) # Find index of the maximum value a*
target = reward + self.gamma * maxQ
delta = target - predict
e_matrix[state, action] = e_matrix[state, action] + 1
# For all s, a
for s in range(len_states):
for a in range(len_actions):
q_matrix[s, a] = q_matrix[s, a] + self.alpha * delta * e_matrix[s, a]
if action_2 == maxIndex:
e_matrix[s, a] = self.gamma * self.lam * e_matrix[s, a]
else:
e_matrix[s, a] = 0
def update_qlearning(self, state, state_2, reward, action, q_matrix):
"""
# Q-learning function to learn the Q-value
"""
predict = q_matrix[state, action]
maxQ = np.amax(q_matrix[state_2, :]) # Find maximum value for the new state
target = reward + self.gamma * maxQ
q_matrix[state, action] = q_matrix[state, action] + self.alpha * (target - predict)
def initialize_log_files(self, output_directory, log_directory):
"""
Get log filenames and build non-existing directories
"""
log_dir = FrameworkConfiguration.directory + output_directory + '/' + log_directory
pathlib.Path(log_dir + '/').mkdir(parents=True, exist_ok=True) # for Python > 3.5 YY_mm_dd_HH_MM_SS'
log_filename = self.current_date.strftime(log_dir + '/' + 'log_' + self.id_for_output + '.log')
log_date_filename = FrameworkConfiguration.directory + output_directory + '/log_date.log'
return log_filename, log_date_filename
def initialize_output_q_params_files(self, output_directory, q_params_directory):
"""
Get output filenames for saving Q and parameters and build non-existing directories
"""
output_Q_params_dir = FrameworkConfiguration.directory + output_directory + '/' + q_params_directory
pathlib.Path(output_Q_params_dir + '/').mkdir(parents=True, exist_ok=True) # for Python > 3.5
output_Q_filename = self.current_date.strftime(
output_Q_params_dir + '/' + 'output_Q_' + self.id_for_output + '.csv')
output_parameters_filename = self.current_date.strftime(
output_Q_params_dir + '/' + 'output_parameters_' + self.id_for_output + '.csv')
output_E_filename = ''
if self.algorithm == 'sarsa_lambda' or self.algorithm == 'qlearning_lambda':
output_E_filename = self.current_date.strftime(
output_Q_params_dir + '/' + 'output_E_' + self.id_for_output + '.csv')
return output_Q_filename, output_parameters_filename, output_E_filename
def initialize_output_csv_files(self, output_directory, output_csv_directory):
"""
Get output filenames for saving all episodes result and build non-existing directories
"""
output_dir = FrameworkConfiguration.directory + output_directory + '/' + output_csv_directory
pathlib.Path(output_dir + '/').mkdir(parents=True, exist_ok=True) # for Python > 3.5
output_filename = self.current_date.strftime(
output_dir + '/' + 'output_' + self.algorithm + '_' + self.id_for_output + '.csv')
partial_output_filename = self.current_date.strftime(
output_dir + '/' + 'partial_output_' + self.algorithm + '_' + self.id_for_output + '.csv')
return output_filename, partial_output_filename
def write_date_id_to_log(self, log_date_filename):
"""
Write the identifier of files (date) and corresponding algorithm to log_date.log file
"""
with open(log_date_filename, mode='a') as output_file:
output_writer = csv.writer(output_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_NONE)
output_writer.writerow([self.current_date.strftime(self.id_for_output), self.algorithm])
def write_params_to_output_file(self, output_parameters_filename, optimal_policy, optimal_path):
"""
Write all parameters of the algorithm to output file
"""
with open(output_parameters_filename, mode='w') as output_file:
output_writer = csv.writer(output_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_NONE)
output_writer.writerow(['algorithm_used', self.algorithm])
output_writer.writerow(['epsilon', self.epsilon])
output_writer.writerow(['max_steps', self.max_steps])
output_writer.writerow(['total_episodes', self.total_episodes])
output_writer.writerow(['alpha', self.alpha])
output_writer.writerow(['num_actions_to_use', self.num_actions_to_use])
output_writer.writerow(['gamma', self.gamma])
output_writer.writerow(['decay_episode', self.decay_episode])
output_writer.writerow(['decay_value', self.decay_value])
output_writer.writerow(['seconds_to_wait', self.seconds_to_wait])
output_writer.writerow(['optimal_policy', "-".join(str(act) for act in optimal_policy)])
output_writer.writerow(['optimal_path', "-".join(str(pat) for pat in optimal_path)])
output_writer.writerow(['path', FrameworkConfiguration.path])
output_writer.writerow(['protocol', self.discovery_report['protocol']])
if self.algorithm == 'sarsa_lambda' or self.algorithm == 'qlearning_lambda':
output_writer.writerow(['lambda', self.lam])
def retrieve_old_q_matrix(self, output_directory, q_params_directory, len_states, len_actions, empty_matrix):
"""
Retrieve old save Q matrix
"""
file_Q = 'output_Q_' + self.date_old_matrix + '.csv'
try:
output_Q_params_dir = FrameworkConfiguration.directory + output_directory + '/' + q_params_directory
tmp_matrix = np.genfromtxt(output_Q_params_dir + '/' + file_Q, delimiter=',', dtype=np.float32)
Q_tmp = tmp_matrix[1:, 1:]
Q = copy.deepcopy(Q_tmp)
except Exception as e:
logging.warning("Wrong file format: " + str(e))
logging.warning("Using an empty Q matrix instead of the old one.")
return empty_matrix
# Check the format of the matrix is correct
if len_states != len(Q) or len_actions != len(Q[0]) or np.isnan(np.sum(Q)):
logging.warning("Wrong file format: wrong Q dimensions or nan values present")
logging.warning("Using an empty Q matrix instead of the old one.")
return empty_matrix
return Q
def retrieve_old_e_matrix(self, output_directory, q_params_directory, len_states, len_actions, empty_matrix):
"""
Retrieve old save Q matrix
"""
file_E = 'output_E_' + self.date_old_matrix + '.csv'
try:
output_Q_params_dir = FrameworkConfiguration.directory + output_directory + '/' + q_params_directory
tmp_matrix = np.genfromtxt(output_Q_params_dir + '/' + file_E, delimiter=',', dtype=np.float32)
E_tmp = tmp_matrix[1:, 1:]
E = copy.deepcopy(E_tmp)
except Exception as e:
logging.warning("Wrong file format: " + str(e))
logging.warning("Using an empty E matrix instead of the old one.")
return empty_matrix
# Check the format of the matrix is correct
if len_states != len(E) or len_actions != len(E[0]) or np.isnan(np.sum(E)):
logging.warning("Wrong file format: wrong E dimensions or nan values present")
logging.warning("Using an empty E matrix instead of the old one.")
return empty_matrix
return E
def write_headers_to_output_files(self, output_filename, partial_output_filename):
"""
Write headers to output csv files
"""
with open(output_filename, mode='w') as output_file:
output_writer = csv.writer(output_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
output_writer.writerow(['Episodes', 'Reward', 'CumReward', 'Timesteps'])
if self.follow_partial_policy:
with open(partial_output_filename, mode='w') as partial_output_file:
output_writer = csv.writer(partial_output_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
output_writer.writerow(
['CurrentEpisode', 'Timesteps', 'ObtainedReward', 'Time', 'PolicySelected', 'StatesPassed'])
    def set_initial_state(self):
        """
        Set device to starting state (e.g. power off)

        Path 3 powers the bulb on and sets an RGB colour (for visual
        checks) and then still falls through to the final power-off below.
        Path 4 powers the bulb on and returns early, leaving it on.
        Every other path only sends the power-off command.

        :return: number of commands sent to the device
        """
        num_actions = 0
        if FrameworkConfiguration.path == 3:
            # Special initial configuration for visual checks on the bulb
            # ONLY FOR PATH 3
            operate_on_bulb("set_power", str("\"on\", \"sudden\", 0"), self.discovery_report, self.discovery_report['protocol'])
            num_actions += 1
            # Give the device time to settle between commands
            sleep(self.seconds_to_wait)
            operate_on_bulb("set_rgb", str("255" + ", \"sudden\", 500"), self.discovery_report, self.discovery_report['protocol'])
            num_actions += 1
            sleep(self.seconds_to_wait)
        elif FrameworkConfiguration.path == 4:
            # Special initial configuration for path 4, starting powered on
            # ONLY FOR PATH 4
            if FrameworkConfiguration.DEBUG:
                logging.debug("\t\tREQUEST: Setting power on")
            operate_on_bulb("set_power", str("\"on\", \"sudden\", 0"), self.discovery_report,
                            self.discovery_report['protocol'])
            num_actions += 1
            sleep(self.seconds_to_wait)
            # Path 4 keeps the bulb on: skip the power-off below
            return num_actions
        # Turn off the lamp
        if FrameworkConfiguration.DEBUG:
            logging.debug("\t\tREQUEST: Setting power off")
        operate_on_bulb("set_power", str("\"off\", \"sudden\", 0"), self.discovery_report, self.discovery_report['protocol'])
        num_actions += 1
        return num_actions
def write_log_file(self, log_filename, t, tmp_reward, state1, state2, action1, action2):
"""
Write data at each time step
"""
with open(log_filename, "a") as write_file:
write_file.write("\nTimestep " + str(t) + " finished.")
write_file.write(" Temporary reward: " + str(tmp_reward))
write_file.write(" Previous state: " + str(state1))
write_file.write(" Current state: " + str(state2))
write_file.write(" Performed action: " + str(action1))
if self.algorithm != 'qlearning':
write_file.write(" Next action: " + str(action2))
def write_episode_summary(self, log_filename, output_filename, episode, reward_per_episode, cumulative_reward, t):
"""
Write data at the end of each episode
"""
with open(log_filename, "a") as write_file:
write_file.write("\nEpisode " + str(episode) + " finished.\n")
with open(output_filename, mode="a") as output_file:
output_writer = csv.writer(output_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
output_writer.writerow([episode, reward_per_episode, cumulative_reward, t - 1])
def save_matrix(self, output_filename, states, matrix, label):
"""
Save Q-matrix
"""
header = [label] # For correct output structure
for i in range(0, self.num_actions_to_use):
json_string = build_command(method_chosen_index=i, select_all_props=False, protocol=self.discovery_report['protocol'])
header.append(json.loads(json_string)['method'])
with open(output_filename, "w") as output_matrix_file:
output_matrix_writer = csv.writer(output_matrix_file, delimiter=',', quotechar='"',
quoting=csv.QUOTE_NONE)
output_matrix_writer.writerow(header)
for index, stat in enumerate(states):
row = [stat]
for val in matrix[index]:
row.append("%.4f" % val)
output_matrix_writer.writerow(row)
def run(self):
    """
    Run RL algorithm

    Trains the configured algorithm (SARSA, SARSA(lambda), Q-learning or
    Q(lambda)) for ``self.total_episodes`` episodes against the physical
    bulb described by ``self.discovery_report``, logging every step and
    finally saving the learnt Q (and E) matrices plus per-episode stats.
    NOTE(review): this drives a real device — the sleep() calls pace the
    bulb's command rate limit (presumably 60 commands/minute; confirm).
    """
    # INITIALIZATION PHASE
    np.set_printoptions(formatter={'float': lambda output: "{0:0.4f}".format(output)})
    # Obtain data about states, path and policy
    states = get_states(FrameworkConfiguration.path)
    optimal_policy = get_optimal_policy(FrameworkConfiguration.path)
    optimal_path = get_optimal_path(FrameworkConfiguration.path)
    # Initialize filenames to be generated
    output_dir = 'output'
    q_params_dir = 'output_Q_parameters'
    log_filename, log_date_filename = self.initialize_log_files(output_dir, 'log')
    output_Q_filename, output_parameters_filename, output_E_filename = self.initialize_output_q_params_files(
        output_dir, q_params_dir)
    output_filename, partial_output_filename = self.initialize_output_csv_files(output_dir, 'output_csv')
    self.write_date_id_to_log(log_date_filename)
    self.write_params_to_output_file(output_parameters_filename, optimal_policy, optimal_path)
    if self.show_graphs:
        logging.debug("States are " + str(len(states)))
        logging.debug("Actions are " + str(self.num_actions_to_use))
    # Initializing the Q-matrix
    # to 0 values
    # Q = np.zeros((len(states), self.num_actions_to_use))
    # or to random values from 0 to 1
    Q = np.random.rand(len(states), self.num_actions_to_use)
    if self.use_old_matrix:
        # Retrieve from output_Q_data.csv an old matrix for "transfer learning"
        Q = self.retrieve_old_q_matrix(output_dir, q_params_dir, len(states), self.num_actions_to_use, Q)
    # if FrameworkConfiguration.DEBUG:
    #     logging.debug(Q)
    E = []
    if self.algorithm == 'sarsa_lambda' or self.algorithm == 'qlearning_lambda':
        # Initializing the E-matrix
        E = np.zeros((len(states), self.num_actions_to_use))  # trace for state action pairs
        if self.use_old_matrix:
            # Retrieve from output_E_data.csv
            # Check the format of the matrix is correct
            # TODO or should I start always from an empty E matrix?
            E = self.retrieve_old_e_matrix(output_dir, q_params_dir, len(states), self.num_actions_to_use, E)
            # if FrameworkConfiguration.DEBUG:
            #     logging.debug(E)
    start_time = time.time()
    # Per-episode statistics (kept in memory; written to CSV per episode)
    y_timesteps = []
    y_reward = []
    y_cum_reward = []
    cumulative_reward = 0
    count_actions = 0
    # Write into output_filename the header: Episodes, Reward, CumReward, Timesteps
    self.write_headers_to_output_files(output_filename, partial_output_filename)
    # STARTING THE LEARNING PROCESS
    # LOOP OVER EPISODES
    for episode in range(self.total_episodes):
        logging.info("----------------------------------------------------")
        logging.info("Episode " + str(episode))
        sleep(3)
        t = 0
        count_actions += self.set_initial_state()
        sleep(self.seconds_to_wait)
        state1, old_props_values = compute_next_state_from_props(FrameworkConfiguration.path, 0, [], self.discovery_report)
        if FrameworkConfiguration.DEBUG:
            logging.debug("\tSTARTING FROM STATE " + str(states[state1]))
        action1 = self.choose_action(state1, Q)
        done = False
        reward_per_episode = 0
        # Exploration reduces every some episodes
        if (episode + 1) % self.decay_episode == 0:  # configurable parameter
            self.epsilon = self.epsilon - self.decay_value * self.epsilon  # could be another configurable parameter, decay of epsilon
            if self.epsilon < 0.1:
                # Floor epsilon at 0.1 and stop decaying
                self.epsilon = 0.1
                self.decay_value = 0
        # LOOP OVER TIME STEPS
        while t < self.max_steps:
            if count_actions > 55:  # To avoid crashing the lamp (rate of 60 commands/minute)
                sleep(60)
                count_actions = 0
            # Getting the next state
            if self.algorithm == 'qlearning':
                action1 = self.choose_action(state1, Q)
            # Perform an action on the bulb sending a command
            json_string = build_command(method_chosen_index=action1, select_all_props=False, protocol=self.discovery_report['protocol'])
            if FrameworkConfiguration.DEBUG:
                logging.debug("\t\tREQUEST: " + str(json_string))
            reward_from_response = operate_on_bulb_json(json_string, self.discovery_report, self.discovery_report['protocol'])
            count_actions += 1
            sleep(self.seconds_to_wait)
            state2, new_props_values = compute_next_state_from_props(FrameworkConfiguration.path, state1, old_props_values,
                                                                       self.discovery_report)
            if FrameworkConfiguration.DEBUG:
                logging.debug("\tFROM STATE " + states[state1] + " TO STATE " + states[state2])
            reward_from_states, self.storage_reward = compute_reward_from_states(FrameworkConfiguration.path, state1, state2,
                                                                                  self.storage_reward)
            tmp_reward = -1 + reward_from_response + reward_from_states  # -1 for using a command more
            if FrameworkConfiguration.use_colored_output:
                # Colored output: DEBUG level for non-negative, ERROR for negative rewards
                LOG = logging.getLogger()
                if tmp_reward >= 0:
                    LOG.debug("\t\tREWARD: " + str(tmp_reward))
                else:
                    LOG.error("\t\tREWARD: " + str(tmp_reward))
                sleep(0.1)
            else:
                logging.info("\t\tREWARD: " + str(tmp_reward))
            # NOTE(review): states 5 (or 4 on path 4) appear to be terminal — confirm
            if state2 == 5 or (state2 == 4 and FrameworkConfiguration.path == 4):
                done = True
            if self.algorithm == 'sarsa_lambda':
                # Choosing the next action
                action2 = self.choose_action(state2, Q)
                # Learning the Q-value
                self.update_sarsa_lambda(state1, state2, tmp_reward, action1, action2, len(states),
                                         self.num_actions_to_use, Q, E)
            elif self.algorithm == 'qlearning_lambda':
                # Choosing the next action
                action2 = self.choose_action(state2, Q)
                # Learning the Q-value
                self.update_qlearning_lambda(state1, state2, tmp_reward, action1, action2, len(states),
                                             self.num_actions_to_use, Q, E)
            elif self.algorithm == 'qlearning':
                action2 = -1  # Invalid action to avoid warnings
                # Learning the Q-value
                self.update_qlearning(state1, state2, tmp_reward, action1, Q)
            else:
                # SARSA as default algorithm
                # Choosing the next action
                action2 = self.choose_action(state2, Q)
                # Learning the Q-value
                self.update_sarsa(state1, state2, tmp_reward, action1, action2, Q)
            # Update log file
            self.write_log_file(log_filename, t, tmp_reward, state1, state2, action1, action2)
            state1 = state2
            old_props_values = new_props_values
            if self.algorithm != 'qlearning':
                action1 = action2
            # Updating the respective values
            t += 1
            reward_per_episode += tmp_reward
            # If at the end of learning process
            if done:
                break
        cumulative_reward += reward_per_episode
        y_timesteps.append(t - 1)
        y_cum_reward.append(cumulative_reward)
        y_reward.append(reward_per_episode)
        self.write_episode_summary(log_filename, output_filename, episode, reward_per_episode, cumulative_reward, t)
        if FrameworkConfiguration.use_colored_output:
            LOG = logging.getLogger()
            if reward_per_episode >= 0:
                LOG.debug("\tREWARD OF THE EPISODE: " + str(reward_per_episode))
            else:
                LOG.error("\tREWARD OF THE EPISODE: " + str(reward_per_episode))
            sleep(0.1)
        else:
            logging.info("\tREWARD OF THE EPISODE: " + str(reward_per_episode))
        if self.follow_partial_policy:
            if (episode + 1) % self.follow_policy_every_tot_episodes == 0:
                # Follow best policy found after some episodes
                logging.info("- - - - - - - - - - - - - - - - - - - - - -")
                logging.info("\tFOLLOW PARTIAL POLICY AT EPISODE " + str(episode))
                if count_actions > 35:  # To avoid crashing lamp
                    sleep(60)
                    count_actions = 0
                # Save Q-matrix
                self.save_matrix(output_Q_filename, states, Q, 'Q')
                # Save E-matrix
                # Only for sarsa(lambda) and Q(lambda)
                if self.algorithm == 'sarsa_lambda' or self.algorithm == 'qlearning_lambda':
                    self.save_matrix(output_E_filename, states, E, 'E')
                found_policy, dict_results = RunOutputQParameters(
                    date_to_retrieve=self.current_date.strftime(self.id_for_output), show_retrieved_info=False,
                    discovery_report=self.discovery_report).run()
                count_actions += 20
                with open(partial_output_filename, mode="a") as partial_output_file:
                    output_writer = csv.writer(partial_output_file, delimiter=',', quotechar='"',
                                               quoting=csv.QUOTE_MINIMAL)
                    output_writer.writerow(
                        [episode, dict_results['timesteps_from_run'], dict_results['reward_from_run'],
                         dict_results['time_from_run'], dict_results['policy_from_run'],
                         dict_results['states_from_run']])  # Episode or episode+1?
                if found_policy:
                    # I could stop here if found good policy, could continue if you think you could find a better one
                    pass
    # SAVE DATA
    # Print and save the Q-matrix inside external file
    if FrameworkConfiguration.DEBUG:
        logging.debug("Q MATRIX:")
        logging.debug(Q)
    self.save_matrix(output_Q_filename, states, Q, 'Q')
    # Only for sarsa(lambda) and Q(lambda)
    if self.algorithm == 'sarsa_lambda' or self.algorithm == 'qlearning_lambda':
        # Print and save the E-matrix inside external file
        if FrameworkConfiguration.DEBUG:
            logging.debug("E matrix")
            logging.debug(E)
        self.save_matrix(output_E_filename, states, E, 'E')
    # Write total time for learning algorithm
    with open(log_filename, "a") as write_file:
        write_file.write("\nTotal time of %s seconds." % (time.time() - start_time))
    sleep(5)  # Wait for writing to files
    # PLOT DATA
    if self.show_graphs:
        PlotOutputData(date_to_retrieve=self.current_date.strftime(self.id_for_output), separate_plots=False).run()
    # FOLLOW BEST POLICY FOUND
    if self.follow_policy:
        RunOutputQParameters(date_to_retrieve=self.current_date.strftime(self.id_for_output),
                             discovery_report=self.discovery_report).run()
def main(discovery_report=None):
    """Entry point: run the RL algorithm four times against the bulb
    described by *discovery_report*.

    Exits with an error when invoked without a discovery report, i.e.
    when not launched from the main script.
    """
    format_console_output()
    # if FrameworkConfiguration.DEBUG:
    #     logging.debug(str(FrameworkConfiguration().as_dict()))
    if discovery_report is None:
        logging.error("No discovery report found.")
        logging.error("Please run this framework from the main script.")
        exit(-1)
    if not discovery_report['ip']:
        return
    if FrameworkConfiguration.DEBUG:
        logging.debug("Received discovery report:")
        logging.debug(str(discovery_report))
    logging.info("Discovery report found at " + discovery_report['ip'])
    logging.info("Waiting...")
    sleep(5)
    for run_index in range(4):
        logging.info("INDEX " + str(run_index))
        logging.info("####### Starting RL algorithm path " + str(FrameworkConfiguration.path) + " #######")
        settings_summary = ("ALGORITHM " + FrameworkConfiguration.algorithm
                            + " - PATH " + str(FrameworkConfiguration.path)
                            + " - EPS " + str(FrameworkConfiguration.epsilon)
                            + " - ALP " + str(FrameworkConfiguration.alpha)
                            + " - GAM " + str(FrameworkConfiguration.gamma))
        logging.info(settings_summary)
        ReinforcementLearningAlgorithm(discovery_report=discovery_report,
                                       thread_id=threading.get_ident()).run()
        logging.info("####### Finish RL algorithm #######")
        sleep(50)
if __name__ == '__main__':
    # Direct invocation is unsupported: main() requires a discovery
    # report from the launcher script and exits when called without one.
    main()
| [
"logging.getLogger",
"logging.debug",
"time.sleep",
"copy.deepcopy",
"numpy.genfromtxt",
"logging.info",
"logging.error",
"state_machine.state_machine_yeelight.get_optimal_path",
"formatter_for_output.format_console_output",
"pathlib.Path",
"state_machine.state_machine_yeelight.get_optimal_polic... | [((29168, 29191), 'formatter_for_output.format_console_output', 'format_console_output', ([], {}), '()\n', (29189, 29191), False, 'from formatter_for_output import format_console_output\n'), ((2470, 2484), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2482, 2484), False, 'from datetime import datetime\n'), ((4935, 4964), 'numpy.amax', 'np.amax', (['q_matrix[state_2, :]'], {}), '(q_matrix[state_2, :])\n', (4942, 4964), True, 'import numpy as np\n'), ((5034, 5065), 'numpy.argmax', 'np.argmax', (['q_matrix[state_2, :]'], {}), '(q_matrix[state_2, :])\n', (5043, 5065), True, 'import numpy as np\n'), ((5814, 5843), 'numpy.amax', 'np.amax', (['q_matrix[state_2, :]'], {}), '(q_matrix[state_2, :])\n', (5821, 5843), True, 'import numpy as np\n'), ((17398, 17437), 'state_machine.state_machine_yeelight.get_states', 'get_states', (['FrameworkConfiguration.path'], {}), '(FrameworkConfiguration.path)\n', (17408, 17437), False, 'from state_machine.state_machine_yeelight import compute_reward_from_states, compute_next_state_from_props, get_states, get_optimal_policy, get_optimal_path\n'), ((17463, 17510), 'state_machine.state_machine_yeelight.get_optimal_policy', 'get_optimal_policy', (['FrameworkConfiguration.path'], {}), '(FrameworkConfiguration.path)\n', (17481, 17510), False, 'from state_machine.state_machine_yeelight import compute_reward_from_states, compute_next_state_from_props, get_states, get_optimal_policy, get_optimal_path\n'), ((17534, 17579), 'state_machine.state_machine_yeelight.get_optimal_path', 'get_optimal_path', (['FrameworkConfiguration.path'], {}), '(FrameworkConfiguration.path)\n', (17550, 17579), False, 'from state_machine.state_machine_yeelight import compute_reward_from_states, compute_next_state_from_props, get_states, get_optimal_policy, get_optimal_path\n'), ((19572, 19583), 'time.time', 'time.time', ([], {}), '()\n', (19581, 19583), False, 'import time\n'), ((28678, 28686), 
'time.sleep', 'sleep', (['(5)'], {}), '(5)\n', (28683, 28686), False, 'from time import sleep\n'), ((29338, 29381), 'logging.error', 'logging.error', (['"""No discovery report found."""'], {}), "('No discovery report found.')\n", (29351, 29381), False, 'import logging\n'), ((29390, 29454), 'logging.error', 'logging.error', (['"""Please run this framework from the main script."""'], {}), "('Please run this framework from the main script.')\n", (29403, 29454), False, 'import logging\n'), ((2877, 2900), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (2894, 2900), True, 'import numpy as np\n'), ((2992, 3038), 'random.randint', 'random.randint', (['(0)', '(self.num_actions_to_use - 1)'], {}), '(0, self.num_actions_to_use - 1)\n', (3006, 3038), False, 'import random\n'), ((8719, 8796), 'csv.writer', 'csv.writer', (['output_file'], {'delimiter': '""","""', 'quotechar': '"""\\""""', 'quoting': 'csv.QUOTE_NONE'}), '(output_file, delimiter=\',\', quotechar=\'"\', quoting=csv.QUOTE_NONE)\n', (8729, 8796), False, 'import csv\n'), ((9185, 9262), 'csv.writer', 'csv.writer', (['output_file'], {'delimiter': '""","""', 'quotechar': '"""\\""""', 'quoting': 'csv.QUOTE_NONE'}), '(output_file, delimiter=\',\', quotechar=\'"\', quoting=csv.QUOTE_NONE)\n', (9195, 9262), False, 'import csv\n'), ((10853, 10940), 'numpy.genfromtxt', 'np.genfromtxt', (["(output_Q_params_dir + '/' + file_Q)"], {'delimiter': '""","""', 'dtype': 'np.float32'}), "(output_Q_params_dir + '/' + file_Q, delimiter=',', dtype=np.\n float32)\n", (10866, 10940), True, 'import numpy as np\n'), ((10991, 11011), 'copy.deepcopy', 'copy.deepcopy', (['Q_tmp'], {}), '(Q_tmp)\n', (11004, 11011), False, 'import copy\n'), ((11364, 11442), 'logging.warning', 'logging.warning', (['"""Wrong file format: wrong Q dimensions or nan values present"""'], {}), "('Wrong file format: wrong Q dimensions or nan values present')\n", (11379, 11442), False, 'import logging\n'), ((11455, 11521), 'logging.warning', 
'logging.warning', (['"""Using an empty Q matrix instead of the old one."""'], {}), "('Using an empty Q matrix instead of the old one.')\n", (11470, 11521), False, 'import logging\n'), ((11958, 12045), 'numpy.genfromtxt', 'np.genfromtxt', (["(output_Q_params_dir + '/' + file_E)"], {'delimiter': '""","""', 'dtype': 'np.float32'}), "(output_Q_params_dir + '/' + file_E, delimiter=',', dtype=np.\n float32)\n", (11971, 12045), True, 'import numpy as np\n'), ((12096, 12116), 'copy.deepcopy', 'copy.deepcopy', (['E_tmp'], {}), '(E_tmp)\n', (12109, 12116), False, 'import copy\n'), ((12469, 12547), 'logging.warning', 'logging.warning', (['"""Wrong file format: wrong E dimensions or nan values present"""'], {}), "('Wrong file format: wrong E dimensions or nan values present')\n", (12484, 12547), False, 'import logging\n'), ((12560, 12626), 'logging.warning', 'logging.warning', (['"""Using an empty E matrix instead of the old one."""'], {}), "('Using an empty E matrix instead of the old one.')\n", (12575, 12626), False, 'import logging\n'), ((12919, 13004), 'csv.writer', 'csv.writer', (['output_file'], {'delimiter': '""","""', 'quotechar': '"""\\""""', 'quoting': 'csv.QUOTE_MINIMAL'}), '(output_file, delimiter=\',\', quotechar=\'"\', quoting=csv.QUOTE_MINIMAL\n )\n', (12929, 13004), False, 'import csv\n'), ((13935, 13962), 'time.sleep', 'sleep', (['self.seconds_to_wait'], {}), '(self.seconds_to_wait)\n', (13940, 13962), False, 'from time import sleep\n'), ((14135, 14162), 'time.sleep', 'sleep', (['self.seconds_to_wait'], {}), '(self.seconds_to_wait)\n', (14140, 14162), False, 'from time import sleep\n'), ((14768, 14815), 'logging.debug', 'logging.debug', (['"""\t\tREQUEST: Setting power off"""'], {}), "('\\t\\tREQUEST: Setting power off')\n", (14781, 14815), False, 'import logging\n'), ((16051, 16136), 'csv.writer', 'csv.writer', (['output_file'], {'delimiter': '""","""', 'quotechar': '"""\\""""', 'quoting': 'csv.QUOTE_MINIMAL'}), '(output_file, delimiter=\',\', 
quotechar=\'"\', quoting=csv.QUOTE_MINIMAL\n )\n', (16061, 16136), False, 'import csv\n'), ((16473, 16582), 'request_builder.builder.build_command', 'build_command', ([], {'method_chosen_index': 'i', 'select_all_props': '(False)', 'protocol': "self.discovery_report['protocol']"}), "(method_chosen_index=i, select_all_props=False, protocol=self.\n discovery_report['protocol'])\n", (16486, 16582), False, 'from request_builder.builder import build_command\n'), ((16738, 16827), 'csv.writer', 'csv.writer', (['output_matrix_file'], {'delimiter': '""","""', 'quotechar': '"""\\""""', 'quoting': 'csv.QUOTE_NONE'}), '(output_matrix_file, delimiter=\',\', quotechar=\'"\', quoting=csv.\n QUOTE_NONE)\n', (16748, 16827), False, 'import csv\n'), ((20022, 20090), 'logging.info', 'logging.info', (['"""----------------------------------------------------"""'], {}), "('----------------------------------------------------')\n", (20034, 20090), False, 'import logging\n'), ((20155, 20163), 'time.sleep', 'sleep', (['(3)'], {}), '(3)\n', (20160, 20163), False, 'from time import sleep\n'), ((20248, 20275), 'time.sleep', 'sleep', (['self.seconds_to_wait'], {}), '(self.seconds_to_wait)\n', (20253, 20275), False, 'from time import sleep\n'), ((20315, 20408), 'state_machine.state_machine_yeelight.compute_next_state_from_props', 'compute_next_state_from_props', (['FrameworkConfiguration.path', '(0)', '[]', 'self.discovery_report'], {}), '(FrameworkConfiguration.path, 0, [], self.\n discovery_report)\n', (20344, 20408), False, 'from state_machine.state_machine_yeelight import compute_reward_from_states, compute_next_state_from_props, get_states, get_optimal_policy, get_optimal_path\n'), ((27981, 28007), 'logging.debug', 'logging.debug', (['"""Q MATRIX:"""'], {}), "('Q MATRIX:')\n", (27994, 28007), False, 'import logging\n'), ((28020, 28036), 'logging.debug', 'logging.debug', (['Q'], {}), '(Q)\n', (28033, 28036), False, 'import logging\n'), ((29659, 29726), 'logging.info', 'logging.info', 
(["('Discovery report found at ' + discovery_report['ip'])"], {}), "('Discovery report found at ' + discovery_report['ip'])\n", (29671, 29726), False, 'import logging\n'), ((29735, 29761), 'logging.info', 'logging.info', (['"""Waiting..."""'], {}), "('Waiting...')\n", (29747, 29761), False, 'import logging\n'), ((29770, 29778), 'time.sleep', 'sleep', (['(5)'], {}), '(5)\n', (29775, 29778), False, 'from time import sleep\n'), ((6275, 6302), 'pathlib.Path', 'pathlib.Path', (["(log_dir + '/')"], {}), "(log_dir + '/')\n", (6287, 6302), False, 'import pathlib\n'), ((6948, 6987), 'pathlib.Path', 'pathlib.Path', (["(output_Q_params_dir + '/')"], {}), "(output_Q_params_dir + '/')\n", (6960, 6987), False, 'import pathlib\n'), ((7997, 8027), 'pathlib.Path', 'pathlib.Path', (["(output_dir + '/')"], {}), "(output_dir + '/')\n", (8009, 8027), False, 'import pathlib\n'), ((11116, 11182), 'logging.warning', 'logging.warning', (['"""Using an empty Q matrix instead of the old one."""'], {}), "('Using an empty Q matrix instead of the old one.')\n", (11131, 11182), False, 'import logging\n'), ((11340, 11349), 'numpy.sum', 'np.sum', (['Q'], {}), '(Q)\n', (11346, 11349), True, 'import numpy as np\n'), ((12221, 12287), 'logging.warning', 'logging.warning', (['"""Using an empty E matrix instead of the old one."""'], {}), "('Using an empty E matrix instead of the old one.')\n", (12236, 12287), False, 'import logging\n'), ((12445, 12454), 'numpy.sum', 'np.sum', (['E'], {}), '(E)\n', (12451, 12454), True, 'import numpy as np\n'), ((13238, 13331), 'csv.writer', 'csv.writer', (['partial_output_file'], {'delimiter': '""","""', 'quotechar': '"""\\""""', 'quoting': 'csv.QUOTE_MINIMAL'}), '(partial_output_file, delimiter=\',\', quotechar=\'"\', quoting=csv.\n QUOTE_MINIMAL)\n', (13248, 13331), False, 'import csv\n'), ((14627, 14654), 'time.sleep', 'sleep', (['self.seconds_to_wait'], {}), '(self.seconds_to_wait)\n', (14632, 14654), False, 'from time import sleep\n'), ((21521, 21636), 
'request_builder.builder.build_command', 'build_command', ([], {'method_chosen_index': 'action1', 'select_all_props': '(False)', 'protocol': "self.discovery_report['protocol']"}), "(method_chosen_index=action1, select_all_props=False, protocol\n =self.discovery_report['protocol'])\n", (21534, 21636), False, 'from request_builder.builder import build_command\n'), ((21790, 21886), 'device_communication.client.operate_on_bulb_json', 'operate_on_bulb_json', (['json_string', 'self.discovery_report', "self.discovery_report['protocol']"], {}), "(json_string, self.discovery_report, self.\n discovery_report['protocol'])\n", (21810, 21886), False, 'from device_communication.client import operate_on_bulb, operate_on_bulb_json\n'), ((21933, 21960), 'time.sleep', 'sleep', (['self.seconds_to_wait'], {}), '(self.seconds_to_wait)\n', (21938, 21960), False, 'from time import sleep\n'), ((22005, 22116), 'state_machine.state_machine_yeelight.compute_next_state_from_props', 'compute_next_state_from_props', (['FrameworkConfiguration.path', 'state1', 'old_props_values', 'self.discovery_report'], {}), '(FrameworkConfiguration.path, state1,\n old_props_values, self.discovery_report)\n', (22034, 22116), False, 'from state_machine.state_machine_yeelight import compute_reward_from_states, compute_next_state_from_props, get_states, get_optimal_policy, get_optimal_path\n'), ((22394, 22490), 'state_machine.state_machine_yeelight.compute_reward_from_states', 'compute_reward_from_states', (['FrameworkConfiguration.path', 'state1', 'state2', 'self.storage_reward'], {}), '(FrameworkConfiguration.path, state1, state2,\n self.storage_reward)\n', (22420, 22490), False, 'from state_machine.state_machine_yeelight import compute_reward_from_states, compute_next_state_from_props, get_states, get_optimal_policy, get_optimal_path\n'), ((25490, 25509), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (25507, 25509), False, 'import logging\n'), ((25762, 25772), 'time.sleep', 'sleep', (['(0.1)'], 
{}), '(0.1)\n', (25767, 25772), False, 'from time import sleep\n'), ((28354, 28379), 'logging.debug', 'logging.debug', (['"""E matrix"""'], {}), "('E matrix')\n", (28367, 28379), False, 'import logging\n'), ((28396, 28412), 'logging.debug', 'logging.debug', (['E'], {}), '(E)\n', (28409, 28412), False, 'import logging\n'), ((29558, 29601), 'logging.debug', 'logging.debug', (['"""Received discovery report:"""'], {}), "('Received discovery report:')\n", (29571, 29601), False, 'import logging\n'), ((30459, 30510), 'logging.info', 'logging.info', (['"""####### Finish RL algorithm #######"""'], {}), "('####### Finish RL algorithm #######')\n", (30471, 30510), False, 'import logging\n'), ((30523, 30532), 'time.sleep', 'sleep', (['(50)'], {}), '(50)\n', (30528, 30532), False, 'from time import sleep\n'), ((14382, 14428), 'logging.debug', 'logging.debug', (['"""\t\tREQUEST: Setting power on"""'], {}), "('\\t\\tREQUEST: Setting power on')\n", (14395, 14428), False, 'import logging\n'), ((16604, 16627), 'json.loads', 'json.loads', (['json_string'], {}), '(json_string)\n', (16614, 16627), False, 'import json\n'), ((21225, 21234), 'time.sleep', 'sleep', (['(60)'], {}), '(60)\n', (21230, 21234), False, 'from time import sleep\n'), ((22255, 22334), 'logging.debug', 'logging.debug', (["('\\tFROM STATE ' + states[state1] + ' TO STATE ' + states[state2])"], {}), "('\\tFROM STATE ' + states[state1] + ' TO STATE ' + states[state2])\n", (22268, 22334), False, 'import logging\n'), ((22767, 22786), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (22784, 22786), False, 'import logging\n'), ((23009, 23019), 'time.sleep', 'sleep', (['(0.1)'], {}), '(0.1)\n', (23014, 23019), False, 'from time import sleep\n'), ((26085, 26144), 'logging.info', 'logging.info', (['"""- - - - - - - - - - - - - - - - - - - - - -"""'], {}), "('- - - - - - - - - - - - - - - - - - - - - -')\n", (26097, 26144), False, 'import logging\n'), ((26325, 26334), 'time.sleep', 'sleep', (['(60)'], {}), '(60)\n', 
(26330, 26334), False, 'from time import sleep\n'), ((27183, 27276), 'csv.writer', 'csv.writer', (['partial_output_file'], {'delimiter': '""","""', 'quotechar': '"""\\""""', 'quoting': 'csv.QUOTE_MINIMAL'}), '(partial_output_file, delimiter=\',\', quotechar=\'"\', quoting=csv.\n QUOTE_MINIMAL)\n', (27193, 27276), False, 'import csv\n'), ((28642, 28653), 'time.time', 'time.time', ([], {}), '()\n', (28651, 28653), False, 'import time\n'), ((30418, 30439), 'threading.get_ident', 'threading.get_ident', ([], {}), '()\n', (30437, 30439), False, 'import threading\n')] |
import torch
import torch.nn.functional as F
import argparse
import cv2
import numpy as np
from glob import glob
import copy
num_classes = 2  # number of target classes (matches len(CLS) defined below)
img_height, img_width = 96, 96  # every image is resized to this resolution
channel = 3  # RGB input
GPU = False  # set True to run on CUDA
torch.manual_seed(0)  # fixed seed so weight initialization is reproducible
class MobileNet_v1(torch.nn.Module):
    """MobileNet v1 image classifier.

    Stacks depthwise-separable convolution blocks, then adaptive average
    pooling, a linear layer and a softmax, so the output is a
    (batch, num_classes) probability distribution.  Relies on the
    module-level globals ``channel`` (input channels) and
    ``num_classes`` (output classes).
    """

    def __init__(self):
        class MobileNetBlock(torch.nn.Module):
            """Depthwise-separable block: 3x3 depthwise conv + 1x1 pointwise
            conv, each followed by BatchNorm and ReLU, repeated ``repeat``
            times."""

            def __init__(self, in_dim, out_dim, repeat=1, stride=1):
                super(MobileNetBlock, self).__init__()
                layers = []
                dim = in_dim
                for _ in range(repeat):
                    layers += [
                        # depthwise 3x3: groups=dim gives one filter per channel
                        torch.nn.Conv2d(dim, dim, kernel_size=3, padding=1, stride=stride, groups=dim),
                        torch.nn.BatchNorm2d(dim),
                        torch.nn.ReLU(),
                        # pointwise 1x1: mixes channels, changes channel count
                        torch.nn.Conv2d(dim, out_dim, kernel_size=1, padding=0, stride=1),
                        torch.nn.BatchNorm2d(out_dim),
                        torch.nn.ReLU(),
                    ]
                    # FIX: repeated iterations must consume out_dim channels,
                    # not in_dim (latent bug; all repeat>1 uses in this model
                    # happened to have in_dim == out_dim, so behavior for the
                    # current architecture is unchanged)
                    dim = out_dim
                self.module = torch.nn.Sequential(*layers)

            def forward(self, x):
                return self.module(x)

        class Flatten(torch.nn.Module):
            """Collapse every dimension after the batch axis."""

            def forward(self, x):
                return x.view(x.size()[0], -1)

        super(MobileNet_v1, self).__init__()
        self.module = torch.nn.Sequential(
            #-----
            # 1/1 x 1/1 x 3
            #-----
            torch.nn.Conv2d(channel, 32, kernel_size=3, padding=1, stride=2),
            torch.nn.BatchNorm2d(32),
            torch.nn.ReLU(),
            #-----
            # 1/2 x 1/2 x 32
            #-----
            MobileNetBlock(32, 64),
            #-----
            # 1/4 x 1/4 x 64
            #-----
            MobileNetBlock(64, 128, stride=2),
            MobileNetBlock(128, 128),
            #-----
            # 1/8 x 1/8 x 128
            #-----
            MobileNetBlock(128, 256, stride=2),
            MobileNetBlock(256, 256),
            #-----
            # 1/16 x 1/16 x 256
            #-----
            MobileNetBlock(256, 512, stride=2),
            MobileNetBlock(512, 512, repeat=5),
            #-----
            # 1/32 x 1/32 x 1024
            #-----
            MobileNetBlock(512, 1024, stride=2),
            MobileNetBlock(1024, 1024),
            # adaptive pooling makes the head independent of input size
            torch.nn.AdaptiveAvgPool2d([1, 1]),
            Flatten(),
            # FIX: was torch.nn.Linear(1024, class_N) — NameError, the
            # module-level constant is called num_classes
            torch.nn.Linear(1024, num_classes),
            torch.nn.Softmax(dim=1)
        )

    def forward(self, x):
        return self.module(x)
CLS = ['akahara', 'madara']  # class names; list index is the integer label
# get train data
def data_load(path, hf=False, vf=False, rot=False):
    """Load and augment the image dataset rooted at *path*.

    Every file two levels below *path* is read, resized to
    (img_width, img_height), scaled to [0, 1] and converted BGR -> RGB.
    The label is the index of the CLS entry found in the file path.

    Args:
        path: dataset root; images are expected at ``path/*/*``.
        hf: also add horizontally flipped copies.
        vf: also add vertically flipped copies (and hf+vf combined).
        rot: False, or a rotation step in degrees; rotated copies are
            added for every multiple of the step below 360.

    Returns:
        (xs, ts, paths): float32 images in NCHW layout, int64 labels,
        and the originating file path per sample.
    """
    xs = []
    ts = []
    paths = []
    for dir_path in glob(path + '/*'):
        for img_path in glob(dir_path + '/*'):
            x = cv2.imread(img_path)
            x = cv2.resize(x, (img_width, img_height)).astype(np.float32)
            x /= 255.
            x = x[..., ::-1]  # BGR -> RGB
            xs.append(x)
            # Label = index of the class name contained in the file path
            for i, cls in enumerate(CLS):
                if cls in img_path:
                    t = i
            ts.append(t)
            paths.append(img_path)
            if hf:
                xs.append(x[:, ::-1])
                ts.append(t)
                paths.append(img_path)
            if vf:
                xs.append(x[::-1])
                ts.append(t)
                paths.append(img_path)
            if hf and vf:
                xs.append(x[::-1, ::-1])
                ts.append(t)
                paths.append(img_path)
            if rot != False:
                angle = rot
                scale = 1
                while angle < 360:
                    _h, _w, _c = x.shape
                    max_side = max(_h, _w)
                    # Pad to a square so rotation does not clip corners
                    tmp = np.zeros((max_side, max_side, _c))
                    tx = int((max_side - _w) / 2)
                    ty = int((max_side - _h) / 2)
                    tmp[ty: ty + _h, tx: tx + _w] = x.copy()
                    M = cv2.getRotationMatrix2D((max_side / 2, max_side / 2), angle, scale)
                    _x = cv2.warpAffine(tmp, M, (max_side, max_side))
                    # FIX: crop rows by height and columns by width — the
                    # original sliced ``_x[tx:tx+_w, ty:ty+_h]`` (axes swapped),
                    # which only worked because images are square here
                    _x = _x[ty: ty + _h, tx: tx + _w]
                    xs.append(_x)
                    ts.append(t)
                    paths.append(img_path)
                    angle += rot
    xs = np.array(xs, dtype=np.float32)
    # FIX: np.int was removed in NumPy >= 1.24; np.int64 keeps the old
    # default-platform behavior for labels
    ts = np.array(ts, dtype=np.int64)
    xs = xs.transpose(0, 3, 1, 2)  # NHWC -> NCHW for PyTorch
    return xs, ts, paths
# train
def train():
    """Train MobileNet_v1 on the augmented training set and save weights.

    Runs 500 SGD mini-batch iterations (batch size 32) over the images in
    ``../Dataset/train/images/`` and writes the state dict to ``cnn.pt``.
    """
    # GPU
    device = torch.device("cuda" if GPU else "cpu")
    # model
    model = MobileNet_v1().to(device)
    opt = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
    model.train()
    xs, ts, paths = data_load('../Dataset/train/images/', hf=True, vf=True, rot=10)
    # training
    mb = 32
    mbi = 0
    train_ind = np.arange(len(xs))
    np.random.seed(0)
    np.random.shuffle(train_ind)
    # FIX: torch.nn.CNLLLoss does not exist; the model outputs softmax
    # probabilities and torch.log is applied below, so NLLLoss is correct
    loss_fn = torch.nn.NLLLoss()
    for i in range(500):
        if mbi + mb > len(xs):
            # Epoch boundary: take the leftover tail, reshuffle, and top up
            # the batch from the start of the new permutation
            mb_ind = copy.copy(train_ind)[mbi:]
            np.random.shuffle(train_ind)
            mb_ind = np.hstack((mb_ind, train_ind[:(mb - (len(xs) - mbi))]))
            # FIX: reset the cursor into the reshuffled order; the original
            # left mbi frozen past the end, so every later batch re-wrapped
            mbi = mb - (len(xs) - mbi)
        else:
            mb_ind = train_ind[mbi: mbi + mb]
            mbi += mb
        x = torch.tensor(xs[mb_ind], dtype=torch.float).to(device)
        t = torch.tensor(ts[mb_ind], dtype=torch.long).to(device)
        opt.zero_grad()
        y = model(x)
        # log(softmax) + NLLLoss == cross-entropy on the probabilities
        loss = loss_fn(torch.log(y), t)
        loss.backward()
        opt.step()
        pred = y.argmax(dim=1, keepdim=True)
        acc = pred.eq(t.view_as(pred)).sum().item() / mb
        if (i + 1) % 50 == 0:
            print("iter >>", i + 1, ', loss >>', loss.item(), ', accuracy >>', acc)
    torch.save(model.state_dict(), 'cnn.pt')
# test
def test():
    """Evaluate the saved model on the test set.

    Loads ``cnn.pt``, runs every image in ``../Dataset/test/images/``
    through the network one at a time, and prints the per-class
    probabilities for each file.
    """
    device = torch.device("cuda" if GPU else "cpu")
    model = MobileNet_v1().to(device)
    model.eval()
    model.load_state_dict(torch.load('cnn.pt'))
    xs, ts, paths = data_load('../Dataset/test/images/')
    with torch.no_grad():  # inference only — no autograd bookkeeping needed
        for i in range(len(paths)):
            x = xs[i]
            t = ts[i]
            path = paths[i]
            x = np.expand_dims(x, axis=0)
            x = torch.tensor(x, dtype=torch.float).to(device)
            # FIX: the network already ends in nn.Softmax, so its output is a
            # probability distribution; the original applied F.softmax a
            # second time, flattening the printed probabilities
            pred = model(x).detach().cpu().numpy()[0]
            print("in {}, predicted probabilities >> {}".format(path, pred))
def arg_parse():
    """Parse the command-line flags selecting train and/or test mode.

    Returns the argparse namespace with boolean ``train`` and ``test``
    attributes (both default to False).
    """
    parser = argparse.ArgumentParser(description='CNN implemented with Keras')
    for mode in ('train', 'test'):
        parser.add_argument('--' + mode, dest=mode, action='store_true')
    return parser.parse_args()
# main
if __name__ == '__main__':
    # --train and --test are independent flags; passing both runs
    # training first, then evaluation on the freshly saved weights.
    args = arg_parse()
    if args.train:
        train()
    if args.test:
        test()
    if not (args.train or args.test):
        # No mode selected: print usage hints instead of doing nothing silently
        print("please select train or test flag")
        print("train: python main.py --train")
        print("test: python main.py --test")
        print("both: python main.py --train --test")
| [
"torch.nn.ReLU",
"numpy.sqrt",
"torch.nn.Sequential",
"torch.nn.CNLLLoss",
"numpy.array",
"copy.copy",
"torch.nn.functional.softmax",
"torch.nn.BatchNorm2d",
"argparse.ArgumentParser",
"numpy.random.seed",
"torch.nn.AdaptiveAvgPool2d",
"glob.glob",
"numpy.ceil",
"cv2.warpAffine",
"cv2.ge... | [((197, 217), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (214, 217), False, 'import torch\n'), ((2898, 2915), 'glob.glob', 'glob', (["(path + '/*')"], {}), "(path + '/*')\n", (2902, 2915), False, 'from glob import glob\n'), ((5086, 5116), 'numpy.array', 'np.array', (['xs'], {'dtype': 'np.float32'}), '(xs, dtype=np.float32)\n', (5094, 5116), True, 'import numpy as np\n'), ((5126, 5152), 'numpy.array', 'np.array', (['ts'], {'dtype': 'np.int'}), '(ts, dtype=np.int)\n', (5134, 5152), True, 'import numpy as np\n'), ((5262, 5300), 'torch.device', 'torch.device', (["('cuda' if GPU else 'cpu')"], {}), "('cuda' if GPU else 'cpu')\n", (5274, 5300), False, 'import torch\n'), ((5603, 5620), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (5617, 5620), True, 'import numpy as np\n'), ((5625, 5653), 'numpy.random.shuffle', 'np.random.shuffle', (['train_ind'], {}), '(train_ind)\n', (5642, 5653), True, 'import numpy as np\n'), ((5669, 5688), 'torch.nn.CNLLLoss', 'torch.nn.CNLLLoss', ([], {}), '()\n', (5686, 5688), False, 'import torch\n'), ((6600, 6638), 'torch.device', 'torch.device', (["('cuda' if GPU else 'cpu')"], {}), "('cuda' if GPU else 'cpu')\n", (6612, 6638), False, 'import torch\n'), ((7209, 7274), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""CNN implemented with Keras"""'}), "(description='CNN implemented with Keras')\n", (7232, 7274), False, 'import argparse\n'), ((2937, 2958), 'glob.glob', 'glob', (["(dir_path + '/*')"], {}), "(dir_path + '/*')\n", (2941, 2958), False, 'from glob import glob\n'), ((6720, 6740), 'torch.load', 'torch.load', (['"""cnn.pt"""'], {}), "('cnn.pt')\n", (6730, 6740), False, 'import torch\n'), ((6914, 6939), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (6928, 6939), True, 'import numpy as np\n'), ((1511, 1575), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['channel', '(32)'], {'kernel_size': '(3)', 'padding': '(1)', 'stride': 
'(2)'}), '(channel, 32, kernel_size=3, padding=1, stride=2)\n', (1526, 1575), False, 'import torch\n'), ((1589, 1613), 'torch.nn.BatchNorm2d', 'torch.nn.BatchNorm2d', (['(32)'], {}), '(32)\n', (1609, 1613), False, 'import torch\n'), ((1627, 1642), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (1640, 1642), False, 'import torch\n'), ((2499, 2533), 'torch.nn.AdaptiveAvgPool2d', 'torch.nn.AdaptiveAvgPool2d', (['[1, 1]'], {}), '([1, 1])\n', (2525, 2533), False, 'import torch\n'), ((2569, 2599), 'torch.nn.Linear', 'torch.nn.Linear', (['(1024)', 'class_N'], {}), '(1024, class_N)\n', (2584, 2599), False, 'import torch\n'), ((2613, 2636), 'torch.nn.Softmax', 'torch.nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (2629, 2636), False, 'import torch\n'), ((2976, 2992), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (2986, 2992), False, 'import cv2\n'), ((5810, 5838), 'numpy.random.shuffle', 'np.random.shuffle', (['train_ind'], {}), '(train_ind)\n', (5827, 5838), True, 'import numpy as np\n'), ((6232, 6244), 'torch.log', 'torch.log', (['y'], {}), '(y)\n', (6241, 6244), False, 'import torch\n'), ((1026, 1055), 'torch.nn.Sequential', 'torch.nn.Sequential', (['*_module'], {}), '(*_module)\n', (1045, 1055), False, 'import torch\n'), ((3902, 3924), 'numpy.ceil', 'np.ceil', (['(a_num / w_num)'], {}), '(a_num / w_num)\n', (3909, 3924), True, 'import numpy as np\n'), ((5771, 5791), 'copy.copy', 'copy.copy', (['train_ind'], {}), '(train_ind)\n', (5780, 5791), False, 'import copy\n'), ((6005, 6048), 'torch.tensor', 'torch.tensor', (['xs[mb_ind]'], {'dtype': 'torch.float'}), '(xs[mb_ind], dtype=torch.float)\n', (6017, 6048), False, 'import torch\n'), ((6072, 6114), 'torch.tensor', 'torch.tensor', (['ts[mb_ind]'], {'dtype': 'torch.long'}), '(ts[mb_ind], dtype=torch.long)\n', (6084, 6114), False, 'import torch\n'), ((6952, 6986), 'torch.tensor', 'torch.tensor', (['x'], {'dtype': 'torch.float'}), '(x, dtype=torch.float)\n', (6964, 6986), False, 'import torch\n'), ((3009, 
3047), 'cv2.resize', 'cv2.resize', (['x', '(img_width, img_height)'], {}), '(x, (img_width, img_height))\n', (3019, 3047), False, 'import cv2\n'), ((3862, 3876), 'numpy.sqrt', 'np.sqrt', (['a_num'], {}), '(a_num)\n', (3869, 3876), True, 'import numpy as np\n'), ((4265, 4299), 'numpy.zeros', 'np.zeros', (['(max_side, max_side, _c)'], {}), '((max_side, max_side, _c))\n', (4273, 4299), True, 'import numpy as np\n'), ((4481, 4548), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['(max_side / 2, max_side / 2)', 'angle', 'scale'], {}), '((max_side / 2, max_side / 2), angle, scale)\n', (4504, 4548), False, 'import cv2\n'), ((4570, 4614), 'cv2.warpAffine', 'cv2.warpAffine', (['tmp', 'M', '(max_side, max_side)'], {}), '(tmp, M, (max_side, max_side))\n', (4584, 4614), False, 'import cv2\n'), ((579, 670), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['in_dim', 'in_dim'], {'kernel_size': '(3)', 'padding': '(1)', 'stride': 'stride', 'groups': 'in_dim'}), '(in_dim, in_dim, kernel_size=3, padding=1, stride=stride,\n groups=in_dim)\n', (594, 670), False, 'import torch\n'), ((692, 720), 'torch.nn.BatchNorm2d', 'torch.nn.BatchNorm2d', (['in_dim'], {}), '(in_dim)\n', (712, 720), False, 'import torch\n'), ((746, 761), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (759, 761), False, 'import torch\n'), ((787, 855), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['in_dim', 'out_dim'], {'kernel_size': '(1)', 'padding': '(0)', 'stride': '(1)'}), '(in_dim, out_dim, kernel_size=1, padding=0, stride=1)\n', (802, 855), False, 'import torch\n'), ((881, 910), 'torch.nn.BatchNorm2d', 'torch.nn.BatchNorm2d', (['out_dim'], {}), '(out_dim)\n', (901, 910), False, 'import torch\n'), ((936, 951), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (949, 951), False, 'import torch\n'), ((7046, 7068), 'torch.nn.functional.softmax', 'F.softmax', (['pred'], {'dim': '(1)'}), '(pred, dim=1)\n', (7055, 7068), True, 'import torch.nn.functional as F\n')] |
import argparse
import numpy as np
from pathlib import Path
import cv2
from model import get_model
from noise_model import get_noise_model
import sys
import tensorflow as tf
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
import tensorflow_datasets as tfds
def get_args():
    """Build the CLI parser and return the parsed arguments.

    Defaults are surfaced in --help via ArgumentDefaultsHelpFormatter.
    """
    parser = argparse.ArgumentParser(
        description="Test trained model",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("--image_dir", type=str, required=True,
                        help="test image dir")
    parser.add_argument("--model", type=str, default="srresnet",
                        help="model architecture ('srresnet' or 'unet')")
    # parser.add_argument("--weight_file", type=str, required=True, help="trained weight file")
    parser.add_argument("--test_noise_model", type=str, default="gaussian,25,25",
                        help="noise model for test images")
    parser.add_argument("--output_dir", type=str, default=None,
                        help="if set, save resulting images otherwise show result using imshow")
    return parser.parse_args()
def get_image(image):
    """Clamp an array to the valid 8-bit pixel range and cast to uint8."""
    clamped = np.clip(image, 0, 255)
    return clamped.astype(dtype=np.uint8)
def main():
    """Convert a trained noise2noise Keras model into a SavedModel, a frozen
    graph (.pb) and a series of TFLite variants (float32, weight-quantized,
    float16, integer and full-integer quantized), then exit.

    NOTE(review): the image-denoising demo loop after sys.exit(0) below is
    unreachable in this version of the script.
    """
    # Fixed conversion settings; only some command-line args are used.
    height = 512
    width = 512
    noise = 'gauss'
    mode = 'clean'
    args = get_args()
    image_dir = args.image_dir
    weight_file = 'weights_{}_{}.hdf5'.format(noise, mode) #args.weight_file
    # NOTE(review): when mode != 'clean', `model` is never assigned, so
    # model.load_weights below raises NameError — only mode == 'clean' works.
    if mode != 'clean':
        val_noise_model = get_noise_model(args.test_noise_model)
    else:
        model = get_model(height, width, args.model)
    model.load_weights(weight_file)
    model.summary()
    # saved_model
    tf.saved_model.save(model, 'saved_model_{}_{}_{}_{}x{}'.format(args.model, noise, mode, height, width))
    # pb: wrap the Keras model in a concrete function, freeze its variables
    # into constants and write the resulting graph to disk
    full_model = tf.function(lambda inputs: model(inputs))
    full_model = full_model.get_concrete_function(inputs=[tf.TensorSpec(model.inputs[0].shape, model.inputs[0].dtype)])
    frozen_func = convert_variables_to_constants_v2(full_model, lower_control_flow=False)
    frozen_func.graph.as_graph_def()
    tf.io.write_graph(graph_or_graph_def=frozen_func.graph,
                        logdir=".",
                        name="noise2noise_{}_{}_{}_{}x{}_float32.pb".format(args.model, noise, mode, height, width),
                        as_text=False)
    # No Quantization - Input/Output=float32
    converter = tf.lite.TFLiteConverter.from_keras_model(model)
    tflite_model = converter.convert()
    with open('noise2noise_{}_{}_{}_{}x{}_float32.tflite'.format(args.model, noise, mode, height, width), 'wb') as w:
        w.write(tflite_model)
    print("tflite convert complete! - noise2noise_{}_{}_{}_{}x{}_float32.tflite".format(args.model, noise, mode, height, width))
    # Weight Quantization - Input/Output=float32
    converter = tf.lite.TFLiteConverter.from_keras_model(model)
    converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]
    tflite_model = converter.convert()
    with open('noise2noise_{}_{}_{}_{}x{}_weight_quant.tflite'.format(args.model, noise, mode, height, width), 'wb') as w:
        w.write(tflite_model)
    print('Weight Quantization complete! - noise2noise_{}_{}_{}_{}x{}_weight_quant.tflite'.format(args.model, noise, mode, height, width))
    # Float16 Quantization - Input/Output=float32
    converter = tf.lite.TFLiteConverter.from_keras_model(model)
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    converter.target_spec.supported_types = [tf.float16]
    tflite_quant_model = converter.convert()
    with open('noise2noise_{}_{}_{}_{}x{}_float16_quant.tflite'.format(args.model, noise, mode, height, width), 'wb') as w:
        w.write(tflite_quant_model)
    print('Float16 Quantization complete! - noise2noise_{}_{}_{}_{}x{}_float16_quant.tflite'.format(args.model, noise, mode, height, width))
    # Calibration-data generator for the integer converters below:
    # yields 10 COCO test images resized to the model's input size.
    def representative_dataset_gen():
        for data in raw_test_data.take(10):
            image = data['image'].numpy()
            image = tf.image.resize(image, (height, width))
            image = image[np.newaxis,:,:,:]
            # image = image / 127.5 - 1.0
            yield [image]
    raw_test_data, info = tfds.load(name="coco/2017", with_info=True, split="test", data_dir="~/TFDS", download=False)
    # Integer Quantization - Input/Output=float32
    converter = tf.lite.TFLiteConverter.from_keras_model(model)
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    converter.representative_dataset = representative_dataset_gen
    tflite_quant_model = converter.convert()
    with open('noise2noise_{}_{}_{}_{}x{}_integer_quant.tflite'.format(args.model, noise, mode, height, width), 'wb') as w:
        w.write(tflite_quant_model)
    print('Integer Quantization complete! - noise2noise_{}_{}_{}_{}x{}_integer_quant.tflite'.format(args.model, noise, mode, height, width))
    # Full Integer Quantization - Input/Output=int8
    converter = tf.lite.TFLiteConverter.from_keras_model(model)
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
    converter.inference_input_type = tf.uint8
    converter.inference_output_type = tf.uint8
    converter.representative_dataset = representative_dataset_gen
    tflite_quant_model = converter.convert()
    with open('noise2noise_{}_{}_{}_{}x{}_full_integer_quant.tflite'.format(args.model, noise, mode, height, width), 'wb') as w:
        w.write(tflite_quant_model)
    print('Integer Quantization complete! - noise2noise_{}_{}_{}_{}x{}_full_integer_quant.tflite'.format(args.model, noise, mode, height, width))
    # # EdgeTPU
    # import subprocess
    # result = subprocess.check_output(["edgetpu_compiler", "-s", "noise2noise_{}_{}_{}_{}x{}_full_integer_quant.tflite".format(args.model, noise, mode, height, width)])
    # print(result)
    sys.exit(0)
    # ---- everything below is unreachable because of sys.exit(0) above ----
    if args.output_dir:
        output_dir = Path(args.output_dir)
        output_dir.mkdir(parents=True, exist_ok=True)
    image_paths = list(Path(image_dir).glob("*.*"))
    for image_path in image_paths:
        image = cv2.imread(str(image_path))
        h, w, _ = image.shape
        image = image[:(h // 16) * 16, :(w // 16) * 16]  # for stride (maximum 16)
        h, w, _ = image.shape
        # side-by-side canvas: [original | noisy | denoised]
        out_image = np.zeros((h, w * 3, 3), dtype=np.uint8)
        noise_image = val_noise_model(image)
        pred = model.predict(np.expand_dims(noise_image, 0))
        denoised_image = get_image(pred[0])
        out_image[:, :w] = image
        out_image[:, w:w * 2] = noise_image
        out_image[:, w * 2:] = denoised_image
        if args.output_dir:
            cv2.imwrite(str(output_dir.joinpath(image_path.name))[:-4] + ".png", out_image)
        else:
            cv2.imshow("result", out_image)
            key = cv2.waitKey(-1)
            # "q": quit
            if key == 113:
                return 0
# Script entry point: run the conversion pipeline when executed directly.
if __name__ == '__main__':
    main()
| [
"numpy.clip",
"sys.exit",
"argparse.ArgumentParser",
"pathlib.Path",
"tensorflow.image.resize",
"tensorflow_datasets.load",
"noise_model.get_noise_model",
"cv2.imshow",
"tensorflow.TensorSpec",
"numpy.zeros",
"tensorflow.lite.TFLiteConverter.from_keras_model",
"tensorflow.python.framework.conv... | [((335, 453), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Test trained model"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='Test trained model', formatter_class=\n argparse.ArgumentDefaultsHelpFormatter)\n", (358, 453), False, 'import argparse\n'), ((1122, 1144), 'numpy.clip', 'np.clip', (['image', '(0)', '(255)'], {}), '(image, 0, 255)\n', (1129, 1144), True, 'import numpy as np\n'), ((1950, 2021), 'tensorflow.python.framework.convert_to_constants.convert_variables_to_constants_v2', 'convert_variables_to_constants_v2', (['full_model'], {'lower_control_flow': '(False)'}), '(full_model, lower_control_flow=False)\n', (1983, 2021), False, 'from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2\n'), ((2373, 2420), 'tensorflow.lite.TFLiteConverter.from_keras_model', 'tf.lite.TFLiteConverter.from_keras_model', (['model'], {}), '(model)\n', (2413, 2420), True, 'import tensorflow as tf\n'), ((2804, 2851), 'tensorflow.lite.TFLiteConverter.from_keras_model', 'tf.lite.TFLiteConverter.from_keras_model', (['model'], {}), '(model)\n', (2844, 2851), True, 'import tensorflow as tf\n'), ((3317, 3364), 'tensorflow.lite.TFLiteConverter.from_keras_model', 'tf.lite.TFLiteConverter.from_keras_model', (['model'], {}), '(model)\n', (3357, 3364), True, 'import tensorflow as tf\n'), ((4150, 4246), 'tensorflow_datasets.load', 'tfds.load', ([], {'name': '"""coco/2017"""', 'with_info': '(True)', 'split': '"""test"""', 'data_dir': '"""~/TFDS"""', 'download': '(False)'}), "(name='coco/2017', with_info=True, split='test', data_dir='~/TFDS',\n download=False)\n", (4159, 4246), True, 'import tensorflow_datasets as tfds\n'), ((4311, 4358), 'tensorflow.lite.TFLiteConverter.from_keras_model', 'tf.lite.TFLiteConverter.from_keras_model', (['model'], {}), '(model)\n', (4351, 4358), True, 'import tensorflow as tf\n'), ((4898, 4945), 
'tensorflow.lite.TFLiteConverter.from_keras_model', 'tf.lite.TFLiteConverter.from_keras_model', (['model'], {}), '(model)\n', (4938, 4945), True, 'import tensorflow as tf\n'), ((5837, 5848), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (5845, 5848), False, 'import sys\n'), ((1458, 1496), 'noise_model.get_noise_model', 'get_noise_model', (['args.test_noise_model'], {}), '(args.test_noise_model)\n', (1473, 1496), False, 'from noise_model import get_noise_model\n'), ((1523, 1559), 'model.get_model', 'get_model', (['height', 'width', 'args.model'], {}), '(height, width, args.model)\n', (1532, 1559), False, 'from model import get_model\n'), ((5895, 5916), 'pathlib.Path', 'Path', (['args.output_dir'], {}), '(args.output_dir)\n', (5899, 5916), False, 'from pathlib import Path\n'), ((6268, 6307), 'numpy.zeros', 'np.zeros', (['(h, w * 3, 3)'], {'dtype': 'np.uint8'}), '((h, w * 3, 3), dtype=np.uint8)\n', (6276, 6307), True, 'import numpy as np\n'), ((3971, 4010), 'tensorflow.image.resize', 'tf.image.resize', (['image', '(height, width)'], {}), '(image, (height, width))\n', (3986, 4010), True, 'import tensorflow as tf\n'), ((6382, 6412), 'numpy.expand_dims', 'np.expand_dims', (['noise_image', '(0)'], {}), '(noise_image, 0)\n', (6396, 6412), True, 'import numpy as np\n'), ((6728, 6759), 'cv2.imshow', 'cv2.imshow', (['"""result"""', 'out_image'], {}), "('result', out_image)\n", (6738, 6759), False, 'import cv2\n'), ((6778, 6793), 'cv2.waitKey', 'cv2.waitKey', (['(-1)'], {}), '(-1)\n', (6789, 6793), False, 'import cv2\n'), ((1870, 1929), 'tensorflow.TensorSpec', 'tf.TensorSpec', (['model.inputs[0].shape', 'model.inputs[0].dtype'], {}), '(model.inputs[0].shape, model.inputs[0].dtype)\n', (1883, 1929), True, 'import tensorflow as tf\n'), ((5995, 6010), 'pathlib.Path', 'Path', (['image_dir'], {}), '(image_dir)\n', (5999, 6010), False, 'from pathlib import Path\n')] |
# Copyright 2021 ETH Zurich and the NPBench authors. All rights reserved.
import numpy as np
def initialize(C_in, C_out, H, K, N, W):
    """Create seeded random conv2d data: NHWC input, HWIO weights, bias.

    Returns
    -------
    tuple of np.ndarray
        (input, weights, bias) with shapes (N, H, W, C_in),
        (K, K, C_in, C_out) and (C_out,), all float32, drawn from a
        generator seeded with 42 for reproducibility.
    """
    from numpy.random import default_rng
    rng = default_rng(42)

    def draw(shape):
        # Draw order matters for reproducibility: input, then weights, then bias.
        return rng.random(shape, dtype=np.float32)

    return draw((N, H, W, C_in)), draw((K, K, C_in, C_out)), draw((C_out,))
| [
"numpy.random.default_rng"
] | [((188, 203), 'numpy.random.default_rng', 'default_rng', (['(42)'], {}), '(42)\n', (199, 203), False, 'from numpy.random import default_rng\n')] |
"""Learning how to access an API using Python."""
import numpy as np
import requests
import json
# convert lat/lon/zoom to x/y
def convert_to_xy(lat, lon, zoom):
lat_rad = np.radians(lat)
n = 2.0 ** zoom
x = int((lon + 180.0) / 360.0 * n)
y = int((1.0 - np.arcsinh(np.tan(lat_rad)) / np.pi) / 2.0 * n)
return x, y
# Endpoint serving accessibility place info per map tile.
url = "https://accessibility-cloud.freetls.fastly.net/place-infos.json"
# Load the API key from a local dotfile, stripping the trailing newline.
# NOTE(review): a `with open(...)` block would be the safer idiom here.
f = open("./.apptoken", 'r')
api_key = f.read()
api_key = api_key.replace("\n", "")
f.close()
# determine scraping area in x/y/zoom coordinates (https://developers.planet.com/tutorials/slippy-maps-101/)
# view tiles via "https://a.tile.openstreetmap.org/<ZOOM>/<X>/<Y>.png"
# NOTE: zoom needs to be adapted such that totalFeatureCount is smaller than 1000
# Bounding-box corner coordinates (Berlin area).
# Prefixes w/s/o/n are presumably German compass points (West/Sued/Ost/Nord) — TODO confirm.
w_lat = 52.5007919
w_lon = 13.2839193
s_lat = 52.4759806
s_lon = 13.3650726
o_lat = 52.5028779
o_lon = 13.4696738
n_lat = 52.5491748
n_lon = 13.3900758
zoom = 15
# Convert the bounding box into an inclusive-exclusive tile index range.
x_min, _ = convert_to_xy(w_lat, w_lon, zoom)
_, y_min = convert_to_xy(n_lat, n_lon, zoom)
x_max, _ = convert_to_xy(o_lat, o_lon, zoom)
_, y_max = convert_to_xy(s_lat, s_lon, zoom)
x_coords = np.arange(x_min, x_max)
y_coords = np.arange(y_min, y_max)
# Scrape every tile in the range and accumulate the JSON responses.
data = {}
obj_num = 0
print("Number of requests: %d" % (len(x_coords) * len(y_coords)))
for x in x_coords:
    for y in y_coords:
        # request parameters
        params = {
            "appToken": api_key,
            "x": x,
            "y": y,
            "z": zoom,
        }
        # make API request
        r = requests.get(url=url, params=params)
        # successful request
        if r.status_code == 200:
            r_data = r.json()
            # check if all objects were scraped
            # NOTE(review): raising RuntimeWarning as an exception aborts the
            # whole scrape; warnings.warn or a custom exception would be clearer.
            if r_data['featureCount'] != r_data['totalFeatureCount']:
                raise RuntimeWarning("WARNING: Not all objects included. Increase zoom level!")
            # add scraped data to dict, keyed by "zoom/x/y"
            dict_key = '%d/%d/%d' % (zoom, x, y)
            data[dict_key] = r_data
            # count number of scraped objects
            obj_num += r_data['featureCount']
            print("Request for tile %s successful." % dict_key)
        # unsuccessful request
        else:
            raise RuntimeWarning("WARNING: Request for tile %d/%d/%d failed!" % (zoom, x, y))
# report number of scraped objects
print("Objects found: %d" % obj_num)
# Persist the combined per-tile responses as one JSON file.
out_file = "api_data.json"
f = open(out_file, 'w')
json.dump(data, f)
f.close()
"numpy.radians",
"numpy.tan",
"numpy.arange",
"requests.get",
"json.dump"
] | [((1163, 1186), 'numpy.arange', 'np.arange', (['x_min', 'x_max'], {}), '(x_min, x_max)\n', (1172, 1186), True, 'import numpy as np\n'), ((1198, 1221), 'numpy.arange', 'np.arange', (['y_min', 'y_max'], {}), '(y_min, y_max)\n', (1207, 1221), True, 'import numpy as np\n'), ((2512, 2530), 'json.dump', 'json.dump', (['data', 'f'], {}), '(data, f)\n', (2521, 2530), False, 'import json\n'), ((179, 194), 'numpy.radians', 'np.radians', (['lat'], {}), '(lat)\n', (189, 194), True, 'import numpy as np\n'), ((1574, 1610), 'requests.get', 'requests.get', ([], {'url': 'url', 'params': 'params'}), '(url=url, params=params)\n', (1586, 1610), False, 'import requests\n'), ((284, 299), 'numpy.tan', 'np.tan', (['lat_rad'], {}), '(lat_rad)\n', (290, 299), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
# q3. a
# (1)
def cal_log(x, y, sigma):
    """Evaluate the (negated) 2-D Laplacian-of-Gaussian at offset (x, y)."""
    two_var = 2 * np.power(sigma, 2)
    r_sq = np.power(x, 2) + np.power(y, 2)
    scale = -1 / (np.pi * np.power(sigma, 4))
    return scale * (1 - r_sq / two_var) * np.exp(-r_sq / two_var)
def generate_log_kernel(sigma, tp):
    """Build a square 2-D LoG kernel trimmed where the response decays.

    The radius grows until |LoG(r, 0)| falls below tp * |LoG(0, 0)|;
    the kernel is then filled by mirroring one quadrant into all four.
    """
    peak = cal_log(0, 0, sigma)
    cutoff = np.abs(peak * tp)
    radius = 0
    while np.abs(cal_log(radius, 0, sigma)) > cutoff:
        radius += 1
    print(radius)
    side = 2 * radius - 1
    kernel = np.zeros((side, side))
    centre = radius - 1
    for i in range(radius):
        for j in range(radius):
            value = cal_log(i, j, sigma)
            # exploit the four-fold symmetry of the LoG about the centre
            kernel[centre - i, centre - j] = value
            kernel[centre + i, centre - j] = value
            kernel[centre - i, centre + j] = value
            kernel[centre + i, centre + j] = value
    return kernel
def svd(M):
    """Return True iff the kernel M is separable, i.e. has numerical rank 1.

    A 2-D filter kernel is the outer product of two 1-D kernels exactly
    when it has a single significant singular value.

    Bug fix: the original compared the tuple returned by np.nonzero()
    against the integer 1, which can never be equal, so the function
    unconditionally returned False. matrix_rank performs the same SVD
    with a sensible numerical tolerance.
    """
    rank = np.linalg.matrix_rank(M)
    return bool(rank == 1)
# q3. b
# (2)
def cal_gaussian(mean, sigma, x):
    """Evaluate the Gaussian pdf N(mean, sigma^2) at x."""
    variance = np.power(sigma, 2)
    normaliser = np.divide(1, np.sqrt(2 * np.pi * variance))
    return normaliser * np.exp(-np.divide(np.power(x - mean, 2), 2 * variance))
def generate_gaussian_kernel(sigma, k, increment):
    """Sample a 1-D Gaussian (centred on the sample mean) over [-k, k)."""
    samples = np.arange(-k, k, increment)
    centre = np.mean(samples)
    return [cal_gaussian(centre, sigma, s) for s in samples]
def cal_1d_log(sigma, x):
    """Evaluate the 1-D Laplacian-of-Gaussian at x."""
    s2 = np.power(sigma, 2)
    x2 = np.power(x, 2)
    poly = np.divide(x2, np.power(sigma, 4)) - np.divide(1, s2)
    gauss = np.exp(-np.divide(x2, 2 * s2))
    norm = np.divide(1, np.sqrt(2 * np.pi * s2))
    return poly * gauss * norm
def generate_1d_log_kernel(sigma, k, increment):
    """Sample the 1-D LoG over [-k, k); returns (values, sample points)."""
    xs = np.arange(-k, k, increment)
    values = [cal_1d_log(sigma, x) for x in xs]
    return values, xs
def cal_dog(g1, g2):
    """Difference of Gaussians: elementwise g1 - g2 as an ndarray."""
    return np.subtract(np.array(g1), np.array(g2))
# cite: http://kestrel.nmt.edu/~raymond/software/python_notes/paper004.html
def plot(dog, log, x_list, i):
    """Overlay the DOG (blue solid) and LOG (red dashed) curves on one axes."""
    plt.plot(x_list, dog, 'b-', label='DOG')
    plt.plot(x_list, log, 'r--', label='LOG')
    plt.legend(loc='upper left')
    plt.xlabel('X')
    plt.ylabel('Value')
    plt.title("Scale(increasing) {}".format(i))
def plot_multiples(scales, initial_sigma, k, increment):
    """Plot DOG vs LOG curves for each scale factor in a grid of subplots."""
    plt.figure(figsize=(20, 10))
    for idx, scale in enumerate(scales):
        print("Iteration: {}".format(idx + 1))
        # Narrow reference Gaussian at the base sigma...
        base_gaussian = generate_gaussian_kernel(initial_sigma, k, increment)
        print("Gaussian kernel 2's length is: {}".format(len(base_gaussian)))
        # ...and a wider one at the scaled sigma.
        scaled_gaussian = generate_gaussian_kernel(scale * initial_sigma, k, increment)
        print("Gaussian kernel 1's length is: {}".format(len(scaled_gaussian)))
        log_kernel, xs = generate_1d_log_kernel(scale * initial_sigma, k, increment)
        print("1d log kernel's length is: {}".format(len(log_kernel)))
        diff = cal_dog(scaled_gaussian, base_gaussian)
        # Lay the subplots out in a roughly square grid.
        rows = np.ceil(np.sqrt(len(scales)))
        cols = np.floor(np.sqrt(len(scales)))
        plt.subplot(rows, cols, idx + 1)
        plot(diff, log_kernel, xs, idx + 1)
if __name__ == '__main__':
    # Time the whole demo run.
    start = datetime.now()
    # Build and inspect a 2-D LoG kernel (sigma=1.4, 2% cutoff threshold).
    kernel = generate_log_kernel(1.4, 0.02)
    print(kernel)
    print("Can be svd? {}".format(svd(kernel)))
    # Compare DOG vs LOG across scale factors 1.0 .. 2.8.
    scales = np.arange(1, 3, 0.2)
    plot_multiples(scales, 1, 15, 0.1)
    plt.title("Initial sigma = 1")
    # NOTE(review): initial sigma passed here is 2, but the title says 5 — confirm intent.
    plot_multiples(scales, 2, 15, 0.1)
    plt.title("Initial sigma = 5")
    plt.tight_layout()
    plt.show()
    print(datetime.now() - start)
| [
"numpy.abs",
"numpy.mean",
"matplotlib.pyplot.ylabel",
"numpy.power",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"datetime.datetime.now",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.array",
"matplotlib.pyplot.tight_layout",
"numpy.nonzero",
"num... | [((404, 420), 'numpy.abs', 'np.abs', (['(max * tp)'], {}), '(max * tp)\n', (410, 420), True, 'import numpy as np\n'), ((568, 593), 'numpy.zeros', 'np.zeros', (['(height, width)'], {}), '((height, width))\n', (576, 593), True, 'import numpy as np\n'), ((919, 935), 'numpy.linalg.svd', 'np.linalg.svd', (['M'], {}), '(M)\n', (932, 935), True, 'import numpy as np\n'), ((1299, 1326), 'numpy.arange', 'np.arange', (['(-k)', 'k', 'increment'], {}), '(-k, k, increment)\n', (1308, 1326), True, 'import numpy as np\n'), ((1338, 1353), 'numpy.mean', 'np.mean', (['x_list'], {}), '(x_list)\n', (1345, 1353), True, 'import numpy as np\n'), ((1816, 1843), 'numpy.arange', 'np.arange', (['(-k)', 'k', 'increment'], {}), '(-k, k, increment)\n', (1825, 1843), True, 'import numpy as np\n'), ((2163, 2196), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'a', '"""b-"""'], {'label': '"""DOG"""'}), "(x, a, 'b-', label='DOG')\n", (2171, 2196), True, 'import matplotlib.pyplot as plt\n'), ((2201, 2235), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'b', '"""r--"""'], {'label': '"""LOG"""'}), "(x, b, 'r--', label='LOG')\n", (2209, 2235), True, 'import matplotlib.pyplot as plt\n'), ((2240, 2268), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (2250, 2268), True, 'import matplotlib.pyplot as plt\n'), ((2273, 2288), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X"""'], {}), "('X')\n", (2283, 2288), True, 'import matplotlib.pyplot as plt\n'), ((2293, 2312), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Value"""'], {}), "('Value')\n", (2303, 2312), True, 'import matplotlib.pyplot as plt\n'), ((2424, 2452), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (2434, 2452), True, 'import matplotlib.pyplot as plt\n'), ((3282, 3296), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3294, 3296), False, 'from datetime import datetime\n'), ((3420, 3440), 'numpy.arange', 'np.arange', (['(1)', 
'(3)', '(0.2)'], {}), '(1, 3, 0.2)\n', (3429, 3440), True, 'import numpy as np\n'), ((3484, 3514), 'matplotlib.pyplot.title', 'plt.title', (['"""Initial sigma = 1"""'], {}), "('Initial sigma = 1')\n", (3493, 3514), True, 'import matplotlib.pyplot as plt\n'), ((3558, 3588), 'matplotlib.pyplot.title', 'plt.title', (['"""Initial sigma = 5"""'], {}), "('Initial sigma = 5')\n", (3567, 3588), True, 'import matplotlib.pyplot as plt\n'), ((3593, 3611), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3609, 3611), True, 'import matplotlib.pyplot as plt\n'), ((3616, 3626), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3624, 3626), True, 'import matplotlib.pyplot as plt\n'), ((962, 975), 'numpy.nonzero', 'np.nonzero', (['s'], {}), '(s)\n', (972, 975), True, 'import numpy as np\n'), ((1985, 1997), 'numpy.array', 'np.array', (['g1'], {}), '(g1)\n', (1993, 1997), True, 'import numpy as np\n'), ((1998, 2010), 'numpy.array', 'np.array', (['g2'], {}), '(g2)\n', (2006, 2010), True, 'import numpy as np\n'), ((3637, 3651), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3649, 3651), False, 'from datetime import datetime\n'), ((147, 165), 'numpy.power', 'np.power', (['sigma', '(4)'], {}), '(sigma, 4)\n', (155, 165), True, 'import numpy as np\n'), ((298, 316), 'numpy.power', 'np.power', (['sigma', '(2)'], {}), '(sigma, 2)\n', (306, 316), True, 'import numpy as np\n'), ((1116, 1134), 'numpy.power', 'np.power', (['sigma', '(2)'], {}), '(sigma, 2)\n', (1124, 1134), True, 'import numpy as np\n'), ((1165, 1186), 'numpy.power', 'np.power', (['(x - mean)', '(2)'], {}), '(x - mean, 2)\n', (1173, 1186), True, 'import numpy as np\n'), ((1513, 1527), 'numpy.power', 'np.power', (['x', '(2)'], {}), '(x, 2)\n', (1521, 1527), True, 'import numpy as np\n'), ((1529, 1547), 'numpy.power', 'np.power', (['sigma', '(4)'], {}), '(sigma, 4)\n', (1537, 1547), True, 'import numpy as np\n'), ((1563, 1581), 'numpy.power', 'np.power', (['sigma', '(2)'], {}), '(sigma, 
2)\n', (1571, 1581), True, 'import numpy as np\n'), ((1713, 1731), 'numpy.power', 'np.power', (['sigma', '(2)'], {}), '(sigma, 2)\n', (1721, 1731), True, 'import numpy as np\n'), ((186, 200), 'numpy.power', 'np.power', (['x', '(2)'], {}), '(x, 2)\n', (194, 200), True, 'import numpy as np\n'), ((201, 215), 'numpy.power', 'np.power', (['y', '(2)'], {}), '(y, 2)\n', (209, 215), True, 'import numpy as np\n'), ((220, 238), 'numpy.power', 'np.power', (['sigma', '(2)'], {}), '(sigma, 2)\n', (228, 238), True, 'import numpy as np\n'), ((264, 278), 'numpy.power', 'np.power', (['x', '(2)'], {}), '(x, 2)\n', (272, 278), True, 'import numpy as np\n'), ((279, 293), 'numpy.power', 'np.power', (['y', '(2)'], {}), '(y, 2)\n', (287, 293), True, 'import numpy as np\n'), ((1193, 1211), 'numpy.power', 'np.power', (['sigma', '(2)'], {}), '(sigma, 2)\n', (1201, 1211), True, 'import numpy as np\n'), ((1620, 1634), 'numpy.power', 'np.power', (['x', '(2)'], {}), '(x, 2)\n', (1628, 1634), True, 'import numpy as np\n'), ((1641, 1659), 'numpy.power', 'np.power', (['sigma', '(2)'], {}), '(sigma, 2)\n', (1649, 1659), True, 'import numpy as np\n')] |
from __future__ import print_function
import pytest
from hashlib import sha256
import json
import re
import sys
import numpy as np
import flowpipe.utilities as util
class WeirdObject(object):
    """An object that is not json serializable and has no bytes() interface."""
    # Arbitrary class attribute; the tests below only use this type as a fixture.
    foo = "bar"
def test_node_encoder():
    """Test the custom JSONEncoder.

    Round-trips four payloads through json.dumps/loads: a plain
    serializable dict, a dict holding bytes, a dict holding a
    non-serializable object, and a dict holding a non-contiguous numpy
    array. The assertions accept either the original value or an
    encoder-produced substitute (sha256 hexdigest or repr string).
    """
    # Plain JSON-serializable payload must survive a round trip unchanged.
    valid_object = {"key": "value", "other_key": [1, 2, 3]}
    json_string = json.dumps(valid_object)
    recovered_json = json.loads(json_string)
    for k, v in valid_object.items():
        assert v == recovered_json[k]
    # bytes are not JSON serializable; accept the value or its sha256 digest.
    bytes_object = {"key": "value", "other_key": bytes(42)}
    json_string = json.dumps(bytes_object, cls=util.NodeEncoder)
    recovered_json = json.loads(json_string)
    for k, v in bytes_object.items():
        assert v == recovered_json[k] \
            or sha256(v).hexdigest() == recovered_json[k]
    # Non-serializable object: accept the value, a repr-like string, or a digest.
    weird_object = {"key": "value", "other_key": WeirdObject()}
    json_string = json.dumps(weird_object, cls=util.NodeEncoder)
    recovered_json = json.loads(json_string)
    for k, v in weird_object.items():
        assert v == recovered_json[k] \
            or re.search('WeirdObject object at', str(recovered_json[k])) \
            or sha256(v).hexdigest() == recovered_json[k]
    # Strided slice produces a non-contiguous numpy array.
    weird_np_array = {"key": "value", "other_key": np.arange(10)[::2]}
    json_string = json.dumps(weird_np_array, cls=util.NodeEncoder)
    recovered_json = json.loads(json_string)
    for k, v in weird_np_array.items():
        assert v == recovered_json[k]\
            or sha256(bytes(v)).hexdigest() == recovered_json[k]
def test_get_hash():
    """Test the hashing function.

    Pins exact digests for an int, a nested dict and a non-JSON string,
    and checks that a non-serializable object hashes to None.
    """
    number = 42
    assert util.get_hash(number) == '73475cb40a568e8da8a045ced110137e159f890ac4da883b6b17dc651b3a8049'
    js = {"foo": "bar", "baz": {"zoom": "zulu"}}
    assert util.get_hash(js) == '8336ea0f6e482df0c7a738c83a2b8d3357cf0234c29cfd232fa6627bdc54289e'
    invalid_js = "kazoo{" # A generic string that's not json
    # The expected digest differs between Python 2 and 3 string handling.
    if sys.version_info.major > 2:
        assert util.get_hash(invalid_js) == 'c21e3435e752b72514e34139f116afee1f72cf496c1cc94c9087088c139dfb7d'
    else:
        assert util.get_hash(invalid_js) == '5324bcf2641f119108d1f99b92687b0af513e572c68dfed217344ffeff1f35a9'
    x = WeirdObject()
    assert util.get_hash(x) is None
| [
"hashlib.sha256",
"json.loads",
"json.dumps",
"flowpipe.utilities.get_hash",
"numpy.arange"
] | [((439, 463), 'json.dumps', 'json.dumps', (['valid_object'], {}), '(valid_object)\n', (449, 463), False, 'import json\n'), ((485, 508), 'json.loads', 'json.loads', (['json_string'], {}), '(json_string)\n', (495, 508), False, 'import json\n'), ((664, 710), 'json.dumps', 'json.dumps', (['bytes_object'], {'cls': 'util.NodeEncoder'}), '(bytes_object, cls=util.NodeEncoder)\n', (674, 710), False, 'import json\n'), ((732, 755), 'json.loads', 'json.loads', (['json_string'], {}), '(json_string)\n', (742, 755), False, 'import json\n'), ((975, 1021), 'json.dumps', 'json.dumps', (['weird_object'], {'cls': 'util.NodeEncoder'}), '(weird_object, cls=util.NodeEncoder)\n', (985, 1021), False, 'import json\n'), ((1043, 1066), 'json.loads', 'json.loads', (['json_string'], {}), '(json_string)\n', (1053, 1066), False, 'import json\n'), ((1369, 1417), 'json.dumps', 'json.dumps', (['weird_np_array'], {'cls': 'util.NodeEncoder'}), '(weird_np_array, cls=util.NodeEncoder)\n', (1379, 1417), False, 'import json\n'), ((1439, 1462), 'json.loads', 'json.loads', (['json_string'], {}), '(json_string)\n', (1449, 1462), False, 'import json\n'), ((1694, 1715), 'flowpipe.utilities.get_hash', 'util.get_hash', (['number'], {}), '(number)\n', (1707, 1715), True, 'import flowpipe.utilities as util\n'), ((1847, 1864), 'flowpipe.utilities.get_hash', 'util.get_hash', (['js'], {}), '(js)\n', (1860, 1864), True, 'import flowpipe.utilities as util\n'), ((2299, 2315), 'flowpipe.utilities.get_hash', 'util.get_hash', (['x'], {}), '(x)\n', (2312, 2315), True, 'import flowpipe.utilities as util\n'), ((1331, 1344), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (1340, 1344), True, 'import numpy as np\n'), ((2048, 2073), 'flowpipe.utilities.get_hash', 'util.get_hash', (['invalid_js'], {}), '(invalid_js)\n', (2061, 2073), True, 'import flowpipe.utilities as util\n'), ((2169, 2194), 'flowpipe.utilities.get_hash', 'util.get_hash', (['invalid_js'], {}), '(invalid_js)\n', (2182, 2194), True, 'import 
flowpipe.utilities as util\n'), ((849, 858), 'hashlib.sha256', 'sha256', (['v'], {}), '(v)\n', (855, 858), False, 'from hashlib import sha256\n'), ((1236, 1245), 'hashlib.sha256', 'sha256', (['v'], {}), '(v)\n', (1242, 1245), False, 'from hashlib import sha256\n')] |
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# ==============================================================================
from unittest import TestCase
from unittest.mock import Mock, patch
from collections import OrderedDict
import numpy as np
from abc import abstractmethod
import pickle
import mxnet as mx
import os
from xfer import MetaModelRepurposer, SvmRepurposer, BnnRepurposer, GpRepurposer, load
from ..repurposer_test_utils import RepurposerTestUtils
@patch(RepurposerTestUtils.META_MODEL_REPURPOSER_MODEL_HANDLER_CLASS)
class MetaModelRepurposerTestCase(TestCase):
    """Base test case shared by the meta-model repurposer tests (SVM/BNN/GP).

    The class-level ``@patch`` replaces the model-handler class for every
    test, so each ``test_*`` method receives an extra ``mock_model_handler``
    argument. Subclasses override ``self.repurposer_class`` and the
    ``target_model_path`` / expected-accuracy attributes set in ``setUp``.
    """
    # Directory with precomputed feature/label/split fixture files.
    _test_data_dir = 'tests/data/meta_model_repurposer_data/'
    def setUp(self):
        """Load fixture data and set defaults that subclasses may override."""
        self.repurposer_class = MetaModelRepurposer
        self.source_model = RepurposerTestUtils.create_mxnet_module()
        self.source_model_layer_1 = RepurposerTestUtils.LAYER_FC1
        self.source_model_layer_2 = RepurposerTestUtils.LAYER_FC2
        self.source_model_layers = [self.source_model_layer_1, self.source_model_layer_2]
        # Load data (features and labels) to run tests
        self.features = np.loadtxt(self._test_data_dir + '_all_features.out')
        self.labels = np.loadtxt(self._test_data_dir + '_labels.out')
        self.n_classes = len(np.unique(self.labels))
        # Train/test split is fixed by precomputed index files.
        self._train_indices = np.loadtxt(self._test_data_dir + '_train_indices.out').astype(int)
        self._test_indices = np.loadtxt(self._test_data_dir + '_test_indices.out').astype(int)
        self.n_test_instances = len(self._test_indices)
        self.train_features = self.features[self._train_indices]
        self.train_labels = self.labels[self._train_indices]
        self.test_features = self.features[self._test_indices]
        self.test_labels = self.labels[self._test_indices]
        self.train_feature_dict = {'layer1': self.train_features}
        self.test_feature_dict = {'layer1': self.test_features}
        # Stand-in for data iterators; real iteration is bypassed because
        # the model handler's get_layer_output is patched in the tests.
        self.mock_object = Mock()
        # Overridden in derived classes
        self.target_model_path = None
        self.expected_accuracy = None
        self.minimum_expected_accuracy = None
    def test_instantiation_valid_input(self, mock_model_handler):
        """Constructor stores source model and feature layer names."""
        mock_model_handler.return_value = RepurposerTestUtils.get_mock_model_handler_object()
        feature_layer_names_in_source_model = [self.source_model_layer_1]
        repurposer = self.repurposer_class(self.source_model, feature_layer_names_in_source_model)
        self.assertTrue(repurposer.source_model == self.source_model)
        self.assertTrue(repurposer.feature_layer_names == feature_layer_names_in_source_model)
    def test_instantiation_source_model_is_none(self, mock_model_handler):
        """A None source model raises TypeError."""
        source_model_none = None
        mock_feature_layer_names = Mock()
        self.assertRaisesRegex(TypeError, "source_model must be a valid `mxnet.mod.Module` object",
                               self.repurposer_class, source_model_none, mock_feature_layer_names)
    def test_instantiation_feature_layer_names_empty(self, mock_model_handler):
        # Empty feature_layer_names list raises ValueError
        feature_layer_names_empty = []
        self.assertRaisesRegex(ValueError, "feature_layer_names cannot be empty",
                               self.repurposer_class, self.source_model, feature_layer_names_empty)
    def test_instantiation_feature_layer_names_invalid_type(self, mock_model_handler):
        # feature_layer_names not being a list raises TypeError
        feature_layer_names_int = 1
        self.assertRaisesRegex(TypeError, "feature_layer_names must be a list",
                               self.repurposer_class, self.source_model, feature_layer_names_int)
        feature_layer_names_str = ''
        self.assertRaisesRegex(TypeError, "feature_layer_names must be a list",
                               self.repurposer_class, self.source_model, feature_layer_names_str)
        feature_layer_names_dict = {'': ''}
        self.assertRaisesRegex(TypeError, "feature_layer_names must be a list",
                               self.repurposer_class, self.source_model, feature_layer_names_dict)
    def test_instantiation_feature_layer_names_not_in_source_model(self, mock_model_handler):
        """Unknown layer names raise ValueError naming the missing layer."""
        mock_model_handler.return_value = RepurposerTestUtils.get_mock_model_handler_object()
        # Some feature_layer_names not found in source_model
        feature_layer_names_some_not_in_source_model = [self.source_model_layer_1, 'phantom_layer_2']
        self.assertRaisesRegex(ValueError, "feature_layer_name 'phantom_layer_2' is not found in source_model",
                               self.repurposer_class, self.source_model, feature_layer_names_some_not_in_source_model)
        # All feature_layer_names not found in source_model
        feature_layer_names_all_not_in_source_model = ['phantom_layer_1', 'phantom_layer_2']
        self.assertRaisesRegex(ValueError, "feature_layer_name 'phantom_layer_1' is not found in source_model",
                               self.repurposer_class, self.source_model, feature_layer_names_all_not_in_source_model)
    def test_validate_before_predict(self, mock_model_handler):
        """predict() pre-check fails without a target model, passes with one."""
        mock_model_handler.return_value = RepurposerTestUtils.get_mock_model_handler_object()
        # Create meta model repurposer object
        repurposer = self.repurposer_class(self.source_model, self.source_model_layers)
        # Target model is neither created through repurpose nor explicitly assigned
        # So, calling predict should raise ValueError
        self.assertRaisesRegex(ValueError, "Cannot predict because target_model is not initialized",
                               repurposer._validate_before_predict)
        # Test valid input
        repurposer.target_model = RepurposerTestUtils.create_mxnet_module()
        repurposer._validate_before_predict()
    def test_get_features_from_source_model(self, mock_model_handler):
        """Feature extraction concatenates per-layer outputs in dict order."""
        mock_model_handler.return_value = RepurposerTestUtils.get_mock_model_handler_object()
        # Dummy layer outputs to test with
        layer1_output = np.array([[1.1, 1.2, 1.3], [1.4, 1.5, 1.6], [1.7, 1.8, 1.9]])
        layer2_output = np.array([[2.1, 2.2], [2.3, 2.4], [2.5, 2.6]])
        # Test with 1 feature layer
        feature_dict = OrderedDict()
        feature_dict['layer1'] = layer1_output
        expected_feature_indices = OrderedDict()
        expected_feature_indices['layer1'] = np.array([0, 1, 2])
        expected_features = layer1_output
        self._test_get_features_from_source_model(mock_model_handler, feature_dict, expected_features,
                                                  expected_feature_indices)
        # Test with 2 feature layers: layer1, layer2
        feature_dict = OrderedDict()
        feature_dict['layer1'] = layer1_output
        feature_dict['layer2'] = layer2_output
        expected_feature_indices = OrderedDict()
        expected_feature_indices['layer1'] = np.array([0, 1, 2])
        expected_feature_indices['layer2'] = np.array([3, 4])
        expected_features = np.array([[1.1, 1.2, 1.3, 2.1, 2.2], [1.4, 1.5, 1.6, 2.3, 2.4], [1.7, 1.8, 1.9, 2.5, 2.6]])
        self._test_get_features_from_source_model(mock_model_handler, feature_dict, expected_features,
                                                  expected_feature_indices)
        # Test with 2 feature layers: layer2, layer1
        feature_dict = OrderedDict()
        feature_dict['layer2'] = layer2_output
        feature_dict['layer1'] = layer1_output
        expected_feature_indices = OrderedDict()
        expected_feature_indices['layer2'] = np.array([0, 1])
        expected_feature_indices['layer1'] = np.array([2, 3, 4])
        expected_features = np.array([[2.1, 2.2, 1.1, 1.2, 1.3], [2.3, 2.4, 1.4, 1.5, 1.6], [2.5, 2.6, 1.7, 1.8, 1.9]])
        self._test_get_features_from_source_model(mock_model_handler, feature_dict, expected_features,
                                                  expected_feature_indices)
    def _test_get_features_from_source_model(self, mock_model_handler, feature_dict, expected_features,
                                             expected_feature_indices):
        """Helper: run get_features_from_source_model and check its outputs."""
        repurposer = self.repurposer_class(self.source_model, self.source_model_layers)
        labels_from_model_handler = np.array([0, 1, 0])
        mock_model_handler.return_value.get_layer_output.return_value = feature_dict, labels_from_model_handler
        meta_model_data = repurposer.get_features_from_source_model(data_iterator=Mock())
        self.assertTrue(np.array_equal(meta_model_data.features, expected_features))
        self.assertTrue(np.array_equal(meta_model_data.labels, labels_from_model_handler))
        RepurposerTestUtils.assert_feature_indices_equal(expected_feature_indices,
                                                         meta_model_data.feature_indices_per_layer)
    def test_serialisation(self, mock_model_handler):
        """Round-trip save/load produces an equivalent repurposer."""
        if self.repurposer_class == MetaModelRepurposer:  # base class
            return
        self._test_save_load_repurposed_model(mock_model_handler, save_source_model=True)
        self._test_save_load_repurposed_model(mock_model_handler, save_source_model=False)
    def _test_save_load_repurposed_model(self, mock_model_handler, save_source_model):
        """Helper: train, save, reload, and compare predictions/attributes."""
        # To speed-up unit test running time. Accuracy is validated in integration tests.
        num_train_points = 2
        self.train_features = self.train_features[:num_train_points]
        self.train_labels = self.train_labels[:num_train_points]
        mock_model_handler.return_value = RepurposerTestUtils.get_mock_model_handler_object()
        file_path = 'test_serialisation'
        RepurposerTestUtils._remove_files_with_prefix(file_path)
        source_model = mx.module.Module.load('tests/data/testnetv1', 0, label_names=['softmaxoutput1_label'],
                                              data_names=('data',))
        repurposer = self.repurposer_class(source_model, self.source_model_layers)
        if self.repurposer_class == BnnRepurposer:
            # BNN training is slow; use a single epoch and few MC samples.
            repurposer = BnnRepurposer(source_model, self.source_model_layers, num_epochs=1,
                                        num_samples_mc_prediction=15)
        repurposer.target_model = repurposer._train_model_from_features(self.train_features, self.train_labels)
        # Manually setting provide_data and provide_label because repurpose() is not called
        repurposer.provide_data = [('data', (2, 3, 224, 224))]
        repurposer.provide_label = [('softmaxoutput1_label', (2,))]
        # Mocking iterator because get_layer_output is patched
        mock_model_handler.return_value.get_layer_output.return_value = self.test_feature_dict, self.test_labels
        results = repurposer.predict_label(test_iterator=self.mock_object)
        assert not os.path.isfile(file_path + '.json')
        if save_source_model:
            assert not os.path.isfile(file_path + '_source-symbol.json')
            assert not os.path.isfile(file_path + '_source-0000.params')
            repurposer.save_repurposer(model_name=file_path, save_source_model=save_source_model)
            assert os.path.isfile(file_path + '_source-symbol.json')
            assert os.path.isfile(file_path + '_source-0000.params')
            loaded_repurposer = load(file_path)
        else:
            repurposer.save_repurposer(model_name=file_path, save_source_model=save_source_model)
            loaded_repurposer = load(file_path, source_model=repurposer.source_model)
        assert os.path.isfile(file_path + '.json')
        RepurposerTestUtils._remove_files_with_prefix(file_path)
        results_loaded = loaded_repurposer.predict_label(test_iterator=self.mock_object)
        assert type(repurposer) == type(loaded_repurposer)
        self._assert_target_model_equal(repurposer.target_model, loaded_repurposer.target_model)
        accuracy1 = np.mean(results == self.test_labels)
        accuracy2 = np.mean(results_loaded == self.test_labels)
        if self.repurposer_class == BnnRepurposer:
            # BNN predictions are stochastic, so allow a small tolerance.
            assert np.isclose(accuracy1, accuracy2, atol=0.1), 'Inconsistent accuracies: {}, {}.'.format(accuracy1,
                                                                                                       accuracy2)
        else:
            assert accuracy1 == accuracy2, 'Inconsistent accuracies: {}, {}.'.format(accuracy1, accuracy2)
        self._assert_attributes_equal(repurposer, loaded_repurposer)
    def _assert_target_model_equal(self, model1, model2):
        """Assert two target models have identical attribute dictionaries."""
        assert model1.__dict__.keys() == model2.__dict__.keys()
        for key in model1.__dict__.keys():
            if type(model1.__dict__[key]) == np.ndarray:
                assert isinstance(model2.__dict__[key], np.ndarray)
                assert np.array_equal(model1.__dict__[key], model2.__dict__[key])
            elif type(model1.__dict__[key]) == tuple:
                assert isinstance(model2.__dict__[key], tuple)
                assert list(model1.__dict__[key]) == list(model2.__dict__[key])
            else:
                assert model1.__dict__[key] == model2.__dict__[key]
    def _test_predict(self, mock_model_handler, mock_validate_method, test_predict_probability, expected_accuracy):
        """ Test for predict wrapper in meta model base class """
        # Patch model_handler and then create repurposer object
        mock_model_handler.return_value = RepurposerTestUtils.get_mock_model_handler_object()
        # Create repurposer
        if self.repurposer_class == SvmRepurposer:
            repurposer = self.repurposer_class(self.source_model, self.source_model_layers,
                                               enable_probability_estimates=test_predict_probability)
        elif self.repurposer_class == GpRepurposer:
            repurposer = self.repurposer_class(self.source_model, self.source_model_layers, apply_l2_norm=True)
        else:
            repurposer = self.repurposer_class(self.source_model, self.source_model_layers)
        # Identify which predict function to test
        if test_predict_probability:
            predict_function = repurposer.predict_probability
        else:
            predict_function = repurposer.predict_label
        # Train or Load target model from file
        if self.repurposer_class == GpRepurposer:
            num_datapoints_train = 10
            mock_model_handler.return_value.get_layer_output.return_value =\
                {'l1': self.train_features[:num_datapoints_train]}, self.train_labels[:num_datapoints_train]
            repurposer.repurpose(self.mock_object)
        else:
            with open(self.target_model_path, 'rb') as target_model:
                repurposer.target_model = pickle.load(target_model)
        # Call predict method and get prediction results
        mock_model_handler.return_value.get_layer_output.return_value = self.test_feature_dict, self.test_labels
        mock_validate_method.reset_mock()
        results = predict_function(
            test_iterator=self.mock_object)  # Mocking iterator because get_layer_output is patched
        # Check if predict called validate
        self.assertTrue(mock_validate_method.call_count == 1,
                        "Predict expected to called {} once. Found {} calls".
                        format(RepurposerTestUtils.VALIDATE_PREDICT_METHOD_NAME, mock_validate_method.call_count))
        self._validate_prediction_results(results, test_predict_probability, expected_accuracy)
    def _test_predict_from_features(self, test_predict_probability, expected_accuracy):
        """ Used to test 'predict_from_features' implementation in derived classes """
        # Create repurposer
        if self.repurposer_class == SvmRepurposer:
            repurposer = self.repurposer_class(self.source_model, self.source_model_layers,
                                               enable_probability_estimates=test_predict_probability)
        else:
            repurposer = self.repurposer_class(self.source_model, self.source_model_layers)
        # Load target model from file
        with open(self.target_model_path, 'rb') as target_model:
            repurposer.target_model = pickle.load(target_model)
        if test_predict_probability:
            results = repurposer._predict_probability_from_features(self.test_features)
        else:
            results = repurposer._predict_label_from_features(self.test_features)
        self._validate_prediction_results(results, test_predict_probability, expected_accuracy)
    def _validate_prediction_results(self, results, test_predict_probability, expected_accuracy, num_predictions=None):
        """Validate type, shape, probability-sum and accuracy of predictions."""
        if num_predictions is None:
            test_labels = self.test_labels
            n_test_instances = self.n_test_instances
        else:
            assert num_predictions < len(self.test_labels), 'More predictions ({}), than labels ({})'.format(
                num_predictions, len(self.test_labels))
            test_labels = self.test_labels[:num_predictions]
            n_test_instances = len(test_labels)
        # Validate type of prediction results
        self.assertTrue(type(results) == np.ndarray,
                        "Prediction results expected to be numpy array. Instead got: {}".format(type(results)))
        # Validate shape of prediction results
        expected_shape = (n_test_instances, self.n_classes) if test_predict_probability else (
            n_test_instances,)
        self.assertTrue(results.shape == expected_shape,
                        "Prediction results shape is incorrect. Expected: {}. Got: {}".format(expected_shape,
                                                                                              results.shape))
        # Validate if prediction probabilities sum to 1
        if test_predict_probability:
            probability_sum = np.sum(results, axis=1)
            array_of_ones = np.ones(shape=(n_test_instances,))
            self.assertTrue(np.allclose(probability_sum, array_of_ones), "Sum of predicted probabilities is not 1")
        # Validate accuracy of prediction results
        labels = np.argmax(results, axis=1) if test_predict_probability else results
        accuracy = np.mean(labels == test_labels)
        self.assertTrue(np.isclose(accuracy, expected_accuracy),
                        "Prediction accuracy is incorrect. Expected: {}. Actual: {}".format(expected_accuracy,
                                                                                            accuracy))
    def _run_common_repurposer_tests(self, repurposer):
        """Common repurpose() flow: target model starts unset, then trains."""
        # Target model is not initialized yet
        self.assertTrue(repurposer.target_model is None, "Target model not expected to be initialized at this point")
        # Call repurpose
        repurposer.repurpose(self.mock_object)
        # Validate target model is now set
        self.assertTrue(repurposer.target_model is not None, "Repurpose failed to set target model")
        # Validate trained model
        self._validate_trained_model(repurposer.target_model)
    def _test_repurpose_calls_validate(self, mock_model_handler, mock_validate_method):
        """repurpose() must call the validation hook exactly once."""
        mock_model_handler.return_value = RepurposerTestUtils.get_mock_model_handler_object()
        # Use subset of train_feature_dict and train_labels to speed up test
        N = 2
        train_feature_dict = {'layer1': self.train_feature_dict['layer1'][:N]}
        train_labels = self.train_labels[:N]
        mock_model_handler.return_value.get_layer_output.return_value = train_feature_dict, train_labels
        repurposer = self.repurposer_class(self.source_model, self.source_model_layers)
        mock_validate_method.reset_mock()
        repurposer.repurpose(self.mock_object)
        self.assertTrue(mock_validate_method.call_count == 1,
                        "Repurpose expected to called {} once. Found {} calls".
                        format(RepurposerTestUtils.VALIDATE_REPURPOSE_METHOD_NAME, mock_validate_method.call_count))
    def _assert_attributes_equal(self, repurposer1, repurposer2):
        """Delegate attribute comparison to the shared test utility."""
        RepurposerTestUtils._assert_common_attributes_equal(repurposer1, repurposer2)
    @abstractmethod
    def _validate_trained_model(self, target_model):
        # Subclasses assert model-specific properties of the trained target.
        pass
| [
"numpy.mean",
"collections.OrderedDict",
"numpy.allclose",
"unittest.mock.Mock",
"numpy.unique",
"numpy.isclose",
"numpy.ones",
"pickle.load",
"numpy.argmax",
"os.path.isfile",
"numpy.array",
"mxnet.module.Module.load",
"numpy.sum",
"numpy.array_equal",
"xfer.load",
"xfer.BnnRepurposer... | [((1017, 1085), 'unittest.mock.patch', 'patch', (['RepurposerTestUtils.META_MODEL_REPURPOSER_MODEL_HANDLER_CLASS'], {}), '(RepurposerTestUtils.META_MODEL_REPURPOSER_MODEL_HANDLER_CLASS)\n', (1022, 1085), False, 'from unittest.mock import Mock, patch\n'), ((1639, 1692), 'numpy.loadtxt', 'np.loadtxt', (["(self._test_data_dir + '_all_features.out')"], {}), "(self._test_data_dir + '_all_features.out')\n", (1649, 1692), True, 'import numpy as np\n'), ((1715, 1762), 'numpy.loadtxt', 'np.loadtxt', (["(self._test_data_dir + '_labels.out')"], {}), "(self._test_data_dir + '_labels.out')\n", (1725, 1762), True, 'import numpy as np\n'), ((2469, 2475), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (2473, 2475), False, 'from unittest.mock import Mock, patch\n'), ((3282, 3288), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (3286, 3288), False, 'from unittest.mock import Mock, patch\n'), ((6609, 6670), 'numpy.array', 'np.array', (['[[1.1, 1.2, 1.3], [1.4, 1.5, 1.6], [1.7, 1.8, 1.9]]'], {}), '([[1.1, 1.2, 1.3], [1.4, 1.5, 1.6], [1.7, 1.8, 1.9]])\n', (6617, 6670), True, 'import numpy as np\n'), ((6695, 6741), 'numpy.array', 'np.array', (['[[2.1, 2.2], [2.3, 2.4], [2.5, 2.6]]'], {}), '([[2.1, 2.2], [2.3, 2.4], [2.5, 2.6]])\n', (6703, 6741), True, 'import numpy as np\n'), ((6802, 6815), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (6813, 6815), False, 'from collections import OrderedDict\n'), ((6898, 6911), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (6909, 6911), False, 'from collections import OrderedDict\n'), ((6957, 6976), 'numpy.array', 'np.array', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (6965, 6976), True, 'import numpy as np\n'), ((7275, 7288), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (7286, 7288), False, 'from collections import OrderedDict\n'), ((7418, 7431), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (7429, 7431), False, 'from collections import OrderedDict\n'), ((7477, 7496), 
'numpy.array', 'np.array', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (7485, 7496), True, 'import numpy as np\n'), ((7542, 7558), 'numpy.array', 'np.array', (['[3, 4]'], {}), '([3, 4])\n', (7550, 7558), True, 'import numpy as np\n'), ((7587, 7683), 'numpy.array', 'np.array', (['[[1.1, 1.2, 1.3, 2.1, 2.2], [1.4, 1.5, 1.6, 2.3, 2.4], [1.7, 1.8, 1.9, 2.5,\n 2.6]]'], {}), '([[1.1, 1.2, 1.3, 2.1, 2.2], [1.4, 1.5, 1.6, 2.3, 2.4], [1.7, 1.8, \n 1.9, 2.5, 2.6]])\n', (7595, 7683), True, 'import numpy as np\n'), ((7935, 7948), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (7946, 7948), False, 'from collections import OrderedDict\n'), ((8078, 8091), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (8089, 8091), False, 'from collections import OrderedDict\n'), ((8137, 8153), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (8145, 8153), True, 'import numpy as np\n'), ((8199, 8218), 'numpy.array', 'np.array', (['[2, 3, 4]'], {}), '([2, 3, 4])\n', (8207, 8218), True, 'import numpy as np\n'), ((8247, 8343), 'numpy.array', 'np.array', (['[[2.1, 2.2, 1.1, 1.2, 1.3], [2.3, 2.4, 1.4, 1.5, 1.6], [2.5, 2.6, 1.7, 1.8,\n 1.9]]'], {}), '([[2.1, 2.2, 1.1, 1.2, 1.3], [2.3, 2.4, 1.4, 1.5, 1.6], [2.5, 2.6, \n 1.7, 1.8, 1.9]])\n', (8255, 8343), True, 'import numpy as np\n'), ((8819, 8838), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (8827, 8838), True, 'import numpy as np\n'), ((10291, 10404), 'mxnet.module.Module.load', 'mx.module.Module.load', (['"""tests/data/testnetv1"""', '(0)'], {'label_names': "['softmaxoutput1_label']", 'data_names': "('data',)"}), "('tests/data/testnetv1', 0, label_names=[\n 'softmaxoutput1_label'], data_names=('data',))\n", (10312, 10404), True, 'import mxnet as mx\n'), ((12057, 12092), 'os.path.isfile', 'os.path.isfile', (["(file_path + '.json')"], {}), "(file_path + '.json')\n", (12071, 12092), False, 'import os\n'), ((12423, 12459), 'numpy.mean', 'np.mean', (['(results == self.test_labels)'], {}), '(results == 
self.test_labels)\n', (12430, 12459), True, 'import numpy as np\n'), ((12480, 12523), 'numpy.mean', 'np.mean', (['(results_loaded == self.test_labels)'], {}), '(results_loaded == self.test_labels)\n', (12487, 12523), True, 'import numpy as np\n'), ((18822, 18852), 'numpy.mean', 'np.mean', (['(labels == test_labels)'], {}), '(labels == test_labels)\n', (18829, 18852), True, 'import numpy as np\n'), ((1792, 1814), 'numpy.unique', 'np.unique', (['self.labels'], {}), '(self.labels)\n', (1801, 1814), True, 'import numpy as np\n'), ((9065, 9124), 'numpy.array_equal', 'np.array_equal', (['meta_model_data.features', 'expected_features'], {}), '(meta_model_data.features, expected_features)\n', (9079, 9124), True, 'import numpy as np\n'), ((9150, 9215), 'numpy.array_equal', 'np.array_equal', (['meta_model_data.labels', 'labels_from_model_handler'], {}), '(meta_model_data.labels, labels_from_model_handler)\n', (9164, 9215), True, 'import numpy as np\n'), ((10604, 10705), 'xfer.BnnRepurposer', 'BnnRepurposer', (['source_model', 'self.source_model_layers'], {'num_epochs': '(1)', 'num_samples_mc_prediction': '(15)'}), '(source_model, self.source_model_layers, num_epochs=1,\n num_samples_mc_prediction=15)\n', (10617, 10705), False, 'from xfer import MetaModelRepurposer, SvmRepurposer, BnnRepurposer, GpRepurposer, load\n'), ((11346, 11381), 'os.path.isfile', 'os.path.isfile', (["(file_path + '.json')"], {}), "(file_path + '.json')\n", (11360, 11381), False, 'import os\n'), ((11676, 11725), 'os.path.isfile', 'os.path.isfile', (["(file_path + '_source-symbol.json')"], {}), "(file_path + '_source-symbol.json')\n", (11690, 11725), False, 'import os\n'), ((11745, 11794), 'os.path.isfile', 'os.path.isfile', (["(file_path + '_source-0000.params')"], {}), "(file_path + '_source-0000.params')\n", (11759, 11794), False, 'import os\n'), ((11827, 11842), 'xfer.load', 'load', (['file_path'], {}), '(file_path)\n', (11831, 11842), False, 'from xfer import MetaModelRepurposer, SvmRepurposer, 
BnnRepurposer, GpRepurposer, load\n'), ((11987, 12040), 'xfer.load', 'load', (['file_path'], {'source_model': 'repurposer.source_model'}), '(file_path, source_model=repurposer.source_model)\n', (11991, 12040), False, 'from xfer import MetaModelRepurposer, SvmRepurposer, BnnRepurposer, GpRepurposer, load\n'), ((12595, 12637), 'numpy.isclose', 'np.isclose', (['accuracy1', 'accuracy2'], {'atol': '(0.1)'}), '(accuracy1, accuracy2, atol=0.1)\n', (12605, 12637), True, 'import numpy as np\n'), ((16726, 16751), 'pickle.load', 'pickle.load', (['target_model'], {}), '(target_model)\n', (16737, 16751), False, 'import pickle\n'), ((18464, 18487), 'numpy.sum', 'np.sum', (['results'], {'axis': '(1)'}), '(results, axis=1)\n', (18470, 18487), True, 'import numpy as np\n'), ((18516, 18550), 'numpy.ones', 'np.ones', ([], {'shape': '(n_test_instances,)'}), '(shape=(n_test_instances,))\n', (18523, 18550), True, 'import numpy as np\n'), ((18735, 18761), 'numpy.argmax', 'np.argmax', (['results'], {'axis': '(1)'}), '(results, axis=1)\n', (18744, 18761), True, 'import numpy as np\n'), ((18877, 18916), 'numpy.isclose', 'np.isclose', (['accuracy', 'expected_accuracy'], {}), '(accuracy, expected_accuracy)\n', (18887, 18916), True, 'import numpy as np\n'), ((1846, 1900), 'numpy.loadtxt', 'np.loadtxt', (["(self._test_data_dir + '_train_indices.out')"], {}), "(self._test_data_dir + '_train_indices.out')\n", (1856, 1900), True, 'import numpy as np\n'), ((1942, 1995), 'numpy.loadtxt', 'np.loadtxt', (["(self._test_data_dir + '_test_indices.out')"], {}), "(self._test_data_dir + '_test_indices.out')\n", (1952, 1995), True, 'import numpy as np\n'), ((9033, 9039), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (9037, 9039), False, 'from unittest.mock import Mock, patch\n'), ((11436, 11485), 'os.path.isfile', 'os.path.isfile', (["(file_path + '_source-symbol.json')"], {}), "(file_path + '_source-symbol.json')\n", (11450, 11485), False, 'import os\n'), ((11509, 11558), 'os.path.isfile', 
'os.path.isfile', (["(file_path + '_source-0000.params')"], {}), "(file_path + '_source-0000.params')\n", (11523, 11558), False, 'import os\n'), ((13313, 13371), 'numpy.array_equal', 'np.array_equal', (['model1.__dict__[key]', 'model2.__dict__[key]'], {}), '(model1.__dict__[key], model2.__dict__[key])\n', (13327, 13371), True, 'import numpy as np\n'), ((15258, 15283), 'pickle.load', 'pickle.load', (['target_model'], {}), '(target_model)\n', (15269, 15283), False, 'import pickle\n'), ((18579, 18622), 'numpy.allclose', 'np.allclose', (['probability_sum', 'array_of_ones'], {}), '(probability_sum, array_of_ones)\n', (18590, 18622), True, 'import numpy as np\n')] |
import tensorrt as trt
import pycuda.driver as cuda
import pycuda.autoinit
import numpy as np
import cv2
class pose:
    """Pose inference driven by a serialized TensorRT engine."""

    def __init__(self):
        # Build the TensorRT runtime, then load the engine plan file and
        # prepare host/device buffers for float32 inference.
        self.TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
        self.trt_runtime = trt.Runtime(self.TRT_LOGGER)
        self.load_engine('donModel.plan')
        self.allocate_buffers(trt.float32)

    def load_engine(self, plan_path):
        """Deserialize the CUDA engine stored at ``plan_path``."""
        with open(plan_path, 'rb') as plan_file:
            serialized_engine = plan_file.read()
        self.engine = self.trt_runtime.deserialize_cuda_engine(serialized_engine)

    def allocate_buffers(self, data_type):
        """Allocate pinned host buffers, device buffers and a CUDA stream."""
        np_dtype = trt.nptype(data_type)
        input_volume = trt.volume(self.engine.get_binding_shape(0))
        output_volume = trt.volume(self.engine.get_binding_shape(1))
        # Page-locked (pinned) host memory is never swapped to disk, which
        # keeps host<->device transfers fast.
        self.h_input = cuda.pagelocked_empty(input_volume, dtype=np_dtype)
        self.h_output = cuda.pagelocked_empty(output_volume, dtype=np_dtype)
        # Matching device-side allocations for input and output.
        self.d_input = cuda.mem_alloc(self.h_input.nbytes)
        self.d_output = cuda.mem_alloc(self.h_output.nbytes)
        # Stream used for async copies and kernel execution.
        self.stream = cuda.Stream()

    def do_inference(self, data):
        """Copy ``data`` to the GPU, run the engine, and return the raw output."""
        flattened = np.asarray(data).ravel()
        np.copyto(self.h_input, flattened)
        with self.engine.create_execution_context() as context:
            # Host -> device transfer of the input.
            cuda.memcpy_htod_async(self.d_input, self.h_input, self.stream)
            # Execute with profiling enabled.
            context.profiler = trt.Profiler()
            context.execute(batch_size=1, bindings=[int(self.d_input), int(self.d_output)])
            # Device -> host transfer of the predictions.
            cuda.memcpy_dtoh_async(self.h_output, self.d_output, self.stream)
            # Wait for all queued work to finish before reading the buffer.
            self.stream.synchronize()
            return self.h_output

    def detection(self, depth_data):
        """Scale raw depth values into [0, 1) float32 and print the output."""
        normalized = (depth_data / 65536).astype(np.float32)
        out = self.do_inference(normalized)
        print(out)
def main():
    """Load the saved depth frame and run pose detection on it."""
    detector = pose()
    # Depth data is stored on disk as a numpy array.
    depth_frame = np.load('data.npy')
    print(depth_frame.shape)
    detector.detection(depth_frame)


if __name__ == '__main__':
    main()
| [
"numpy.copyto",
"tensorrt.nptype",
"pycuda.driver.mem_alloc",
"tensorrt.Profiler",
"pycuda.driver.Stream",
"numpy.asarray",
"pycuda.driver.memcpy_htod_async",
"tensorrt.Logger",
"tensorrt.Runtime",
"numpy.load",
"pycuda.driver.memcpy_dtoh_async"
] | [((2420, 2444), 'numpy.load', 'np.load', (['input_file_path'], {}), '(input_file_path)\n', (2427, 2444), True, 'import numpy as np\n'), ((188, 218), 'tensorrt.Logger', 'trt.Logger', (['trt.Logger.WARNING'], {}), '(trt.Logger.WARNING)\n', (198, 218), True, 'import tensorrt as trt\n'), ((246, 274), 'tensorrt.Runtime', 'trt.Runtime', (['self.TRT_LOGGER'], {}), '(self.TRT_LOGGER)\n', (257, 274), True, 'import tensorrt as trt\n'), ((1059, 1094), 'pycuda.driver.mem_alloc', 'cuda.mem_alloc', (['self.h_input.nbytes'], {}), '(self.h_input.nbytes)\n', (1073, 1094), True, 'import pycuda.driver as cuda\n'), ((1119, 1155), 'pycuda.driver.mem_alloc', 'cuda.mem_alloc', (['self.h_output.nbytes'], {}), '(self.h_output.nbytes)\n', (1133, 1155), True, 'import pycuda.driver as cuda\n'), ((1255, 1268), 'pycuda.driver.Stream', 'cuda.Stream', ([], {}), '()\n', (1266, 1268), True, 'import pycuda.driver as cuda\n'), ((1372, 1409), 'numpy.copyto', 'np.copyto', (['self.h_input', 'preprocessed'], {}), '(self.h_input, preprocessed)\n', (1381, 1409), True, 'import numpy as np\n'), ((1534, 1597), 'pycuda.driver.memcpy_htod_async', 'cuda.memcpy_htod_async', (['self.d_input', 'self.h_input', 'self.stream'], {}), '(self.d_input, self.h_input, self.stream)\n', (1556, 1597), True, 'import pycuda.driver as cuda\n'), ((1659, 1673), 'tensorrt.Profiler', 'trt.Profiler', ([], {}), '()\n', (1671, 1673), True, 'import tensorrt as trt\n'), ((1833, 1898), 'pycuda.driver.memcpy_dtoh_async', 'cuda.memcpy_dtoh_async', (['self.h_output', 'self.d_output', 'self.stream'], {}), '(self.h_output, self.d_output, self.stream)\n', (1855, 1898), True, 'import pycuda.driver as cuda\n'), ((835, 856), 'tensorrt.nptype', 'trt.nptype', (['data_type'], {}), '(data_type)\n', (845, 856), True, 'import tensorrt as trt\n'), ((956, 977), 'tensorrt.nptype', 'trt.nptype', (['data_type'], {}), '(data_type)\n', (966, 977), True, 'import tensorrt as trt\n'), ((1339, 1355), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', 
(1349, 1355), True, 'import numpy as np\n')] |
# Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decoder-only language model configurations."""
import math
from typing import List
import jax
import jax.numpy as jnp
from lingvo.jax import base_input
from lingvo.jax import base_model_params
from lingvo.jax import layers
from lingvo.jax import model
from lingvo.jax import model_registry
from lingvo.jax import optimizers
from lingvo.jax import py_utils
from lingvo.jax import schedules
from lingvo.jax.tasks.lm import input_generator
import numpy as np
InstantiableParams = py_utils.InstantiableParams
NestedMap = py_utils.NestedMap
WeightInit = py_utils.WeightInit
class SyntheticDataset(base_model_params.BaseModelParams):
  """Synthetic LM dataset."""
  PERCORE_BATCH_SIZE = 16
  MAX_SEQ_LEN = 1024

  def _dataset_common(self, is_training) -> InstantiableParams:
    """Builds input params for one split of the synthetic dataset."""
    # Global batch size scales with the number of local devices.
    global_batch_size = self.PERCORE_BATCH_SIZE * jax.local_device_count()
    input_p = input_generator.SyntheticLmData.Params()
    # Train and eval currently use the same batch size.
    # TODO(zhangqiaorjc): Is this batch size too big for test?
    input_p.batch_size = global_batch_size
    input_p.seq_len = self.MAX_SEQ_LEN
    return base_input.LingvoInputAdaptor.Params().Set(
        input=input_p, is_training=is_training)

  def datasets(self) -> List[InstantiableParams]:
    """Returns train and eval dataset parameters, in that order."""
    return [
        self._dataset_common(is_training=True),
        self._dataset_common(is_training=False)
    ]
## Data parallel training.
@model_registry.register_model
class LmCloudTransformerAdam(SyntheticDataset):
  r"""32-layer Transformer LM trained with the Adam optimizer."""
  NUM_LAYERS = 32
  VOCAB_SIZE = 32000
  NUM_HEADS = 16
  MODEL_DIMS = 1024
  HIDDEN_DIMS = MODEL_DIMS * 4
  DROPOUT_PROB = 0.0
  LEARNING_RATE = 1e-3
  ENABLE_WHILE_LOOP = True

  def task(self) -> InstantiableParams:
    """Returns the task parameters."""
    model_p = model.LanguageModel.Params().Set(name='xformer_lm')
    lm_p = model_p.lm
    # Core transformer dimensions come straight from the class constants.
    lm_p.packed_input = True
    lm_p.model_dims = self.MODEL_DIMS
    lm_p.hidden_dims = self.HIDDEN_DIMS
    lm_p.num_layers = self.NUM_LAYERS
    lm_p.num_heads = self.NUM_HEADS
    lm_p.vocab_size = self.VOCAB_SIZE
    # Softmax: scale logits by sqrt(depth) and use a matching Gaussian init.
    lm_p.softmax_tpl.scale_sqrt_depth = True
    lm_p.softmax_tpl.params_init = WeightInit.Gaussian(
        1.0 / math.sqrt(self.MODEL_DIMS))
    lm_p.stacked_transformer_tpl = layers.StackedTransformer.Params()
    stacked_p = lm_p.stacked_transformer_tpl
    stacked_p.enable_while_loop = self.ENABLE_WHILE_LOOP
    stacked_p.dropout_prob = self.DROPOUT_PROB
    layer_p = stacked_p.transformer_layer_params_tpl
    # Cap attention logits and drop the attention projection bias.
    layer_p.tr_atten_tpl.atten_logit_cap = 50.0
    layer_p.tr_atten_tpl.use_bias = False
    lp = model_p.train.learner
    lp.loss_name = 'total_loss'
    lp.optimizer = optimizers.Adam.Params().Set(
        beta1=0.9,
        beta2=0.99,
        weight_decay=1e-3,
        clip_gradient_norm_to_value=5.0)
    lp.optimizer.learning_rate = self.LEARNING_RATE
    # Linear warmup for 4k steps, then exponential decay to 10% by 300k.
    lp.optimizer.lr_schedule = (
        schedules.LinearRampupExponentialDecay.Params().Set(
            warmup=4000,
            decay_start=4001,
            decay_end=300000,
            min_ratio=0.1,
            max=1.0))
    return model_p
@model_registry.register_model
class LmCloudTransformerAdamTest(LmCloudTransformerAdam):
  """Tiny 2-layer variant of LmCloudTransformerAdam for quick tests."""
  NUM_LAYERS = 2
## SPMD Model parallel training.
class LmCloudSpmd(SyntheticDataset):
  r"""Base config for an SPMD model."""
  NUM_LAYERS = 10
  MODEL_DIMS = 2048
  # Autodiff remat.
  CHECKPOINT_POLICY = layers.AutodiffCheckpointType.SAVE_NOTHING
  # Sub-class has to specify a mesh.
  MESH_SHAPE = None
  def task(self) -> InstantiableParams:
    """Returns the task parameters."""
    vocab_size = 32000
    num_layers = self.NUM_LAYERS
    model_dims = self.MODEL_DIMS
    hidden_dims = model_dims * 4
    # Head count is derived from a fixed 128 dims per attention head.
    dims_per_head = 128
    assert model_dims % dims_per_head == 0
    num_heads = int(model_dims / dims_per_head)
    dropout_prob = 0.0
    model_p = model.LanguageModel.Params().Set(name='xformer_lm')
    model_p.lm.packed_input = True
    model_p.lm.model_dims = model_dims
    model_p.lm.hidden_dims = hidden_dims
    model_p.lm.num_layers = num_layers
    model_p.lm.num_heads = num_heads
    model_p.lm.vocab_size = vocab_size
    model_p.lm.softmax_tpl.scale_sqrt_depth = True
    model_p.lm.softmax_tpl.soft_cap_logits = 30.0
    model_p.lm.stacked_transformer_tpl = layers.StackedTransformer.Params()
    model_p.lm.stacked_transformer_tpl.enable_while_loop = True
    model_p.lm.stacked_transformer_tpl.checkpoint_policy = (
        self.CHECKPOINT_POLICY)
    model_p.lm.stacked_transformer_tpl.dropout_prob = dropout_prob
    transformer_layer_p = (
        model_p.lm.stacked_transformer_tpl.transformer_layer_params_tpl)
    transformer_layer_p.tr_atten_tpl.atten_logit_cap = 50.0
    transformer_layer_p.tr_atten_tpl.use_bias = False
    transformer_layer_p.tr_atten_tpl.combine_qkv = True
    transformer_layer_p.tr_fflayer_tpl.activation = 'GELU'
    softmax_init = WeightInit.Gaussian(1.0 / math.sqrt(model_dims))
    model_p.lm.softmax_tpl.params_init = softmax_init
    # Enable bf16.
    model_p.fprop_dtype = jnp.bfloat16
    lp = model_p.train.learner
    lp.loss_name = 'total_loss'
    lp.optimizer = optimizers.Adam.Params().Set(
        beta1=0.9,
        beta2=0.99,
        weight_decay=1e-3,
        clip_gradient_norm_to_value=5.0)
    lp.optimizer.learning_rate = 2.5e-4
    lp.optimizer.lr_schedule = (
        schedules.LinearRampupExponentialDecay.Params().Set(
            warmup=4000,
            decay_start=4001,
            decay_end=300000,
            min_ratio=0.1,
            max=1.0))
    # Set sharding annotations.
    # Device mesh axes are [replica, data, mdl]; MESH_SHAPE gives their sizes.
    mesh_shape = self.MESH_SHAPE
    device_count = np.prod(mesh_shape)
    device_ids_mesh = np.arange(device_count).reshape(mesh_shape)
    model_p.device_mesh = device_ids_mesh
    replica_axis = 'replica'
    data_axis = 'data'
    mdl_axis = 'mdl'
    mesh_axis_names = [replica_axis, data_axis, mdl_axis]
    model_p.train.inputs_split_mapping = NestedMap(
        map_1d=((replica_axis, data_axis),),
        map_2d=((replica_axis, data_axis), None))
    model_p.train.decoder_inputs_split_mapping = NestedMap(
        map_1d=((replica_axis, data_axis),))
    model_p.train.decoder_states_split_mapping = NestedMap(
        map_0d=None,
        map_4d=(None, (replica_axis, data_axis), mdl_axis, None),
        # 5d inputs are for the decoder states of shape [layers, seq_len,
        # batch_size, num_heads, dims_per_head]
        map_5d=(None, None, (replica_axis, data_axis), mdl_axis, None),
    )
    model_p.train.save_interval_steps = 5000
    model_p.mesh_axis_names = mesh_axis_names
    model_p.lm = model_p.lm.cls.set_sharding_params_v1(
        model_p.lm,
        replica_axis=replica_axis,
        data_axis=data_axis,
        mdl_axis=mdl_axis,
        device_ids_mesh=device_ids_mesh,
        mesh_axis_names=mesh_axis_names)
    return model_p
@model_registry.register_model
class LmCloudSpmdTest(LmCloudSpmd):
  r"""SPMD model with small params for local CPU test run.
  Global batch size = 1 * 1 * 1 * 4 = 4
  """
  PERCORE_BATCH_SIZE = 4
  NUM_LAYERS = 2
  MODEL_DIMS = 64
  CHECKPOINT_POLICY = layers.AutodiffCheckpointType.SAVE_NOTHING
  # Trivial 1-device mesh: [replica, data, mdl].
  MESH_SHAPE = [1, 1, 1]
@model_registry.register_model
class LmCloudSpmd2B(LmCloudSpmd):
  r"""SPMD model with 2B params.
  Global batch size = 2 * 2 * 1 * 32 = 128
  """
  PERCORE_BATCH_SIZE = 32
  NUM_LAYERS = 18
  MODEL_DIMS = 3072
  CHECKPOINT_POLICY = layers.AutodiffCheckpointType.SAVE_NOTHING
  # Mesh sizes for the [replica, data, mdl] axes.
  MESH_SHAPE = [1, 4, 1]
@model_registry.register_model
class LmCloudSpmd32B(LmCloudSpmd):
  r"""SPMD model with 32B params.
  Global batch size = 4 * 4 * 4 * 8 = 512
  """
  PERCORE_BATCH_SIZE = 8
  NUM_LAYERS = 40
  MODEL_DIMS = 8192
  CHECKPOINT_POLICY = layers.AutodiffCheckpointType.SAVE_NOTHING
  # Mesh sizes for the [replica, data, mdl] axes.
  MESH_SHAPE = [1, 16, 4]
@model_registry.register_model
class LmCloudSpmd64B(LmCloudSpmd):
  r"""SPMD model with 64B params.
  Global batch size = 4 * 4 * 8 * 8 = 1024
  """
  PERCORE_BATCH_SIZE = 8
  NUM_LAYERS = 51
  MODEL_DIMS = 10240
  CHECKPOINT_POLICY = layers.AutodiffCheckpointType.SAVE_NOTHING
  # Mesh sizes for the [replica, data, mdl] axes.
  MESH_SHAPE = [1, 16, 8]
@model_registry.register_model
class LmCloudSpmd128B(LmCloudSpmd):
  r"""SPMD model with 128B params.
  Global batch size = 4 * 8 * 8 * 4 = 1024
  """
  PERCORE_BATCH_SIZE = 4
  NUM_LAYERS = 71
  MODEL_DIMS = 12288
  CHECKPOINT_POLICY = layers.AutodiffCheckpointType.SAVE_NOTHING
  # Mesh sizes for the [replica, data, mdl] axes.
  MESH_SHAPE = [1, 64, 4]
@model_registry.register_model
class LmCloudSpmd256B(LmCloudSpmd):
  r"""SPMD model with 256B params.
  Global batch size = 4 * 8 * 8 * 8 = 2048
  """
  PERCORE_BATCH_SIZE = 4
  NUM_LAYERS = 80
  MODEL_DIMS = 16384
  CHECKPOINT_POLICY = layers.AutodiffCheckpointType.SAVE_NOTHING
  # Mesh sizes for the [replica, data, mdl] axes.
  MESH_SHAPE = [1, 64, 8]
@model_registry.register_model
class LmCloudSpmd512B(LmCloudSpmd):
  r"""SPMD model with 512B params.
  Global batch size = 4 * 8 * 8 * 16 = 4096
  """
  PERCORE_BATCH_SIZE = 4
  NUM_LAYERS = 102
  MODEL_DIMS = 20480
  CHECKPOINT_POLICY = layers.AutodiffCheckpointType.SAVE_NOTHING
  # Mesh sizes for the [replica, data, mdl] axes.
  MESH_SHAPE = [1, 64, 16]
@model_registry.register_model
class LmCloudSpmd1024B(LmCloudSpmd):
  r"""SPMD model with 1024B params.
  Global batch size = 2 * 8 * 16 * 16 = 4096
  """
  PERCORE_BATCH_SIZE = 2
  NUM_LAYERS = 142
  MODEL_DIMS = 24576
  CHECKPOINT_POLICY = layers.AutodiffCheckpointType.SAVE_NOTHING
  # Mesh sizes for the [replica, data, mdl] axes.
  MESH_SHAPE = [1, 256, 8]
| [
"numpy.prod",
"lingvo.jax.tasks.lm.input_generator.SyntheticLmData.Params",
"jax.local_device_count",
"lingvo.jax.optimizers.Adam.Params",
"math.sqrt",
"lingvo.jax.layers.StackedTransformer.Params",
"lingvo.jax.model.LanguageModel.Params",
"lingvo.jax.schedules.LinearRampupExponentialDecay.Params",
... | [((1429, 1453), 'jax.local_device_count', 'jax.local_device_count', ([], {}), '()\n', (1451, 1453), False, 'import jax\n'), ((1529, 1569), 'lingvo.jax.tasks.lm.input_generator.SyntheticLmData.Params', 'input_generator.SyntheticLmData.Params', ([], {}), '()\n', (1567, 1569), False, 'from lingvo.jax.tasks.lm import input_generator\n'), ((3112, 3146), 'lingvo.jax.layers.StackedTransformer.Params', 'layers.StackedTransformer.Params', ([], {}), '()\n', (3144, 3146), False, 'from lingvo.jax import layers\n'), ((5348, 5382), 'lingvo.jax.layers.StackedTransformer.Params', 'layers.StackedTransformer.Params', ([], {}), '()\n', (5380, 5382), False, 'from lingvo.jax import layers\n'), ((6692, 6711), 'numpy.prod', 'np.prod', (['mesh_shape'], {}), '(mesh_shape)\n', (6699, 6711), True, 'import numpy as np\n'), ((1788, 1826), 'lingvo.jax.base_input.LingvoInputAdaptor.Params', 'base_input.LingvoInputAdaptor.Params', ([], {}), '()\n', (1824, 1826), False, 'from lingvo.jax import base_input\n'), ((2738, 2766), 'lingvo.jax.model.LanguageModel.Params', 'model.LanguageModel.Params', ([], {}), '()\n', (2764, 2766), False, 'from lingvo.jax import model\n'), ((3567, 3588), 'math.sqrt', 'math.sqrt', (['model_dims'], {}), '(model_dims)\n', (3576, 3588), False, 'import math\n'), ((3727, 3751), 'lingvo.jax.optimizers.Adam.Params', 'optimizers.Adam.Params', ([], {}), '()\n', (3749, 3751), False, 'from lingvo.jax import optimizers\n'), ((3957, 4004), 'lingvo.jax.schedules.LinearRampupExponentialDecay.Params', 'schedules.LinearRampupExponentialDecay.Params', ([], {}), '()\n', (4002, 4004), False, 'from lingvo.jax import schedules\n'), ((4923, 4951), 'lingvo.jax.model.LanguageModel.Params', 'model.LanguageModel.Params', ([], {}), '()\n', (4949, 4951), False, 'from lingvo.jax import model\n'), ((5983, 6004), 'math.sqrt', 'math.sqrt', (['model_dims'], {}), '(model_dims)\n', (5992, 6004), False, 'import math\n'), ((6202, 6226), 'lingvo.jax.optimizers.Adam.Params', 'optimizers.Adam.Params', ([], 
{}), '()\n', (6224, 6226), False, 'from lingvo.jax import optimizers\n'), ((6420, 6467), 'lingvo.jax.schedules.LinearRampupExponentialDecay.Params', 'schedules.LinearRampupExponentialDecay.Params', ([], {}), '()\n', (6465, 6467), False, 'from lingvo.jax import schedules\n'), ((6734, 6757), 'numpy.arange', 'np.arange', (['device_count'], {}), '(device_count)\n', (6743, 6757), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
'''
plt.figure()
plt.subplot(1,2,1)
linear_data = np.array([1,2,3,4,5,6,7,8])
plt.plot(linear_data, '-o')
exponential_data = linear_data**2
plt.subplot(1,2,2)
plt.plot(exponential_data, '-o')
plt.subplot(1,2,1)
plt.plot(exponential_data,'-x')
#New and Better Figure
plt.figure()
ax1 = plt.subplot(1,2,1)
plt.plot(linear_data,'-o')
ax2 = plt.subplot(1,2,2, sharey=ax1)
plt.plot(exponential_data,'-x')
plt.figure()
# the right hand side is equivalent shorthand syntax
plt.subplot(1,2,1) == plt.subplot(121)
#create a 3x3 grid of subplots
fig, ((ax1,ax2,ax3), (ax4,ax5,ax6),(ax7,ax8,ax9)) = plt.subplots(3,3, sharex=True,sharey=True)
ax5.plot(linear_data, '-')
#set inside tick labels to visible
for ax in plt.gcf().get_axes():
for label in ax.get_xticklabels() + ax.get_yticklabels():
label.set_visible(True)
#Redraw
plt.gcf().canvas.draw()
#Histograms
#create 2x2 grid of axis subplots
fig, ((ax1,ax2),(ax3,ax4)) = plt.subplots(2,2, sharex=True)
axs = [ax1,ax2,ax3,ax4]
#draw n = 10, 100, 1000, and 10000 samples from the normal distribution
for n in range(0,len(axs)):
sample_size = 10**(n+1)
sample =np.random.normal(loc=0.0,scale=1.0,size=sample_size)
axs[n].hist(sample,bins=100)
axs[n].set_title( 'n={}'.format(sample_size))
plt.figure()
Y = np.random.normal(loc=0.0, scale=1.0, size=10000)
X = np.random.random(size=10000)
plt.scatter(X,Y)
# use gridspec to partition the figure into subplots
import matplotlib.gridspec as gridspec
plt.figure()
gspec = gridspec.GridSpec(3,3)
top_histogram = plt.subplot(gspec[0,1:])
side_histogram = plt.subplot(gspec[1:,0])
lower_right = plt.subplot(gspec[1:,1:])
Y = np.random.normal(loc=0.0, scale=1.0, size=10000)
X = np.random.random(size=10000)
lower_right.scatter(X,Y)
top_histogram.hist(X,bins=100)
s=side_histogram.hist(Y,bins=100,orientation='horizontal')
# clear the histograms and plot normed histograms
top_histogram.clear()
top_histogram.hist(X, bins=100, normed=True)
side_histogram.clear()
side_histogram.hist(Y, bins=100, orientation='horizontal', normed=True)
# flip the side histogram's x axis
side_histogram.invert_xaxis()
# change axes limits
for ax in [top_histogram, lower_right]:
ax.set_xlim(0, 1)
for ax in [side_histogram, lower_right]:
ax.set_ylim(-5, 5)
#Box Plots
import pandas as pd
normal_sample = np.random.normal(loc=0.0, scale=1.0, size=10000)
random_sample = np.random.random(size = 10000)
gamma_sample = np.random.gamma(2, size=10000)
df = pd.DataFrame({'normal':normal_sample,
'random':random_sample,
'gamma': gamma_sample})
df.describe()
plt.figure()
# create a boxplot of the normal data, assign the output to a variable to supress output
_ = plt.boxplot(df['normal'], whis='range')
# clear the current figure
plt.clf()
# plot boxplots for all three of df's colmns
_ = plt.boxplot([df['normal'],df['random'],df['gamma']], whis='range')
plt.figure()
_ = plt.hist(df['gamma'], bins=100)
import mpl_toolkits.axes_grid1.inset_locator as mpl_il
plt.figure()
plt.boxplot([df['normal'], df['random'], df['gamma']], whis='range')
#overlay axis on top of another
ax2 =mpl_il.inset_axes(plt.gca(),width='60%',height='40%',loc=2)
ax2. hist(df['gamma'],bins=100)
ax2.margins(x=0.5)
#switch the y axis ticks for ax2 to the right side
ax2.yaxis.tick_right()
#if 'whis' argument isn't passed, boxplot defaults to showing 1.5*interquartile (IQR) whiskers with outliers
plt.figure()
_ = plt.boxplot([ df['normal'], df['random'], df['gamma']])
'''
# Heat maps: 2D histograms of 10k (X, Y) samples at two bin resolutions.
plt.figure()
Y = np.random.normal(loc=0.0, scale=1.0, size=10000)
X = np.random.random(size=10000)
_ = plt.hist2d(X, Y, bins=100)
plt.figure()
_ = plt.hist2d(X, Y, bins=25)
# Add a colorbar legend to the coarser heat map.
plt.colorbar()
| [
"numpy.random.normal",
"matplotlib.pyplot.hist2d",
"numpy.random.random",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.figure"
] | [((3724, 3736), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3734, 3736), True, 'import matplotlib.pyplot as plt\n'), ((3743, 3791), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.0)', 'scale': '(1.0)', 'size': '(10000)'}), '(loc=0.0, scale=1.0, size=10000)\n', (3759, 3791), True, 'import numpy as np\n'), ((3797, 3825), 'numpy.random.random', 'np.random.random', ([], {'size': '(10000)'}), '(size=10000)\n', (3813, 3825), True, 'import numpy as np\n'), ((3832, 3858), 'matplotlib.pyplot.hist2d', 'plt.hist2d', (['X', 'Y'], {'bins': '(100)'}), '(X, Y, bins=100)\n', (3842, 3858), True, 'import matplotlib.pyplot as plt\n'), ((3860, 3872), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3870, 3872), True, 'import matplotlib.pyplot as plt\n'), ((3878, 3903), 'matplotlib.pyplot.hist2d', 'plt.hist2d', (['X', 'Y'], {'bins': '(25)'}), '(X, Y, bins=25)\n', (3888, 3903), True, 'import matplotlib.pyplot as plt\n'), ((3928, 3942), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (3940, 3942), True, 'import matplotlib.pyplot as plt\n')] |
# Copyright 2019 Google LLC
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
import sys
import numpy as np
import dex_binary_object as dbo
sys.path.append("../jax")
from examples import datasets
def oneHotToInt(xs):
    """Collapse one-hot label rows (N, 10) into int64 class ids (N,)."""
    digits = np.arange(10)[None, :]
    xsInt = (xs * digits).sum(axis=1).astype(np.int64)
    print(xsInt.shape)
    # Sanity check: the dataset must contain every digit up to 9.
    assert np.max(xsInt) == 9
    return xsInt
# Load MNIST (flattened images + one-hot labels) and cast everything to float64.
data = tuple(x.astype(np.float64) for x in datasets.mnist())
train_images, train_labels, test_images, test_labels = data
# Restore the 28x28 image structure that datasets.mnist() flattened.
train_images_unflat = train_images.reshape((60000, 28, 28))
test_images_unflat = test_images.reshape( (10000, 28, 28))
train_labels_int = oneHotToInt(train_labels)
test_labels_int = oneHotToInt(test_labels)
data_out = (train_images_unflat, train_labels_int,
            test_images_unflat, test_labels_int)
# Serialize as a Dex binary object for consumption by Dex programs.
with open("scratch/mnist.dxbo", "w") as f:
  dbo.dump(data_out, f)
| [
"numpy.max",
"dex_binary_object.dump",
"examples.datasets.mnist",
"sys.path.append",
"numpy.arange"
] | [((257, 282), 'sys.path.append', 'sys.path.append', (['"""../jax"""'], {}), "('../jax')\n", (272, 282), False, 'import sys\n'), ((949, 970), 'dex_binary_object.dump', 'dbo.dump', (['data_out', 'f'], {}), '(data_out, f)\n', (957, 970), True, 'import dex_binary_object as dbo\n'), ((435, 448), 'numpy.max', 'np.max', (['xsInt'], {}), '(xsInt)\n', (441, 448), True, 'import numpy as np\n'), ((513, 529), 'examples.datasets.mnist', 'datasets.mnist', ([], {}), '()\n', (527, 529), False, 'from examples import datasets\n'), ((357, 370), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (366, 370), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributions as dist
from torch.utils.data import DataLoader, TensorDataset
from torchvision.utils import save_image, make_grid
from torchvision import datasets, transforms
import numpy as np
import math
from numpy import prod, sqrt
from .vae import VAE
from pvae.utils import Constants
# MNIST observation shape (channels, height, width) and its flattened size.
data_size = torch.Size([1, 28, 28])
data_dim = int(prod(data_size))
def extra_hidden_layer(hidden_dim, non_lin):
    """One hidden_dim -> hidden_dim linear layer followed by the non-linearity."""
    linear = nn.Linear(hidden_dim, hidden_dim)
    return nn.Sequential(linear, non_lin)
# Classes
class Enc(nn.Module):
    """ Generate latent parameters for MNIST. """
    def __init__(self, latent_dim, non_lin, prior_aniso, num_hidden_layers=1, hidden_dim=100):
        super(Enc, self).__init__()
        blocks = [nn.Sequential(nn.Linear(data_dim, hidden_dim), non_lin)]
        blocks += [extra_hidden_layer(hidden_dim, non_lin)
                   for _ in range(num_hidden_layers - 1)]
        self.enc = nn.Sequential(*blocks)
        self.fc21 = nn.Linear(hidden_dim, latent_dim)
        # Scale head is a single shared value unless the prior is anisotropic.
        self.fc22 = nn.Linear(hidden_dim, latent_dim if prior_aniso else 1)

    def forward(self, x):
        flat = x.view(*x.size()[:-3], -1)  # flatten the image dims
        hidden = self.enc(flat)
        mu = self.fc21(hidden)
        # Softplus keeps the scale positive; Constants.eta bounds it away from 0.
        scale = F.softplus(self.fc22(hidden)).expand(mu.size()) + Constants.eta
        return mu, scale
class Dec(nn.Module):
    """ Generate observation parameters for MNIST. """
    def __init__(self, latent_dim, non_lin, num_hidden_layers=1, hidden_dim=100):
        super(Dec, self).__init__()
        blocks = [nn.Sequential(nn.Linear(latent_dim, hidden_dim), non_lin)]
        blocks += [extra_hidden_layer(hidden_dim, non_lin)
                   for _ in range(num_hidden_layers - 1)]
        self.dec = nn.Sequential(*blocks)
        self.fc31 = nn.Linear(hidden_dim, data_dim)

    def forward(self, z):
        logits = self.fc31(self.dec(z))
        imgs = logits.view(*z.size()[:-1], *data_size)  # reshape to image dims
        # First element is the (fixed) likelihood temperature.
        return torch.tensor(1.0).to(z.device), imgs
class Mnist(VAE):
    """ Derive a specific sub-class of a VAE for MNIST. """
    def __init__(self, params):
        super(Mnist, self).__init__(
            dist.Normal,        # prior distribution
            dist.Normal,        # posterior distribution
            dist.RelaxedBernoulli,        # likelihood distribution
            Enc(params.latent_dim, getattr(nn, params.nl)(), params.prior_aniso, params.num_hidden_layers, params.hidden_dim),
            Dec(params.latent_dim, getattr(nn, params.nl)(), params.num_hidden_layers, params.hidden_dim),
            params
        )
        # Prior mean is fixed at zero; log-variance is optionally learnable.
        self._pz_mu = nn.Parameter(torch.zeros(1, params.latent_dim), requires_grad=False)
        self._pz_logvar = nn.Parameter(torch.zeros(1, 1), requires_grad=params.learn_prior_std)
        self.modelName = 'Mnist'

    def init_last_layer_bias(self, train_loader):
        """Initialize the decoder output bias to the logit of per-pixel mean intensity."""
        with torch.no_grad():
            p = torch.zeros(prod(data_size[1:]), device=self._pz_mu.device)
            N = 0
            for i, (data, _) in enumerate(train_loader):
                data = data.to(self._pz_mu.device)
                B = data.size(0)
                N += B
                p += data.view(-1, prod(data_size[1:])).sum(0)
            p /= N
            p += 1e-4  # avoid log(0) at the boundaries
            self.dec.fc31.bias.set_(p.log() - (1 - p).log())

    @staticmethod
    def getDataLoaders(batch_size, shuffle=True, device="cuda"):
        """Return (train_loader, test_loader) over MNIST with values clamped into (0, 1)."""
        kwargs = {'num_workers': 1, 'pin_memory': True} if device == "cuda" else {}
        # this is required if using the relaxedBernoulli because it doesn't
        # handle scoring values that are actually 0. or 1.
        tx = transforms.Compose([
            transforms.ToTensor(),
            transforms.Lambda(lambda p: p.clamp(Constants.eta, 1 - Constants.eta))
        ])
        train_loader = DataLoader(
            datasets.MNIST('data', train=True, download=True, transform=tx),
            batch_size=batch_size, shuffle=shuffle, **kwargs)
        test_loader = DataLoader(
            datasets.MNIST('data', train=False, download=True, transform=tx),
            batch_size=batch_size, shuffle=shuffle, **kwargs)
        return train_loader, test_loader

    def generate(self, runPath, epoch):
        """Sample from the model and save mean/means image grids to runPath."""
        N, K = 64, 9
        mean, means, samples = super(Mnist, self).generate(N, K)
        save_image(mean.sigmoid().data.cpu(), '{}/gen_mean_{:03d}.png'.format(runPath, epoch))
        save_image(means.data.cpu(), '{}/gen_means_{:03d}.png'.format(runPath, epoch))

    def reconstruct(self, data, runPath, epoch):
        """Reconstruct the first 8 inputs and save originals stacked over reconstructions."""
        recon = super(Mnist, self).reconstruct(data[:8])
        comp = torch.cat([data[:8], recon])
        save_image(comp.data.cpu(), '{}/recon_{:03d}.png'.format(runPath, epoch))
| [
"numpy.prod",
"torch.nn.Sequential",
"torch.tensor",
"torch.nn.Linear",
"torchvision.datasets.MNIST",
"torch.no_grad",
"torchvision.transforms.ToTensor",
"torch.Size",
"torch.zeros",
"torch.cat"
] | [((382, 405), 'torch.Size', 'torch.Size', (['[1, 28, 28]'], {}), '([1, 28, 28])\n', (392, 405), False, 'import torch\n'), ((421, 436), 'numpy.prod', 'prod', (['data_size'], {}), '(data_size)\n', (425, 436), False, 'from numpy import prod, sqrt\n'), ((510, 543), 'torch.nn.Linear', 'nn.Linear', (['hidden_dim', 'hidden_dim'], {}), '(hidden_dim, hidden_dim)\n', (519, 543), True, 'import torch.nn as nn\n'), ((992, 1015), 'torch.nn.Sequential', 'nn.Sequential', (['*modules'], {}), '(*modules)\n', (1005, 1015), True, 'import torch.nn as nn\n'), ((1036, 1069), 'torch.nn.Linear', 'nn.Linear', (['hidden_dim', 'latent_dim'], {}), '(hidden_dim, latent_dim)\n', (1045, 1069), True, 'import torch.nn as nn\n'), ((1090, 1145), 'torch.nn.Linear', 'nn.Linear', (['hidden_dim', '(latent_dim if prior_aniso else 1)'], {}), '(hidden_dim, latent_dim if prior_aniso else 1)\n', (1099, 1145), True, 'import torch.nn as nn\n'), ((1763, 1786), 'torch.nn.Sequential', 'nn.Sequential', (['*modules'], {}), '(*modules)\n', (1776, 1786), True, 'import torch.nn as nn\n'), ((1807, 1838), 'torch.nn.Linear', 'nn.Linear', (['hidden_dim', 'data_dim'], {}), '(hidden_dim, data_dim)\n', (1816, 1838), True, 'import torch.nn as nn\n'), ((4611, 4639), 'torch.cat', 'torch.cat', (['[data[:8], recon]'], {}), '([data[:8], recon])\n', (4620, 4639), False, 'import torch\n'), ((2636, 2669), 'torch.zeros', 'torch.zeros', (['(1)', 'params.latent_dim'], {}), '(1, params.latent_dim)\n', (2647, 2669), False, 'import torch\n'), ((2731, 2748), 'torch.zeros', 'torch.zeros', (['(1)', '(1)'], {}), '(1, 1)\n', (2742, 2748), False, 'import torch\n'), ((2885, 2900), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2898, 2900), False, 'import torch\n'), ((3838, 3901), 'torchvision.datasets.MNIST', 'datasets.MNIST', (['"""data"""'], {'train': '(True)', 'download': '(True)', 'transform': 'tx'}), "('data', train=True, download=True, transform=tx)\n", (3852, 3901), False, 'from torchvision import datasets, transforms\n'), ((4011, 
4075), 'torchvision.datasets.MNIST', 'datasets.MNIST', (['"""data"""'], {'train': '(False)', 'download': '(True)', 'transform': 'tx'}), "('data', train=False, download=True, transform=tx)\n", (4025, 4075), False, 'from torchvision import datasets, transforms\n'), ((826, 857), 'torch.nn.Linear', 'nn.Linear', (['data_dim', 'hidden_dim'], {}), '(data_dim, hidden_dim)\n', (835, 857), True, 'import torch.nn as nn\n'), ((1595, 1628), 'torch.nn.Linear', 'nn.Linear', (['latent_dim', 'hidden_dim'], {}), '(latent_dim, hidden_dim)\n', (1604, 1628), True, 'import torch.nn as nn\n'), ((2930, 2949), 'numpy.prod', 'prod', (['data_size[1:]'], {}), '(data_size[1:])\n', (2934, 2949), False, 'from numpy import prod, sqrt\n'), ((3674, 3695), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3693, 3695), False, 'from torchvision import datasets, transforms\n'), ((1978, 1995), 'torch.tensor', 'torch.tensor', (['(1.0)'], {}), '(1.0)\n', (1990, 1995), False, 'import torch\n'), ((3195, 3214), 'numpy.prod', 'prod', (['data_size[1:]'], {}), '(data_size[1:])\n', (3199, 3214), False, 'from numpy import prod, sqrt\n')] |
import numpy as np
"""
output a list of points consumable by openscad polygon function
"""
def wave(degs, scale=10):
    """Sample a sine wave as [x, y] points for OpenSCAD's polygon().

    Args:
        degs: number of one-degree samples to generate.
        scale: wave amplitude; x advances scale/180 units per degree.

    Returns:
        List of [x, y] pairs, one per sampled degree.
    """
    pts = []
    # Bug fix: the original used `xrange`, which only exists in Python 2;
    # this file targets Python 3 (print() calls), so use `range`.
    for i in range(degs):
        rad = i * np.pi / 180.0
        x = i / 180.0 * scale
        y = np.sin(rad) * scale
        pts.append([x, y])
    return pts
def pwave(degs, scale=20):
    """Print the sampled wave as an OpenSCAD `points` assignment; returns 0."""
    # default scale of 20 would give: 0 < x < 40 units
    pts = wave(degs, scale=scale)
    print("points = {};".format(pts))
    print("*** finito ***")
    return 0
if __name__ == "__main__":
    # Emit a 359-degree sweep at amplitude 100.
    pwave(359, scale=100)
| [
"numpy.sin"
] | [((229, 240), 'numpy.sin', 'np.sin', (['rad'], {}), '(rad)\n', (235, 240), True, 'import numpy as np\n')] |
import numpy as np
import torch
import gtimer as gt
import lifelong_rl.torch.pytorch_util as ptu
from lifelong_rl.trainers.lisp.mb_skill import MBSkillTrainer
import lifelong_rl.util.pythonplusplus as ppp
from lifelong_rl.util.eval_util import create_stats_ordered_dict
class LiSPTrainer(MBSkillTrainer):

    """
    Lifelong Skill-Space Planning (Lu et al. 2020).
    Learning skills using model-based rollouts with a skill-practice distribution.
    Should be combined with an MPC planner for acting.
    """

    def __init__(
            self,
            skill_practice_dist,            # Associated skill-practice distribution for generating latents
            skill_practice_trainer,         # Associated trainer for skill-practice distribution (ex. SAC)
            practice_train_steps=32,        # Number of training steps for skill-practice distribution
            practice_batch_size=256,        # Batch size of training skill-practice distribution
            num_unif_train_calls=0,         # Optionally, don't use skill practice distribution early on
            epsilon_greedy=0.,              # Optionally, sample latents from uniform with probability eps
            *args,
            **kwargs,
    ):
        super().__init__(*args, **kwargs)

        self.skill_practice_dist = skill_practice_dist
        self.skill_practice_trainer = skill_practice_trainer
        self.practice_train_steps = practice_train_steps
        self.practice_batch_size = practice_batch_size
        self.num_unif_train_calls = num_unif_train_calls
        self.epsilon_greedy = epsilon_greedy

    def generate_latents(self, obs):
        """Sample latents for obs from the skill-practice distribution.

        Falls back to the parent's uniform sampling for the first
        num_unif_train_calls, and mixes in uniform latents with probability
        epsilon_greedy per row.
        """
        if self._train_calls < self.num_unif_train_calls:
            return super().generate_latents(obs)
        latents, *_ = self.skill_practice_dist(ptu.from_numpy(obs))
        latents = ptu.get_numpy(latents)
        if self.epsilon_greedy > 0:
            unif_r = np.random.uniform(0, 1, size=latents.shape[0])
            eps_replace = unif_r < self.epsilon_greedy
            unif_latents = super().generate_latents(obs[eps_replace])
            latents[eps_replace] = unif_latents
        return latents

    def train_from_torch(self, batch):
        """Run the parent skill training step, then (every train_every calls)
        train the skill-practice distribution on replayed (s, z, r) tuples."""
        super().train_from_torch(batch)
        if self._train_calls % self.train_every > 0:
            return
        for _ in range(self.practice_train_steps):
            batch = ppp.sample_batch(
                self.practice_batch_size,
                observations=self._obs[:self._cur_replay_size],
                next_observations=self._next_obs[:self._cur_replay_size],
                actions=self._latents[:self._cur_replay_size],
                rewards=self._rewards[:self._cur_replay_size],
            )
            batch = ptu.np_to_pytorch_batch(batch)
            self.skill_practice_trainer.train_from_torch(batch)
        for k, v in self.skill_practice_trainer.get_diagnostics().items():
            self.eval_statistics['prior_trainer/' + k] = v

    def train_from_buffer(self, reward_kwargs=None):
        """
        Compute intrinsic reward: approximate lower bound to I(s'; z | s)
        """
        if self.relabel_rewards:
            rewards, (logp, logp_altz, denom), reward_diagnostics = self.calculate_intrinsic_rewards(
                self._obs[:self._cur_replay_size],
                self._next_obs[:self._cur_replay_size],
                self._latents[:self._cur_replay_size],
                reward_kwargs=reward_kwargs
            )
            orig_rewards = rewards.copy()
            rewards, postproc_dict = self.reward_postprocessing(rewards, reward_kwargs=reward_kwargs)
            reward_diagnostics.update(postproc_dict)
            self._rewards[:self._cur_replay_size] = np.expand_dims(rewards, axis=-1)

            gt.stamp('intrinsic reward calculation', unique=False)

        """
        Train policy
        """

        # Policy conditions on (state, latent) concatenations.
        state_latents = np.concatenate([self._obs, self._latents], axis=-1)[:self._cur_replay_size]
        next_state_latents = np.concatenate(
            [self._true_next_obs, self._latents], axis=-1)[:self._cur_replay_size]

        for _ in range(self.num_policy_updates):
            batch = ppp.sample_batch(
                self.policy_batch_size,
                observations=state_latents,
                next_observations=next_state_latents,
                actions=self._actions[:self._cur_replay_size],
                rewards=self._rewards[:self._cur_replay_size],
            )

            batch = ptu.np_to_pytorch_batch(batch)
            self.policy_trainer.train_from_torch(batch)

        gt.stamp('policy training', unique=False)

        """
        Diagnostics
        """

        if self._need_to_update_eval_statistics:
            self.eval_statistics.update(self.policy_trainer.eval_statistics)

            if self.relabel_rewards:
                self.eval_statistics.update(reward_diagnostics)

                self.eval_statistics.update(create_stats_ordered_dict(
                    'Discriminator Log Pis',
                    logp,
                ))
                self.eval_statistics.update(create_stats_ordered_dict(
                    'Discriminator Alt Log Pis',
                    logp_altz,
                ))
                self.eval_statistics.update(create_stats_ordered_dict(
                    'Intrinsic Reward Denominator',
                    denom,
                ))

                # Adjustment so intrinsic rewards are over last epoch
                if self._ptr < self._epoch_size:
                    if self._ptr == 0:
                        inds = np.r_[len(rewards)-self._epoch_size:len(rewards)]
                    else:
                        inds = np.r_[0:self._ptr,len(rewards)-self._ptr:len(rewards)]
                else:
                    inds = np.r_[self._ptr-self._epoch_size:self._ptr]

                self.eval_statistics.update(create_stats_ordered_dict(
                    'Intrinsic Rewards (Original)',
                    orig_rewards[inds],
                ))
                self.eval_statistics.update(create_stats_ordered_dict(
                    'Intrinsic Rewards (Processed)',
                    rewards[inds],
                ))

        self._n_train_steps_total += 1

    def end_epoch(self, epoch):
        """Propagate end-of-epoch bookkeeping to the skill-practice trainer."""
        super().end_epoch(epoch)
        self.skill_practice_trainer.end_epoch(epoch)

    @property
    def networks(self):
        # All trainable modules, so callers can move them to device / set train mode.
        return self.skill_practice_trainer.networks + self.policy_trainer.networks + [
            self.discriminator, self.skill_practice_dist,
        ]

    def get_snapshot(self):
        """Snapshot includes the skill-practice distribution and its trainer state."""
        snapshot = super().get_snapshot()
        snapshot['skill_practice'] = self.skill_practice_dist
        for k, v in self.skill_practice_trainer.get_snapshot().items():
            snapshot['skill_practice_trainer/' + k] = v
        return snapshot
| [
"lifelong_rl.torch.pytorch_util.get_numpy",
"lifelong_rl.torch.pytorch_util.from_numpy",
"lifelong_rl.util.pythonplusplus.sample_batch",
"lifelong_rl.torch.pytorch_util.np_to_pytorch_batch",
"lifelong_rl.util.eval_util.create_stats_ordered_dict",
"numpy.expand_dims",
"numpy.random.uniform",
"numpy.con... | [((1808, 1830), 'lifelong_rl.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['latents'], {}), '(latents)\n', (1821, 1830), True, 'import lifelong_rl.torch.pytorch_util as ptu\n'), ((4561, 4602), 'gtimer.stamp', 'gt.stamp', (['"""policy training"""'], {'unique': '(False)'}), "('policy training', unique=False)\n", (4569, 4602), True, 'import gtimer as gt\n'), ((1769, 1788), 'lifelong_rl.torch.pytorch_util.from_numpy', 'ptu.from_numpy', (['obs'], {}), '(obs)\n', (1783, 1788), True, 'import lifelong_rl.torch.pytorch_util as ptu\n'), ((1888, 1934), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {'size': 'latents.shape[0]'}), '(0, 1, size=latents.shape[0])\n', (1905, 1934), True, 'import numpy as np\n'), ((2356, 2612), 'lifelong_rl.util.pythonplusplus.sample_batch', 'ppp.sample_batch', (['self.practice_batch_size'], {'observations': 'self._obs[:self._cur_replay_size]', 'next_observations': 'self._next_obs[:self._cur_replay_size]', 'actions': 'self._latents[:self._cur_replay_size]', 'rewards': 'self._rewards[:self._cur_replay_size]'}), '(self.practice_batch_size, observations=self._obs[:self.\n _cur_replay_size], next_observations=self._next_obs[:self.\n _cur_replay_size], actions=self._latents[:self._cur_replay_size],\n rewards=self._rewards[:self._cur_replay_size])\n', (2372, 2612), True, 'import lifelong_rl.util.pythonplusplus as ppp\n'), ((2714, 2744), 'lifelong_rl.torch.pytorch_util.np_to_pytorch_batch', 'ptu.np_to_pytorch_batch', (['batch'], {}), '(batch)\n', (2737, 2744), True, 'import lifelong_rl.torch.pytorch_util as ptu\n'), ((3703, 3735), 'numpy.expand_dims', 'np.expand_dims', (['rewards'], {'axis': '(-1)'}), '(rewards, axis=-1)\n', (3717, 3735), True, 'import numpy as np\n'), ((3749, 3803), 'gtimer.stamp', 'gt.stamp', (['"""intrinsic reward calculation"""'], {'unique': '(False)'}), "('intrinsic reward calculation', unique=False)\n", (3757, 3803), True, 'import gtimer as gt\n'), ((3875, 3926), 'numpy.concatenate', 
'np.concatenate', (['[self._obs, self._latents]'], {'axis': '(-1)'}), '([self._obs, self._latents], axis=-1)\n', (3889, 3926), True, 'import numpy as np\n'), ((3980, 4041), 'numpy.concatenate', 'np.concatenate', (['[self._true_next_obs, self._latents]'], {'axis': '(-1)'}), '([self._true_next_obs, self._latents], axis=-1)\n', (3994, 4041), True, 'import numpy as np\n'), ((4149, 4358), 'lifelong_rl.util.pythonplusplus.sample_batch', 'ppp.sample_batch', (['self.policy_batch_size'], {'observations': 'state_latents', 'next_observations': 'next_state_latents', 'actions': 'self._actions[:self._cur_replay_size]', 'rewards': 'self._rewards[:self._cur_replay_size]'}), '(self.policy_batch_size, observations=state_latents,\n next_observations=next_state_latents, actions=self._actions[:self.\n _cur_replay_size], rewards=self._rewards[:self._cur_replay_size])\n', (4165, 4358), True, 'import lifelong_rl.util.pythonplusplus as ppp\n'), ((4465, 4495), 'lifelong_rl.torch.pytorch_util.np_to_pytorch_batch', 'ptu.np_to_pytorch_batch', (['batch'], {}), '(batch)\n', (4488, 4495), True, 'import lifelong_rl.torch.pytorch_util as ptu\n'), ((4922, 4978), 'lifelong_rl.util.eval_util.create_stats_ordered_dict', 'create_stats_ordered_dict', (['"""Discriminator Log Pis"""', 'logp'], {}), "('Discriminator Log Pis', logp)\n", (4947, 4978), False, 'from lifelong_rl.util.eval_util import create_stats_ordered_dict\n'), ((5083, 5148), 'lifelong_rl.util.eval_util.create_stats_ordered_dict', 'create_stats_ordered_dict', (['"""Discriminator Alt Log Pis"""', 'logp_altz'], {}), "('Discriminator Alt Log Pis', logp_altz)\n", (5108, 5148), False, 'from lifelong_rl.util.eval_util import create_stats_ordered_dict\n'), ((5253, 5317), 'lifelong_rl.util.eval_util.create_stats_ordered_dict', 'create_stats_ordered_dict', (['"""Intrinsic Reward Denominator"""', 'denom'], {}), "('Intrinsic Reward Denominator', denom)\n", (5278, 5317), False, 'from lifelong_rl.util.eval_util import create_stats_ordered_dict\n'), 
((5868, 5945), 'lifelong_rl.util.eval_util.create_stats_ordered_dict', 'create_stats_ordered_dict', (['"""Intrinsic Rewards (Original)"""', 'orig_rewards[inds]'], {}), "('Intrinsic Rewards (Original)', orig_rewards[inds])\n", (5893, 5945), False, 'from lifelong_rl.util.eval_util import create_stats_ordered_dict\n'), ((6050, 6123), 'lifelong_rl.util.eval_util.create_stats_ordered_dict', 'create_stats_ordered_dict', (['"""Intrinsic Rewards (Processed)"""', 'rewards[inds]'], {}), "('Intrinsic Rewards (Processed)', rewards[inds])\n", (6075, 6123), False, 'from lifelong_rl.util.eval_util import create_stats_ordered_dict\n')] |
from typing import Tuple
import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import connected_components
from dft_dummy.crystal_utils import calc_reciprocal, project_points
from dft_dummy.symmetry import (
calc_overlap_matrix,
check_symmetry,
possible_unitary_rotations,
)
def reduce_kpts(kpts: np.ndarray, vec: np.ndarray) -> Tuple[int, np.ndarray]:
    """reduce kpoints to the irreducible ones
    Args:
        kpts (np.ndarray): Nx3 kmesh in the 1st Brillouin Zone in crystal coordinates
        vec (np.ndarray): 3x3 lattice basis vectors
    Returns:
        Tuple[int, np.ndarray]: number of irreducible kpoints and
            label array that maps the `kpts` to each (irreducible) kind.
    """
    overlap = calc_overlap_matrix(vec)
    symmetry_ops = possible_unitary_rotations()
    # Keep only the candidate rotations that are actual symmetries of the lattice.
    valid_ops = [
        (i, sym)
        for i, sym in enumerate(symmetry_ops)
        if check_symmetry(sym, vec, overlap)
    ]
    rvec = calc_reciprocal(vec)
    nkpts = len(kpts)

    # Collect graph edges as COO-style (row, col) lists and build the sparse
    # matrix once at the end: per-item assignment into a csr_matrix is O(nnz)
    # per write and triggers scipy's SparseEfficiencyWarning.
    rows = []
    cols = []

    def add_edges(src_idx, sym, kcart):
        # Connect kpoint `src_idx` with every mesh point its rotated image lands on.
        # (The original closure read the loop variable `i` by late binding; the
        # index is now passed explicitly.)
        krot = sym.dot(kcart)  # rotated in cartisian system
        krot_crys = vec.dot(krot)  # back to crystal system
        krot_crys_moved = krot_crys - np.floor(krot_crys)  # move to the 1st BZ
        # find the index of the moved point in the original mesh
        norms = np.linalg.norm(kpts - krot_crys_moved, axis=1)
        matches = np.nonzero(norms < 1e-6)[0]
        rows.extend([src_idx] * len(matches))
        cols.extend(matches.tolist())

    for i, kcrys in enumerate(kpts):
        kcart = project_points(rvec, kcrys).ravel()  # flatten as it is one point
        for _, sym in valid_ops:
            add_edges(i, sym, kcart)
            add_edges(i, -sym, kcart)  # inverse symmetry

    # Duplicate (row, col) pairs are summed by the CSR constructor; only the
    # nonzero structure matters for the connectivity search below.
    graph_matrix = csr_matrix(
        (np.ones(len(rows), dtype=int), (rows, cols)), shape=(nkpts, nkpts)
    )
    return connected_components(
        csgraph=graph_matrix, directed=False, return_labels=True
    )
| [
"dft_dummy.symmetry.calc_overlap_matrix",
"scipy.sparse.csgraph.connected_components",
"dft_dummy.symmetry.check_symmetry",
"dft_dummy.crystal_utils.calc_reciprocal",
"numpy.floor",
"dft_dummy.crystal_utils.project_points",
"numpy.linalg.norm",
"scipy.sparse.csr_matrix",
"dft_dummy.symmetry.possible... | [((766, 790), 'dft_dummy.symmetry.calc_overlap_matrix', 'calc_overlap_matrix', (['vec'], {}), '(vec)\n', (785, 790), False, 'from dft_dummy.symmetry import calc_overlap_matrix, check_symmetry, possible_unitary_rotations\n'), ((810, 838), 'dft_dummy.symmetry.possible_unitary_rotations', 'possible_unitary_rotations', ([], {}), '()\n', (836, 838), False, 'from dft_dummy.symmetry import calc_overlap_matrix, check_symmetry, possible_unitary_rotations\n'), ((983, 1003), 'dft_dummy.crystal_utils.calc_reciprocal', 'calc_reciprocal', (['vec'], {}), '(vec)\n', (998, 1003), False, 'from dft_dummy.crystal_utils import calc_reciprocal, project_points\n'), ((1500, 1537), 'scipy.sparse.csr_matrix', 'csr_matrix', (['(nkpts, nkpts)'], {'dtype': 'int'}), '((nkpts, nkpts), dtype=int)\n', (1510, 1537), False, 'from scipy.sparse import csr_matrix\n'), ((1811, 1889), 'scipy.sparse.csgraph.connected_components', 'connected_components', ([], {'csgraph': 'graph_matrix', 'directed': '(False)', 'return_labels': '(True)'}), '(csgraph=graph_matrix, directed=False, return_labels=True)\n', (1831, 1889), False, 'from scipy.sparse.csgraph import connected_components\n'), ((1350, 1396), 'numpy.linalg.norm', 'np.linalg.norm', (['(kpts - krot_crys_moved)'], {'axis': '(1)'}), '(kpts - krot_crys_moved, axis=1)\n', (1364, 1396), True, 'import numpy as np\n'), ((931, 964), 'dft_dummy.symmetry.check_symmetry', 'check_symmetry', (['sym', 'vec', 'overlap'], {}), '(sym, vec, overlap)\n', (945, 964), False, 'from dft_dummy.symmetry import calc_overlap_matrix, check_symmetry, possible_unitary_rotations\n'), ((1227, 1246), 'numpy.floor', 'np.floor', (['krot_crys'], {}), '(krot_crys)\n', (1235, 1246), True, 'import numpy as np\n'), ((1591, 1618), 'dft_dummy.crystal_utils.project_points', 'project_points', (['rvec', 'kcrys'], {}), '(rvec, kcrys)\n', (1605, 1618), False, 'from dft_dummy.crystal_utils import calc_reciprocal, project_points\n')] |
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. *
from unittest import TestCase
import pandas as pd
import numpy as np
from preprocess import (
DataProcessor,
feature_columns_names,
label_column,
feature_columns_dtype,
label_column_dtype,
)
class TestPreProcess(TestCase):
    """Unit tests for DataProcessor.process()."""

    def test_process_data(self):
        """process() should scale numeric columns and one-hot encode the first column."""
        raw_rows = [
            ["M", 5, 0.3, 1, 0.3, 2, 1, 0, 10],
            ["F", 3, 0.2, 2, 0.2, 1, 3, 0, 7],
            ["I", 2, 0.5, 3, 0.1, 1, 2, 0, 5],
        ]
        frame = pd.DataFrame(
            raw_rows,
            columns=feature_columns_names + [label_column],
        )
        dtypes = DataProcessor.merge_two_dicts(feature_columns_dtype, label_column_dtype)
        frame = frame.astype(dtypes)

        processed = DataProcessor(frame).process()
        actual = np.around(processed, 2)

        expected = [
            [10, 1.34, -0.27, -1.22, 1.22, 1.41, -1.22, 0, 0, 0, 1],
            [7, -0.27, -1.07, 0, 0, -0.71, 1.22, 0, 1, 0, 0],
            [5, -1.07, 1.34, 1.22, -1.22, -0.71, 0, 0, 0, 1, 0],
        ]
        np.testing.assert_array_equal(actual, expected)
| [
"numpy.testing.assert_array_equal",
"numpy.around",
"pandas.DataFrame",
"preprocess.DataProcessor.merge_two_dicts",
"preprocess.DataProcessor"
] | [((1565, 1743), 'pandas.DataFrame', 'pd.DataFrame', (["[['M', 5, 0.3, 1, 0.3, 2, 1, 0, 10], ['F', 3, 0.2, 2, 0.2, 1, 3, 0, 7], [\n 'I', 2, 0.5, 3, 0.1, 1, 2, 0, 5]]"], {'columns': '(feature_columns_names + [label_column])'}), "([['M', 5, 0.3, 1, 0.3, 2, 1, 0, 10], ['F', 3, 0.2, 2, 0.2, 1, \n 3, 0, 7], ['I', 2, 0.5, 3, 0.1, 1, 2, 0, 5]], columns=\n feature_columns_names + [label_column])\n", (1577, 1743), True, 'import pandas as pd\n'), ((1987, 2010), 'preprocess.DataProcessor', 'DataProcessor', (['input_df'], {}), '(input_df)\n', (2000, 2010), False, 'from preprocess import DataProcessor, feature_columns_names, label_column, feature_columns_dtype, label_column_dtype\n'), ((2081, 2106), 'numpy.around', 'np.around', (['output_data', '(2)'], {}), '(output_data, 2)\n', (2090, 2106), True, 'import numpy as np\n'), ((2115, 2175), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['round_output', 'expected_output'], {}), '(round_output, expected_output)\n', (2144, 2175), True, 'import numpy as np\n'), ((1879, 1951), 'preprocess.DataProcessor.merge_two_dicts', 'DataProcessor.merge_two_dicts', (['feature_columns_dtype', 'label_column_dtype'], {}), '(feature_columns_dtype, label_column_dtype)\n', (1908, 1951), False, 'from preprocess import DataProcessor, feature_columns_names, label_column, feature_columns_dtype, label_column_dtype\n')] |
# -*- coding: utf-8 -*-
"""
.. Authors
<NAME> <<EMAIL>>
<NAME> <<EMAIL>>
<NAME> <<EMAIL>>
Contains the XicsrtPlasmaGeneric class.
"""
import logging
import numpy as np
from xicsrt.util import profiler
from xicsrt.tools import xicsrt_spread
from xicsrt.tools.xicsrt_doc import dochelper
from xicsrt.objects._GeometryObject import GeometryObject
from xicsrt.sources._XicsrtSourceFocused import XicsrtSourceFocused
@dochelper
class XicsrtPlasmaGeneric(GeometryObject):
    """
    A generic plasma object.

    Plasma object will generate a set of ray bundles where each ray bundle
    has the properties of the plasma at one particular real-space point.
    Each bundle is modeled by a SourceFocused object.

    .. Note::
      If a `voxel` type bundle is used rays may be generated outside of the
      defined plasma volume (as defined by xsize, ysize and zsize). The bundle
      *centers* are randomly distributed throughout the plasma volume, but this
      means that if a bundle is (randomly) placed near the edges of the plasma
      then the bundle voxel volume may extend past the plasma boundary. This
      behavior is expected. If it is important to have a sharp plasma boundary
      then consider using the 'point' bundle_type instead.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Filter objects are registered externally and applied in bundle_filter().
        self.filter_objects = []

    def default_config(self):
        """
        xsize
          The size of this element along the xaxis direction.
        ysize
          The size of this element along the yaxis direction.
        zsize
          The size of this element along the zaxis direction.
        angular_dist : string ('isotropic')
          The type of angular distribution to use for the emitted rays.
          Available distributions: 'isotropic', 'isotropic_xy', 'flat',
          'flat_xy', 'gaussian', and 'gaussian_flat'.
          See `XicsrtSourceGeneric` for documentation of each distribution.
          Warning: Only the 'isotropic' distribution is currently supported!
        spread: float (None) [radians]
          The angular spread for the emission cone. The spread defines the
          half-angle of the cone. See 'angular_dist' in :any:`XicsrtSourceGeneric`
          for detailed documentation.
        spread_radius: float (None) [meters]
          If specified, the spread will be calculated for each bundle such that
          the spotsize at the target matches the given radius. This is useful
          when working with very extended plasma sources.
          This options is incompatible with 'spread'.
        use_poisson
          No documentation yet. Please help improve XICSRT!
        wavelength_dist : string ('voigt')
          No documentation yet. Please help improve XICSRT!
        wavelength : float (1.0) [Angstroms]
          No documentation yet. Please help improve XICSRT!
        mass_number : float (1.0) [au]
          No documentation yet. Please help improve XICSRT!
        linewidth : float (0.0) [1/s]
          No documentation yet. Please help improve XICSRT!
        emissivity : float (0.0) [ph/m^3]
          No documentation yet. Please help improve XICSRT!
        temperature : float (0.0) [eV]
          No documentation yet. Please help improve XICSRT!
        velocity : float (0.0) [m/s]
          No documentation yet. Please help improve XICSRT!
        time_resolution : float (1e-3) [s]
          No documentation yet. Please help improve XICSRT!
        bundle_type : string ('voxel')
          Define how the origin of rays within the bundle should be distributed.
          Available options are: 'voxel' or 'point'.
        bundle_volume : float (1e-3) [m^3]
          The volume in which the rays within the bundle should distributed.
          if bundle_type is 'point' this will not affect the distribution,
          though it will still affect the number of bundles if bundle_count
          is set to None.
        bundle_count : int (None)
          The number of bundles to generate. If set to `None` then this number
          will be automatically determined by volume/bundle_volume. This default
          means that each bundle represents exactly the given `bundle_volume` in
          the plasma. For high quality raytracing studies this value should
          generally be set to a value much larger than volume/bundle_volume!
        max_rays : int (1e7)
          No documentation yet. Please help improve XICSRT!
        max_bundles : int (1e7)
          No documentation yet. Please help improve XICSRT!
        filters
          No documentation yet. Please help improve XICSRT!
        """
        config = super().default_config()
        config['xsize'] = 0.0
        config['ysize'] = 0.0
        config['zsize'] = 0.0
        config['angular_dist'] = 'isotropic'
        config['spread'] = None
        config['spread_radius'] = None
        config['target'] = None
        config['use_poisson'] = False
        config['wavelength_dist'] = 'voigt'
        config['wavelength'] = 1.0
        config['wavelength_range'] = None
        config['mass_number'] = 1.0
        config['linewidth'] = 0.0
        config['emissivity'] = 0.0
        config['temperature'] = 0.0
        config['velocity'] = 0.0
        config['time_resolution'] = 1e-3
        config['bundle_type'] = 'voxel'
        config['bundle_volume'] = 1e-6
        config['bundle_count'] = None
        config['max_rays'] = int(1e7)
        config['max_bundles'] = int(1e7)
        config['filters'] = []
        return config

    def initialize(self):
        super().initialize()

        self.param['max_rays'] = int(self.param['max_rays'])
        self.param['volume'] = self.config['xsize'] * self.config['ysize'] * self.config['zsize']

        if self.param['bundle_count'] is None:
            # Default: one bundle per bundle_volume of plasma.
            self.param['bundle_count'] = self.param['volume']/self.param['bundle_volume']
        self.param['bundle_count'] = int(np.round(self.param['bundle_count']))

        if self.param['bundle_count'] < 1:
            # Fix: was `raise Exception(f'...')` -- a placeholder-free f-string
            # and an overly broad exception type.
            raise ValueError('Bundle volume is larger than the plasma volume.')

        if self.param['bundle_count'] > self.param['max_bundles']:
            raise ValueError(
                f"Current settings will produce too many bundles ({self.param['bundle_count']:0.2e}). "
                f"Increase the bundle_volume, explicitly set bundle_count or increase max_bundles.")

    def setup_bundles(self):
        """
        Allocate the per-bundle arrays and randomly place the bundle centers
        inside the plasma box. The property arrays (temperature, emissivity,
        velocity) hold placeholder values that a derived class is expected to
        overwrite in `bundle_generate`.
        """
        self.log.debug('Starting setup_bundles')

        if self.param['bundle_type'] == 'point':
            self.param['voxel_size'] = 0.0
        elif self.param['bundle_type'] == 'voxel':
            self.param['voxel_size'] = self.param['bundle_volume'] ** (1/3)

        # These values should be overwritten in a derived class.
        bundle_input = {}
        bundle_input['origin'] = np.zeros([self.param['bundle_count'], 3], dtype=np.float64)
        bundle_input['temperature'] = np.ones([self.param['bundle_count']], dtype=np.float64)
        bundle_input['emissivity'] = np.ones([self.param['bundle_count']], dtype=np.float64)
        bundle_input['velocity'] = np.zeros([self.param['bundle_count'], 3], dtype=np.float64)
        # Fix: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
        # use the builtin bool as dtype.
        bundle_input['mask'] = np.ones([self.param['bundle_count']], dtype=bool)
        bundle_input['spread'] = np.zeros([self.param['bundle_count']], dtype=np.float64)
        bundle_input['solid_angle'] = np.zeros([self.param['bundle_count']], dtype=np.float64)

        # randomly spread the bundles around the plasma box
        offset = np.zeros((self.param['bundle_count'], 3))
        offset[:,0] = np.random.uniform(-1 * self.param['xsize'] /2, self.param['xsize'] /2, self.param['bundle_count'])
        offset[:,1] = np.random.uniform(-1 * self.param['ysize']/2, self.param['ysize']/2, self.param['bundle_count'])
        offset[:,2] = np.random.uniform(-1 * self.param['zsize'] /2, self.param['zsize'] /2, self.param['bundle_count'])
        bundle_input['origin'][:] = self.point_to_external(offset)

        # Setup the bundle spread and solid angle.
        bundle_input = self.setup_bundle_spread(bundle_input)

        return bundle_input

    def setup_bundle_spread(self, bundle_input):
        """
        Calculate the spread and solid angle for each bundle.

        If the config option 'spread_radius' is provide the spread will be
        determined for each bundle by a spotsize at the target.

        Note: Even if the idea of a spread radius is added to the generic
              source object we still need to calculate and save the results
              here so that we can correctly calcuate the bundle intensities.
        """
        if self.param['spread_radius'] is not None:
            vector = bundle_input['origin'] - self.param['target']
            dist = np.linalg.norm(vector, axis=1)
            spread = np.arctan(self.param['spread_radius']/dist)
        else:
            spread = self.param['spread']
        bundle_input['spread'][:] = spread

        # For the time being the fuction solid_angle is not vectorized, so a
        # loop is necessary.
        for ii in range(len(bundle_input['spread'])):
            bundle_input['solid_angle'][ii] = xicsrt_spread.solid_angle(bundle_input['spread'][ii])

        return bundle_input

    def get_emissivity(self, rho):
        # Constant-profile default; derived classes may override.
        return self.param['emissivity']

    def get_temperature(self, rho):
        # Constant-profile default; derived classes may override.
        return self.param['temperature']

    def get_velocity(self, rho):
        # Constant-profile default; derived classes may override.
        return self.param['velocity']

    def bundle_generate(self, bundle_input):
        # Hook for derived classes to fill in the per-bundle plasma properties.
        self.log.debug('Starting bundle_generate')
        return bundle_input

    def bundle_filter(self, bundle_input):
        self.log.debug('Starting bundle_filter')
        # Renamed loop variable: `filter` shadowed the builtin.
        for flt in self.filter_objects:
            bundle_input = flt.filter(bundle_input)
        return bundle_input

    def create_sources(self, bundle_input):
        """
        Generate rays from a list of bundles.

        bundle_input
          a list containing dictionaries containing the locations, emissivities,
          temperatures and velocitities and of all ray bundles to be emitted.
        """
        rays_list = []
        count_rays_in_bundle = []

        m = bundle_input['mask']

        # Check if the number of rays generated will exceed max ray limits.
        # This is only approximate since poisson statistics may be in use.
        predicted_rays = int(np.sum(
            bundle_input['emissivity'][m]
            * self.param['time_resolution']
            * self.param['bundle_volume']
            * bundle_input['solid_angle'][m] / (4 * np.pi)
            * self.param['volume']
            / (self.param['bundle_count'] * self.param['bundle_volume'])))
        self.log.debug(f'Predicted rays: {predicted_rays:0.2e}')

        if predicted_rays > self.param['max_rays']:
            raise ValueError(
                f"Current settings will produce too many rays ({predicted_rays:0.2e}). "
                f"Please reduce integration time or adjust other parameters.")

        # Bundle generation loop
        for ii in range(self.param['bundle_count']):
            if not bundle_input['mask'][ii]:
                continue

            profiler.start("Ray Bundle Generation")
            source_config = dict()

            # Specially dependent parameters
            source_config['origin'] = bundle_input['origin'][ii]
            source_config['temperature'] = bundle_input['temperature'][ii]
            source_config['velocity'] = bundle_input['velocity'][ii]
            source_config['spread'] = bundle_input['spread'][ii]

            # Calculate the total number of photons to launch from this bundle
            # volume. Since the source can use poisson statistics, this should
            # be of floating point type.
            intensity = (bundle_input['emissivity'][ii]
                         * self.param['time_resolution']
                         * self.param['bundle_volume']
                         * bundle_input['solid_angle'][ii] / (4 * np.pi))

            # Scale the number of photons based on the number of bundles.
            #
            # Ultimately we allow bundle_volume and bundle_count to be
            # independent, which means that a bundle representing a volume in
            # the plasma can be launched from virtual volume of a different
            # size.
            #
            # In order to allow this while maintaining overall photon statistics
            # from the plasma, we normalize the intensity so that each bundle
            # represents a volume of plasma_volume/bundle_count.
            #
            # In doing so bundle_volume cancels out, but I am leaving the
            # calculation separate for clarity.
            intensity *= self.param['volume'] / (self.param['bundle_count'] * self.param['bundle_volume'])

            source_config['intensity'] = intensity

            # constants
            source_config['xsize'] = self.param['voxel_size']
            source_config['ysize'] = self.param['voxel_size']
            source_config['zsize'] = self.param['voxel_size']
            source_config['zaxis'] = self.param['zaxis']
            source_config['xaxis'] = self.param['xaxis']
            source_config['target'] = self.param['target']
            source_config['mass_number'] = self.param['mass_number']
            source_config['wavelength_dist'] = self.param['wavelength_dist']
            source_config['wavelength'] = self.param['wavelength']
            source_config['wavelength_range'] = self.param['wavelength_range']
            source_config['linewidth'] = self.param['linewidth']
            source_config['angular_dist'] = self.param['angular_dist']
            source_config['use_poisson'] = self.param['use_poisson']

            #create ray bundle sources and generate bundled rays
            source = XicsrtSourceFocused(source_config)
            bundled_rays = source.generate_rays()

            rays_list.append(bundled_rays)
            count_rays_in_bundle.append(len(bundled_rays['mask']))

            profiler.stop("Ray Bundle Generation")

        profiler.start('Ray Bundle Collection')
        # append bundled rays together to form a single ray dictionary.
        # create the final ray dictionary
        # Fix: np.int was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin int behaves identically here.
        total_rays = int(np.sum(count_rays_in_bundle))
        rays = dict()
        rays['origin'] = np.zeros((total_rays, 3), dtype=np.float64)
        rays['direction'] = np.zeros((total_rays, 3), dtype=np.float64)
        rays['wavelength'] = np.zeros(total_rays, dtype=np.float64)
        rays['weight'] = np.zeros(total_rays, dtype=np.float64)
        rays['mask'] = np.ones(total_rays, dtype=bool)

        index = 0
        for ii, num_rays in enumerate(count_rays_in_bundle):
            rays['origin'][index:index+num_rays] = rays_list[ii]['origin']
            rays['direction'][index:index+num_rays] = rays_list[ii]['direction']
            rays['wavelength'][index:index+num_rays] = rays_list[ii]['wavelength']
            rays['weight'][index:index+num_rays] = rays_list[ii]['weight']
            rays['mask'][index:index+num_rays] = rays_list[ii]['mask']
            index += num_rays
        profiler.stop('Ray Bundle Collection')

        if len(rays['mask']) == 0:
            raise ValueError('No rays generated. Check plasma input parameters')

        self.log.debug('Bundles Generated: {:0.4e}'.format(
            len(m[m])))
        self.log.debug('Rays per bundle, mean:   {:0.0f}'.format(
            np.mean(count_rays_in_bundle)))
        self.log.debug('Rays per bundle, median: {:0.0f}'.format(
            np.median(count_rays_in_bundle)))
        self.log.debug('Rays per bundle, max:    {:0d}'.format(
            np.max(count_rays_in_bundle)))
        self.log.debug('Rays per bundle, min:    {:0d}'.format(
            np.min(count_rays_in_bundle)))

        return rays

    def generate_rays(self):
        ## Create an empty list of ray bundles
        bundle_input = self.setup_bundles()
        ## Apply filters to filter out ray bundles
        bundle_input = self.bundle_filter(bundle_input)
        ## Populate that list with ray bundle parameters, like emissivity
        bundle_input = self.bundle_generate(bundle_input)
        ## Use the list to generate ray sources
        rays = self.create_sources(bundle_input)
        return rays
| [
"numpy.mean",
"numpy.median",
"numpy.ones",
"xicsrt.tools.xicsrt_spread.solid_angle",
"xicsrt.sources._XicsrtSourceFocused.XicsrtSourceFocused",
"numpy.linalg.norm",
"numpy.min",
"numpy.max",
"numpy.sum",
"numpy.zeros",
"xicsrt.util.profiler.stop",
"numpy.random.uniform",
"xicsrt.util.profil... | [((7034, 7093), 'numpy.zeros', 'np.zeros', (["[self.param['bundle_count'], 3]"], {'dtype': 'np.float64'}), "([self.param['bundle_count'], 3], dtype=np.float64)\n", (7042, 7093), True, 'import numpy as np\n'), ((7135, 7190), 'numpy.ones', 'np.ones', (["[self.param['bundle_count']]"], {'dtype': 'np.float64'}), "([self.param['bundle_count']], dtype=np.float64)\n", (7142, 7190), True, 'import numpy as np\n'), ((7232, 7287), 'numpy.ones', 'np.ones', (["[self.param['bundle_count']]"], {'dtype': 'np.float64'}), "([self.param['bundle_count']], dtype=np.float64)\n", (7239, 7287), True, 'import numpy as np\n'), ((7329, 7388), 'numpy.zeros', 'np.zeros', (["[self.param['bundle_count'], 3]"], {'dtype': 'np.float64'}), "([self.param['bundle_count'], 3], dtype=np.float64)\n", (7337, 7388), True, 'import numpy as np\n'), ((7430, 7482), 'numpy.ones', 'np.ones', (["[self.param['bundle_count']]"], {'dtype': 'np.bool'}), "([self.param['bundle_count']], dtype=np.bool)\n", (7437, 7482), True, 'import numpy as np\n'), ((7524, 7580), 'numpy.zeros', 'np.zeros', (["[self.param['bundle_count']]"], {'dtype': 'np.float64'}), "([self.param['bundle_count']], dtype=np.float64)\n", (7532, 7580), True, 'import numpy as np\n'), ((7622, 7678), 'numpy.zeros', 'np.zeros', (["[self.param['bundle_count']]"], {'dtype': 'np.float64'}), "([self.param['bundle_count']], dtype=np.float64)\n", (7630, 7678), True, 'import numpy as np\n'), ((7767, 7808), 'numpy.zeros', 'np.zeros', (["(self.param['bundle_count'], 3)"], {}), "((self.param['bundle_count'], 3))\n", (7775, 7808), True, 'import numpy as np\n'), ((7831, 7935), 'numpy.random.uniform', 'np.random.uniform', (["(-1 * self.param['xsize'] / 2)", "(self.param['xsize'] / 2)", "self.param['bundle_count']"], {}), "(-1 * self.param['xsize'] / 2, self.param['xsize'] / 2,\n self.param['bundle_count'])\n", (7848, 7935), True, 'import numpy as np\n'), ((7952, 8056), 'numpy.random.uniform', 'np.random.uniform', (["(-1 * self.param['ysize'] / 
2)", "(self.param['ysize'] / 2)", "self.param['bundle_count']"], {}), "(-1 * self.param['ysize'] / 2, self.param['ysize'] / 2,\n self.param['bundle_count'])\n", (7969, 8056), True, 'import numpy as np\n'), ((8071, 8175), 'numpy.random.uniform', 'np.random.uniform', (["(-1 * self.param['zsize'] / 2)", "(self.param['zsize'] / 2)", "self.param['bundle_count']"], {}), "(-1 * self.param['zsize'] / 2, self.param['zsize'] / 2,\n self.param['bundle_count'])\n", (8088, 8175), True, 'import numpy as np\n'), ((14537, 14576), 'xicsrt.util.profiler.start', 'profiler.start', (['"""Ray Bundle Collection"""'], {}), "('Ray Bundle Collection')\n", (14551, 14576), False, 'from xicsrt.util import profiler\n'), ((14820, 14863), 'numpy.zeros', 'np.zeros', (['(total_rays, 3)'], {'dtype': 'np.float64'}), '((total_rays, 3), dtype=np.float64)\n', (14828, 14863), True, 'import numpy as np\n'), ((14893, 14936), 'numpy.zeros', 'np.zeros', (['(total_rays, 3)'], {'dtype': 'np.float64'}), '((total_rays, 3), dtype=np.float64)\n', (14901, 14936), True, 'import numpy as np\n'), ((14966, 15004), 'numpy.zeros', 'np.zeros', (['total_rays'], {'dtype': 'np.float64'}), '(total_rays, dtype=np.float64)\n', (14974, 15004), True, 'import numpy as np\n'), ((15037, 15075), 'numpy.zeros', 'np.zeros', (['total_rays'], {'dtype': 'np.float64'}), '(total_rays, dtype=np.float64)\n', (15045, 15075), True, 'import numpy as np\n'), ((15108, 15142), 'numpy.ones', 'np.ones', (['total_rays'], {'dtype': 'np.bool'}), '(total_rays, dtype=np.bool)\n', (15115, 15142), True, 'import numpy as np\n'), ((15648, 15686), 'xicsrt.util.profiler.stop', 'profiler.stop', (['"""Ray Bundle Collection"""'], {}), "('Ray Bundle Collection')\n", (15661, 15686), False, 'from xicsrt.util import profiler\n'), ((6142, 6178), 'numpy.round', 'np.round', (["self.param['bundle_count']"], {}), "(self.param['bundle_count'])\n", (6150, 6178), True, 'import numpy as np\n'), ((9023, 9053), 'numpy.linalg.norm', 'np.linalg.norm', (['vector'], {'axis': 
'(1)'}), '(vector, axis=1)\n', (9037, 9053), True, 'import numpy as np\n'), ((9075, 9120), 'numpy.arctan', 'np.arctan', (["(self.param['spread_radius'] / dist)"], {}), "(self.param['spread_radius'] / dist)\n", (9084, 9120), True, 'import numpy as np\n'), ((9426, 9479), 'xicsrt.tools.xicsrt_spread.solid_angle', 'xicsrt_spread.solid_angle', (["bundle_input['spread'][ii]"], {}), "(bundle_input['spread'][ii])\n", (9451, 9479), False, 'from xicsrt.tools import xicsrt_spread\n'), ((10653, 10897), 'numpy.sum', 'np.sum', (["(bundle_input['emissivity'][m] * self.param['time_resolution'] * self.param\n ['bundle_volume'] * bundle_input['solid_angle'][m] / (4 * np.pi) * self\n .param['volume'] / (self.param['bundle_count'] * self.param[\n 'bundle_volume']))"], {}), "(bundle_input['emissivity'][m] * self.param['time_resolution'] * self\n .param['bundle_volume'] * bundle_input['solid_angle'][m] / (4 * np.pi) *\n self.param['volume'] / (self.param['bundle_count'] * self.param[\n 'bundle_volume']))\n", (10659, 10897), True, 'import numpy as np\n'), ((11443, 11482), 'xicsrt.util.profiler.start', 'profiler.start', (['"""Ray Bundle Generation"""'], {}), "('Ray Bundle Generation')\n", (11457, 11482), False, 'from xicsrt.util import profiler\n'), ((14280, 14314), 'xicsrt.sources._XicsrtSourceFocused.XicsrtSourceFocused', 'XicsrtSourceFocused', (['source_config'], {}), '(source_config)\n', (14299, 14314), False, 'from xicsrt.sources._XicsrtSourceFocused import XicsrtSourceFocused\n'), ((14489, 14527), 'xicsrt.util.profiler.stop', 'profiler.stop', (['"""Ray Bundle Generation"""'], {}), "('Ray Bundle Generation')\n", (14502, 14527), False, 'from xicsrt.util import profiler\n'), ((14723, 14751), 'numpy.sum', 'np.sum', (['count_rays_in_bundle'], {}), '(count_rays_in_bundle)\n', (14729, 14751), True, 'import numpy as np\n'), ((15985, 16014), 'numpy.mean', 'np.mean', (['count_rays_in_bundle'], {}), '(count_rays_in_bundle)\n', (15992, 16014), True, 'import numpy as np\n'), ((16095, 16126), 
'numpy.median', 'np.median', (['count_rays_in_bundle'], {}), '(count_rays_in_bundle)\n', (16104, 16126), True, 'import numpy as np\n'), ((16205, 16233), 'numpy.max', 'np.max', (['count_rays_in_bundle'], {}), '(count_rays_in_bundle)\n', (16211, 16233), True, 'import numpy as np\n'), ((16312, 16340), 'numpy.min', 'np.min', (['count_rays_in_bundle'], {}), '(count_rays_in_bundle)\n', (16318, 16340), True, 'import numpy as np\n')] |
import multiprocessing
import pickle
import random
import sys
from collections import defaultdict
from math import ceil, sqrt
import numpy as np
from scipy.stats import norm, skewnorm
from tqdm import tqdm
sys.path.append('..')
import features
# Per-category "security" weights in (0, 1]: in calc_balance_distr() each
# category's standard deviation is divided by its weight, so a higher value
# means the category is treated as more reliable (lower variance).
INCOME_SECURITY = {
    'employee_wage': 0.8,
    'state_wage': 0.9,
    'dividents': 0.5,
    'rent': 0.6,
    'other': 0.1,
}
EXPENSE_SECURITY = {
    'housing': 0.8,
    'car_service': 0.5,
    'taxes': 0.8,
    'alimony': 0.8,
    'credits': 0.5,
    'insurance': 0.8,
    'other': 0.5,
}
PROPERTY_SECURITY = {
    'apartment': 0.7,
    'house': 0.5,
    'car': 0.3,
}
def calc_responsibility(fdict):
    """Score how responsible a client looks based on their feature dict.

    Starts from a base of 0.6, adds bonuses for education / loan purpose and
    for family situation, then halves the score once per negative credit-history
    flag. Returns a float; higher means more responsible.
    """
    score = 0.6

    educated = fdict['education'] >= 3
    serious_purpose = fdict['purpose:education'] or fdict['purpose:real_estate']
    if educated or serious_purpose:
        score += 0.1

    n_dependents = fdict['dependents']
    if n_dependents >= 2:
        family_bonus = 0.3
    elif n_dependents == 1:
        family_bonus = 0.2
    elif fdict['married']:
        family_bonus = 0.1
    else:
        family_bonus = 0.0
    score += family_bonus

    # Each negative credit-history flag halves the score.
    red_flags = ('has_overdue_debts', 'missed_deadlines', 'was_bankrupt')
    halvings = sum(1 for name in red_flags if fdict[name])
    score *= 0.5 ** halvings

    return score
# Base relative standard deviation for a cash-flow category; in
# calc_balance_distr() it is further divided by the category security weight
# and by the client's responsibility score.
SIGMA_COEFF = 0.05
def calc_balance_distr(fdict, resp):
    """Model the client's balance at the end of the credit as a normal law.

    Each nonzero income/expense category is modeled by a skew-normal whose
    width shrinks with the category security and with the client's
    responsibility `resp`. Monthly flows are scaled to the full credit
    duration, then property values are added. Returns a frozen
    ``scipy.stats.norm`` distribution.
    """
    def width(value, security):
        # Scale of a category distribution: less secure / less responsible
        # means a wider distribution.
        return value * SIGMA_COEFF / security / resp

    mean_acc = 0
    var_acc = 0

    # Monthly incomes: skewed toward the downside (-4).
    for cat in features.CUM_FEATURES['income']:
        amount = fdict['income:' + cat]
        if amount > 0:
            d = skewnorm(-4, amount, width(amount, INCOME_SECURITY[cat]))
            mean_acc += d.mean()
            var_acc += d.var()

    # Monthly expenses: skewed toward the upside (+4), subtracted from balance.
    for cat in features.CUM_FEATURES['expense']:
        amount = fdict['expense:' + cat]
        if amount > 0:
            d = skewnorm(4, amount, width(amount, EXPENSE_SECURITY[cat]))
            mean_acc -= d.mean()
            var_acc += d.var()

    # Scale the monthly net flow over the whole credit duration (in years).
    months = 12 * fdict['duration']
    mean_acc *= months
    var_acc *= months ** 2

    # One-off property values, added once.
    for cat in features.CUM_FEATURES['property']:
        price = fdict['property:' + cat]
        if price > 0:
            d = skewnorm(4, price, width(price, PROPERTY_SECURITY[cat]))
            mean_acc += d.mean()
            var_acc += d.var()

    return norm(mean_acc, sqrt(var_acc))
def cond_expect(norm_distr, a):
    """Return E(X | X < a) * P{X < a} for X ~ ``norm_distr``.

    Derivation uses the standard Gaussian integral identities:
    https://en.wikipedia.org/wiki/List_of_integrals_of_Gaussian_functions
    """
    mu, sigma = norm_distr.args
    prob_below = norm_distr.cdf(a)
    density_at_a = norm_distr.pdf(a)
    return mu * prob_below - sigma ** 2 * density_at_a
# Yearly multiplier the bank wants to earn in expectation (10%/year); also the
# lower bound of the bisection in calc_interest_rate().
TARGET_INTEREST = 1.10
# Upper bound of the bisection over the yearly interest multiplier.
MAX_INTEREST = 10.00
def calc_interest_rate(fdict):
    """Find the smallest yearly rate that gives the bank its target income.

    The client's end-of-credit balance is modeled as a normal distribution;
    the bank's expected take at a given rate accounts for partial repayment
    on default (balance below the asked amount). The rate is found by 15
    bisection steps between TARGET_INTEREST and MAX_INTEREST, then rounded
    up to 3 decimal places.
    """
    resp = calc_responsibility(fdict)
    balance_distr = calc_balance_distr(fdict, resp)

    amount = fdict['credit_amount']
    years = fdict['duration']
    bank_wants = amount * TARGET_INTEREST ** years

    lo, hi = TARGET_INTEREST, MAX_INTEREST
    for _ in range(15):
        rate = (lo + hi) / 2
        bank_asks = amount * rate ** years
        default_proba = balance_distr.cdf(bank_asks)
        # Expected take: partial repayment on default plus full repayment
        # weighted by the no-default probability.
        bank_takes = cond_expect(balance_distr, bank_asks) + \
            (1 - default_proba) * bank_asks
        if bank_takes < bank_wants:
            lo = rate
        else:
            hi = rate

    return ceil(lo * 1e3) / 1e3
def test_calc_interest_rate():
    """Smoke test: print the rate computed for one hand-made client profile."""
    profile = defaultdict(int)
    profile.update({
        'credit_amount': 3 * 10 ** 6,
        'duration': 2,
        'education': 3,
        'married': 1,
        'dependents': 2,
        'purpose:real_estate': 1,
        'income:state_wage': 200000,
        'expense:housing': 28000,
        'expense:other': 9000,
    })
    print(calc_interest_rate(profile))
# Upper bounds for the randomly generated cumulative feature values
# (per feature group) used by generate_input().
MAX_CUM_VALUE = {
    'income': 200000,
    'expense': 50000,
    'property': 5 * 10 ** 6,
}
def generate_input():
    """Draw one random client feature dict covering all declared features.

    Numeric features are uniform over their declared range; categorical
    features are one-hot encoded with one uniformly chosen value; cumulative
    features are zero half of the time and uniform in [1, MAX_CUM_VALUE]
    otherwise.
    """
    fdict = {}
    for feature, (lo, hi) in features.NUM_FEATURES.items():
        fdict[feature] = random.randint(lo, hi)
    for feature, cats in features.CAT_FEATURES.items():
        chosen = random.choice(cats)
        for cat in cats:
            # One-hot: exactly the chosen category gets a 1.
            fdict[feature + ':' + cat] = int(cat == chosen)
    for feature, cats in features.CUM_FEATURES.items():
        for cat in cats:
            # Note: random.random() must be drawn before randint to keep the
            # RNG consumption order (reproducibility under a fixed seed).
            value = random.randint(1, MAX_CUM_VALUE[feature]) \
                if random.random() < 0.5 else 0
            fdict[feature + ':' + cat] = value
    return fdict
def generate_pair(_):
    """Build one (feature array, interest rate) training pair.

    The argument is ignored; it only satisfies the Pool.imap_unordered
    interface used by generate_data().
    """
    sample = generate_input()
    return features.feature_dict_to_array(sample), calc_interest_rate(sample)
def generate_data(size):
    """Generate `size` random (features, rate) training pairs in parallel.

    Returns a tuple (X, y) of numpy arrays.
    """
    X = []
    y = []
    # Use the pool as a context manager so the worker processes are always
    # terminated, even if generation raises (the original leaked the pool).
    with multiprocessing.Pool() as pool:
        for X_i, y_i in tqdm(pool.imap_unordered(generate_pair, range(size)),
                             total=size):
            X.append(X_i)
            y.append(y_i)
    return np.array(X), np.array(y)
# Output path for the pickled (X, y) dataset written by main().
DATA_FILENAME = 'data.pickle'
def main():
    """Generate one million training pairs and pickle them to disk."""
    dataset = generate_data(10 ** 6)
    with open(DATA_FILENAME, 'wb') as f:
        pickle.dump(dataset, f)
    print('[+] Saved to {}'.format(DATA_FILENAME))
# Script entry point: build the full dataset and write it to disk.
if __name__ == '__main__':
    main()
| [
"random.choice",
"math.ceil",
"pickle.dump",
"features.feature_dict_to_array",
"math.sqrt",
"features.CUM_FEATURES.items",
"numpy.array",
"scipy.stats.skewnorm",
"collections.defaultdict",
"features.NUM_FEATURES.items",
"multiprocessing.Pool",
"features.CAT_FEATURES.items",
"random.random",
... | [((208, 229), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (223, 229), False, 'import sys\n'), ((3895, 3924), 'features.NUM_FEATURES.items', 'features.NUM_FEATURES.items', ([], {}), '()\n', (3922, 3924), False, 'import features\n'), ((4002, 4031), 'features.CAT_FEATURES.items', 'features.CAT_FEATURES.items', ([], {}), '()\n', (4029, 4031), False, 'import features\n'), ((4205, 4234), 'features.CUM_FEATURES.items', 'features.CUM_FEATURES.items', ([], {}), '()\n', (4232, 4234), False, 'import features\n'), ((4538, 4575), 'features.feature_dict_to_array', 'features.feature_dict_to_array', (['fdict'], {}), '(fdict)\n', (4568, 4575), False, 'import features\n'), ((4670, 4692), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {}), '()\n', (4690, 4692), False, 'import multiprocessing\n'), ((2249, 2266), 'math.sqrt', 'sqrt', (['balance_var'], {}), '(balance_var)\n', (2253, 2266), False, 'from math import ceil, sqrt\n'), ((3329, 3346), 'math.ceil', 'ceil', (['(lo * 1000.0)'], {}), '(lo * 1000.0)\n', (3333, 3346), False, 'from math import ceil, sqrt\n'), ((3951, 3975), 'random.randint', 'random.randint', (['min', 'max'], {}), '(min, max)\n', (3965, 3975), False, 'import random\n'), ((4049, 4068), 'random.choice', 'random.choice', (['cats'], {}), '(cats)\n', (4062, 4068), False, 'import random\n'), ((4858, 4869), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (4866, 4869), True, 'import numpy as np\n'), ((4871, 4882), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (4879, 4882), True, 'import numpy as np\n'), ((5013, 5033), 'pickle.dump', 'pickle.dump', (['data', 'f'], {}), '(data, f)\n', (5024, 5033), False, 'import pickle\n'), ((1367, 1437), 'scipy.stats.skewnorm', 'skewnorm', (['(-4)', 'value', '(value * SIGMA_COEFF / INCOME_SECURITY[cat] / resp)'], {}), '(-4, value, value * SIGMA_COEFF / INCOME_SECURITY[cat] / resp)\n', (1375, 1437), False, 'from scipy.stats import norm, skewnorm\n'), ((1650, 1720), 'scipy.stats.skewnorm', 'skewnorm', 
(['(4)', 'value', '(value * SIGMA_COEFF / EXPENSE_SECURITY[cat] / resp)'], {}), '(4, value, value * SIGMA_COEFF / EXPENSE_SECURITY[cat] / resp)\n', (1658, 1720), False, 'from scipy.stats import norm, skewnorm\n'), ((2066, 2137), 'scipy.stats.skewnorm', 'skewnorm', (['(4)', 'price', '(price * SIGMA_COEFF / PROPERTY_SECURITY[cat] / resp)'], {}), '(4, price, price * SIGMA_COEFF / PROPERTY_SECURITY[cat] / resp)\n', (2074, 2137), False, 'from scipy.stats import norm, skewnorm\n'), ((3703, 3726), 'collections.defaultdict', 'defaultdict', (['int', 'fdict'], {}), '(int, fdict)\n', (3714, 3726), False, 'from collections import defaultdict\n'), ((4276, 4291), 'random.random', 'random.random', ([], {}), '()\n', (4289, 4291), False, 'import random\n'), ((4323, 4364), 'random.randint', 'random.randint', (['(1)', 'MAX_CUM_VALUE[feature]'], {}), '(1, MAX_CUM_VALUE[feature])\n', (4337, 4364), False, 'import random\n')] |
########################################################################################
#
# Forge
# Copyright (C) 2018 <NAME>, Oxford Robotics Institute and
# Department of Statistics, University of Oxford
#
# email: <EMAIL>
# webpage: http://akosiorek.github.io/
# github: https://github.com/akosiorek/forge/
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################################
from builtins import range
import numpy as np
import itertools
import tensorflow as tf
def tensors_from_data(data_dict, batch_size, axes=None, shuffle=False):
    """Turns a dict of numpy.ndarrays into a dict of minibatch tensors.

    Arrays are split into minibatches of `batch_size` along `axes`. If `axes` is None,
    then all arrays are split along axis==0. Tensors can iterate sequentially over the
    passed arrays if shuffle=False or in a random order if shuffle=True.

    :param data_dict: dict of {key: numpy.ndarray}.
    :param batch_size: integer
    :param axes: dict of {k: integer} or None
    :param shuffle: boolean.
    :return: dict of {key: tf.Tensor}
    """
    keys = list(data_dict.keys())
    if axes is None:
        axes = {k: 0 for k in keys}
    # Measure the number of entries from the first array's batch axis
    # (all arrays are assumed to agree along their respective axes).
    key = keys[0]
    ax = axes[key]
    n_entries = data_dict[key].shape[ax]
    if shuffle:
        def idx_fun():
            # Fresh random sample without replacement on every call.
            return np.random.choice(n_entries, batch_size, replace=False)
    else:
        # Sequential pass: cycle over consecutive non-overlapping windows.
        # A trailing remainder smaller than batch_size is never yielded.
        rolling_idx = itertools.cycle(range(0, n_entries - batch_size + 1, batch_size))
        def idx_fun():
            start = next(rolling_idx)
            end = start + batch_size
            return np.arange(start, end)
    def data_fun():
        # Draw one index set and slice every array with it along its own axis.
        idx = idx_fun()
        minibatch = []
        for k in keys:
            item = data_dict[k]
            minibatch_item = item.take(idx, axes[k])
            minibatch.append(minibatch_item)
        return minibatch
    # Evaluate once eagerly to discover per-array dtypes and static shapes.
    minibatch = data_fun()
    types = [getattr(tf, str(m.dtype)) for m in minibatch]
    # tf.py_func re-invokes data_fun every time the returned tensors are fetched.
    tensors = tf.py_func(data_fun, [], types)
    for t, m in zip(tensors, minibatch):
        t.set_shape(m.shape)
    # Rebuild with the input's mapping type (e.g. OrderedDict is preserved).
    tensors = data_dict.__class__({k: v for k, v in zip(keys, tensors)})
return tensors | [
"numpy.random.choice",
"tensorflow.py_func",
"numpy.arange",
"builtins.range"
] | [((2605, 2636), 'tensorflow.py_func', 'tf.py_func', (['data_fun', '[]', 'types'], {}), '(data_fun, [], types)\n', (2615, 2636), True, 'import tensorflow as tf\n'), ((1963, 2017), 'numpy.random.choice', 'np.random.choice', (['n_entries', 'batch_size'], {'replace': '(False)'}), '(n_entries, batch_size, replace=False)\n', (1979, 2017), True, 'import numpy as np\n'), ((2067, 2115), 'builtins.range', 'range', (['(0)', '(n_entries - batch_size + 1)', 'batch_size'], {}), '(0, n_entries - batch_size + 1, batch_size)\n', (2072, 2115), False, 'from builtins import range\n'), ((2235, 2256), 'numpy.arange', 'np.arange', (['start', 'end'], {}), '(start, end)\n', (2244, 2256), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # s_cointegration_detection [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=s_cointegration_detection&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=s_cointegration_detection).
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from arpym.estimation import cointegration_fp, fit_var1
from arpym.tools import add_logo
# -
# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_cointegration_detection-parameters)
t_in = 1260  # length of the in-sample time series (days)
t_ = 2268  # length of the complete series (in and out-of-sample) (days)
u = 0.35  # coefficient of linear combination
l_select = 3  # selected eigenvector
# ## [Step 0](https://www.arpm.co/lab/redirect.php?permalink=s_cointegration_detection-implementation-step00): Load data
# +
tau = np.array([1, 2, 3, 5, 7, 10])
path = '../../../databases/global-databases/fixed-income/db_yields'
x = pd.read_csv(path + '/data.csv', header=0, index_col=0)
# keep only the columns keyed by tau, restricted to the last t_ rows
x = x[tau.astype(float).astype(str)].tail(t_).values
# -
# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_cointegration_detection-implementation-step01): Select the in-sample and out-of-sample series
# +
x_in = x[:t_in, :]  # in-sample series
x_out = x[t_in:, :]  # out-of-sample series
# -
# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_cointegration_detection-implementation-step02): Cointegrated eigenvectors
# +
c_hat, _ = cointegration_fp(x_in)
# -
# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_cointegration_detection-implementation-step03): In sample and out-of-sample cointegrated series
# +
# store cointegrated vectors: column 0 holds eigenvector l_select+1,
# column 1 holds eigenvector l_select, column 2 their linear combination
# with weights (1 - u) and u.
# NOTE(review): the plot titles below label column 0 with index=l_select
# while the data uses column l_select+1 -- presumably 1- vs 0-based
# numbering; confirm against the lab write-up.
c_hat_sel = np.zeros((c_hat.shape[0], 3))
c_hat_sel[:, 0] = c_hat[:, l_select+1]
c_hat_sel[:, 1] = c_hat[:, l_select]
c_hat_sel[:, 2] = (1 - u) * c_hat[:, l_select + 1] + u * \
    c_hat[:, l_select]
# in-sample cointegrated series (basis points)
y_in = x_in @ c_hat_sel * 10000
# out-of-sample cointegrated series (basis points)
y_out = x_out @ c_hat_sel * 10000
# -
# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_cointegration_detection-implementation-step04): AR(1) long term parameters
# +
exp_infty = np.zeros(3)
sd_infty = np.zeros(3)
tau_halflife = np.zeros(3)
for k in range(3):
    # AR1 fit
    b_hat, mu_hat_epsi, sig2_hat_epsi = fit_var1(y_in[:, [k]])
    # long-run expectation
    exp_infty[k] = mu_hat_epsi / (1 - b_hat)
    # long-run standard deviation
    sd_infty[k] = np.sqrt(sig2_hat_epsi / (1 - b_hat ** 2))
    # half life of a shock under the AR(1) dynamics: -ln(2) / ln|b|
    tau_halflife[k] = -np.log(2) / np.log(abs(b_hat))
# -
# ## Plots
# +
# Plot each cointegrated series: in-sample (black) and out-of-sample (blue)
# paths, with the AR(1) long-run mean and a +/- 2 st.dev band in red/green.
plt.style.use('arpm')
for k in range(3):
    fig = plt.figure()
    min_y = min(min(y_in[:, k]), min(y_out[:, k]))
    max_y = max(max(y_in[:, k]), max(y_out[:, k]))
    # x-axis in years (252 trading days per year)
    t = np.arange(t_)/252
    plt.axis([0, t[-1], min_y, max_y])
    plt.xlabel('time (years)')
    plt.ylabel('basis points')
    plt.xticks()
    plt.yticks()
    insample = plt.plot(t[:t_in], y_in[:, k], color='k', linewidth=1)
    outofsample = plt.plot(t[t_in:], y_out[:, k], color='b', linewidth=1)
    expect = plt.plot(t, np.tile(exp_infty[k], t_), color='g')
    up_sd = plt.plot(t, np.tile(exp_infty[k] + 2 * sd_infty[k], t_),
                     color='r')
    plt.plot(t, np.tile(exp_infty[k] - 2 * sd_infty[k], t_),
             color='r')
    plt.legend(handles=[insample[0], expect[0], up_sd[0], outofsample[0]],
               labels=['In-Sample', 'In-Sample Mean',
                       '+/- 2 In-Sample St. Dev', 'Out-of-Sample'], loc=2)
    if k == 0:
        plt.title(('Series = {index}-th Eigvect. In-Sample Mean-Reversion ' +
                   'Half-Life = ' +
                   ' {halflife:.0f} days.').format(index=l_select,
                                                   halflife=tau_halflife[k]))
    elif k == 1:
        plt.title(('Series = {index}-th Eigvect. In-Sample Mean-Reversion ' +
                   'Half-Life = ' +
                   ' {halflife:.0f} days.').format(index=l_select+1,
                                                   halflife=tau_halflife[k]))
    else:
        plt.title(('Series = {a:1.2f} x {index}-th Eigvect. + ' +
                   '{a2:1.2f} x {index2}-th Eigvect.' +
                   '\nIn-Sample Mean-Reversion Half-Life ' +
                   '= {halflife:.0f} days.').format(a=np.sqrt(1-u**2),
                                                    index=l_select,
                                                    a2=u**2,
                                                    index2=l_select+1,
                                                    halflife=tau_halflife[k]))
    add_logo(fig)
    plt.tight_layout()
| [
"numpy.sqrt",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"arpym.estimation.cointegration_fp",
"numpy.log",
"numpy.array",
"numpy.arange",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.axis",
"numpy.tile... | [((1263, 1292), 'numpy.array', 'np.array', (['[1, 2, 3, 5, 7, 10]'], {}), '([1, 2, 3, 5, 7, 10])\n', (1271, 1292), True, 'import numpy as np\n'), ((1365, 1419), 'pandas.read_csv', 'pd.read_csv', (["(path + '/data.csv')"], {'header': '(0)', 'index_col': '(0)'}), "(path + '/data.csv', header=0, index_col=0)\n", (1376, 1419), True, 'import pandas as pd\n'), ((1881, 1903), 'arpym.estimation.cointegration_fp', 'cointegration_fp', (['x_in'], {}), '(x_in)\n', (1897, 1903), False, 'from arpym.estimation import cointegration_fp, fit_var1\n'), ((2114, 2143), 'numpy.zeros', 'np.zeros', (['(c_hat.shape[0], 3)'], {}), '((c_hat.shape[0], 3))\n', (2122, 2143), True, 'import numpy as np\n'), ((2627, 2638), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (2635, 2638), True, 'import numpy as np\n'), ((2650, 2661), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (2658, 2661), True, 'import numpy as np\n'), ((2677, 2688), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (2685, 2688), True, 'import numpy as np\n'), ((3047, 3068), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""arpm"""'], {}), "('arpm')\n", (3060, 3068), True, 'import matplotlib.pyplot as plt\n'), ((2764, 2786), 'arpym.estimation.fit_var1', 'fit_var1', (['y_in[:, [k]]'], {}), '(y_in[:, [k]])\n', (2772, 2786), False, 'from arpym.estimation import cointegration_fp, fit_var1\n'), ((2913, 2954), 'numpy.sqrt', 'np.sqrt', (['(sig2_hat_epsi / (1 - b_hat ** 2))'], {}), '(sig2_hat_epsi / (1 - b_hat ** 2))\n', (2920, 2954), True, 'import numpy as np\n'), ((3099, 3111), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3109, 3111), True, 'import matplotlib.pyplot as plt\n'), ((3245, 3279), 'matplotlib.pyplot.axis', 'plt.axis', (['[0, t[-1], min_y, max_y]'], {}), '([0, t[-1], min_y, max_y])\n', (3253, 3279), True, 'import matplotlib.pyplot as plt\n'), ((3284, 3310), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (years)"""'], {}), "('time (years)')\n", (3294, 3310), True, 'import 
matplotlib.pyplot as plt\n'), ((3315, 3341), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""basis points"""'], {}), "('basis points')\n", (3325, 3341), True, 'import matplotlib.pyplot as plt\n'), ((3346, 3358), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {}), '()\n', (3356, 3358), True, 'import matplotlib.pyplot as plt\n'), ((3363, 3375), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {}), '()\n', (3373, 3375), True, 'import matplotlib.pyplot as plt\n'), ((3391, 3445), 'matplotlib.pyplot.plot', 'plt.plot', (['t[:t_in]', 'y_in[:, k]'], {'color': '"""k"""', 'linewidth': '(1)'}), "(t[:t_in], y_in[:, k], color='k', linewidth=1)\n", (3399, 3445), True, 'import matplotlib.pyplot as plt\n'), ((3464, 3519), 'matplotlib.pyplot.plot', 'plt.plot', (['t[t_in:]', 'y_out[:, k]'], {'color': '"""b"""', 'linewidth': '(1)'}), "(t[t_in:], y_out[:, k], color='b', linewidth=1)\n", (3472, 3519), True, 'import matplotlib.pyplot as plt\n'), ((3773, 3942), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'handles': '[insample[0], expect[0], up_sd[0], outofsample[0]]', 'labels': "['In-Sample', 'In-Sample Mean', '+/- 2 In-Sample St. Dev', 'Out-of-Sample']", 'loc': '(2)'}), "(handles=[insample[0], expect[0], up_sd[0], outofsample[0]],\n labels=['In-Sample', 'In-Sample Mean', '+/- 2 In-Sample St. 
Dev',\n 'Out-of-Sample'], loc=2)\n", (3783, 3942), True, 'import matplotlib.pyplot as plt\n'), ((5073, 5086), 'arpym.tools.add_logo', 'add_logo', (['fig'], {}), '(fig)\n', (5081, 5086), False, 'from arpym.tools import add_logo\n'), ((5091, 5109), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5107, 5109), True, 'import matplotlib.pyplot as plt\n'), ((3223, 3236), 'numpy.arange', 'np.arange', (['t_'], {}), '(t_)\n', (3232, 3236), True, 'import numpy as np\n'), ((3545, 3570), 'numpy.tile', 'np.tile', (['exp_infty[k]', 't_'], {}), '(exp_infty[k], t_)\n', (3552, 3570), True, 'import numpy as np\n'), ((3607, 3650), 'numpy.tile', 'np.tile', (['(exp_infty[k] + 2 * sd_infty[k])', 't_'], {}), '(exp_infty[k] + 2 * sd_infty[k], t_)\n', (3614, 3650), True, 'import numpy as np\n'), ((3700, 3743), 'numpy.tile', 'np.tile', (['(exp_infty[k] - 2 * sd_infty[k])', 't_'], {}), '(exp_infty[k] - 2 * sd_infty[k], t_)\n', (3707, 3743), True, 'import numpy as np\n'), ((2995, 3004), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (3001, 3004), True, 'import numpy as np\n'), ((4773, 4792), 'numpy.sqrt', 'np.sqrt', (['(1 - u ** 2)'], {}), '(1 - u ** 2)\n', (4780, 4792), True, 'import numpy as np\n')] |
from __future__ import division

import numpy as np
import pandas as pd
from sklearn import datasets, linear_model
class LRPI:
    """Linear regression with prediction intervals.

    Fits an OLS model and, on predict, returns point predictions together
    with lower/upper bounds of the form ``pred -/+ t * sqrt(MSE + SE*MSE)``,
    where SE is the leverage term of each test row.
    """
    def __init__(self, normalize=False, n_jobs=1, t_value = 2.13144955):
        # NOTE(review): the default t_value (~2.1314) matches the two-sided
        # 95% Student-t critical value at 15 degrees of freedom -- confirm
        # this is the intended sample size.
        # NOTE(review): the `normalize` argument of LinearRegression was
        # removed in scikit-learn 1.2; this class requires an older sklearn.
        self.normalize = normalize
        self.n_jobs = n_jobs
        self.LR = linear_model.LinearRegression(normalize=self.normalize, n_jobs= self.n_jobs)
        self.t_value = t_value
    def fit(self, X_train, y_train):
        """Fit OLS and precompute MSE and (X'X)^-1 for the interval width."""
        self.X_train = pd.DataFrame(X_train.values)
        self.y_train = pd.DataFrame(y_train.values)
        self.LR.fit(self.X_train, self.y_train)
        X_train_fit = self.LR.predict(self.X_train)
        # Residual mean squared error with n - p - 1 degrees of freedom.
        self.MSE = np.power(self.y_train.subtract(X_train_fit), 2).sum(axis=0) / (self.X_train.shape[0] - self.X_train.shape[1] - 1)
        # Append an intercept column before forming (X'X)^-1.
        self.X_train.loc[:, 'const_one'] = 1
        self.XTX_inv = np.linalg.inv(np.dot(np.transpose(self.X_train.values) , self.X_train.values))
    def predict(self, X_test):
        """Return a DataFrame with 'Pred', 'lower' and 'upper' columns."""
        self.X_test = pd.DataFrame(X_test.values)
        self.pred = self.LR.predict(self.X_test)
        self.X_test.loc[: , 'const_one'] =1
        # Leverage term x' (X'X)^-1 x for each test row.
        SE = [np.dot(np.transpose(self.X_test.values[i]) , np.dot(self.XTX_inv, self.X_test.values[i]) ) for i in range(len(self.X_test)) ]
        results = pd.DataFrame(self.pred , columns=['Pred'])
        results.loc[:,"lower"] = results['Pred'].subtract((self.t_value)* (np.sqrt(self.MSE.values + np.multiply(SE,self.MSE.values) )), axis=0)
        results.loc[:,"upper"] = results['Pred'].add((self.t_value)* (np.sqrt(self.MSE.values + np.multiply(SE,self.MSE.values) )), axis=0)
results.loc[:,"upper"] = results['Pred'].add((self.t_value)* (np.sqrt(self.MSE.values + np.multiply(SE,self.MSE.values) )), axis=0)
return results | [
"numpy.multiply",
"numpy.dot",
"pandas.DataFrame",
"numpy.transpose",
"sklearn.linear_model.LinearRegression"
] | [((282, 357), 'sklearn.linear_model.LinearRegression', 'linear_model.LinearRegression', ([], {'normalize': 'self.normalize', 'n_jobs': 'self.n_jobs'}), '(normalize=self.normalize, n_jobs=self.n_jobs)\n', (311, 357), False, 'from sklearn import datasets, linear_model\n'), ((459, 487), 'pandas.DataFrame', 'pd.DataFrame', (['X_train.values'], {}), '(X_train.values)\n', (471, 487), True, 'import pandas as pd\n'), ((511, 539), 'pandas.DataFrame', 'pd.DataFrame', (['y_train.values'], {}), '(y_train.values)\n', (523, 539), True, 'import pandas as pd\n'), ((991, 1018), 'pandas.DataFrame', 'pd.DataFrame', (['X_test.values'], {}), '(X_test.values)\n', (1003, 1018), True, 'import pandas as pd\n'), ((1270, 1311), 'pandas.DataFrame', 'pd.DataFrame', (['self.pred'], {'columns': "['Pred']"}), "(self.pred, columns=['Pred'])\n", (1282, 1311), True, 'import pandas as pd\n'), ((871, 904), 'numpy.transpose', 'np.transpose', (['self.X_train.values'], {}), '(self.X_train.values)\n', (883, 904), True, 'import numpy as np\n'), ((1133, 1168), 'numpy.transpose', 'np.transpose', (['self.X_test.values[i]'], {}), '(self.X_test.values[i])\n', (1145, 1168), True, 'import numpy as np\n'), ((1171, 1214), 'numpy.dot', 'np.dot', (['self.XTX_inv', 'self.X_test.values[i]'], {}), '(self.XTX_inv, self.X_test.values[i])\n', (1177, 1214), True, 'import numpy as np\n'), ((1423, 1455), 'numpy.multiply', 'np.multiply', (['SE', 'self.MSE.values'], {}), '(SE, self.MSE.values)\n', (1434, 1455), True, 'import numpy as np\n'), ((1564, 1596), 'numpy.multiply', 'np.multiply', (['SE', 'self.MSE.values'], {}), '(SE, self.MSE.values)\n', (1575, 1596), True, 'import numpy as np\n')] |
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pandas.tseries.offsets import BDay
import stock.utils.symbol_util
from stock.marketdata.storefactory import get_store
from stock.globalvar import *
from config import store_type
import tushare as ts
def get_last_trading_date(today):
    """Return the most recent business day before *today* with a tick file.

    Steps back one business day at a time until
    ``TICK_DIR["daily"]/<YYYY-MM-DD>.csv`` exists on disk.
    """
    folder = TICK_DIR["daily"]
    candidate = today - BDay(1)
    while not os.path.isfile(
            os.path.join(folder, candidate.strftime("%Y-%m-%d") + ".csv")):
        candidate = candidate - BDay(1)
    return candidate
# NOTE(review): exact duplicate of `get_last_trading_date` defined directly
# above; this second definition silently rebinds the name to an identical
# function. One of the two copies should be deleted.
def get_last_trading_date(today):
    yest = today - BDay(1)
    folder = TICK_DIR["daily"]
    while True:
        yest_str = yest.strftime("%Y-%m-%d")
        filepath = os.path.join(folder, yest_str + ".csv")
        if os.path.isfile(filepath):
            break
        yest = yest - BDay(1)
    return yest
def get_industry():
    """Return per-symbol industry labels, comma-joined, indexed by exsymbol.

    Uses named aggregation: the original ``.agg({"industry": ...})`` dict
    form on a SeriesGroupBy was deprecated in pandas 0.25 and removed in
    pandas 1.0 (it raises SpecificationError there). The result is the
    same one-column DataFrame.
    """
    df_industry = stock.utils.symbol_util.load_industry()
    df_res = df_industry.groupby("exsymbol")["industry"].agg(
        industry=lambda x: ",".join(x))
    return df_res
def get_concept():
    """Return per-symbol concept labels, comma-joined, indexed by exsymbol.

    Uses named aggregation: the original ``.agg({"concept": ...})`` dict
    form on a SeriesGroupBy was deprecated in pandas 0.25 and removed in
    pandas 1.0 (it raises SpecificationError there). The result is the
    same one-column DataFrame.
    """
    df = stock.utils.symbol_util.load_concept()
    df_res = df.groupby("exsymbol")["concept"].agg(
        concept=lambda x: ",".join(x))
    return df_res
def get_zhangting(today):
    """Print yesterday's limit-up ("zhangting") stocks with today's stats.

    Screens stocks that closed at the +10% daily limit on the last trading
    day, then prints today's intraday range/body together with turnover,
    buy-queue ("fengdan") strength and industry labels.
    """
    today_str = today.strftime("%Y-%m-%d")
    df_today = stock.utils.symbol_util.get_realtime_by_date(today_str)
    yest = get_last_trading_date(today)
    yest_str = yest.strftime("%Y-%m-%d")
    df_yest = stock.utils.symbol_util.get_realtime_by_date(yest_str)
    # +10% limit-up price, rounded to cents.
    df_yest["zt_price"] = np.round(df_yest["yest_close"] * 1.1, 2)
    df_yest["diff2zt"] = df_yest["zt_price"] - df_yest["close"]
    # diff2zt < 1e-3 is a float-safe test that the close hit the limit price;
    # also require positive free-float market cap and non-zero volume.
    df_zt = df_yest[(df_yest.diff2zt<1e-3) & (df_yest.lt_mcap>0) & (df_yest.volume>0)].copy()
    df_today["range"] = (df_today["high"] - df_today["low"]) / df_today["yest_close"]
    df_today["body"] = np.absolute((df_today["open"]-df_today["close"])/df_today["yest_close"])
    df_zt = df_zt.merge(df_today[["range", "body"]], how="left", left_index=True, right_index=True)
    # turnover: volume over free-float share count (lt_mcap presumably in
    # millions -- hence the 1e6 factor; confirm against data source).
    df_zt.loc[:, "turnover"] = df_zt["volume"]/(df_zt["lt_mcap"]/df_zt["close"]*1e6)
    # fengdan: bid-1 queue value relative to free-float market cap.
    df_zt.loc[:, "fengdan"] = df_zt["b1_v"] * df_zt["b1_p"] *100 / df_zt["lt_mcap"] / 1e8
    df_zt.loc[:, "fengdan_money"] = df_zt["b1_v"]*df_zt["b1_p"]/1e6
    df_industry = get_industry()
    df_res = df_zt.merge(df_industry, how="left", left_index=True, right_index=True)
    columns = ["fengdan", "fengdan_money", "lt_mcap", "turnover", "range", "body", "industry"]
    print("========================== zhangting ==========================")
    print(df_res[columns].sort_values("range", ascending=True))
if __name__ == "__main__":
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
today = None
if len(sys.argv) == 1:
today = pd.datetime.today()
else:
today = pd.datetime.strptime(sys.argv[1], "%Y-%m-%d")
get_zhangting(today)
| [
"numpy.absolute",
"pandas.set_option",
"pandas.datetime.today",
"pandas.datetime.strptime",
"pandas.tseries.offsets.BDay",
"numpy.round"
] | [((1601, 1641), 'numpy.round', 'np.round', (["(df_yest['yest_close'] * 1.1)", '(2)'], {}), "(df_yest['yest_close'] * 1.1, 2)\n", (1609, 1641), True, 'import numpy as np\n'), ((1909, 1985), 'numpy.absolute', 'np.absolute', (["((df_today['open'] - df_today['close']) / df_today['yest_close'])"], {}), "((df_today['open'] - df_today['close']) / df_today['yest_close'])\n", (1920, 1985), True, 'import numpy as np\n'), ((2712, 2751), 'pandas.set_option', 'pd.set_option', (['"""display.max_rows"""', 'None'], {}), "('display.max_rows', None)\n", (2725, 2751), True, 'import pandas as pd\n'), ((2756, 2798), 'pandas.set_option', 'pd.set_option', (['"""display.max_columns"""', 'None'], {}), "('display.max_columns', None)\n", (2769, 2798), True, 'import pandas as pd\n'), ((340, 347), 'pandas.tseries.offsets.BDay', 'BDay', (['(1)'], {}), '(1)\n', (344, 347), False, 'from pandas.tseries.offsets import BDay\n'), ((654, 661), 'pandas.tseries.offsets.BDay', 'BDay', (['(1)'], {}), '(1)\n', (658, 661), False, 'from pandas.tseries.offsets import BDay\n'), ((2859, 2878), 'pandas.datetime.today', 'pd.datetime.today', ([], {}), '()\n', (2876, 2878), True, 'import pandas as pd\n'), ((2905, 2950), 'pandas.datetime.strptime', 'pd.datetime.strptime', (['sys.argv[1]', '"""%Y-%m-%d"""'], {}), "(sys.argv[1], '%Y-%m-%d')\n", (2925, 2950), True, 'import pandas as pd\n'), ((576, 583), 'pandas.tseries.offsets.BDay', 'BDay', (['(1)'], {}), '(1)\n', (580, 583), False, 'from pandas.tseries.offsets import BDay\n'), ((890, 897), 'pandas.tseries.offsets.BDay', 'BDay', (['(1)'], {}), '(1)\n', (894, 897), False, 'from pandas.tseries.offsets import BDay\n')] |
import unittest
import os
import numpy as np
import pandas as pd
from pyinterpolate.semivariance.semivariogram_fit.fit_semivariance import TheoreticalSemivariogram
from pyinterpolate.semivariance.semivariogram_estimation.calculate_semivariance import calculate_semivariance
from pyinterpolate.semivariance.semivariogram_estimation.calculate_semivariance import calculate_weighted_semivariance
class TestFitSemivariance(unittest.TestCase):
    """Tests for TheoreticalSemivariogram fitting and CSV import/export."""
    def __init__(self, *args, **kwargs):
        super(TestFitSemivariance, self).__init__(*args, **kwargs)
        # Shared fixture: sample point data plus lag parameters.
        my_dir = os.path.dirname(__file__)
        path = os.path.join(my_dir, '../sample_data/armstrong_data.npy')
        self.dataset = np.load(path)
        self.step_size = 1.1
        self.max_range = 10
    def test_fit_semivariance(self):
        """Optimal model selection on weighted vs non-weighted semivariance."""
        # Append an extra column used as per-point weights.
        new_col = np.arange(1, len(self.dataset) + 1)
        dataset_weights = np.zeros((self.dataset.shape[0], self.dataset.shape[1] + 1))
        dataset_weights[:, :-1] = self.dataset
        dataset_weights[:, -1] = new_col
        # Calculate weighted and non-weighted semivariance
        gamma_w = calculate_weighted_semivariance(dataset_weights, self.step_size, self.max_range)
        gamma_non = calculate_semivariance(self.dataset, self.step_size, self.max_range)
        # Fit semivariance - find optimal models
        t_non_weighted = TheoreticalSemivariogram(self.dataset, gamma_non)
        t_weighted = TheoreticalSemivariogram(dataset_weights[:, :-1], gamma_w)
        model_non_weighted = t_non_weighted.find_optimal_model(weighted=False, number_of_ranges=8)  # expected: exponential
        model_weighted = t_weighted.find_optimal_model(weighted=False, number_of_ranges=8)  # expected: spherical
        self.assertEqual(model_non_weighted, 'exponential', "Non-weighted model should be exponential")
        self.assertEqual(model_weighted, 'spherical', "Weighted model should be spherical")
    def test_fit_semivariance_io(self):
        """Round-trip of model parameters through export_model/import_model."""
        # Prepare fake model for fit semivariance class
        fake_theoretical_smv = TheoreticalSemivariogram(None, None, False)
        nugget = 0
        sill = 20
        srange = 40
        fake_theoretical_smv.nugget = nugget
        fake_theoretical_smv.sill = sill
        fake_theoretical_smv.range = srange
        fmn = 'linear'
        fake_theoretical_smv.chosen_model_name = fmn
        my_dir = os.path.dirname(__file__)
        file_path = os.path.join(my_dir, '../sample_data/mock_model.csv')
        fake_theoretical_smv.export_model(file_path)
        # Clear model params and name
        fake_theoretical_smv.nugget = None
        fake_theoretical_smv.sill = None
        fake_theoretical_smv.range = None
        fake_theoretical_smv.chosen_model_name = None
        # Check that the cleared model differs from the exported one
        assert fake_theoretical_smv.nugget != nugget
        assert fake_theoretical_smv.range != srange
        assert fake_theoretical_smv.sill != sill
        assert fake_theoretical_smv.chosen_model_name != fmn
        # Import params
        fake_theoretical_smv.import_model(file_path)
        # Check if params are the same as at the beginning
        self.assertEqual(fake_theoretical_smv.nugget, nugget, "Problem with import/export of semivariogram nugget")
        self.assertEqual(fake_theoretical_smv.sill, sill, "Problem with import/export of semivariogram sill")
        self.assertEqual(fake_theoretical_smv.range, srange, "Problem with import/export of semivariogram range")
        self.assertEqual(fake_theoretical_smv.chosen_model_name, fmn, "Problem with import/export of semivariogram "
                                                                      "name")
    def test_semivariance_export(self):
        """export_semivariance writes a CSV with the expected columns/length."""
        gamma = calculate_semivariance(self.dataset, self.step_size, self.max_range)
        theo_model = TheoreticalSemivariogram(self.dataset, gamma)
        theo_model.find_optimal_model(number_of_ranges=8)
        my_dir = os.path.dirname(__file__)
        filepath = os.path.join(my_dir, '../sample_data/test_semivariance_export.csv')
        theo_model.export_semivariance(filepath)
        df = pd.read_csv(filepath)
        columns = ['lag', 'experimental', 'theoretical']
        for c in columns:
            self.assertIn(c, df.columns, f'DataFrame is corrupted, missing {c} column')
        EXPECTED_LEN = 9
        self.assertEqual(len(df), EXPECTED_LEN, f'DataFrame len should be {EXPECTED_LEN} but it is {len(df)}')
# Allow running this test module directly with `python <this file>`.
if __name__ == '__main__':
    unittest.main()
| [
"pandas.read_csv",
"pyinterpolate.semivariance.semivariogram_estimation.calculate_semivariance.calculate_weighted_semivariance",
"os.path.join",
"os.path.dirname",
"numpy.zeros",
"pyinterpolate.semivariance.semivariogram_fit.fit_semivariance.TheoreticalSemivariogram",
"unittest.main",
"numpy.load",
... | [((4465, 4480), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4478, 4480), False, 'import unittest\n'), ((569, 594), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (584, 594), False, 'import os\n'), ((610, 667), 'os.path.join', 'os.path.join', (['my_dir', '"""../sample_data/armstrong_data.npy"""'], {}), "(my_dir, '../sample_data/armstrong_data.npy')\n", (622, 667), False, 'import os\n'), ((692, 705), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (699, 705), True, 'import numpy as np\n'), ((883, 943), 'numpy.zeros', 'np.zeros', (['(self.dataset.shape[0], self.dataset.shape[1] + 1)'], {}), '((self.dataset.shape[0], self.dataset.shape[1] + 1))\n', (891, 943), True, 'import numpy as np\n'), ((1111, 1196), 'pyinterpolate.semivariance.semivariogram_estimation.calculate_semivariance.calculate_weighted_semivariance', 'calculate_weighted_semivariance', (['dataset_weights', 'self.step_size', 'self.max_range'], {}), '(dataset_weights, self.step_size, self.max_range\n )\n', (1142, 1196), False, 'from pyinterpolate.semivariance.semivariogram_estimation.calculate_semivariance import calculate_weighted_semivariance\n'), ((1212, 1280), 'pyinterpolate.semivariance.semivariogram_estimation.calculate_semivariance.calculate_semivariance', 'calculate_semivariance', (['self.dataset', 'self.step_size', 'self.max_range'], {}), '(self.dataset, self.step_size, self.max_range)\n', (1234, 1280), False, 'from pyinterpolate.semivariance.semivariogram_estimation.calculate_semivariance import calculate_semivariance\n'), ((1356, 1405), 'pyinterpolate.semivariance.semivariogram_fit.fit_semivariance.TheoreticalSemivariogram', 'TheoreticalSemivariogram', (['self.dataset', 'gamma_non'], {}), '(self.dataset, gamma_non)\n', (1380, 1405), False, 'from pyinterpolate.semivariance.semivariogram_fit.fit_semivariance import TheoreticalSemivariogram\n'), ((1427, 1485), 'pyinterpolate.semivariance.semivariogram_fit.fit_semivariance.TheoreticalSemivariogram', 
'TheoreticalSemivariogram', (['dataset_weights[:, :-1]', 'gamma_w'], {}), '(dataset_weights[:, :-1], gamma_w)\n', (1451, 1485), False, 'from pyinterpolate.semivariance.semivariogram_fit.fit_semivariance import TheoreticalSemivariogram\n'), ((2023, 2066), 'pyinterpolate.semivariance.semivariogram_fit.fit_semivariance.TheoreticalSemivariogram', 'TheoreticalSemivariogram', (['None', 'None', '(False)'], {}), '(None, None, False)\n', (2047, 2066), False, 'from pyinterpolate.semivariance.semivariogram_fit.fit_semivariance import TheoreticalSemivariogram\n'), ((2349, 2374), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2364, 2374), False, 'import os\n'), ((2395, 2448), 'os.path.join', 'os.path.join', (['my_dir', '"""../sample_data/mock_model.csv"""'], {}), "(my_dir, '../sample_data/mock_model.csv')\n", (2407, 2448), False, 'import os\n'), ((3715, 3783), 'pyinterpolate.semivariance.semivariogram_estimation.calculate_semivariance.calculate_semivariance', 'calculate_semivariance', (['self.dataset', 'self.step_size', 'self.max_range'], {}), '(self.dataset, self.step_size, self.max_range)\n', (3737, 3783), False, 'from pyinterpolate.semivariance.semivariogram_estimation.calculate_semivariance import calculate_semivariance\n'), ((3805, 3850), 'pyinterpolate.semivariance.semivariogram_fit.fit_semivariance.TheoreticalSemivariogram', 'TheoreticalSemivariogram', (['self.dataset', 'gamma'], {}), '(self.dataset, gamma)\n', (3829, 3850), False, 'from pyinterpolate.semivariance.semivariogram_fit.fit_semivariance import TheoreticalSemivariogram\n'), ((3926, 3951), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3941, 3951), False, 'import os\n'), ((3971, 4038), 'os.path.join', 'os.path.join', (['my_dir', '"""../sample_data/test_semivariance_export.csv"""'], {}), "(my_dir, '../sample_data/test_semivariance_export.csv')\n", (3983, 4038), False, 'import os\n'), ((4101, 4122), 'pandas.read_csv', 'pd.read_csv', (['filepath'], {}), 
'(filepath)\n', (4112, 4122), True, 'import pandas as pd\n')] |
from networkx import from_numpy_matrix, set_node_attributes, relabel_nodes, DiGraph
from numpy import matrix
from data import DISTANCES, DEMANDS_DROP
import sys
sys.path.append("../../")
from vrpy import VehicleRoutingProblem

# Build a directed graph whose edges carry a "cost" attribute taken from
# the distance matrix (the structured dtype names the attribute).
cost_matrix = matrix(DISTANCES, dtype=[("cost", int)])
G = from_numpy_matrix(cost_matrix, create_using=DiGraph())

# Attach a demand value to every node.
set_node_attributes(G, values=DEMANDS_DROP, name="demand")

# vrpy expects the depot to be split into "Source" and "Sink" nodes.
G = relabel_nodes(G, {0: "Source", 17: "Sink"})

if __name__ == "__main__":
    # Capacitated VRP with a penalty for dropping customers.
    problem = VehicleRoutingProblem(G, load_capacity=15, drop_penalty=1000, num_vehicles=4)
    problem.solve()
    print(problem.best_value)
    print(problem.best_routes)
    print(problem.best_routes_cost)
    print(problem.best_routes_load)
    print(problem.node_load)
| [
"networkx.relabel_nodes",
"networkx.DiGraph",
"networkx.set_node_attributes",
"vrpy.VehicleRoutingProblem",
"numpy.matrix",
"sys.path.append"
] | [((162, 187), 'sys.path.append', 'sys.path.append', (['"""../../"""'], {}), "('../../')\n", (177, 187), False, 'import sys\n'), ((271, 311), 'numpy.matrix', 'matrix', (['DISTANCES'], {'dtype': "[('cost', int)]"}), "(DISTANCES, dtype=[('cost', int)])\n", (277, 311), False, 'from numpy import matrix\n'), ((376, 434), 'networkx.set_node_attributes', 'set_node_attributes', (['G'], {'values': 'DEMANDS_DROP', 'name': '"""demand"""'}), "(G, values=DEMANDS_DROP, name='demand')\n", (395, 434), False, 'from networkx import from_numpy_matrix, set_node_attributes, relabel_nodes, DiGraph\n'), ((456, 503), 'networkx.relabel_nodes', 'relabel_nodes', (['G', "{(0): 'Source', (17): 'Sink'}"], {}), "(G, {(0): 'Source', (17): 'Sink'})\n", (469, 503), False, 'from networkx import from_numpy_matrix, set_node_attributes, relabel_nodes, DiGraph\n'), ((540, 617), 'vrpy.VehicleRoutingProblem', 'VehicleRoutingProblem', (['G'], {'load_capacity': '(15)', 'drop_penalty': '(1000)', 'num_vehicles': '(4)'}), '(G, load_capacity=15, drop_penalty=1000, num_vehicles=4)\n', (561, 617), False, 'from vrpy import VehicleRoutingProblem\n'), ((350, 359), 'networkx.DiGraph', 'DiGraph', ([], {}), '()\n', (357, 359), False, 'from networkx import from_numpy_matrix, set_node_attributes, relabel_nodes, DiGraph\n')] |
# coding=utf-8
# Author: <NAME>
# Date: Aug 06, 2019
#
# Description: Plots results of screened DM genes
#
# Instructions:
#
import numpy as np
import pandas as pd
pd.set_option('display.max_rows', 100)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
import matplotlib as mpl
from matplotlib import colors
mpl.rcParams['mathtext.fontset'] = 'cm'
mpl.rcParams['mathtext.rm'] = 'serif'
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
def value_to_color(x, cmap, norm):
    """Map scalar *x* to a hex colour: normalise with *norm*, look up in *cmap*."""
    r, g, b = cmap(norm(x))[:3]  # drop the alpha channel
    return colors.rgb2hex((r, g, b))
def calc_control_mean_std_fert_rate(x):
    """Return mean and (sample) std of the per-row hatch ratio hatched/eggs."""
    ratio = x['hatched'] / x['eggs']
    return pd.Series({
        'mean fert-rate': ratio.mean(),
        'std fert-rate': ratio.std(),
    })
if __name__ == '__main__':

    # Load the list of core meiotic genes (index: id_gene).
    df = pd.read_csv('../02-core_genes/results/pipeline-core/DM_meiotic_genes.csv', index_col=0, usecols=['id_gene', 'gene'])
    # Load screening results.
    dfs = pd.read_csv('data/core_DM_screened_2020-10-21.csv', index_col=0)
    # Load control data and compute per-group mean/std fertility rate.
    # NOTE(review): dfc is never used below -- confirm before removing the read.
    dfc = pd.read_csv('data/screened_DM_controls.csv', index_col=0)
    dfc = dfc.groupby(dfc.index).apply(calc_control_mean_std_fert_rate)
    # Load FPKM expression data.
    dfFPKM = pd.read_csv('../02-core_genes/results/FPKM/DM/DM-FPKM-spermatocyte.csv.gz', index_col=0, usecols=['id_gene', 'FPKM'])
    # Genes with at least one fertility test recorded.
    dfs_only = dfs.loc[~dfs['FT1 eggs'].isnull(), :]
    status_cats = ['Screened', 'To be crossed', 'Pending', 'Reorder']
    dfs['Status'] = pd.Categorical(dfs['Status'], categories=status_cats, ordered=True)
    df['Status'] = dfs['Status']
    cols = ['FT1 eggs', 'FT1 hatched', 'FT2 eggs', 'FT2 hatched', 'FT3 eggs', 'FT3 hatched', 'FT4 eggs', 'FT4 hatched']
    df[cols] = dfs_only[cols]
    # Only plot screened genes
    df = df.loc[df['Status'] == 'Screened', :]
    # Per-replicate fertility rates (hatched / eggs) and totals.
    df['total-eggs'] = 0
    df['total-hatched'] = 0
    for ft in range(1, 5):
        col_eggs = 'FT{:d} eggs'.format(ft)
        col_hatched = 'FT{:d} hatched'.format(ft)
        col_fertate = 'FT{:d} fert-rate'.format(ft)
        df[col_fertate] = df[col_hatched] / df[col_eggs]
        df['total-eggs'] += df[col_eggs]
        df['total-hatched'] += df[col_hatched]
    # Mean/SD across the four fertility tests.
    df['mean fert-rate'] = df[['FT1 fert-rate', 'FT2 fert-rate', 'FT3 fert-rate', 'FT4 fert-rate']].mean(axis=1)
    df['std fert-rate'] = df[['FT1 fert-rate', 'FT2 fert-rate', 'FT3 fert-rate', 'FT4 fert-rate']].std(axis=1)
    print(dfs.head())
    print(dfs.loc[dfs['MM pheno code'].str.len() > 1, :])
    print('---')
    print(dfs['Previous ref to RNAi working?'].value_counts())
    df['RNAi'] = dfs['Previous ref to RNAi working?']
    df['our-DM-code'] = dfs['Our DM pheno code']
    df['ext-DM-code'] = dfs['Others DM pheno code']
    df['ext-MM-code'] = dfs['MM pheno code']
    df['ext-HS-code'] = dfs['HS pheno code']
    # BUGFIX: df.head is a bound method; it must be *called* to print the frame.
    print(df.head())
    # FPKM
    df['FPKM'] = dfFPKM['FPKM']
    df['logFPKM'] = df['FPKM'].apply(lambda x: np.log2(x + 1))
    maxfpkm, minfpkm = df['logFPKM'].max(), df['logFPKM'].min()
    # Meaning of the phenotype codes (code_label kept as reference; only
    # code_color is used by the plots below).
    code_label = {
        'A': 'Meiotic',
        'B': 'Post-meiotic',
        'C': 'Gametes',
        'D': 'Pre-meiotic',
        'E': 'General impairment of spermatogenesis',
        'F': 'Undetectable',
        'G': 'Unspecified ',
        'H': 'Non-germ cell autonomous'
    }
    code_color = {
        'A': '#d62728',
        'B': '#ce6dbd',
        'C': '#756bb1',
        'D': '#c7e9c0',
        'E': '#9edae5',
        'F': '#fdd0a2',
        'G': '#dadaeb',
        'H': '#bdbdbd'
    }
    # print(df.head())
    # print(df.tail())
    print("logFPKM: {:.2f}/{:.2f}".format(minfpkm, maxfpkm))
    #
    # Plot all data
    #
    print("Plotting")
    n_total = len(df)
    # Keep only genes with no previously reported working RNAi.
    df = df.loc[df['RNAi'] == 'No', :]
    n_indexed = len(df)
    # Scale the figure height to the number of rows actually plotted.
    fig_height = 11 / n_total * n_indexed
    df = df.sort_values(['Status', 'mean fert-rate', 'gene'], ascending=[False, False, False]).reset_index()
    fig = plt.figure(figsize=(2.5, fig_height))
    # fig.suptitle('Core metazoan meiotic genes'.format(page, number_of_pages))
    gs = gridspec.GridSpec(nrows=1, ncols=14)
    n_for_grid_height = 1
    ax_fert = plt.subplot(gs[:n_for_grid_height, 0:8])
    ax_our_dm = plt.subplot(gs[:n_for_grid_height, 8])
    ax_ext_dm = plt.subplot(gs[:n_for_grid_height, 9])
    ax_ext_mm = plt.subplot(gs[:n_for_grid_height, 10])
    ax_ext_hs = plt.subplot(gs[:n_for_grid_height, 11])
    ax_fpkm = plt.subplot(gs[:n_for_grid_height, 12])
    ax_rnai = plt.subplot(gs[:n_for_grid_height, 13])
    adjustable = 'datalim'
    aspect = 'auto'
    ax_fert.set(adjustable=adjustable, aspect=aspect, anchor='NE')
    ax_fpkm.set(adjustable=adjustable, aspect=aspect, anchor='NE')
    ax_rnai.set(adjustable=adjustable, aspect=aspect, anchor='NE')
    ax_our_dm.set(adjustable=adjustable, aspect=aspect, anchor='NE')
    ax_ext_dm.set(adjustable=adjustable, aspect=aspect, anchor='NE')
    ax_ext_mm.set(adjustable=adjustable, aspect=aspect, anchor='NE')
    ax_ext_hs.set(adjustable=adjustable, aspect=aspect, anchor='NE')
    norm_fpkm = mpl.colors.Normalize(vmin=minfpkm, vmax=maxfpkm)
    cmap_fpkm = mpl.cm.Reds
    s = 12
    marker = '_'
    n = len(df)
    yticks = list(np.arange(0, n, 50))
    yticklabels = yticks[::-1]
    #
    # Fertility rate (mean +/- SD over FT1..FT4)
    #
    eb = ax_fert.errorbar(df['mean fert-rate'], range(0, len(df)), xerr=df['std fert-rate'], lw=0,
                          ecolor='#636363', elinewidth=0.6, capsize=0.6,  # #3182bd
                          marker='.', markersize=1.5,
                          markeredgecolor='#636363', markeredgewidth=0.0,  # #3182bd
                          markerfacecolor='black', markerfacecoloralt=None, zorder=5)  # #6baed6
    # NOTE(review): lw=0 makes this threshold line invisible -- confirm intent.
    ax_fert.axvline(0.75, color='#d62728', lw=0, zorder=6)
    # ax_fert.set_xlabel('Fertility Rate (Mean +/- SD) ', fontsize='small', ha='center')
    ax_fert.set_xticks(np.linspace(0, 1, 5))
    ax_fert.set_xticklabels([], fontsize='small', rotation=0)
    ax_fert.set_yticks(yticks)
    ax_fert.set_yticklabels(yticklabels, rotation=0, va='center', ha='right', fontsize='small')
    ax_fert.set_xlim(-0.04, 1.04)
    ax_fert.set_ylim(-1, len(df))
    ax_fert.grid(linewidth=0.5)
    #
    # Expression (FPKM), one coloured tick per gene
    #
    y = df['logFPKM'].index
    x = np.zeros(len(y))
    c = df['logFPKM'].apply(value_to_color, args=(cmap_fpkm, norm_fpkm))
    sc_fpkm = ax_fpkm.scatter(x=x, y=y, s=s, c=c, marker=marker, zorder=5)
    ax_fpkm.set_xticks([0])
    ax_fpkm.set_xticklabels([])
    ax_fpkm.set_yticks(yticks)
    ax_fpkm.tick_params(axis='y', which='major', length=1.5)
    ax_fpkm.set_yticklabels([])
    ax_fpkm.set_xlim(-0.2, 0.2)  # Adjusting this makes the plot shrink
    ax_fpkm.set_ylim(-1, len(df))
    #
    # RNAi
    #
    data_rnai = df.loc[df['RNAi'] == 'Yes', 'RNAi']
    y = data_rnai.index
    x = np.zeros(len(y))
    c = '#17becf'
    sc_rnai = ax_rnai.scatter(x=x, y=y, s=s, c=c, marker=marker, zorder=5)
    ax_rnai.set_xticks([0])
    ax_rnai.set_xticklabels([])
    ax_rnai.set_yticks(yticks)
    ax_rnai.tick_params(axis='y', which='major', length=1.5)
    ax_rnai.set_yticklabels([])
    ax_rnai.set_xlim(-0.2, 0.2)
    ax_rnai.set_ylim(-1, len(df))
    # ax_rnai.grid(axis='y', linewidth=0.5)
    #
    # Our DM Phenotype
    #
    data_our_dm = df.loc[~df['our-DM-code'].isnull(), 'our-DM-code']
    y = data_our_dm.index
    x = np.zeros(len(y))
    c = data_our_dm.map(code_color)
    sc_our_dm = ax_our_dm.scatter(x=x, y=y, s=s, c=c, marker=marker, zorder=5)
    ax_our_dm.set_xticks([0])
    ax_our_dm.set_xticklabels([])
    ax_our_dm.set_yticks(yticks)
    ax_our_dm.tick_params(axis='y', which='major', length=1.5)
    ax_our_dm.set_yticklabels([])
    ax_our_dm.set_xlim(-0.5, 0.5)
    ax_our_dm.set_ylim(-1, len(df))
    # ax_our_dm.grid(axis='y', linewidth=0.5)
    #
    # External DM Phenotype
    #
    data_ext_dm = df.loc[~df['ext-DM-code'].isnull(), 'ext-DM-code']
    y = data_ext_dm.index
    x = np.zeros(len(y))
    c = data_ext_dm.map(code_color)
    sc_ext_dm = ax_ext_dm.scatter(x=x, y=y, s=s, c=c, marker=marker, zorder=5)
    ax_ext_dm.set_xticks([0])
    ax_ext_dm.set_xticklabels([])
    ax_ext_dm.set_yticks(yticks)
    ax_ext_dm.tick_params(axis='y', which='major', length=1.5)
    ax_ext_dm.set_yticklabels([])
    ax_ext_dm.set_xlim(-0.5, 0.5)
    ax_ext_dm.set_ylim(-1, len(df))
    # ax_ext_dm.grid(axis='y', linewidth=0.5)
    #
    # External MM Phenotype
    #
    # (these lines solve the problem when an MM phenotype has two codes, e.g., A/B)
    data_ext_mm = df.loc[~df['ext-MM-code'].isnull(), 'ext-MM-code']
    if len(data_ext_mm):
        # Split "A/B" style codes into separate rows, then offset duplicated
        # rows horizontally so both marks are visible side by side.
        data_tmp = data_ext_mm.str.split('/').apply(pd.Series)
        data_tmp = pd.melt(data_tmp.reset_index(), id_vars='index', value_vars=data_tmp.columns.tolist())
        data_tmp = data_tmp.set_index('index').dropna(subset=['value'])
        data_tmp.loc[(data_tmp.index.duplicated(keep=False) & (data_tmp['variable'] == 0)), 'variable'] = -0.2
        data_tmp.loc[(data_tmp.index.duplicated(keep=False) & (data_tmp['variable'] == 1)), 'variable'] = +0.2
        #
        y = data_tmp.index.values
        x = data_tmp['variable'].values
        c = data_tmp['value'].map(code_color).values
        sc_ext_mm = ax_ext_mm.scatter(x=x, y=y, s=s, c=c, marker=marker, zorder=5)
    ax_ext_mm.set_xticks([0])
    ax_ext_mm.set_xticklabels([])
    ax_ext_mm.set_yticks(yticks)
    ax_ext_mm.tick_params(axis='y', which='major', length=1.5)
    ax_ext_mm.set_yticklabels([])
    ax_ext_mm.set_xlim(-0.5, 0.5)
    ax_ext_mm.set_ylim(-1, len(df))
    # ax_ext_mm.grid(axis='y', linewidth=0.5)
    #
    # External HS Phenotype
    #
    data_ext_hs = df.loc[~df['ext-HS-code'].isnull(), 'ext-HS-code']
    y = data_ext_hs.index
    x = np.zeros(len(y))
    c = data_ext_hs.map(code_color)
    # BUGFIX: this scatter was drawn on ax_ext_dm, overplotting the DM column
    # and leaving the HS column empty; draw it on its own axis.
    sc_ext_hs = ax_ext_hs.scatter(x=x, y=y, s=s, c=c, marker=marker, zorder=5)
    ax_ext_hs.set_xticks([0])
    ax_ext_hs.set_xticklabels([])
    ax_ext_hs.set_yticks(yticks)
    ax_ext_hs.tick_params(axis='y', which='major', length=1.5)
    ax_ext_hs.set_yticklabels([])
    ax_ext_hs.set_xlim(-0.5, 0.5)
    ax_ext_hs.set_ylim(-1, len(df))
    # ax_ext_hs.grid(axis='x', linewidth=0.5)
    plt.subplots_adjust(left=0.15, right=0.96, bottom=0.01, top=0.99, wspace=0.4, hspace=1.4)
    fig.savefig('images/img-core_DM_screened-rnai-No.pdf')
| [
"pandas.read_csv",
"numpy.arange",
"pandas.Categorical",
"pandas.set_option",
"matplotlib.pyplot.figure",
"matplotlib.gridspec.GridSpec",
"numpy.linspace",
"matplotlib.colors.Normalize",
"matplotlib.colors.rgb2hex",
"numpy.log2",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.subplots_adjust"... | [((164, 202), 'pandas.set_option', 'pd.set_option', (['"""display.max_rows"""', '(100)'], {}), "('display.max_rows', 100)\n", (177, 202), True, 'import pandas as pd\n'), ((203, 244), 'pandas.set_option', 'pd.set_option', (['"""display.max_columns"""', '(500)'], {}), "('display.max_columns', 500)\n", (216, 244), True, 'import pandas as pd\n'), ((245, 281), 'pandas.set_option', 'pd.set_option', (['"""display.width"""', '(1000)'], {}), "('display.width', 1000)\n", (258, 281), True, 'import pandas as pd\n'), ((562, 581), 'matplotlib.colors.rgb2hex', 'colors.rgb2hex', (['rgb'], {}), '(rgb)\n', (576, 581), False, 'from matplotlib import colors\n'), ((811, 931), 'pandas.read_csv', 'pd.read_csv', (['"""../02-core_genes/results/pipeline-core/DM_meiotic_genes.csv"""'], {'index_col': '(0)', 'usecols': "['id_gene', 'gene']"}), "('../02-core_genes/results/pipeline-core/DM_meiotic_genes.csv',\n index_col=0, usecols=['id_gene', 'gene'])\n", (822, 931), True, 'import pandas as pd\n'), ((964, 1028), 'pandas.read_csv', 'pd.read_csv', (['"""data/core_DM_screened_2020-10-21.csv"""'], {'index_col': '(0)'}), "('data/core_DM_screened_2020-10-21.csv', index_col=0)\n", (975, 1028), True, 'import pandas as pd\n'), ((1064, 1121), 'pandas.read_csv', 'pd.read_csv', (['"""data/screened_DM_controls.csv"""'], {'index_col': '(0)'}), "('data/screened_DM_controls.csv', index_col=0)\n", (1075, 1121), True, 'import pandas as pd\n'), ((1229, 1350), 'pandas.read_csv', 'pd.read_csv', (['"""../02-core_genes/results/FPKM/DM/DM-FPKM-spermatocyte.csv.gz"""'], {'index_col': '(0)', 'usecols': "['id_gene', 'FPKM']"}), "('../02-core_genes/results/FPKM/DM/DM-FPKM-spermatocyte.csv.gz',\n index_col=0, usecols=['id_gene', 'FPKM'])\n", (1240, 1350), True, 'import pandas as pd\n'), ((1492, 1559), 'pandas.Categorical', 'pd.Categorical', (["dfs['Status']"], {'categories': 'status_cats', 'ordered': '(True)'}), "(dfs['Status'], categories=status_cats, ordered=True)\n", (1506, 
1559), True, 'import pandas as pd\n'), ((3984, 4021), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(2.5, fig_height)'}), '(figsize=(2.5, fig_height))\n', (3994, 4021), True, 'import matplotlib.pyplot as plt\n'), ((4112, 4148), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', ([], {'nrows': '(1)', 'ncols': '(14)'}), '(nrows=1, ncols=14)\n', (4129, 4148), True, 'import matplotlib.gridspec as gridspec\n'), ((4189, 4229), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[:n_for_grid_height, 0:8]'], {}), '(gs[:n_for_grid_height, 0:8])\n', (4200, 4229), True, 'import matplotlib.pyplot as plt\n'), ((4246, 4284), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[:n_for_grid_height, 8]'], {}), '(gs[:n_for_grid_height, 8])\n', (4257, 4284), True, 'import matplotlib.pyplot as plt\n'), ((4301, 4339), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[:n_for_grid_height, 9]'], {}), '(gs[:n_for_grid_height, 9])\n', (4312, 4339), True, 'import matplotlib.pyplot as plt\n'), ((4356, 4395), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[:n_for_grid_height, 10]'], {}), '(gs[:n_for_grid_height, 10])\n', (4367, 4395), True, 'import matplotlib.pyplot as plt\n'), ((4412, 4451), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[:n_for_grid_height, 11]'], {}), '(gs[:n_for_grid_height, 11])\n', (4423, 4451), True, 'import matplotlib.pyplot as plt\n'), ((4466, 4505), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[:n_for_grid_height, 12]'], {}), '(gs[:n_for_grid_height, 12])\n', (4477, 4505), True, 'import matplotlib.pyplot as plt\n'), ((4520, 4559), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[:n_for_grid_height, 13]'], {}), '(gs[:n_for_grid_height, 13])\n', (4531, 4559), True, 'import matplotlib.pyplot as plt\n'), ((5102, 5150), 'matplotlib.colors.Normalize', 'mpl.colors.Normalize', ([], {'vmin': 'minfpkm', 'vmax': 'maxfpkm'}), '(vmin=minfpkm, vmax=maxfpkm)\n', (5122, 5150), True, 'import matplotlib as mpl\n'), ((10383, 10477), 
'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.15)', 'right': '(0.96)', 'bottom': '(0.01)', 'top': '(0.99)', 'wspace': '(0.4)', 'hspace': '(1.4)'}), '(left=0.15, right=0.96, bottom=0.01, top=0.99, wspace=\n 0.4, hspace=1.4)\n', (10402, 10477), True, 'import matplotlib.pyplot as plt\n'), ((5260, 5279), 'numpy.arange', 'np.arange', (['(0)', 'n', '(50)'], {}), '(0, n, 50)\n', (5269, 5279), True, 'import numpy as np\n'), ((5945, 5965), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(5)'], {}), '(0, 1, 5)\n', (5956, 5965), True, 'import numpy as np\n'), ((2971, 2985), 'numpy.log2', 'np.log2', (['(x + 1)'], {}), '(x + 1)\n', (2978, 2985), True, 'import numpy as np\n')] |
from netCDF4._netCDF4 import Variable
import numpy
def decode_time(variable: "Variable", unit: str = None) -> numpy.array:
    """Convert a netCDF time variable into an array of ``numpy.datetime64``.

    Parameters
    ----------
    variable : netCDF4 ``Variable`` or any array-like of numeric offsets
        Offsets from the base date encoded in *unit*.
    unit : str, optional
        CF-style unit string, e.g. ``"days since 2000-01-01 00:00:00 UTC"``.
        Defaults to ``variable.units``.

    Returns
    -------
    numpy.ndarray
        ``datetime64`` values, one per entry of *variable*.
    """
    if unit is None:
        unit = variable.units

    # "days since 2000-01-01 00:00:00 UTC" -> ("days", "since", base date)
    interval, _, base_date = unit.split(' ', 2)

    intervals = {
        'years': 'Y',
        'months': 'M',
        'days': 'D',
        'hours': 'h',
        'minutes': 'm',
        'seconds': 's',
    }

    # BUGFIX: the original used base_date.strip(' UTC'), but str.strip removes
    # a *set of characters* from both ends, not the suffix ' UTC', and could
    # corrupt base dates starting/ending with 'U', 'T' or 'C'.  Remove the
    # optional trailing time-zone label explicitly instead.
    base_date = base_date.strip()
    if base_date.endswith('UTC'):
        base_date = base_date[:-3].rstrip()

    return numpy.datetime64(base_date) + numpy.array(variable).astype(
        f'timedelta64[{intervals[interval]}]'
    )
| [
"numpy.array",
"numpy.datetime64"
] | [((437, 464), 'numpy.datetime64', 'numpy.datetime64', (['base_date'], {}), '(base_date)\n', (453, 464), False, 'import numpy\n'), ((467, 488), 'numpy.array', 'numpy.array', (['variable'], {}), '(variable)\n', (478, 488), False, 'import numpy\n')] |
'''This class will log 1d array in Nd matrix from device and qualisys object'''
import numpy as np
from datetime import datetime as datetime
from time import time
from utils_mpc import quaternionToRPY
class LoggerControl():
def __init__(self, dt, N0_gait, joystick=None, estimator=None, loop=None, gait=None, statePlanner=None,
footstepPlanner=None, footTrajectoryGenerator=None, logSize=60e3, ringBuffer=False):
self.ringBuffer = ringBuffer
logSize = np.int(logSize)
self.logSize = logSize
self.i = 0
self.dt = dt
# Allocate the data:
# Joystick
self.joy_v_ref = np.zeros([logSize, 6]) # reference velocity of the joystick
# Estimator
self.esti_feet_status = np.zeros([logSize, 4]) # input feet status (contact or not)
self.esti_feet_goals = np.zeros([logSize, 3, 4]) # input feet goals (desired on the ground)
self.esti_q_filt = np.zeros([logSize, 19]) # output position
self.esti_v_filt = np.zeros([logSize, 18]) # output velocity
self.esti_v_secu = np.zeros([logSize, 12]) # filtered output velocity for security check
self.esti_FK_lin_vel = np.zeros([logSize, 3]) # estimated velocity of the base with FK
self.esti_FK_xyz = np.zeros([logSize, 3]) # estimated position of the base with FK
self.esti_xyz_mean_feet = np.zeros([logSize, 3]) # average of feet goals
self.esti_filt_lin_vel = np.zeros([logSize, 3]) # estimated velocity of the base before low pass filter
self.esti_HP_x = np.zeros([logSize, 3]) # x input of the velocity complementary filter
self.esti_HP_dx = np.zeros([logSize, 3]) # dx input of the velocity complementary filter
self.esti_HP_alpha = np.zeros([logSize, 3]) # alpha parameter of the velocity complementary filter
self.esti_HP_filt_x = np.zeros([logSize, 3]) # filtered output of the velocity complementary filter
self.esti_LP_x = np.zeros([logSize, 3]) # x input of the position complementary filter
self.esti_LP_dx = np.zeros([logSize, 3]) # dx input of the position complementary filter
self.esti_LP_alpha = np.zeros([logSize, 3]) # alpha parameter of the position complementary filter
self.esti_LP_filt_x = np.zeros([logSize, 3]) # filtered output of the position complementary filter
self.esti_kf_X = np.zeros([logSize, 18]) # state of the Kalman filter
self.esti_kf_Z = np.zeros([logSize, 16]) # measurement for the Kalman filter
# Loop
self.loop_o_q_int = np.zeros([logSize, 19]) # position in world frame (esti_q_filt + dt * loop_o_v)
self.loop_o_v = np.zeros([logSize, 18]) # estimated velocity in world frame
self.loop_h_v = np.zeros([logSize, 18]) # estimated velocity in horizontal frame
self.loop_pos_virtual_world = np.zeros([logSize, 3]) # x, y, yaw perfect position in world
# Gait
self.planner_gait = np.zeros([logSize, N0_gait, 4]) # Gait sequence
self.planner_is_static = np.zeros([logSize]) # if the planner is in static mode or not
self.planner_q_static = np.zeros([logSize, 19]) # position in static mode (4 stance phase)
self.planner_RPY_static = np.zeros([logSize, 3]) # RPY orientation in static mode (4 stance phase)
# State planner
if statePlanner is not None:
self.planner_xref = np.zeros([logSize, 12, 1+statePlanner.getNSteps()]) # Reference trajectory
# Footstep planner
if gait is not None:
self.planner_fsteps = np.zeros([logSize, gait.getCurrentGait().shape[0], 12]) # Reference footsteps position
self.planner_h_ref = np.zeros([logSize]) # reference height of the planner
# Foot Trajectory Generator
self.planner_goals = np.zeros([logSize, 3, 4]) # 3D target feet positions
self.planner_vgoals = np.zeros([logSize, 3, 4]) # 3D target feet velocities
self.planner_agoals = np.zeros([logSize, 3, 4]) # 3D target feet accelerations
# Model Predictive Control
# output vector of the MPC (next state + reference contact force)
if statePlanner is not None:
self.mpc_x_f = np.zeros([logSize, 24, statePlanner.getNSteps()])
# Whole body control
self.wbc_x_f = np.zeros([logSize, 24]) # input vector of the WBC (next state + reference contact force)
self.wbc_P = np.zeros([logSize, 12]) # proportionnal gains of the PD+
self.wbc_D = np.zeros([logSize, 12]) # derivative gains of the PD+
self.wbc_q_des = np.zeros([logSize, 12]) # desired position of actuators
self.wbc_v_des = np.zeros([logSize, 12]) # desired velocity of actuators
self.wbc_tau_ff = np.zeros([logSize, 12]) # feedforward torques computed by the WBC
self.wbc_f_ctc = np.zeros([logSize, 12]) # contact forces computed by the WBC
self.wbc_feet_pos = np.zeros([logSize, 3, 4]) # current feet positions according to WBC
self.wbc_feet_pos_target = np.zeros([logSize, 3, 4]) # current feet positions targets for WBC
self.wbc_feet_err = np.zeros([logSize, 3, 4]) # error between feet positions and their reference
self.wbc_feet_vel = np.zeros([logSize, 3, 4]) # current feet velocities according to WBC
self.wbc_feet_vel_target = np.zeros([logSize, 3, 4]) # current feet velocities targets for WBC
self.wbc_feet_acc_target = np.zeros([logSize, 3, 4]) # current feet accelerations targets for WBC
self.wbc_feet_pos_invkin = np.zeros([logSize, 3, 4]) # current feet positions according to InvKin
self.wbc_feet_vel_invkin = np.zeros([logSize, 3, 4]) # current feet velocities according to InvKin
# Timestamps
self.tstamps = np.zeros(logSize)
    def sample(self, joystick, estimator, loop, gait, statePlanner, footstepPlanner, footTrajectoryGenerator, wbc):
        """Record the current state of every pipeline component into row
        ``self.i`` of the log arrays, then advance the write index.

        When the log is full, either wraps to row 0 (``ringBuffer=True``)
        or silently stops logging.
        """
        if (self.i >= self.logSize):
            if self.ringBuffer:
                self.i = 0
            else:
                return
        # Logging from joystick
        self.joy_v_ref[self.i] = joystick.v_ref[:, 0]
        # Logging from estimator
        self.esti_feet_status[self.i] = estimator.feet_status[:]
        self.esti_feet_goals[self.i] = estimator.feet_goals
        self.esti_q_filt[self.i] = estimator.q_filt[:, 0]
        self.esti_v_filt[self.i] = estimator.v_filt[:, 0]
        self.esti_v_secu[self.i] = estimator.v_secu[:]
        self.esti_FK_lin_vel[self.i] = estimator.FK_lin_vel[:]
        self.esti_FK_xyz[self.i] = estimator.FK_xyz[:]
        self.esti_xyz_mean_feet[self.i] = estimator.xyz_mean_feet[:]
        self.esti_filt_lin_vel[self.i] = estimator.filt_lin_vel[:]
        # Complementary-filter internals are only meaningful when the Kalman
        # filter is disabled; otherwise log the Kalman state/measurement.
        if not estimator.kf_enabled:
            self.esti_HP_x[self.i] = estimator.filter_xyz_vel.x
            self.esti_HP_dx[self.i] = estimator.filter_xyz_vel.dx
            self.esti_HP_alpha[self.i] = estimator.filter_xyz_vel.alpha
            self.esti_HP_filt_x[self.i] = estimator.filter_xyz_vel.filt_x
            self.esti_LP_x[self.i] = estimator.filter_xyz_pos.x
            self.esti_LP_dx[self.i] = estimator.filter_xyz_pos.dx
            self.esti_LP_alpha[self.i] = estimator.filter_xyz_pos.alpha
            self.esti_LP_filt_x[self.i] = estimator.filter_xyz_pos.filt_x
        else:
            self.esti_kf_X[self.i] = estimator.kf.X[:, 0]
            self.esti_kf_Z[self.i] = estimator.Z[:, 0]
        # Logging from the main loop
        self.loop_o_q_int[self.i] = loop.q[:, 0]
        self.loop_o_v[self.i] = loop.v[:, 0]
        self.loop_h_v[self.i] = loop.h_v[:, 0]
        self.loop_pos_virtual_world[self.i] = np.array([loop.q[0, 0], loop.q[1, 0], loop.yaw_estim])
        # Logging from the planner
        # self.planner_q_static[self.i] = planner.q_static[:]
        # self.planner_RPY_static[self.i] = planner.RPY_static[:, 0]
        self.planner_xref[self.i] = statePlanner.getReferenceStates()
        self.planner_fsteps[self.i] = footstepPlanner.getFootsteps()
        self.planner_gait[self.i] = gait.getCurrentGait()
        self.planner_goals[self.i] = footTrajectoryGenerator.getFootPosition()
        self.planner_vgoals[self.i] = footTrajectoryGenerator.getFootVelocity()
        self.planner_agoals[self.i] = footTrajectoryGenerator.getFootAcceleration()
        self.planner_is_static[self.i] = gait.getIsStatic()
        self.planner_h_ref[self.i] = loop.h_ref
        # Logging from model predictive control
        self.mpc_x_f[self.i] = loop.x_f_mpc
        # Logging from whole body control
        self.wbc_x_f[self.i] = loop.x_f_wbc
        self.wbc_P[self.i] = loop.result.P
        self.wbc_D[self.i] = loop.result.D
        self.wbc_q_des[self.i] = loop.result.q_des
        self.wbc_v_des[self.i] = loop.result.v_des
        self.wbc_tau_ff[self.i] = loop.result.tau_ff
        self.wbc_f_ctc[self.i] = wbc.f_with_delta[:, 0]
        self.wbc_feet_pos[self.i] = wbc.feet_pos
        # NOTE(review): the wbc target logs are read at index self.i+1 --
        # presumably they are one step ahead of the current sample; confirm.
        self.wbc_feet_pos_target[self.i] = wbc.log_feet_pos_target[:, :, self.i+1]
        self.wbc_feet_err[self.i] = wbc.feet_err
        self.wbc_feet_vel[self.i] = wbc.feet_vel
        self.wbc_feet_vel_target[self.i] = wbc.log_feet_vel_target[:, :, self.i+1]
        self.wbc_feet_acc_target[self.i] = wbc.log_feet_acc_target[:, :, self.i+1]
        self.wbc_feet_pos_invkin[self.i] = wbc.invKin.cpp_posf.transpose()
        self.wbc_feet_vel_invkin[self.i] = wbc.invKin.cpp_vf.transpose()
        # Logging timestamp
        self.tstamps[self.i] = time()
        self.i += 1
def processMocap(self, N, loggerSensors):
self.mocap_b_v = np.zeros([N, 3])
self.mocap_b_w = np.zeros([N, 3])
self.mocap_RPY = np.zeros([N, 3])
for i in range(N):
oRb = loggerSensors.mocapOrientationMat9[i]
"""from IPython import embed
embed()"""
self.mocap_b_v[i] = (oRb.transpose() @ loggerSensors.mocapVelocity[i].reshape((3, 1))).ravel()
self.mocap_b_w[i] = (oRb.transpose() @ loggerSensors.mocapAngularVelocity[i].reshape((3, 1))).ravel()
self.mocap_RPY[i] = quaternionToRPY(loggerSensors.mocapOrientationQuat[i])[:, 0]
def plotAll(self, loggerSensors):
from matplotlib import pyplot as plt
N = self.tstamps.shape[0]
t_range = np.array([k*self.dt for k in range(N)])
self.processMocap(N, loggerSensors)
index6 = [1, 3, 5, 2, 4, 6]
index12 = [1, 5, 9, 2, 6, 10, 3, 7, 11, 4, 8, 12]
"""plt.figure()
for i in range(4):
if i == 0:
ax0 = plt.subplot(2, 2, i+1)
else:
plt.subplot(2, 2, i+1, sharex=ax0)
switch = np.diff(self.esti_feet_status[:, i])
tmp = self.wbc_feet_pos[:-1, 2, i]
tmp_y = tmp[switch > 0]
tmp_x = t_range[:-1]
tmp_x = tmp_x[switch > 0]
plt.plot(tmp_x, tmp_y, linewidth=3)"""
lgd_X = ["FL", "FR", "HL", "HR"]
lgd_Y = ["Pos X", "Pos Y", "Pos Z"]
plt.figure()
for i in range(12):
if i == 0:
ax0 = plt.subplot(3, 4, index12[i])
else:
plt.subplot(3, 4, index12[i], sharex=ax0)
plt.plot(t_range, self.wbc_feet_pos[:, i % 3, np.int(i/3)], color='b', linewidth=3, marker='')
plt.plot(t_range, self.wbc_feet_err[:, i % 3, np.int(i/3)] + self.wbc_feet_pos[0, i % 3, np.int(i/3)], color='g', linewidth=3, marker='')
plt.plot(t_range, self.wbc_feet_pos_target[:, i % 3, np.int(i/3)], color='r', linewidth=3, marker='')
"""plt.plot(t_range, self.wbc_feet_pos_invkin[:, i % 3, np.int(i/3)],
color='darkviolet', linewidth=3, linestyle="--", marker='')"""
if (i % 3) == 2:
mini = np.min(self.wbc_feet_pos[:, i % 3, np.int(i/3)])
maxi = np.max(self.wbc_feet_pos[:, i % 3, np.int(i/3)])
plt.plot(t_range, self.planner_gait[:, 0, np.int(
i/3)] * (maxi - mini) + mini, color='k', linewidth=3, marker='')
plt.legend([lgd_Y[i % 3] + " " + lgd_X[np.int(i/3)]+"", "error",
lgd_Y[i % 3] + " " + lgd_X[np.int(i/3)]+" Ref", "Contact state"], prop={'size': 8})
plt.suptitle("Measured & Reference feet positions (base frame)")
lgd_X = ["FL", "FR", "HL", "HR"]
lgd_Y = ["Vel X", "Vel Y", "Vel Z"]
plt.figure()
for i in range(12):
if i == 0:
ax0 = plt.subplot(3, 4, index12[i])
else:
plt.subplot(3, 4, index12[i], sharex=ax0)
plt.plot(t_range, self.wbc_feet_vel[:, i % 3, np.int(i/3)], color='b', linewidth=3, marker='')
plt.plot(t_range, self.wbc_feet_vel_target[:, i % 3, np.int(i/3)], color='r', linewidth=3, marker='')
"""plt.plot(t_range, self.wbc_feet_vel_invkin[:, i % 3, np.int(i/3)],
color='darkviolet', linewidth=3, linestyle="--", marker='')"""
plt.legend([lgd_Y[i % 3] + " " + lgd_X[np.int(i/3)], lgd_Y[i %
3] + " " + lgd_X[np.int(i/3)]+" Ref"], prop={'size': 8})
plt.suptitle("Measured and Reference feet velocities (base frame)")
lgd_X = ["FL", "FR", "HL", "HR"]
lgd_Y = ["Acc X", "Acc Y", "Acc Z"]
plt.figure()
for i in range(12):
if i == 0:
ax0 = plt.subplot(3, 4, index12[i])
else:
plt.subplot(3, 4, index12[i], sharex=ax0)
plt.plot(t_range, self.wbc_feet_acc_target[:, i % 3, np.int(i/3)], color='r', linewidth=3, marker='')
plt.legend([lgd_Y[i % 3] + " " + lgd_X[np.int(i/3)]+" Ref"], prop={'size': 8})
plt.suptitle("Reference feet accelerations (base frame)")
# LOG_Q
lgd = ["Position X", "Position Y", "Position Z", "Position Roll", "Position Pitch", "Position Yaw"]
plt.figure()
for i in range(6):
if i == 0:
ax0 = plt.subplot(3, 2, index6[i])
else:
plt.subplot(3, 2, index6[i], sharex=ax0)
if i in [0, 1]:
plt.plot(t_range, self.loop_pos_virtual_world[:, i], "b", linewidth=3)
plt.plot(t_range, self.loop_pos_virtual_world[:, i], "r", linewidth=3)
elif i == 5:
plt.plot(t_range, self.loop_pos_virtual_world[:, 2], "b", linewidth=3)
plt.plot(t_range, self.loop_pos_virtual_world[:, 2], "r", linewidth=3)
else:
plt.plot(t_range, self.planner_xref[:, i, 0], "b", linewidth=2)
plt.plot(t_range, self.planner_xref[:, i, 1], "r", linewidth=3)
if i < 3:
plt.plot(t_range, loggerSensors.mocapPosition[:, i], "k", linewidth=3)
else:
plt.plot(t_range, self.mocap_RPY[:, i-3], "k", linewidth=3)
# plt.plot(t_range, self.log_q[i, :], "grey", linewidth=4)
# plt.plot(t_range[:-2], self.log_x_invkin[i, :-2], "g", linewidth=2)
# plt.plot(t_range[:-2], self.log_x_ref_invkin[i, :-2], "violet", linewidth=2, linestyle="--")
plt.legend(["Robot state", "Robot reference state", "Ground truth"], prop={'size': 8})
plt.ylabel(lgd[i])
plt.suptitle("Measured & Reference position and orientation")
# LOG_V
lgd = ["Linear vel X", "Linear vel Y", "Linear vel Z",
"Angular vel Roll", "Angular vel Pitch", "Angular vel Yaw"]
plt.figure()
for i in range(6):
if i == 0:
ax0 = plt.subplot(3, 2, index6[i])
else:
plt.subplot(3, 2, index6[i], sharex=ax0)
plt.plot(t_range, self.loop_h_v[:, i], "b", linewidth=2)
plt.plot(t_range, self.joy_v_ref[:, i], "r", linewidth=3)
if i < 3:
plt.plot(t_range, self.mocap_b_v[:, i], "k", linewidth=3)
# plt.plot(t_range, self.esti_FK_lin_vel[:, i], "violet", linewidth=3, linestyle="--")
plt.plot(t_range, self.esti_filt_lin_vel[:, i], "violet", linewidth=3, linestyle="--")
else:
plt.plot(t_range, self.mocap_b_w[:, i-3], "k", linewidth=3)
"""N = 2000
y = np.convolve(self.mocap_b_w[:, i-3], np.ones(N)/N, mode='valid')
plt.plot(t_range[int(N/2)-1:-int(N/2)], y, linewidth=3, linestyle="--")"""
# plt.plot(t_range, self.log_dq[i, :], "g", linewidth=2)
# plt.plot(t_range[:-2], self.log_dx_invkin[i, :-2], "g", linewidth=2)
# plt.plot(t_range[:-2], self.log_dx_ref_invkin[i, :-2], "violet", linewidth=2, linestyle="--")
plt.legend(["Robot state", "Robot reference state", "Ground truth"], prop={'size': 8})
plt.ylabel(lgd[i])
plt.suptitle("Measured & Reference linear and angular velocities")
"""plt.figure()
plt.plot(t_range[:-2], self.log_x[6, :-2], "b", linewidth=2)
plt.plot(t_range[:-2], self.log_x_cmd[6, :-2], "r", linewidth=2)
plt.plot(t_range[:-2], self.log_dx_invkin[0, :-2], "g", linewidth=2)
plt.plot(t_range[:-2], self.log_dx_ref_invkin[0, :-2], "violet", linewidth=2)
plt.legend(["WBC integrated output state", "Robot reference state",
"Task current state", "Task reference state"])"""
# Analysis of the footstep locations (current and future) with a slider to move along time
# self.slider_predicted_footholds()
# Analysis of the footholds locations during the whole experiment
"""import utils_mpc
import pinocchio as pin
f_c = ["r", "b", "forestgreen", "rebeccapurple"]
quat = np.zeros((4, 1))
steps = np.zeros((12, 1))
o_step = np.zeros((3, 1))
plt.figure()
plt.plot(self.loop_o_q_int[:, 0], self.loop_o_q_int[:, 1], linewidth=2, color="k")
for i in range(self.planner_fsteps.shape[0]):
fsteps = self.planner_fsteps[i]
RPY = utils_mpc.quaternionToRPY(self.loop_o_q_int[i, 3:7])
quat[:, 0] = utils_mpc.EulerToQuaternion([0.0, 0.0, RPY[2]])
oRh = pin.Quaternion(quat).toRotationMatrix()
for j in range(4):
#if np.any(fsteps[k, (j*3):((j+1)*3)]) and not np.array_equal(steps[(j*3):((j+1)*3), 0],
# fsteps[k, (j*3):((j+1)*3)]):
# steps[(j*3):((j+1)*3), 0] = fsteps[k, (j*3):((j+1)*3)]
# o_step[:, 0:1] = oRh @ steps[(j*3):((j+1)*3), 0:1] + self.loop_o_q_int[i:(i+1), 0:3].transpose()
o_step[:, 0:1] = oRh @ fsteps[0:1, (j*3):((j+1)*3)].transpose() + self.loop_o_q_int[i:(i+1), 0:3].transpose()
plt.plot(o_step[0, 0], o_step[1, 0], linestyle=None, linewidth=1, marker="o", color=f_c[j])
"""
lgd1 = ["HAA", "HFE", "Knee"]
lgd2 = ["FL", "FR", "HL", "HR"]
plt.figure()
for i in range(12):
if i == 0:
ax0 = plt.subplot(3, 4, index12[i])
else:
plt.subplot(3, 4, index12[i], sharex=ax0)
tau_fb = self.wbc_P[:, i] * (self.wbc_q_des[:, i] - self.esti_q_filt[:, 7+i]) + \
self.wbc_D[:, i] * (self.wbc_v_des[:, i] - self.esti_v_filt[:, 6+i])
h1, = plt.plot(t_range, self.wbc_tau_ff[:, i], "r", linewidth=3)
h2, = plt.plot(t_range, tau_fb, "b", linewidth=3)
h3, = plt.plot(t_range, self.wbc_tau_ff[:, i] + tau_fb, "g", linewidth=3)
h4, = plt.plot(t_range[:-1], loggerSensors.torquesFromCurrentMeasurment[1:, i],
"violet", linewidth=3, linestyle="--")
plt.xlabel("Time [s]")
plt.ylabel(lgd1[i % 3]+" "+lgd2[int(i/3)]+" [Nm]")
tmp = lgd1[i % 3]+" "+lgd2[int(i/3)]
plt.legend([h1, h2, h3, h4], ["FF "+tmp, "FB "+tmp, "PD+ "+tmp, "Meas "+tmp], prop={'size': 8})
plt.ylim([-8.0, 8.0])
plt.suptitle("FF torques & FB torques & Sent torques & Meas torques")
lgd1 = ["Ctct force X", "Ctct force Y", "Ctct force Z"]
lgd2 = ["FL", "FR", "HL", "HR"]
plt.figure()
for i in range(12):
if i == 0:
ax0 = plt.subplot(3, 4, index12[i])
else:
plt.subplot(3, 4, index12[i], sharex=ax0)
h1, = plt.plot(t_range, self.mpc_x_f[:, 12+i, 0], "r", linewidth=3)
h2, = plt.plot(t_range, self.wbc_f_ctc[:, i], "b", linewidth=3, linestyle="--")
plt.xlabel("Time [s]")
plt.ylabel(lgd1[i % 3]+" "+lgd2[int(i/3)]+" [N]")
plt.legend([h1, h2], ["MPC " + lgd1[i % 3]+" "+lgd2[int(i/3)],
"WBC " + lgd1[i % 3]+" "+lgd2[int(i/3)]], prop={'size': 8})
if (i % 3) == 2:
plt.ylim([-0.0, 26.0])
else:
plt.ylim([-26.0, 26.0])
plt.suptitle("Contact forces (MPC command) & WBC QP output")
lgd1 = ["HAA", "HFE", "Knee"]
lgd2 = ["FL", "FR", "HL", "HR"]
plt.figure()
for i in range(12):
if i == 0:
ax0 = plt.subplot(3, 4, index12[i])
else:
plt.subplot(3, 4, index12[i], sharex=ax0)
h1, = plt.plot(t_range, self.wbc_q_des[:, i], color='r', linewidth=3)
h2, = plt.plot(t_range, self.esti_q_filt[:, 7+i], color='b', linewidth=3)
plt.xlabel("Time [s]")
plt.ylabel(lgd1[i % 3]+" "+lgd2[int(i/3)]+" [rad]")
plt.legend([h1, h2], ["Ref "+lgd1[i % 3]+" "+lgd2[int(i/3)],
lgd1[i % 3]+" "+lgd2[int(i/3)]], prop={'size': 8})
plt.suptitle("Desired actuator positions & Measured actuator positions")
# Evolution of predicted trajectory along time
log_t_pred = np.array([k*self.dt*10 for k in range(self.mpc_x_f.shape[2])])
log_t_ref = np.array([k*self.dt*10 for k in range(self.planner_xref.shape[2])])
"""from IPython import embed
embed()"""
titles = ["X", "Y", "Z", "Roll", "Pitch", "Yaw"]
step = 1000
plt.figure()
for j in range(6):
plt.subplot(3, 2, index6[j])
c = [[i/(self.mpc_x_f.shape[0]+5), 0.0, i/(self.mpc_x_f.shape[0]+5)]
for i in range(0, self.mpc_x_f.shape[0], step)]
for i in range(0, self.mpc_x_f.shape[0], step):
h1, = plt.plot(log_t_pred+(i+10)*self.dt,
self.mpc_x_f[i, j, :], "b", linewidth=2, color=c[int(i/step)])
h2, = plt.plot(log_t_ref+i*self.dt,
self.planner_xref[i, j, :], linestyle="--", marker='x', color="g", linewidth=2)
#h3, = plt.plot(np.array([k*self.dt for k in range(self.mpc_x_f.shape[0])]),
# self.planner_xref[:, j, 0], linestyle=None, marker='x', color="r", linewidth=1)
plt.xlabel("Time [s]")
plt.legend([h1, h2, h3], ["Output trajectory of MPC",
"Input trajectory of planner"]) #, "Actual robot trajectory"])
plt.title("Predicted trajectory for " + titles[j])
plt.suptitle("Analysis of trajectories in position and orientation computed by the MPC")
plt.figure()
for j in range(6):
plt.subplot(3, 2, index6[j])
c = [[i/(self.mpc_x_f.shape[0]+5), 0.0, i/(self.mpc_x_f.shape[0]+5)]
for i in range(0, self.mpc_x_f.shape[0], step)]
for i in range(0, self.mpc_x_f.shape[0], step):
h1, = plt.plot(log_t_pred+(i+10)*self.dt,
self.mpc_x_f[i, j+6, :], "b", linewidth=2, color=c[int(i/step)])
h2, = plt.plot(log_t_ref+i*self.dt,
self.planner_xref[i, j+6, :], linestyle="--", marker='x', color="g", linewidth=2)
h3, = plt.plot(np.array([k*self.dt for k in range(self.mpc_x_f.shape[0])]),
self.planner_xref[:, j+6, 0], linestyle=None, marker='x', color="r", linewidth=1)
plt.xlabel("Time [s]")
plt.legend([h1, h2, h3], ["Output trajectory of MPC",
"Input trajectory of planner", "Actual robot trajectory"])
plt.title("Predicted trajectory for velocity in " + titles[j])
plt.suptitle("Analysis of trajectories of linear and angular velocities computed by the MPC")
step = 1000
lgd1 = ["Ctct force X", "Ctct force Y", "Ctct force Z"]
lgd2 = ["FL", "FR", "HL", "HR"]
plt.figure()
for i in range(12):
if i == 0:
ax0 = plt.subplot(3, 4, index12[i])
else:
plt.subplot(3, 4, index12[i], sharex=ax0)
h1, = plt.plot(t_range, self.mpc_x_f[:, 12+i, 0], "r", linewidth=3)
h2, = plt.plot(t_range, self.wbc_f_ctc[:, i], "b", linewidth=3, linestyle="--")
plt.xlabel("Time [s]")
plt.ylabel(lgd1[i % 3]+" "+lgd2[int(i/3)]+" [N]")
plt.legend([h1, h2], ["MPC " + lgd1[i % 3]+" "+lgd2[int(i/3)],
"WBC " + lgd1[i % 3]+" "+lgd2[int(i/3)]], prop={'size': 8})
if (i % 3) == 2:
plt.ylim([-0.0, 26.0])
else:
plt.ylim([-26.0, 26.0])
plt.suptitle("Contact forces (MPC command) & WBC QP output")
lgd1 = ["Ctct force X", "Ctct force Y", "Ctct force Z"]
lgd2 = ["FL", "FR", "HL", "HR"]
plt.figure()
for i in range(4):
if i == 0:
ax0 = plt.subplot(1, 4, i+1)
else:
plt.subplot(1, 4, i+1, sharex=ax0)
for k in range(0, self.mpc_x_f.shape[0], step):
h2, = plt.plot(log_t_pred+k*self.dt, self.mpc_x_f[k, 12+(3*i+2), :], linestyle="--", marker='x', linewidth=2)
h1, = plt.plot(t_range, self.mpc_x_f[:, 12+(3*i+2), 0], "r", linewidth=3)
# h3, = plt.plot(t_range, self.wbc_f_ctc[:, i], "b", linewidth=3, linestyle="--")
plt.plot(t_range, self.esti_feet_status[:, i], "k", linestyle="--")
plt.xlabel("Time [s]")
plt.ylabel(lgd2[i]+" [N]")
plt.legend([h1, h2], ["MPC "+lgd2[i],
"MPC "+lgd2[i]+" trajectory"])
plt.ylim([-1.0, 26.0])
plt.suptitle("Contact forces trajectories & Actual forces trajectories")
# Analysis of the complementary filter behaviour
clr = ["b", "darkred", "forestgreen"]
# Velocity complementary filter
lgd_Y = ["dx", "ddx", "alpha dx", "dx_out", "dy", "ddy", "alpha dy", "dy_out", "dz", "ddz", "alpha dz", "dz_out"]
plt.figure()
for i in range(12):
if i == 0:
ax0 = plt.subplot(3, 4, i+1)
else:
plt.subplot(3, 4, i+1, sharex=ax0)
if i % 4 == 0:
plt.plot(t_range, self.esti_HP_x[:, int(i/4)], color=clr[int(i/4)], linewidth=3, marker='') # x input of the velocity complementary filter
elif i % 4 == 1:
plt.plot(t_range, self.esti_HP_dx[:, int(i/4)], color=clr[int(i/4)], linewidth=3, marker='') # dx input of the velocity complementary filter
elif i % 4 == 2:
plt.plot(t_range, self.esti_HP_alpha[:, int(i/4)], color=clr[int(i/4)], linewidth=3, marker='') # alpha parameter of the velocity complementary filter
else:
plt.plot(t_range, self.esti_HP_filt_x[:, int(i/4)], color=clr[int(i/4)], linewidth=3, marker='') # filtered output of the velocity complementary filter
plt.legend([lgd_Y[i]], prop={'size': 8})
plt.suptitle("Evolution of the quantities of the velocity complementary filter")
# Position complementary filter
lgd_Y = ["x", "dx", "alpha x", "x_out", "y", "dy", "alpha y", "y_out", "z", "dz", "alpha z", "z_out"]
plt.figure()
for i in range(12):
if i == 0:
ax0 = plt.subplot(3, 4, i+1)
else:
plt.subplot(3, 4, i+1, sharex=ax0)
if i % 4 == 0:
plt.plot(t_range, self.esti_LP_x[:, int(i/4)], color=clr[int(i/4)], linewidth=3, marker='') # x input of the position complementary filter
elif i % 4 == 1:
plt.plot(t_range, self.esti_LP_dx[:, int(i/4)], color=clr[int(i/4)], linewidth=3, marker='') # dx input of the position complementary filter
elif i % 4 == 2:
plt.plot(t_range, self.esti_LP_alpha[:, int(i/4)], color=clr[int(i/4)], linewidth=3, marker='') # alpha parameter of the position complementary filter
else:
plt.plot(t_range, self.esti_LP_filt_x[:, int(i/4)], color=clr[int(i/4)], linewidth=3, marker='') # filtered output of the position complementary filter
plt.legend([lgd_Y[i]], prop={'size': 8})
plt.suptitle("Evolution of the quantities of the position complementary filter")
plt.show(block=True)
from IPython import embed
embed()
def saveAll(self, loggerSensors, fileName="data"):
    """Dump every logged array of this logger and of the sensor logger
    into a single timestamped .npz archive.

    Args:
        loggerSensors (object): sensor-side logger whose arrays are saved
            alongside the control-side ones
        fileName (str): prefix of the output file; a "_%Y_%m_%d_%H_%M"
            date suffix and the ".npz" extension are appended
    """
    # Arrays recorded by this logger, archived under their attribute names.
    control_fields = [
        "joy_v_ref",
        "esti_feet_status", "esti_feet_goals", "esti_q_filt", "esti_v_filt",
        "esti_v_secu", "esti_FK_lin_vel", "esti_FK_xyz", "esti_xyz_mean_feet",
        "esti_filt_lin_vel",
        "esti_HP_x", "esti_HP_dx", "esti_HP_alpha", "esti_HP_filt_x",
        "esti_LP_x", "esti_LP_dx", "esti_LP_alpha", "esti_LP_filt_x",
        "esti_kf_X", "esti_kf_Z",
        "loop_o_q_int", "loop_o_v", "loop_h_v", "loop_pos_virtual_world",
        "planner_q_static", "planner_RPY_static", "planner_xref",
        "planner_fsteps", "planner_gait", "planner_goals", "planner_vgoals",
        "planner_agoals", "planner_is_static", "planner_h_ref",
        "mpc_x_f",
        "wbc_x_f", "wbc_P", "wbc_D", "wbc_q_des", "wbc_v_des", "wbc_tau_ff",
        "wbc_f_ctc", "wbc_feet_pos", "wbc_feet_pos_target", "wbc_feet_err",
        "wbc_feet_vel", "wbc_feet_vel_target", "wbc_feet_acc_target",
        "tstamps",
    ]
    # Arrays recorded by the sensor logger.
    sensor_fields = [
        "q_mes", "v_mes", "baseOrientation", "baseAngularVelocity",
        "baseLinearAcceleration", "baseAccelerometer",
        "torquesFromCurrentMeasurment", "mocapPosition", "mocapVelocity",
        "mocapAngularVelocity", "mocapOrientationMat9", "mocapOrientationQuat",
    ]
    arrays = {name: getattr(self, name) for name in control_fields}
    arrays.update({name: getattr(loggerSensors, name) for name in sensor_fields})
    # Timestamp suffix keeps successive runs from overwriting each other.
    date_str = datetime.now().strftime('_%Y_%m_%d_%H_%M')
    np.savez(fileName + date_str + ".npz", **arrays)
def loadAll(self, loggerSensors, fileName=None):
    """Load logged arrays from a .npz archive produced by saveAll.

    Fills both this logger and the sensor-side logger with the arrays of
    the archive and restores their logSize from the array lengths.

    Args:
        loggerSensors (object): sensor-side logger to fill as well
        fileName (str or None): path of the archive to read; if None, the
            most recent "data_*.npz" file of the current directory is used

    Raises:
        IndexError: if fileName is None and no matching archive is found.
        KeyError: if the archive lacks one of the expected arrays.
    """
    if fileName is None:
        import glob
        # Lexicographic sort matches chronological order thanks to the
        # zero-padded date suffix, so the last entry is the newest file.
        # Bug fix: the pattern was hard-coded to 'data_2021_*.npz' and
        # could never find recordings made after 2021.
        fileName = np.sort(glob.glob('data_*.npz'))[-1]
    data = np.load(fileName)
    # Arrays belonging to LoggerControl (same set saveAll writes).
    control_fields = [
        "joy_v_ref",
        "esti_feet_status", "esti_feet_goals", "esti_q_filt", "esti_v_filt",
        "esti_v_secu", "esti_FK_lin_vel", "esti_FK_xyz", "esti_xyz_mean_feet",
        "esti_filt_lin_vel",
        "esti_HP_x", "esti_HP_dx", "esti_HP_alpha", "esti_HP_filt_x",
        "esti_LP_x", "esti_LP_dx", "esti_LP_alpha", "esti_LP_filt_x",
        "esti_kf_X", "esti_kf_Z",
        "loop_o_q_int", "loop_o_v", "loop_h_v", "loop_pos_virtual_world",
        "planner_q_static", "planner_RPY_static", "planner_xref",
        "planner_fsteps", "planner_gait", "planner_goals", "planner_vgoals",
        "planner_agoals", "planner_is_static", "planner_h_ref",
        "mpc_x_f",
        "wbc_x_f", "wbc_P", "wbc_D", "wbc_q_des", "wbc_v_des", "wbc_tau_ff",
        "wbc_f_ctc", "wbc_feet_pos", "wbc_feet_pos_target", "wbc_feet_err",
        "wbc_feet_vel", "wbc_feet_vel_target", "wbc_feet_acc_target",
        "tstamps",
    ]
    for name in control_fields:
        setattr(self, name, data[name])
    self.logSize = self.joy_v_ref.shape[0]
    # Arrays belonging to LoggerSensors.
    sensor_fields = [
        "q_mes", "v_mes", "baseOrientation", "baseAngularVelocity",
        "baseLinearAcceleration", "baseAccelerometer",
        "torquesFromCurrentMeasurment", "mocapPosition", "mocapVelocity",
        "mocapAngularVelocity", "mocapOrientationMat9", "mocapOrientationQuat",
    ]
    for name in sensor_fields:
        setattr(loggerSensors, name, data[name])
    loggerSensors.logSize = loggerSensors.q_mes.shape[0]
def slider_predicted_trajectory(self):
    """Interactive plot of MPC predicted trajectories with time sliders.

    Opens two figures — position/orientation and linear/angular
    velocities — each with its own horizontal time slider. Moving either
    slider refreshes both figures; the `recursive` flag of the callbacks
    breaks the mutual recursion between them.
    """
    from matplotlib import pyplot as plt
    from matplotlib.widgets import Slider

    index6 = [1, 3, 5, 2, 4, 6]
    # Time axes of one MPC prediction horizon and of one planner reference
    # horizon (both sampled every 10 * dt).
    log_t_pred = np.array([(k+1)*self.dt*10 for k in range(self.mpc_x_f.shape[2])])
    log_t_ref = np.array([k*self.dt*10 for k in range(self.planner_xref.shape[2])])
    trange = np.max([np.max(log_t_pred), np.max(log_t_ref)])
    # Line handles and axes of the position figure / the velocity figure
    h1s = []
    h2s = []
    axs = []
    h1s_vel = []
    h2s_vel = []
    axs_vel = []
    # Initial slider value
    init_time = 0.0
    # Create the position/orientation figure and the lines to manipulate
    fig = plt.figure()
    ax = plt.gca()
    for j in range(6):
        ax = plt.subplot(3, 2, index6[j])
        h1, = plt.plot(log_t_pred, self.mpc_x_f[0, j, :], "b", linewidth=2)
        h2, = plt.plot(log_t_ref, self.planner_xref[0, j, :], linestyle="--", marker='x', color="g", linewidth=2)
        h3, = plt.plot(np.array([k*self.dt for k in range(self.mpc_x_f.shape[0])]),
                       self.planner_xref[:, j, 0], linestyle=None, marker='x', color="r", linewidth=1)
        axs.append(ax)
        h1s.append(h1)
        h2s.append(h2)
    axcolor = 'lightgoldenrodyellow'
    # Horizontal slider controlling the time of the position figure.
    axtime = plt.axes([0.25, 0.03, 0.65, 0.03], facecolor=axcolor)
    time_slider = Slider(
        ax=axtime,
        label='Time [s]',
        valmin=0.0,
        valmax=self.logSize*self.dt,
        valinit=init_time,
    )
    # Create the velocity figure and the lines to manipulate
    fig_vel = plt.figure()
    ax = plt.gca()
    for j in range(6):
        ax = plt.subplot(3, 2, index6[j])
        h1, = plt.plot(log_t_pred, self.mpc_x_f[0, j, :], "b", linewidth=2)
        h2, = plt.plot(log_t_ref, self.planner_xref[0, j, :], linestyle="--", marker='x', color="g", linewidth=2)
        h3, = plt.plot(np.array([k*self.dt for k in range(self.mpc_x_f.shape[0])]),
                       self.planner_xref[:, j+6, 0], linestyle=None, marker='x', color="r", linewidth=1)
        axs_vel.append(ax)
        h1s_vel.append(h1)
        h2s_vel.append(h2)
    # Horizontal slider controlling the time of the velocity figure.
    axtime_vel = plt.axes([0.25, 0.03, 0.65, 0.03], facecolor=axcolor)
    time_slider_vel = Slider(
        ax=axtime_vel,
        label='Time [s]',
        valmin=0.0,
        valmax=self.logSize*self.dt,
        valinit=init_time,
    )

    def update(val, recursive=False):
        """Refresh the position figure; `recursive` stops the mutual call."""
        # Snap the requested time to the MPC period (10 * dt).
        time_slider.val = np.round(val / (self.dt*10), decimals=0) * (self.dt*10)
        rounded = int(np.round(time_slider.val / self.dt, decimals=0))
        for j in range(6):
            h1s[j].set_xdata(log_t_pred + time_slider.val)
            h2s[j].set_xdata(log_t_ref + time_slider.val)
            # Curves are drawn relative to the reference trajectory, so the
            # reference curve itself becomes the zero line.
            y1 = self.mpc_x_f[rounded, j, :] - self.planner_xref[rounded, j, 1:]
            y2 = self.planner_xref[rounded, j, :] - self.planner_xref[rounded, j, :]
            h1s[j].set_ydata(y1)
            h2s[j].set_ydata(y2)
            axs[j].set_xlim([time_slider.val - self.dt * 3, time_slider.val+trange+self.dt * 3])
            ymin = np.min([np.min(y1), np.min(y2)])
            ymax = np.max([np.max(y1), np.max(y2)])
            axs[j].set_ylim([ymin - 0.05 * (ymax - ymin), ymax + 0.05 * (ymax - ymin)])
        fig.canvas.draw_idle()
        if not recursive:
            update_vel(time_slider.val, True)

    def update_vel(val, recursive=False):
        """Refresh the velocity figure; `recursive` stops the mutual call."""
        time_slider_vel.val = np.round(val / (self.dt*10), decimals=0) * (self.dt*10)
        rounded = int(np.round(time_slider_vel.val / self.dt, decimals=0))
        for j in range(6):
            # NOTE(review): x-data uses time_slider.val (position slider);
            # both sliders are kept in sync by the mutual calls, but
            # time_slider_vel.val would be the more direct choice — confirm.
            h1s_vel[j].set_xdata(log_t_pred + time_slider.val)
            h2s_vel[j].set_xdata(log_t_ref + time_slider.val)
            y1 = self.mpc_x_f[rounded, j+6, :]
            y2 = self.planner_xref[rounded, j+6, :]
            h1s_vel[j].set_ydata(y1)
            h2s_vel[j].set_ydata(y2)
            axs_vel[j].set_xlim([time_slider.val - self.dt * 3, time_slider.val+trange+self.dt * 3])
            ymin = np.min([np.min(y1), np.min(y2)])
            ymax = np.max([np.max(y1), np.max(y2)])
            axs_vel[j].set_ylim([ymin - 0.05 * (ymax - ymin), ymax + 0.05 * (ymax - ymin)])
        fig_vel.canvas.draw_idle()
        if not recursive:
            update(time_slider_vel.val, True)

    # Register the callbacks with their own slider. Bug fix: the velocity
    # slider was previously registered with `update` instead of
    # `update_vel`, so it drove the velocity figure only indirectly.
    time_slider.on_changed(update)
    time_slider_vel.on_changed(update_vel)
    plt.show()
def slider_predicted_footholds(self):
    """Interactive plot of the predicted footstep locations with a time slider.

    Draws the predicted footholds of the four feet in the world frame and
    lets a horizontal slider select the instant of the log whose
    predictions are displayed.
    """
    from matplotlib import pyplot as plt
    from matplotlib.widgets import Slider, Button
    import utils_mpc
    import pinocchio as pin

    self.planner_fsteps  # NOTE(review): bare attribute access, has no effect
    # Define initial parameters
    init_time = 0.0
    # Create the figure and the line that we will manipulate
    fig = plt.figure()
    ax = plt.gca()
    h1s = []
    # One color per foot (presumably FL, FR, HL, HR order — TODO confirm)
    f_c = ["r", "b", "forestgreen", "rebeccapurple"]
    quat = np.zeros((4, 1))
    fsteps = self.planner_fsteps[0]
    o_step = np.zeros((3*int(fsteps.shape[0]), 1))
    # Yaw-only rotation built from the base orientation at the first sample
    RPY = utils_mpc.quaternionToRPY(self.loop_o_q_int[0, 3:7])
    quat[:, 0] = utils_mpc.EulerToQuaternion([0.0, 0.0, RPY[2]])
    oRh = pin.Quaternion(quat).toRotationMatrix()
    for j in range(4):
        # First predicted foothold of foot j, rotated then translated by
        # the base position (world frame)
        o_step[0:3, 0:1] = oRh @ fsteps[0:1, (j*3):((j+1)*3)].transpose() + self.loop_o_q_int[0:1, 0:3].transpose()
        h1, = plt.plot(o_step[0::3, 0], o_step[1::3, 0], linestyle=None, linewidth=0, marker="o", color=f_c[j])
        h1s.append(h1)
    axcolor = 'lightgoldenrodyellow'
    # Make a horizontal slider to control the time.
    axtime = plt.axes([0.25, 0.03, 0.65, 0.03], facecolor=axcolor)
    time_slider = Slider(
        ax=axtime,
        label='Time [s]',
        valmin=0.0,
        valmax=self.logSize*self.dt,
        valinit=init_time,
    )
    ax.set_xlim([-0.3, 0.5])
    ax.set_ylim([-0.3, 0.5])

    # The function to be called anytime a slider's value changes
    def update(val):
        # Snap the requested time to the MPC period (10 * dt)
        time_slider.val = np.round(val / (self.dt*10), decimals=0) * (self.dt*10)
        rounded = int(np.round(time_slider.val / self.dt, decimals=0))
        fsteps = self.planner_fsteps[rounded]
        o_step = np.zeros((3*int(fsteps.shape[0]), 1))
        RPY = utils_mpc.quaternionToRPY(self.loop_o_q_int[rounded, 3:7])
        # Mutates the enclosing `quat` buffer in place (closure state)
        quat[:, 0] = utils_mpc.EulerToQuaternion([0.0, 0.0, RPY[2]])
        oRh = pin.Quaternion(quat).toRotationMatrix()
        for j in range(4):
            # All predicted footholds of foot j along the horizon
            for k in range(int(fsteps.shape[0])):
                o_step[(3*k):(3*(k+1)), 0:1] = oRh @ fsteps[(k):(k+1), (j*3):((j+1)*3)].transpose() + self.loop_o_q_int[rounded:(rounded+1), 0:3].transpose()
            h1s[j].set_xdata(o_step[0::3, 0].copy())
            h1s[j].set_ydata(o_step[1::3, 0].copy())
        fig.canvas.draw_idle()

    # register the update function with each slider
    time_slider.on_changed(update)
    plt.show()
if __name__ == "__main__":
    import LoggerSensors

    # Create loggers
    # NOTE(review): logSize=5997 must match the length of the recording
    # loaded below — confirm against the .npz file being analysed.
    loggerSensors = LoggerSensors.LoggerSensors(logSize=5997)
    # Args are presumably dt and the gait horizon length — TODO confirm
    # against the LoggerControl constructor.
    logger = LoggerControl(0.002, 100, logSize=5997)
    # Load data from .npz file
    logger.loadAll(loggerSensors)
    # Call all plotting functions
    #logger.plotAll(loggerSensors)
    logger.slider_predicted_trajectory()
| [
"matplotlib.pyplot.ylabel",
"numpy.array",
"numpy.sin",
"matplotlib.widgets.Slider",
"numpy.savez",
"utils_mpc.EulerToQuaternion",
"matplotlib.pyplot.plot",
"IPython.embed",
"matplotlib.pyplot.xlabel",
"numpy.max",
"numpy.min",
"matplotlib.pyplot.ylim",
"numpy.round",
"glob.glob",
"utils... | [((44262, 44303), 'LoggerSensors.LoggerSensors', 'LoggerSensors.LoggerSensors', ([], {'logSize': '(5997)'}), '(logSize=5997)\n', (44289, 44303), False, 'import LoggerSensors\n'), ((491, 506), 'numpy.int', 'np.int', (['logSize'], {}), '(logSize)\n', (497, 506), True, 'import numpy as np\n'), ((653, 675), 'numpy.zeros', 'np.zeros', (['[logSize, 6]'], {}), '([logSize, 6])\n', (661, 675), True, 'import numpy as np\n'), ((767, 789), 'numpy.zeros', 'np.zeros', (['[logSize, 4]'], {}), '([logSize, 4])\n', (775, 789), True, 'import numpy as np\n'), ((859, 884), 'numpy.zeros', 'np.zeros', (['[logSize, 3, 4]'], {}), '([logSize, 3, 4])\n', (867, 884), True, 'import numpy as np\n'), ((956, 979), 'numpy.zeros', 'np.zeros', (['[logSize, 19]'], {}), '([logSize, 19])\n', (964, 979), True, 'import numpy as np\n'), ((1026, 1049), 'numpy.zeros', 'np.zeros', (['[logSize, 18]'], {}), '([logSize, 18])\n', (1034, 1049), True, 'import numpy as np\n'), ((1096, 1119), 'numpy.zeros', 'np.zeros', (['[logSize, 12]'], {}), '([logSize, 12])\n', (1104, 1119), True, 'import numpy as np\n'), ((1199, 1221), 'numpy.zeros', 'np.zeros', (['[logSize, 3]'], {}), '([logSize, 3])\n', (1207, 1221), True, 'import numpy as np\n'), ((1291, 1313), 'numpy.zeros', 'np.zeros', (['[logSize, 3]'], {}), '([logSize, 3])\n', (1299, 1313), True, 'import numpy as np\n'), ((1390, 1412), 'numpy.zeros', 'np.zeros', (['[logSize, 3]'], {}), '([logSize, 3])\n', (1398, 1412), True, 'import numpy as np\n'), ((1471, 1493), 'numpy.zeros', 'np.zeros', (['[logSize, 3]'], {}), '([logSize, 3])\n', (1479, 1493), True, 'import numpy as np\n'), ((1577, 1599), 'numpy.zeros', 'np.zeros', (['[logSize, 3]'], {}), '([logSize, 3])\n', (1585, 1599), True, 'import numpy as np\n'), ((1674, 1696), 'numpy.zeros', 'np.zeros', (['[logSize, 3]'], {}), '([logSize, 3])\n', (1682, 1696), True, 'import numpy as np\n'), ((1775, 1797), 'numpy.zeros', 'np.zeros', (['[logSize, 3]'], {}), '([logSize, 3])\n', (1783, 1797), True, 'import numpy as 
np\n'), ((1884, 1906), 'numpy.zeros', 'np.zeros', (['[logSize, 3]'], {}), '([logSize, 3])\n', (1892, 1906), True, 'import numpy as np\n'), ((1989, 2011), 'numpy.zeros', 'np.zeros', (['[logSize, 3]'], {}), '([logSize, 3])\n', (1997, 2011), True, 'import numpy as np\n'), ((2086, 2108), 'numpy.zeros', 'np.zeros', (['[logSize, 3]'], {}), '([logSize, 3])\n', (2094, 2108), True, 'import numpy as np\n'), ((2187, 2209), 'numpy.zeros', 'np.zeros', (['[logSize, 3]'], {}), '([logSize, 3])\n', (2195, 2209), True, 'import numpy as np\n'), ((2296, 2318), 'numpy.zeros', 'np.zeros', (['[logSize, 3]'], {}), '([logSize, 3])\n', (2304, 2318), True, 'import numpy as np\n'), ((2401, 2424), 'numpy.zeros', 'np.zeros', (['[logSize, 18]'], {}), '([logSize, 18])\n', (2409, 2424), True, 'import numpy as np\n'), ((2480, 2503), 'numpy.zeros', 'np.zeros', (['[logSize, 16]'], {}), '([logSize, 16])\n', (2488, 2503), True, 'import numpy as np\n'), ((2585, 2608), 'numpy.zeros', 'np.zeros', (['[logSize, 19]'], {}), '([logSize, 19])\n', (2593, 2608), True, 'import numpy as np\n'), ((2690, 2713), 'numpy.zeros', 'np.zeros', (['[logSize, 18]'], {}), '([logSize, 18])\n', (2698, 2713), True, 'import numpy as np\n'), ((2775, 2798), 'numpy.zeros', 'np.zeros', (['[logSize, 18]'], {}), '([logSize, 18])\n', (2783, 2798), True, 'import numpy as np\n'), ((2879, 2901), 'numpy.zeros', 'np.zeros', (['[logSize, 3]'], {}), '([logSize, 3])\n', (2887, 2901), True, 'import numpy as np\n'), ((2985, 3016), 'numpy.zeros', 'np.zeros', (['[logSize, N0_gait, 4]'], {}), '([logSize, N0_gait, 4])\n', (2993, 3016), True, 'import numpy as np\n'), ((3067, 3086), 'numpy.zeros', 'np.zeros', (['[logSize]'], {}), '([logSize])\n', (3075, 3086), True, 'import numpy as np\n'), ((3162, 3185), 'numpy.zeros', 'np.zeros', (['[logSize, 19]'], {}), '([logSize, 19])\n', (3170, 3185), True, 'import numpy as np\n'), ((3264, 3286), 'numpy.zeros', 'np.zeros', (['[logSize, 3]'], {}), '([logSize, 3])\n', (3272, 3286), True, 'import numpy as np\n'), 
((3716, 3735), 'numpy.zeros', 'np.zeros', (['[logSize]'], {}), '([logSize])\n', (3724, 3735), True, 'import numpy as np\n'), ((3837, 3862), 'numpy.zeros', 'np.zeros', (['[logSize, 3, 4]'], {}), '([logSize, 3, 4])\n', (3845, 3862), True, 'import numpy as np\n'), ((3921, 3946), 'numpy.zeros', 'np.zeros', (['[logSize, 3, 4]'], {}), '([logSize, 3, 4])\n', (3929, 3946), True, 'import numpy as np\n'), ((4006, 4031), 'numpy.zeros', 'np.zeros', (['[logSize, 3, 4]'], {}), '([logSize, 3, 4])\n', (4014, 4031), True, 'import numpy as np\n'), ((4341, 4364), 'numpy.zeros', 'np.zeros', (['[logSize, 24]'], {}), '([logSize, 24])\n', (4349, 4364), True, 'import numpy as np\n'), ((4452, 4475), 'numpy.zeros', 'np.zeros', (['[logSize, 12]'], {}), '([logSize, 12])\n', (4460, 4475), True, 'import numpy as np\n'), ((4531, 4554), 'numpy.zeros', 'np.zeros', (['[logSize, 12]'], {}), '([logSize, 12])\n', (4539, 4554), True, 'import numpy as np\n'), ((4611, 4634), 'numpy.zeros', 'np.zeros', (['[logSize, 12]'], {}), '([logSize, 12])\n', (4619, 4634), True, 'import numpy as np\n'), ((4693, 4716), 'numpy.zeros', 'np.zeros', (['[logSize, 12]'], {}), '([logSize, 12])\n', (4701, 4716), True, 'import numpy as np\n'), ((4776, 4799), 'numpy.zeros', 'np.zeros', (['[logSize, 12]'], {}), '([logSize, 12])\n', (4784, 4799), True, 'import numpy as np\n'), ((4868, 4891), 'numpy.zeros', 'np.zeros', (['[logSize, 12]'], {}), '([logSize, 12])\n', (4876, 4891), True, 'import numpy as np\n'), ((4958, 4983), 'numpy.zeros', 'np.zeros', (['[logSize, 3, 4]'], {}), '([logSize, 3, 4])\n', (4966, 4983), True, 'import numpy as np\n'), ((5062, 5087), 'numpy.zeros', 'np.zeros', (['[logSize, 3, 4]'], {}), '([logSize, 3, 4])\n', (5070, 5087), True, 'import numpy as np\n'), ((5158, 5183), 'numpy.zeros', 'np.zeros', (['[logSize, 3, 4]'], {}), '([logSize, 3, 4])\n', (5166, 5183), True, 'import numpy as np\n'), ((5264, 5289), 'numpy.zeros', 'np.zeros', (['[logSize, 3, 4]'], {}), '([logSize, 3, 4])\n', (5272, 5289), True, 'import 
numpy as np\n'), ((5369, 5394), 'numpy.zeros', 'np.zeros', (['[logSize, 3, 4]'], {}), '([logSize, 3, 4])\n', (5377, 5394), True, 'import numpy as np\n'), ((5473, 5498), 'numpy.zeros', 'np.zeros', (['[logSize, 3, 4]'], {}), '([logSize, 3, 4])\n', (5481, 5498), True, 'import numpy as np\n'), ((5580, 5605), 'numpy.zeros', 'np.zeros', (['[logSize, 3, 4]'], {}), '([logSize, 3, 4])\n', (5588, 5605), True, 'import numpy as np\n'), ((5687, 5712), 'numpy.zeros', 'np.zeros', (['[logSize, 3, 4]'], {}), '([logSize, 3, 4])\n', (5695, 5712), True, 'import numpy as np\n'), ((5805, 5822), 'numpy.zeros', 'np.zeros', (['logSize'], {}), '(logSize)\n', (5813, 5822), True, 'import numpy as np\n'), ((7691, 7745), 'numpy.array', 'np.array', (['[loop.q[0, 0], loop.q[1, 0], loop.yaw_estim]'], {}), '([loop.q[0, 0], loop.q[1, 0], loop.yaw_estim])\n', (7699, 7745), True, 'import numpy as np\n'), ((9542, 9548), 'time.time', 'time', ([], {}), '()\n', (9546, 9548), False, 'from time import time\n'), ((9643, 9659), 'numpy.zeros', 'np.zeros', (['[N, 3]'], {}), '([N, 3])\n', (9651, 9659), True, 'import numpy as np\n'), ((9685, 9701), 'numpy.zeros', 'np.zeros', (['[N, 3]'], {}), '([N, 3])\n', (9693, 9701), True, 'import numpy as np\n'), ((9727, 9743), 'numpy.zeros', 'np.zeros', (['[N, 3]'], {}), '([N, 3])\n', (9735, 9743), True, 'import numpy as np\n'), ((11072, 11084), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (11082, 11084), True, 'from matplotlib import pyplot as plt\n'), ((12319, 12383), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Measured & Reference feet positions (base frame)"""'], {}), "('Measured & Reference feet positions (base frame)')\n", (12331, 12383), True, 'from matplotlib import pyplot as plt\n'), ((12478, 12490), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (12488, 12490), True, 'from matplotlib import pyplot as plt\n'), ((13268, 13335), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Measured and Reference feet velocities (base 
frame)"""'], {}), "('Measured and Reference feet velocities (base frame)')\n", (13280, 13335), True, 'from matplotlib import pyplot as plt\n'), ((13430, 13442), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (13440, 13442), True, 'from matplotlib import pyplot as plt\n'), ((13835, 13892), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Reference feet accelerations (base frame)"""'], {}), "('Reference feet accelerations (base frame)')\n", (13847, 13892), True, 'from matplotlib import pyplot as plt\n'), ((14026, 14038), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (14036, 14038), True, 'from matplotlib import pyplot as plt\n'), ((15395, 15456), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Measured & Reference position and orientation"""'], {}), "('Measured & Reference position and orientation')\n", (15407, 15456), True, 'from matplotlib import pyplot as plt\n'), ((15620, 15632), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (15630, 15632), True, 'from matplotlib import pyplot as plt\n'), ((16947, 17013), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Measured & Reference linear and angular velocities"""'], {}), "('Measured & Reference linear and angular velocities')\n", (16959, 17013), True, 'from matplotlib import pyplot as plt\n'), ((19105, 19117), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (19115, 19117), True, 'from matplotlib import pyplot as plt\n'), ((20156, 20225), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""FF torques & FB torques & Sent torques & Meas torques"""'], {}), "('FF torques & FB torques & Sent torques & Meas torques')\n", (20168, 20225), True, 'from matplotlib import pyplot as plt\n'), ((20339, 20351), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (20349, 20351), True, 'from matplotlib import pyplot as plt\n'), ((21103, 21163), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Contact forces (MPC command) & WBC QP output"""'], {}), "('Contact forces 
(MPC command) & WBC QP output')\n", (21115, 21163), True, 'from matplotlib import pyplot as plt\n'), ((21251, 21263), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (21261, 21263), True, 'from matplotlib import pyplot as plt\n'), ((21876, 21948), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Desired actuator positions & Measured actuator positions"""'], {}), "('Desired actuator positions & Measured actuator positions')\n", (21888, 21948), True, 'from matplotlib import pyplot as plt\n'), ((22320, 22332), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (22330, 22332), True, 'from matplotlib import pyplot as plt\n'), ((23392, 23485), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Analysis of trajectories in position and orientation computed by the MPC"""'], {}), "(\n 'Analysis of trajectories in position and orientation computed by the MPC')\n", (23404, 23485), True, 'from matplotlib import pyplot as plt\n'), ((23490, 23502), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (23500, 23502), True, 'from matplotlib import pyplot as plt\n'), ((24574, 24677), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Analysis of trajectories of linear and angular velocities computed by the MPC"""'], {}), "(\n 'Analysis of trajectories of linear and angular velocities computed by the MPC'\n )\n", (24586, 24677), True, 'from matplotlib import pyplot as plt\n'), ((24801, 24813), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (24811, 24813), True, 'from matplotlib import pyplot as plt\n'), ((25565, 25625), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Contact forces (MPC command) & WBC QP output"""'], {}), "('Contact forces (MPC command) & WBC QP output')\n", (25577, 25625), True, 'from matplotlib import pyplot as plt\n'), ((25739, 25751), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (25749, 25751), True, 'from matplotlib import pyplot as plt\n'), ((26595, 26667), 'matplotlib.pyplot.suptitle', 
'plt.suptitle', (['"""Contact forces trajectories & Actual forces trajectories"""'], {}), "('Contact forces trajectories & Actual forces trajectories')\n", (26607, 26667), True, 'from matplotlib import pyplot as plt\n'), ((26942, 26954), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (26952, 26954), True, 'from matplotlib import pyplot as plt\n'), ((27944, 28029), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Evolution of the quantities of the velocity complementary filter"""'], {}), "('Evolution of the quantities of the velocity complementary filter'\n )\n", (27956, 28029), True, 'from matplotlib import pyplot as plt\n'), ((28184, 28196), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (28194, 28196), True, 'from matplotlib import pyplot as plt\n'), ((29186, 29271), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Evolution of the quantities of the position complementary filter"""'], {}), "('Evolution of the quantities of the position complementary filter'\n )\n", (29198, 29271), True, 'from matplotlib import pyplot as plt\n'), ((29276, 29296), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(True)'}), '(block=True)\n', (29284, 29296), True, 'from matplotlib import pyplot as plt\n'), ((29340, 29347), 'IPython.embed', 'embed', ([], {}), '()\n', (29345, 29347), False, 'from IPython import embed\n'), ((29475, 31850), 'numpy.savez', 'np.savez', (["(fileName + date_str + '.npz')"], {'joy_v_ref': 'self.joy_v_ref', 'esti_feet_status': 'self.esti_feet_status', 'esti_feet_goals': 'self.esti_feet_goals', 'esti_q_filt': 'self.esti_q_filt', 'esti_v_filt': 'self.esti_v_filt', 'esti_v_secu': 'self.esti_v_secu', 'esti_FK_lin_vel': 'self.esti_FK_lin_vel', 'esti_FK_xyz': 'self.esti_FK_xyz', 'esti_xyz_mean_feet': 'self.esti_xyz_mean_feet', 'esti_filt_lin_vel': 'self.esti_filt_lin_vel', 'esti_HP_x': 'self.esti_HP_x', 'esti_HP_dx': 'self.esti_HP_dx', 'esti_HP_alpha': 'self.esti_HP_alpha', 'esti_HP_filt_x': 'self.esti_HP_filt_x', 'esti_LP_x': 
'self.esti_LP_x', 'esti_LP_dx': 'self.esti_LP_dx', 'esti_LP_alpha': 'self.esti_LP_alpha', 'esti_LP_filt_x': 'self.esti_LP_filt_x', 'esti_kf_X': 'self.esti_kf_X', 'esti_kf_Z': 'self.esti_kf_Z', 'loop_o_q_int': 'self.loop_o_q_int', 'loop_o_v': 'self.loop_o_v', 'loop_h_v': 'self.loop_h_v', 'loop_pos_virtual_world': 'self.loop_pos_virtual_world', 'planner_q_static': 'self.planner_q_static', 'planner_RPY_static': 'self.planner_RPY_static', 'planner_xref': 'self.planner_xref', 'planner_fsteps': 'self.planner_fsteps', 'planner_gait': 'self.planner_gait', 'planner_goals': 'self.planner_goals', 'planner_vgoals': 'self.planner_vgoals', 'planner_agoals': 'self.planner_agoals', 'planner_is_static': 'self.planner_is_static', 'planner_h_ref': 'self.planner_h_ref', 'mpc_x_f': 'self.mpc_x_f', 'wbc_x_f': 'self.wbc_x_f', 'wbc_P': 'self.wbc_P', 'wbc_D': 'self.wbc_D', 'wbc_q_des': 'self.wbc_q_des', 'wbc_v_des': 'self.wbc_v_des', 'wbc_tau_ff': 'self.wbc_tau_ff', 'wbc_f_ctc': 'self.wbc_f_ctc', 'wbc_feet_pos': 'self.wbc_feet_pos', 'wbc_feet_pos_target': 'self.wbc_feet_pos_target', 'wbc_feet_err': 'self.wbc_feet_err', 'wbc_feet_vel': 'self.wbc_feet_vel', 'wbc_feet_vel_target': 'self.wbc_feet_vel_target', 'wbc_feet_acc_target': 'self.wbc_feet_acc_target', 'tstamps': 'self.tstamps', 'q_mes': 'loggerSensors.q_mes', 'v_mes': 'loggerSensors.v_mes', 'baseOrientation': 'loggerSensors.baseOrientation', 'baseAngularVelocity': 'loggerSensors.baseAngularVelocity', 'baseLinearAcceleration': 'loggerSensors.baseLinearAcceleration', 'baseAccelerometer': 'loggerSensors.baseAccelerometer', 'torquesFromCurrentMeasurment': 'loggerSensors.torquesFromCurrentMeasurment', 'mocapPosition': 'loggerSensors.mocapPosition', 'mocapVelocity': 'loggerSensors.mocapVelocity', 'mocapAngularVelocity': 'loggerSensors.mocapAngularVelocity', 'mocapOrientationMat9': 'loggerSensors.mocapOrientationMat9', 'mocapOrientationQuat': 'loggerSensors.mocapOrientationQuat'}), "(fileName + date_str + '.npz', joy_v_ref=self.joy_v_ref,\n 
esti_feet_status=self.esti_feet_status, esti_feet_goals=self.\n esti_feet_goals, esti_q_filt=self.esti_q_filt, esti_v_filt=self.\n esti_v_filt, esti_v_secu=self.esti_v_secu, esti_FK_lin_vel=self.\n esti_FK_lin_vel, esti_FK_xyz=self.esti_FK_xyz, esti_xyz_mean_feet=self.\n esti_xyz_mean_feet, esti_filt_lin_vel=self.esti_filt_lin_vel, esti_HP_x\n =self.esti_HP_x, esti_HP_dx=self.esti_HP_dx, esti_HP_alpha=self.\n esti_HP_alpha, esti_HP_filt_x=self.esti_HP_filt_x, esti_LP_x=self.\n esti_LP_x, esti_LP_dx=self.esti_LP_dx, esti_LP_alpha=self.esti_LP_alpha,\n esti_LP_filt_x=self.esti_LP_filt_x, esti_kf_X=self.esti_kf_X, esti_kf_Z\n =self.esti_kf_Z, loop_o_q_int=self.loop_o_q_int, loop_o_v=self.loop_o_v,\n loop_h_v=self.loop_h_v, loop_pos_virtual_world=self.\n loop_pos_virtual_world, planner_q_static=self.planner_q_static,\n planner_RPY_static=self.planner_RPY_static, planner_xref=self.\n planner_xref, planner_fsteps=self.planner_fsteps, planner_gait=self.\n planner_gait, planner_goals=self.planner_goals, planner_vgoals=self.\n planner_vgoals, planner_agoals=self.planner_agoals, planner_is_static=\n self.planner_is_static, planner_h_ref=self.planner_h_ref, mpc_x_f=self.\n mpc_x_f, wbc_x_f=self.wbc_x_f, wbc_P=self.wbc_P, wbc_D=self.wbc_D,\n wbc_q_des=self.wbc_q_des, wbc_v_des=self.wbc_v_des, wbc_tau_ff=self.\n wbc_tau_ff, wbc_f_ctc=self.wbc_f_ctc, wbc_feet_pos=self.wbc_feet_pos,\n wbc_feet_pos_target=self.wbc_feet_pos_target, wbc_feet_err=self.\n wbc_feet_err, wbc_feet_vel=self.wbc_feet_vel, wbc_feet_vel_target=self.\n wbc_feet_vel_target, wbc_feet_acc_target=self.wbc_feet_acc_target,\n tstamps=self.tstamps, q_mes=loggerSensors.q_mes, v_mes=loggerSensors.\n v_mes, baseOrientation=loggerSensors.baseOrientation,\n baseAngularVelocity=loggerSensors.baseAngularVelocity,\n baseLinearAcceleration=loggerSensors.baseLinearAcceleration,\n baseAccelerometer=loggerSensors.baseAccelerometer,\n torquesFromCurrentMeasurment=loggerSensors.torquesFromCurrentMeasurment,\n 
mocapPosition=loggerSensors.mocapPosition, mocapVelocity=loggerSensors.\n mocapVelocity, mocapAngularVelocity=loggerSensors.mocapAngularVelocity,\n mocapOrientationMat9=loggerSensors.mocapOrientationMat9,\n mocapOrientationQuat=loggerSensors.mocapOrientationQuat)\n", (29483, 31850), True, 'import numpy as np\n'), ((32977, 32994), 'numpy.load', 'np.load', (['fileName'], {}), '(fileName)\n', (32984, 32994), True, 'import numpy as np\n'), ((37211, 37223), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (37221, 37223), True, 'from matplotlib import pyplot as plt\n'), ((37237, 37246), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (37244, 37246), True, 'from matplotlib import pyplot as plt\n'), ((37970, 38023), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.25, 0.03, 0.65, 0.03]'], {'facecolor': 'axcolor'}), '([0.25, 0.03, 0.65, 0.03], facecolor=axcolor)\n', (37978, 38023), True, 'from matplotlib import pyplot as plt\n'), ((38046, 38148), 'matplotlib.widgets.Slider', 'Slider', ([], {'ax': 'axtime', 'label': '"""Time [s]"""', 'valmin': '(0.0)', 'valmax': '(self.logSize * self.dt)', 'valinit': 'init_time'}), "(ax=axtime, label='Time [s]', valmin=0.0, valmax=self.logSize * self.\n dt, valinit=init_time)\n", (38052, 38148), False, 'from matplotlib.widgets import Slider, Button\n'), ((38314, 38326), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (38324, 38326), True, 'from matplotlib import pyplot as plt\n'), ((38340, 38349), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (38347, 38349), True, 'from matplotlib import pyplot as plt\n'), ((39057, 39110), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.25, 0.03, 0.65, 0.03]'], {'facecolor': 'axcolor'}), '([0.25, 0.03, 0.65, 0.03], facecolor=axcolor)\n', (39065, 39110), True, 'from matplotlib import pyplot as plt\n'), ((39137, 39242), 'matplotlib.widgets.Slider', 'Slider', ([], {'ax': 'axtime_vel', 'label': '"""Time [s]"""', 'valmin': '(0.0)', 'valmax': '(self.logSize * self.dt)', 'valinit': 
'init_time'}), "(ax=axtime_vel, label='Time [s]', valmin=0.0, valmax=self.logSize *\n self.dt, valinit=init_time)\n", (39143, 39242), False, 'from matplotlib.widgets import Slider, Button\n'), ((41554, 41564), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (41562, 41564), True, 'from matplotlib import pyplot as plt\n'), ((41935, 41947), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (41945, 41947), True, 'from matplotlib import pyplot as plt\n'), ((41961, 41970), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (41968, 41970), True, 'from matplotlib import pyplot as plt\n'), ((42061, 42077), 'numpy.zeros', 'np.zeros', (['(4, 1)'], {}), '((4, 1))\n', (42069, 42077), True, 'import numpy as np\n'), ((42188, 42240), 'utils_mpc.quaternionToRPY', 'utils_mpc.quaternionToRPY', (['self.loop_o_q_int[0, 3:7]'], {}), '(self.loop_o_q_int[0, 3:7])\n', (42213, 42240), False, 'import utils_mpc\n'), ((42262, 42309), 'utils_mpc.EulerToQuaternion', 'utils_mpc.EulerToQuaternion', (['[0.0, 0.0, RPY[2]]'], {}), '([0.0, 0.0, RPY[2]])\n', (42289, 42309), False, 'import utils_mpc\n'), ((42770, 42823), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.25, 0.03, 0.65, 0.03]'], {'facecolor': 'axcolor'}), '([0.25, 0.03, 0.65, 0.03], facecolor=axcolor)\n', (42778, 42823), True, 'from matplotlib import pyplot as plt\n'), ((42846, 42948), 'matplotlib.widgets.Slider', 'Slider', ([], {'ax': 'axtime', 'label': '"""Time [s]"""', 'valmin': '(0.0)', 'valmax': '(self.logSize * self.dt)', 'valinit': 'init_time'}), "(ax=axtime, label='Time [s]', valmin=0.0, valmax=self.logSize * self.\n dt, valinit=init_time)\n", (42852, 42948), False, 'from matplotlib.widgets import Slider, Button\n'), ((44154, 44164), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (44162, 44164), True, 'from matplotlib import pyplot as plt\n'), ((15269, 15360), 'matplotlib.pyplot.legend', 'plt.legend', (["['Robot state', 'Robot reference state', 'Ground truth']"], {'prop': "{'size': 8}"}), "(['Robot state', 
'Robot reference state', 'Ground truth'], prop={\n 'size': 8})\n", (15279, 15360), True, 'from matplotlib import pyplot as plt\n'), ((15368, 15386), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['lgd[i]'], {}), '(lgd[i])\n', (15378, 15386), True, 'from matplotlib import pyplot as plt\n'), ((15821, 15877), 'matplotlib.pyplot.plot', 'plt.plot', (['t_range', 'self.loop_h_v[:, i]', '"""b"""'], {'linewidth': '(2)'}), "(t_range, self.loop_h_v[:, i], 'b', linewidth=2)\n", (15829, 15877), True, 'from matplotlib import pyplot as plt\n'), ((15890, 15947), 'matplotlib.pyplot.plot', 'plt.plot', (['t_range', 'self.joy_v_ref[:, i]', '"""r"""'], {'linewidth': '(3)'}), "(t_range, self.joy_v_ref[:, i], 'r', linewidth=3)\n", (15898, 15947), True, 'from matplotlib import pyplot as plt\n'), ((16821, 16912), 'matplotlib.pyplot.legend', 'plt.legend', (["['Robot state', 'Robot reference state', 'Ground truth']"], {'prop': "{'size': 8}"}), "(['Robot state', 'Robot reference state', 'Ground truth'], prop={\n 'size': 8})\n", (16831, 16912), True, 'from matplotlib import pyplot as plt\n'), ((16920, 16938), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['lgd[i]'], {}), '(lgd[i])\n', (16930, 16938), True, 'from matplotlib import pyplot as plt\n'), ((19494, 19552), 'matplotlib.pyplot.plot', 'plt.plot', (['t_range', 'self.wbc_tau_ff[:, i]', '"""r"""'], {'linewidth': '(3)'}), "(t_range, self.wbc_tau_ff[:, i], 'r', linewidth=3)\n", (19502, 19552), True, 'from matplotlib import pyplot as plt\n'), ((19571, 19614), 'matplotlib.pyplot.plot', 'plt.plot', (['t_range', 'tau_fb', '"""b"""'], {'linewidth': '(3)'}), "(t_range, tau_fb, 'b', linewidth=3)\n", (19579, 19614), True, 'from matplotlib import pyplot as plt\n'), ((19633, 19700), 'matplotlib.pyplot.plot', 'plt.plot', (['t_range', '(self.wbc_tau_ff[:, i] + tau_fb)', '"""g"""'], {'linewidth': '(3)'}), "(t_range, self.wbc_tau_ff[:, i] + tau_fb, 'g', linewidth=3)\n", (19641, 19700), True, 'from matplotlib import pyplot as plt\n'), ((19719, 19835), 
'matplotlib.pyplot.plot', 'plt.plot', (['t_range[:-1]', 'loggerSensors.torquesFromCurrentMeasurment[1:, i]', '"""violet"""'], {'linewidth': '(3)', 'linestyle': '"""--"""'}), "(t_range[:-1], loggerSensors.torquesFromCurrentMeasurment[1:, i],\n 'violet', linewidth=3, linestyle='--')\n", (19727, 19835), True, 'from matplotlib import pyplot as plt\n'), ((19871, 19893), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time [s]"""'], {}), "('Time [s]')\n", (19881, 19893), True, 'from matplotlib import pyplot as plt\n'), ((20018, 20126), 'matplotlib.pyplot.legend', 'plt.legend', (['[h1, h2, h3, h4]', "['FF ' + tmp, 'FB ' + tmp, 'PD+ ' + tmp, 'Meas ' + tmp]"], {'prop': "{'size': 8}"}), "([h1, h2, h3, h4], ['FF ' + tmp, 'FB ' + tmp, 'PD+ ' + tmp, \n 'Meas ' + tmp], prop={'size': 8})\n", (20028, 20126), True, 'from matplotlib import pyplot as plt\n'), ((20126, 20147), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-8.0, 8.0]'], {}), '([-8.0, 8.0])\n', (20134, 20147), True, 'from matplotlib import pyplot as plt\n'), ((20549, 20612), 'matplotlib.pyplot.plot', 'plt.plot', (['t_range', 'self.mpc_x_f[:, 12 + i, 0]', '"""r"""'], {'linewidth': '(3)'}), "(t_range, self.mpc_x_f[:, 12 + i, 0], 'r', linewidth=3)\n", (20557, 20612), True, 'from matplotlib import pyplot as plt\n'), ((20629, 20702), 'matplotlib.pyplot.plot', 'plt.plot', (['t_range', 'self.wbc_f_ctc[:, i]', '"""b"""'], {'linewidth': '(3)', 'linestyle': '"""--"""'}), "(t_range, self.wbc_f_ctc[:, i], 'b', linewidth=3, linestyle='--')\n", (20637, 20702), True, 'from matplotlib import pyplot as plt\n'), ((20715, 20737), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time [s]"""'], {}), "('Time [s]')\n", (20725, 20737), True, 'from matplotlib import pyplot as plt\n'), ((21461, 21524), 'matplotlib.pyplot.plot', 'plt.plot', (['t_range', 'self.wbc_q_des[:, i]'], {'color': '"""r"""', 'linewidth': '(3)'}), "(t_range, self.wbc_q_des[:, i], color='r', linewidth=3)\n", (21469, 21524), True, 'from matplotlib import pyplot as plt\n'), 
((21543, 21612), 'matplotlib.pyplot.plot', 'plt.plot', (['t_range', 'self.esti_q_filt[:, 7 + i]'], {'color': '"""b"""', 'linewidth': '(3)'}), "(t_range, self.esti_q_filt[:, 7 + i], color='b', linewidth=3)\n", (21551, 21612), True, 'from matplotlib import pyplot as plt\n'), ((21623, 21645), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time [s]"""'], {}), "('Time [s]')\n", (21633, 21645), True, 'from matplotlib import pyplot as plt\n'), ((22372, 22400), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', 'index6[j]'], {}), '(3, 2, index6[j])\n', (22383, 22400), True, 'from matplotlib import pyplot as plt\n'), ((23131, 23153), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time [s]"""'], {}), "('Time [s]')\n", (23141, 23153), True, 'from matplotlib import pyplot as plt\n'), ((23166, 23255), 'matplotlib.pyplot.legend', 'plt.legend', (['[h1, h2, h3]', "['Output trajectory of MPC', 'Input trajectory of planner']"], {}), "([h1, h2, h3], ['Output trajectory of MPC',\n 'Input trajectory of planner'])\n", (23176, 23255), True, 'from matplotlib import pyplot as plt\n'), ((23333, 23383), 'matplotlib.pyplot.title', 'plt.title', (["('Predicted trajectory for ' + titles[j])"], {}), "('Predicted trajectory for ' + titles[j])\n", (23342, 23383), True, 'from matplotlib import pyplot as plt\n'), ((23542, 23570), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', 'index6[j]'], {}), '(3, 2, index6[j])\n', (23553, 23570), True, 'from matplotlib import pyplot as plt\n'), ((24305, 24327), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time [s]"""'], {}), "('Time [s]')\n", (24315, 24327), True, 'from matplotlib import pyplot as plt\n'), ((24340, 24456), 'matplotlib.pyplot.legend', 'plt.legend', (['[h1, h2, h3]', "['Output trajectory of MPC', 'Input trajectory of planner',\n 'Actual robot trajectory']"], {}), "([h1, h2, h3], ['Output trajectory of MPC',\n 'Input trajectory of planner', 'Actual robot trajectory'])\n", (24350, 24456), True, 'from matplotlib import pyplot 
as plt\n'), ((24503, 24565), 'matplotlib.pyplot.title', 'plt.title', (["('Predicted trajectory for velocity in ' + titles[j])"], {}), "('Predicted trajectory for velocity in ' + titles[j])\n", (24512, 24565), True, 'from matplotlib import pyplot as plt\n'), ((25011, 25074), 'matplotlib.pyplot.plot', 'plt.plot', (['t_range', 'self.mpc_x_f[:, 12 + i, 0]', '"""r"""'], {'linewidth': '(3)'}), "(t_range, self.mpc_x_f[:, 12 + i, 0], 'r', linewidth=3)\n", (25019, 25074), True, 'from matplotlib import pyplot as plt\n'), ((25091, 25164), 'matplotlib.pyplot.plot', 'plt.plot', (['t_range', 'self.wbc_f_ctc[:, i]', '"""b"""'], {'linewidth': '(3)', 'linestyle': '"""--"""'}), "(t_range, self.wbc_f_ctc[:, i], 'b', linewidth=3, linestyle='--')\n", (25099, 25164), True, 'from matplotlib import pyplot as plt\n'), ((25177, 25199), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time [s]"""'], {}), "('Time [s]')\n", (25187, 25199), True, 'from matplotlib import pyplot as plt\n'), ((26121, 26194), 'matplotlib.pyplot.plot', 'plt.plot', (['t_range', 'self.mpc_x_f[:, 12 + (3 * i + 2), 0]', '"""r"""'], {'linewidth': '(3)'}), "(t_range, self.mpc_x_f[:, 12 + (3 * i + 2), 0], 'r', linewidth=3)\n", (26129, 26194), True, 'from matplotlib import pyplot as plt\n'), ((26295, 26362), 'matplotlib.pyplot.plot', 'plt.plot', (['t_range', 'self.esti_feet_status[:, i]', '"""k"""'], {'linestyle': '"""--"""'}), "(t_range, self.esti_feet_status[:, i], 'k', linestyle='--')\n", (26303, 26362), True, 'from matplotlib import pyplot as plt\n'), ((26375, 26397), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time [s]"""'], {}), "('Time [s]')\n", (26385, 26397), True, 'from matplotlib import pyplot as plt\n'), ((26410, 26438), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (["(lgd2[i] + ' [N]')"], {}), "(lgd2[i] + ' [N]')\n", (26420, 26438), True, 'from matplotlib import pyplot as plt\n'), ((26449, 26523), 'matplotlib.pyplot.legend', 'plt.legend', (['[h1, h2]', "['MPC ' + lgd2[i], 'MPC ' + lgd2[i] + ' trajectory']"], 
{}), "([h1, h2], ['MPC ' + lgd2[i], 'MPC ' + lgd2[i] + ' trajectory'])\n", (26459, 26523), True, 'from matplotlib import pyplot as plt\n'), ((26564, 26586), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-1.0, 26.0]'], {}), '([-1.0, 26.0])\n', (26572, 26586), True, 'from matplotlib import pyplot as plt\n'), ((27895, 27935), 'matplotlib.pyplot.legend', 'plt.legend', (['[lgd_Y[i]]'], {'prop': "{'size': 8}"}), "([lgd_Y[i]], prop={'size': 8})\n", (27905, 27935), True, 'from matplotlib import pyplot as plt\n'), ((29137, 29177), 'matplotlib.pyplot.legend', 'plt.legend', (['[lgd_Y[i]]'], {'prop': "{'size': 8}"}), "([lgd_Y[i]], prop={'size': 8})\n", (29147, 29177), True, 'from matplotlib import pyplot as plt\n'), ((37291, 37319), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', 'index6[j]'], {}), '(3, 2, index6[j])\n', (37302, 37319), True, 'from matplotlib import pyplot as plt\n'), ((37338, 37399), 'matplotlib.pyplot.plot', 'plt.plot', (['log_t_pred', 'self.mpc_x_f[0, j, :]', '"""b"""'], {'linewidth': '(2)'}), "(log_t_pred, self.mpc_x_f[0, j, :], 'b', linewidth=2)\n", (37346, 37399), True, 'from matplotlib import pyplot as plt\n'), ((37418, 37521), 'matplotlib.pyplot.plot', 'plt.plot', (['log_t_ref', 'self.planner_xref[0, j, :]'], {'linestyle': '"""--"""', 'marker': '"""x"""', 'color': '"""g"""', 'linewidth': '(2)'}), "(log_t_ref, self.planner_xref[0, j, :], linestyle='--', marker='x',\n color='g', linewidth=2)\n", (37426, 37521), True, 'from matplotlib import pyplot as plt\n'), ((38394, 38422), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', 'index6[j]'], {}), '(3, 2, index6[j])\n', (38405, 38422), True, 'from matplotlib import pyplot as plt\n'), ((38441, 38502), 'matplotlib.pyplot.plot', 'plt.plot', (['log_t_pred', 'self.mpc_x_f[0, j, :]', '"""b"""'], {'linewidth': '(2)'}), "(log_t_pred, self.mpc_x_f[0, j, :], 'b', linewidth=2)\n", (38449, 38502), True, 'from matplotlib import pyplot as plt\n'), ((38521, 38624), 'matplotlib.pyplot.plot', 'plt.plot', 
(['log_t_ref', 'self.planner_xref[0, j, :]'], {'linestyle': '"""--"""', 'marker': '"""x"""', 'color': '"""g"""', 'linewidth': '(2)'}), "(log_t_ref, self.planner_xref[0, j, :], linestyle='--', marker='x',\n color='g', linewidth=2)\n", (38529, 38624), True, 'from matplotlib import pyplot as plt\n'), ((42529, 42630), 'matplotlib.pyplot.plot', 'plt.plot', (['o_step[0::3, 0]', 'o_step[1::3, 0]'], {'linestyle': 'None', 'linewidth': '(0)', 'marker': '"""o"""', 'color': 'f_c[j]'}), "(o_step[0::3, 0], o_step[1::3, 0], linestyle=None, linewidth=0,\n marker='o', color=f_c[j])\n", (42537, 42630), True, 'from matplotlib import pyplot as plt\n'), ((43463, 43521), 'utils_mpc.quaternionToRPY', 'utils_mpc.quaternionToRPY', (['self.loop_o_q_int[rounded, 3:7]'], {}), '(self.loop_o_q_int[rounded, 3:7])\n', (43488, 43521), False, 'import utils_mpc\n'), ((43547, 43594), 'utils_mpc.EulerToQuaternion', 'utils_mpc.EulerToQuaternion', (['[0.0, 0.0, RPY[2]]'], {}), '([0.0, 0.0, RPY[2]])\n', (43574, 43594), False, 'import utils_mpc\n'), ((10147, 10201), 'utils_mpc.quaternionToRPY', 'quaternionToRPY', (['loggerSensors.mocapOrientationQuat[i]'], {}), '(loggerSensors.mocapOrientationQuat[i])\n', (10162, 10201), False, 'from utils_mpc import quaternionToRPY\n'), ((11158, 11187), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(4)', 'index12[i]'], {}), '(3, 4, index12[i])\n', (11169, 11187), True, 'from matplotlib import pyplot as plt\n'), ((11222, 11263), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(4)', 'index12[i]'], {'sharex': 'ax0'}), '(3, 4, index12[i], sharex=ax0)\n', (11233, 11263), True, 'from matplotlib import pyplot as plt\n'), ((12564, 12593), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(4)', 'index12[i]'], {}), '(3, 4, index12[i])\n', (12575, 12593), True, 'from matplotlib import pyplot as plt\n'), ((12628, 12669), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(4)', 'index12[i]'], {'sharex': 'ax0'}), '(3, 4, index12[i], sharex=ax0)\n', (12639, 12669), 
True, 'from matplotlib import pyplot as plt\n'), ((13516, 13545), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(4)', 'index12[i]'], {}), '(3, 4, index12[i])\n', (13527, 13545), True, 'from matplotlib import pyplot as plt\n'), ((13580, 13621), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(4)', 'index12[i]'], {'sharex': 'ax0'}), '(3, 4, index12[i], sharex=ax0)\n', (13591, 13621), True, 'from matplotlib import pyplot as plt\n'), ((14111, 14139), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', 'index6[i]'], {}), '(3, 2, index6[i])\n', (14122, 14139), True, 'from matplotlib import pyplot as plt\n'), ((14174, 14214), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', 'index6[i]'], {'sharex': 'ax0'}), '(3, 2, index6[i], sharex=ax0)\n', (14185, 14214), True, 'from matplotlib import pyplot as plt\n'), ((14259, 14329), 'matplotlib.pyplot.plot', 'plt.plot', (['t_range', 'self.loop_pos_virtual_world[:, i]', '"""b"""'], {'linewidth': '(3)'}), "(t_range, self.loop_pos_virtual_world[:, i], 'b', linewidth=3)\n", (14267, 14329), True, 'from matplotlib import pyplot as plt\n'), ((14346, 14416), 'matplotlib.pyplot.plot', 'plt.plot', (['t_range', 'self.loop_pos_virtual_world[:, i]', '"""r"""'], {'linewidth': '(3)'}), "(t_range, self.loop_pos_virtual_world[:, i], 'r', linewidth=3)\n", (14354, 14416), True, 'from matplotlib import pyplot as plt\n'), ((14832, 14902), 'matplotlib.pyplot.plot', 'plt.plot', (['t_range', 'loggerSensors.mocapPosition[:, i]', '"""k"""'], {'linewidth': '(3)'}), "(t_range, loggerSensors.mocapPosition[:, i], 'k', linewidth=3)\n", (14840, 14902), True, 'from matplotlib import pyplot as plt\n'), ((14937, 14998), 'matplotlib.pyplot.plot', 'plt.plot', (['t_range', 'self.mocap_RPY[:, i - 3]', '"""k"""'], {'linewidth': '(3)'}), "(t_range, self.mocap_RPY[:, i - 3], 'k', linewidth=3)\n", (14945, 14998), True, 'from matplotlib import pyplot as plt\n'), ((15705, 15733), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', 
'index6[i]'], {}), '(3, 2, index6[i])\n', (15716, 15733), True, 'from matplotlib import pyplot as plt\n'), ((15768, 15808), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', 'index6[i]'], {'sharex': 'ax0'}), '(3, 2, index6[i], sharex=ax0)\n', (15779, 15808), True, 'from matplotlib import pyplot as plt\n'), ((15986, 16043), 'matplotlib.pyplot.plot', 'plt.plot', (['t_range', 'self.mocap_b_v[:, i]', '"""k"""'], {'linewidth': '(3)'}), "(t_range, self.mocap_b_v[:, i], 'k', linewidth=3)\n", (15994, 16043), True, 'from matplotlib import pyplot as plt\n'), ((16163, 16253), 'matplotlib.pyplot.plot', 'plt.plot', (['t_range', 'self.esti_filt_lin_vel[:, i]', '"""violet"""'], {'linewidth': '(3)', 'linestyle': '"""--"""'}), "(t_range, self.esti_filt_lin_vel[:, i], 'violet', linewidth=3,\n linestyle='--')\n", (16171, 16253), True, 'from matplotlib import pyplot as plt\n'), ((16284, 16345), 'matplotlib.pyplot.plot', 'plt.plot', (['t_range', 'self.mocap_b_w[:, i - 3]', '"""k"""'], {'linewidth': '(3)'}), "(t_range, self.mocap_b_w[:, i - 3], 'k', linewidth=3)\n", (16292, 16345), True, 'from matplotlib import pyplot as plt\n'), ((19191, 19220), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(4)', 'index12[i]'], {}), '(3, 4, index12[i])\n', (19202, 19220), True, 'from matplotlib import pyplot as plt\n'), ((19255, 19296), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(4)', 'index12[i]'], {'sharex': 'ax0'}), '(3, 4, index12[i], sharex=ax0)\n', (19266, 19296), True, 'from matplotlib import pyplot as plt\n'), ((20425, 20454), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(4)', 'index12[i]'], {}), '(3, 4, index12[i])\n', (20436, 20454), True, 'from matplotlib import pyplot as plt\n'), ((20489, 20530), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(4)', 'index12[i]'], {'sharex': 'ax0'}), '(3, 4, index12[i], sharex=ax0)\n', (20500, 20530), True, 'from matplotlib import pyplot as plt\n'), ((21014, 21036), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-0.0, 
26.0]'], {}), '([-0.0, 26.0])\n', (21022, 21036), True, 'from matplotlib import pyplot as plt\n'), ((21071, 21094), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-26.0, 26.0]'], {}), '([-26.0, 26.0])\n', (21079, 21094), True, 'from matplotlib import pyplot as plt\n'), ((21337, 21366), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(4)', 'index12[i]'], {}), '(3, 4, index12[i])\n', (21348, 21366), True, 'from matplotlib import pyplot as plt\n'), ((21401, 21442), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(4)', 'index12[i]'], {'sharex': 'ax0'}), '(3, 4, index12[i], sharex=ax0)\n', (21412, 21442), True, 'from matplotlib import pyplot as plt\n'), ((22781, 22899), 'matplotlib.pyplot.plot', 'plt.plot', (['(log_t_ref + i * self.dt)', 'self.planner_xref[i, j, :]'], {'linestyle': '"""--"""', 'marker': '"""x"""', 'color': '"""g"""', 'linewidth': '(2)'}), "(log_t_ref + i * self.dt, self.planner_xref[i, j, :], linestyle=\n '--', marker='x', color='g', linewidth=2)\n", (22789, 22899), True, 'from matplotlib import pyplot as plt\n'), ((23953, 24075), 'matplotlib.pyplot.plot', 'plt.plot', (['(log_t_ref + i * self.dt)', 'self.planner_xref[i, j + 6, :]'], {'linestyle': '"""--"""', 'marker': '"""x"""', 'color': '"""g"""', 'linewidth': '(2)'}), "(log_t_ref + i * self.dt, self.planner_xref[i, j + 6, :], linestyle\n ='--', marker='x', color='g', linewidth=2)\n", (23961, 24075), True, 'from matplotlib import pyplot as plt\n'), ((24887, 24916), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(4)', 'index12[i]'], {}), '(3, 4, index12[i])\n', (24898, 24916), True, 'from matplotlib import pyplot as plt\n'), ((24951, 24992), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(4)', 'index12[i]'], {'sharex': 'ax0'}), '(3, 4, index12[i], sharex=ax0)\n', (24962, 24992), True, 'from matplotlib import pyplot as plt\n'), ((25476, 25498), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-0.0, 26.0]'], {}), '([-0.0, 26.0])\n', (25484, 25498), True, 'from matplotlib import pyplot as 
plt\n'), ((25533, 25556), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-26.0, 26.0]'], {}), '([-26.0, 26.0])\n', (25541, 25556), True, 'from matplotlib import pyplot as plt\n'), ((25824, 25848), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(4)', '(i + 1)'], {}), '(1, 4, i + 1)\n', (25835, 25848), True, 'from matplotlib import pyplot as plt\n'), ((25881, 25917), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(4)', '(i + 1)'], {'sharex': 'ax0'}), '(1, 4, i + 1, sharex=ax0)\n', (25892, 25917), True, 'from matplotlib import pyplot as plt\n'), ((25999, 26116), 'matplotlib.pyplot.plot', 'plt.plot', (['(log_t_pred + k * self.dt)', 'self.mpc_x_f[k, 12 + (3 * i + 2), :]'], {'linestyle': '"""--"""', 'marker': '"""x"""', 'linewidth': '(2)'}), "(log_t_pred + k * self.dt, self.mpc_x_f[k, 12 + (3 * i + 2), :],\n linestyle='--', marker='x', linewidth=2)\n", (26007, 26116), True, 'from matplotlib import pyplot as plt\n'), ((27028, 27052), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(4)', '(i + 1)'], {}), '(3, 4, i + 1)\n', (27039, 27052), True, 'from matplotlib import pyplot as plt\n'), ((27085, 27121), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(4)', '(i + 1)'], {'sharex': 'ax0'}), '(3, 4, i + 1, sharex=ax0)\n', (27096, 27121), True, 'from matplotlib import pyplot as plt\n'), ((28270, 28294), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(4)', '(i + 1)'], {}), '(3, 4, i + 1)\n', (28281, 28294), True, 'from matplotlib import pyplot as plt\n'), ((28327, 28363), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(4)', '(i + 1)'], {'sharex': 'ax0'}), '(3, 4, i + 1, sharex=ax0)\n', (28338, 28363), True, 'from matplotlib import pyplot as plt\n'), ((29423, 29437), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (29435, 29437), True, 'from datetime import datetime as datetime\n'), ((36649, 36670), 'numpy.sin', 'np.sin', (['(2 * np.pi * t)'], {}), '(2 * np.pi * t)\n', (36655, 36670), True, 'import numpy as np\n'), ((36916, 36934), 
'numpy.max', 'np.max', (['log_t_pred'], {}), '(log_t_pred)\n', (36922, 36934), True, 'import numpy as np\n'), ((36936, 36953), 'numpy.max', 'np.max', (['log_t_ref'], {}), '(log_t_ref)\n', (36942, 36953), True, 'import numpy as np\n'), ((39450, 39492), 'numpy.round', 'np.round', (['(val / (self.dt * 10))'], {'decimals': '(0)'}), '(val / (self.dt * 10), decimals=0)\n', (39458, 39492), True, 'import numpy as np\n'), ((39532, 39579), 'numpy.round', 'np.round', (['(time_slider.val / self.dt)'], {'decimals': '(0)'}), '(time_slider.val / self.dt, decimals=0)\n', (39540, 39579), True, 'import numpy as np\n'), ((40486, 40528), 'numpy.round', 'np.round', (['(val / (self.dt * 10))'], {'decimals': '(0)'}), '(val / (self.dt * 10), decimals=0)\n', (40494, 40528), True, 'import numpy as np\n'), ((40568, 40619), 'numpy.round', 'np.round', (['(time_slider_vel.val / self.dt)'], {'decimals': '(0)'}), '(time_slider_vel.val / self.dt, decimals=0)\n', (40576, 40619), True, 'import numpy as np\n'), ((42324, 42344), 'pinocchio.Quaternion', 'pin.Quaternion', (['quat'], {}), '(quat)\n', (42338, 42344), True, 'import pinocchio as pin\n'), ((43205, 43247), 'numpy.round', 'np.round', (['(val / (self.dt * 10))'], {'decimals': '(0)'}), '(val / (self.dt * 10), decimals=0)\n', (43213, 43247), True, 'import numpy as np\n'), ((43287, 43334), 'numpy.round', 'np.round', (['(time_slider.val / self.dt)'], {'decimals': '(0)'}), '(time_slider.val / self.dt, decimals=0)\n', (43295, 43334), True, 'import numpy as np\n'), ((14458, 14528), 'matplotlib.pyplot.plot', 'plt.plot', (['t_range', 'self.loop_pos_virtual_world[:, 2]', '"""b"""'], {'linewidth': '(3)'}), "(t_range, self.loop_pos_virtual_world[:, 2], 'b', linewidth=3)\n", (14466, 14528), True, 'from matplotlib import pyplot as plt\n'), ((14545, 14615), 'matplotlib.pyplot.plot', 'plt.plot', (['t_range', 'self.loop_pos_virtual_world[:, 2]', '"""r"""'], {'linewidth': '(3)'}), "(t_range, self.loop_pos_virtual_world[:, 2], 'r', linewidth=3)\n", (14553, 
14615), True, 'from matplotlib import pyplot as plt\n'), ((14650, 14713), 'matplotlib.pyplot.plot', 'plt.plot', (['t_range', 'self.planner_xref[:, i, 0]', '"""b"""'], {'linewidth': '(2)'}), "(t_range, self.planner_xref[:, i, 0], 'b', linewidth=2)\n", (14658, 14713), True, 'from matplotlib import pyplot as plt\n'), ((14730, 14793), 'matplotlib.pyplot.plot', 'plt.plot', (['t_range', 'self.planner_xref[:, i, 1]', '"""r"""'], {'linewidth': '(3)'}), "(t_range, self.planner_xref[:, i, 1], 'r', linewidth=3)\n", (14738, 14793), True, 'from matplotlib import pyplot as plt\n'), ((32907, 32935), 'glob.glob', 'glob.glob', (['"""data_2021_*.npz"""'], {}), "('data_2021_*.npz')\n", (32916, 32935), False, 'import glob\n'), ((43613, 43633), 'pinocchio.Quaternion', 'pin.Quaternion', (['quat'], {}), '(quat)\n', (43627, 43633), True, 'import pinocchio as pin\n'), ((11323, 11336), 'numpy.int', 'np.int', (['(i / 3)'], {}), '(i / 3)\n', (11329, 11336), True, 'import numpy as np\n'), ((11587, 11600), 'numpy.int', 'np.int', (['(i / 3)'], {}), '(i / 3)\n', (11593, 11600), True, 'import numpy as np\n'), ((12728, 12741), 'numpy.int', 'np.int', (['(i / 3)'], {}), '(i / 3)\n', (12734, 12741), True, 'import numpy as np\n'), ((12842, 12855), 'numpy.int', 'np.int', (['(i / 3)'], {}), '(i / 3)\n', (12848, 12855), True, 'import numpy as np\n'), ((13687, 13700), 'numpy.int', 'np.int', (['(i / 3)'], {}), '(i / 3)\n', (13693, 13700), True, 'import numpy as np\n'), ((40117, 40127), 'numpy.min', 'np.min', (['y1'], {}), '(y1)\n', (40123, 40127), True, 'import numpy as np\n'), ((40129, 40139), 'numpy.min', 'np.min', (['y2'], {}), '(y2)\n', (40135, 40139), True, 'import numpy as np\n'), ((40173, 40183), 'numpy.max', 'np.max', (['y1'], {}), '(y1)\n', (40179, 40183), True, 'import numpy as np\n'), ((40185, 40195), 'numpy.max', 'np.max', (['y2'], {}), '(y2)\n', (40191, 40195), True, 'import numpy as np\n'), ((41110, 41120), 'numpy.min', 'np.min', (['y1'], {}), '(y1)\n', (41116, 41120), True, 'import numpy as 
np\n'), ((41122, 41132), 'numpy.min', 'np.min', (['y2'], {}), '(y2)\n', (41128, 41132), True, 'import numpy as np\n'), ((41166, 41176), 'numpy.max', 'np.max', (['y1'], {}), '(y1)\n', (41172, 41176), True, 'import numpy as np\n'), ((41178, 41188), 'numpy.max', 'np.max', (['y2'], {}), '(y2)\n', (41184, 41188), True, 'import numpy as np\n'), ((11430, 11443), 'numpy.int', 'np.int', (['(i / 3)'], {}), '(i / 3)\n', (11436, 11443), True, 'import numpy as np\n'), ((11473, 11486), 'numpy.int', 'np.int', (['(i / 3)'], {}), '(i / 3)\n', (11479, 11486), True, 'import numpy as np\n'), ((11889, 11902), 'numpy.int', 'np.int', (['(i / 3)'], {}), '(i / 3)\n', (11895, 11902), True, 'import numpy as np\n'), ((11961, 11974), 'numpy.int', 'np.int', (['(i / 3)'], {}), '(i / 3)\n', (11967, 11974), True, 'import numpy as np\n'), ((13108, 13121), 'numpy.int', 'np.int', (['(i / 3)'], {}), '(i / 3)\n', (13114, 13121), True, 'import numpy as np\n'), ((12177, 12190), 'numpy.int', 'np.int', (['(i / 3)'], {}), '(i / 3)\n', (12183, 12190), True, 'import numpy as np\n'), ((12254, 12267), 'numpy.int', 'np.int', (['(i / 3)'], {}), '(i / 3)\n', (12260, 12267), True, 'import numpy as np\n'), ((13220, 13233), 'numpy.int', 'np.int', (['(i / 3)'], {}), '(i / 3)\n', (13226, 13233), True, 'import numpy as np\n'), ((13787, 13800), 'numpy.int', 'np.int', (['(i / 3)'], {}), '(i / 3)\n', (13793, 13800), True, 'import numpy as np\n'), ((12033, 12046), 'numpy.int', 'np.int', (['(i / 3)'], {}), '(i / 3)\n', (12039, 12046), True, 'import numpy as np\n')] |
from typing import Any, Tuple, Callable, Iterator
from os import path
import csv
import random
import numpy as np
from PIL import Image
import torch
from torchvision.transforms.functional import to_tensor
def make_reproducible(seed: int = 0) -> None:
    """Seed every RNG in use (``random``, NumPy, torch) for reproducible runs.

    Also pins cuDNN to deterministic kernels: its autotuner otherwise
    selects algorithms non-deterministically across runs.
    """
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def memoize(func: Callable) -> Callable:
    """Cache ``func`` results keyed by its positional arguments.

    Only hashable positional arguments are supported; keyword arguments are
    deliberately not accepted by the wrapper (same contract as before).

    Fix: the wrapper now carries the wrapped function's ``__name__`` /
    ``__doc__`` via ``functools.wraps``, so memoized functions remain
    introspectable and debuggable.
    """
    from functools import wraps

    cache = {}

    @wraps(func)
    def wrapper(*args: Any) -> Any:
        if args not in cache:
            cache[args] = func(*args)
        return cache[args]
    return wrapper
@memoize
def load_image(file: str) -> torch.FloatTensor:
    """Read *file* and return it as a batched (1, C, H, W) float tensor.

    Results are memoized per path, so repeated lookups of the same
    reference image cost nothing after the first load.
    """
    pil_image = Image.open(file)
    return to_tensor(pil_image).unsqueeze(0)
def load_reference() -> Iterator[Tuple[torch.FloatTensor, torch.FloatTensor, float]]:
    """Yield (image1, image2, score) triples from ``assets/reference.csv``.

    Image file names in the CSV are resolved relative to the
    ``assets/images`` directory next to this module, and each image is
    loaded through the memoized ``load_image`` helper.
    """
    assets_root = path.join(path.dirname(__file__), "assets")
    images_root = path.join(assets_root, "images")
    with open(path.join(assets_root, "reference.csv")) as csvfh:
        reader = csv.DictReader(csvfh)
        for record in reader:
            first = load_image(path.join(images_root, record["image1"]))
            second = load_image(path.join(images_root, record["image2"]))
            yield first, second, float(record["score"])
| [
"torch.manual_seed",
"csv.DictReader",
"PIL.Image.open",
"os.path.join",
"random.seed",
"os.path.dirname",
"numpy.random.seed"
] | [((257, 274), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (268, 274), False, 'import random\n'), ((279, 299), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (293, 299), True, 'import numpy as np\n'), ((304, 327), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (321, 327), False, 'import torch\n'), ((906, 938), 'os.path.join', 'path.join', (['assets_root', '"""images"""'], {}), "(assets_root, 'images')\n", (915, 938), False, 'from os import path\n'), ((854, 876), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (866, 876), False, 'from os import path\n'), ((1024, 1045), 'csv.DictReader', 'csv.DictReader', (['csvfh'], {}), '(csvfh)\n', (1038, 1045), False, 'import csv\n'), ((954, 993), 'os.path.join', 'path.join', (['assets_root', '"""reference.csv"""'], {}), "(assets_root, 'reference.csv')\n", (963, 993), False, 'from os import path\n'), ((707, 723), 'PIL.Image.open', 'Image.open', (['file'], {}), '(file)\n', (717, 723), False, 'from PIL import Image\n'), ((1079, 1116), 'os.path.join', 'path.join', (['images_root', "row['image1']"], {}), "(images_root, row['image1'])\n", (1088, 1116), False, 'from os import path\n'), ((1150, 1187), 'os.path.join', 'path.join', (['images_root', "row['image2']"], {}), "(images_root, row['image2'])\n", (1159, 1187), False, 'from os import path\n')] |
import matplotlib.pyplot as plt
import numpy as np

# Demo of scatter marker styles and sizes on three simple curves.
xs = np.linspace(-4, 4, num=20)
curves = [
    (xs, "v", 1000),     # identity line, large triangles
    (-xs, "X", 100),     # mirrored line, medium crosses
    (xs ** 2, "s", 10),  # parabola, small squares
]

fig = plt.figure(figsize=(8, 5))
ax = fig.add_subplot()
for ys, marker, size in curves:
    ax.scatter(x=xs, y=ys, marker=marker, s=size)

plt.tight_layout()
plt.savefig('markers.svg', bbox_inches='tight')
plt.show()
"matplotlib.pyplot.savefig",
"numpy.linspace",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.show"
] | [((57, 83), 'numpy.linspace', 'np.linspace', (['(-4)', '(4)'], {'num': '(20)'}), '(-4, 4, num=20)\n', (68, 83), True, 'import numpy as np\n'), ((119, 145), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 5)'}), '(figsize=(8, 5))\n', (129, 145), True, 'import matplotlib.pyplot as plt\n'), ((296, 314), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (312, 314), True, 'import matplotlib.pyplot as plt\n'), ((315, 362), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""markers.svg"""'], {'bbox_inches': '"""tight"""'}), "('markers.svg', bbox_inches='tight')\n", (326, 362), True, 'import matplotlib.pyplot as plt\n'), ((363, 373), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (371, 373), True, 'import matplotlib.pyplot as plt\n')] |
#Write by <NAME>, contact: <EMAIL>
# -*- coding: utf-8 -*-
## use GPU
import os
import tensorflow as tf
os.environ['CUDA_VISIBLE_DEVICES']='0'
config=tf.ConfigProto()
config.gpu_options.allow_growth= True
sess=tf.Session(config=config)
import numpy as np
import matplotlib.pyplot as plt
import scipy.io as sio
from keras.utils.np_utils import to_categorical
from keras.optimizers import Adam, SGD, Adadelta, RMSprop, Nadam
from sklearn import metrics, preprocessing
from Utils import zeroPadding, normalization, doPCA, modelStatsRecord, averageAccuracy, ssrn_SS_Houston_3FF_F1
import h5py
from keras.models import load_model
from keras.utils.vis_utils import plot_model
def sampling1(trainlabels, testlabels):
    """Collect sample indices per class for fixed train/test label maps.

    Parameters
    ----------
    trainlabels, testlabels : array-like of int
        Ground-truth label maps; class labels run from 1 to
        ``max(trainlabels)`` and 0 is treated as background (skipped).

    Returns
    -------
    (list, list)
        Flattened indices of train and test samples, grouped class by
        class (all class-1 indices first, then class 2, ...).

    Fixes: removed the dead ``m = {}`` assignment that was immediately
    overwritten, and factored the duplicated per-split loop into one
    helper.
    """
    def _indices_by_class(labels, num_classes):
        # One pass per class preserves the per-class grouping/order
        # produced by the original implementation.
        flat = labels.ravel().tolist()
        grouped = []
        for cls in range(1, num_classes + 1):
            grouped.extend(j for j, lab in enumerate(flat) if lab == cls)
        return grouped

    num_classes = int(np.max(trainlabels[:]))
    train_indices = _indices_by_class(trainlabels, num_classes)
    test_indices = _indices_by_class(testlabels, num_classes)
    return train_indices, test_indices
def sampling(proptionVal, groundTruth):
    """Randomly split labelled samples into train/test index lists.

    For each class label 1..max(groundTruth), the class's flat indices are
    shuffled; the last ``proptionVal`` fraction goes to the test split and
    the rest to the train split.  Both result lists are shuffled once more
    before returning.
    """
    num_classes = max(groundTruth)
    flat = groundTruth.ravel().tolist()
    train_indices = []
    test_indices = []
    for cls in range(num_classes):
        class_idx = [j for j, lab in enumerate(flat) if lab == cls + 1]
        np.random.shuffle(class_idx)
        nb_val = int(proptionVal * len(class_idx))
        # Note: when nb_val == 0 the [:-0] slice is empty, so everything
        # lands in the test split — same corner-case behaviour as before.
        train_indices += class_idx[:-nb_val]
        test_indices += class_idx[-nb_val:]
    np.random.shuffle(train_indices)
    np.random.shuffle(test_indices)
    return train_indices, test_indices
def indexToAssignment(index_, Row, Col, pad_length):
    """Map flat pixel indices to [row, col] positions in the padded image.

    ``Row`` is kept for signature compatibility with existing callers but
    is not needed by the arithmetic; only the column count and the padding
    offset enter the computation.
    """
    return {
        counter: [value // Col + pad_length, value % Col + pad_length]
        for counter, value in enumerate(index_)
    }
def assignmentToIndex(assign_0, assign_1, Row, Col):
    """Row-major flattening of a (row, col) position; ``Row`` is unused but kept for compatibility."""
    return assign_0 * Col + assign_1
def selectNeighboringPatch(matrix, pos_row, pos_col, ex_len):
    """Cut a (2*ex_len+1)-square patch centred on (pos_row, pos_col).

    Assumes the matrix has already been zero-padded so the window never
    leaves the array bounds.  Fancy (range) indexing is used on purpose:
    it returns an independent copy, matching the original semantics.
    """
    row_range = range(pos_row - ex_len, pos_row + ex_len + 1)
    col_range = range(pos_col - ex_len, pos_col + ex_len + 1)
    selected_rows = matrix[row_range, :]
    return selected_rows[:, col_range]
def classification_map(map, groundTruth, dpi, savePath):
    """Render ``map`` as a borderless image sized to the ground truth and save it.

    Two output pixels per ground-truth pixel at the requested dpi; axes
    and frame are stripped so only the classification raster remains.
    Returns 0 on completion (kept for compatibility with callers).
    """
    fig = plt.figure(frameon=False)
    width = groundTruth.shape[1] * 2.0 / dpi
    height = groundTruth.shape[0] * 2.0 / dpi
    fig.set_size_inches(width, height)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.set_axis_off()
    ax.xaxis.set_visible(False)
    ax.yaxis.set_visible(False)
    fig.add_axes(ax)
    ax.imshow(map, aspect='equal')
    fig.savefig(savePath, dpi=dpi)
    return 0
def res4_model_ss():
    """Build and compile the 3FF residual spectral-spatial network.

    Relies on module-level globals ``img_rows``, ``img_cols`` and
    ``nb_classes``.  NOTE(review): ``img_channels`` is not defined
    anywhere in this script (only ``img_channels_HSI`` etc. are), so
    calling this function as-is would raise NameError — confirm the
    intended channel count before using it.
    """
    model_res4 = ssrn_SS_Houston_3FF_F1.ResnetBuilder.build_resnet_2_2((1, img_rows, img_cols, img_channels), nb_classes)
    RMS = RMSprop(lr=0.0003)
    # Let's train the model using RMSprop
    model_res4.compile(loss='categorical_crossentropy', optimizer=RMS, metrics=['accuracy'])
    return model_res4
######### Load data HSI ########
# Hyperspectral cube stored as a MATLAB array under key 'HSI'.
mat_LiDAR = sio.loadmat('/home/amax/Documents/GCR/RNPRF-RNDFF-RNPMF/datasets/Houston/HSI.mat')
data_Houston_HSI = mat_LiDAR['HSI']
mat_data = sio.loadmat('/home/amax/Documents/GCR/RNPRF-RNDFF-RNPMF/datasets/Houston/Houston_gt.mat')
trainlabels = mat_data['trainlabels']
testlabels = mat_data['testlabels']
del mat_data, mat_LiDAR
######### Load data HSI_EPLBP ########
# HSI_EPLBP is saved in MATLAB v7.3 format, so it is read through h5py
# and transposed back to (rows, cols, bands) order.
file=h5py.File('/home/amax/Documents/GCR/RNPRF-RNDFF-RNPMF/datasets/Houston/HSI_EPLBP.mat','r')
file.keys()
data = file['HSI_EPLBP'][:]
data_Houston_HSIEPLBP=data.transpose(2,1,0);
file.close()
# NOTE(review): Houston_gt.mat is reloaded here (and again below) with
# identical results — redundant I/O, harmless but could be loaded once.
mat_data = sio.loadmat('/home/amax/Documents/GCR/RNPRF-RNDFF-RNPMF/datasets/Houston/Houston_gt.mat')
trainlabels = mat_data['trainlabels']
testlabels = mat_data['testlabels']
del mat_data, data
######### Load data LiDAR_EPLBP ########
mat_LiDAR = sio.loadmat('/home/amax/Documents/GCR/RNPRF-RNDFF-RNPMF/datasets/Houston/LiDAR_DSM_EPLBP.mat')
data_Houston_LiDAREPLBP = mat_LiDAR['LiDAR_DSM_EPLBP']
mat_data = sio.loadmat('/home/amax/Documents/GCR/RNPRF-RNDFF-RNPMF/datasets/Houston/Houston_gt.mat')
trainlabels = mat_data['trainlabels']
testlabels = mat_data['testlabels']
del mat_data, mat_LiDAR
######### Training parameter setting ##########
batch_size = 16 #sample number of each batch
nb_classes = 15 #class number
nb_epoch = 200 #epoch
img_rows, img_cols = 7, 7  # spatial patch size fed to the network
patience = 200
PATCH_LENGTH = 3 #Patch_size (half-window; full window is 2*3+1 = 7)
TEST_SIZE = 12197
TRAIN_SIZE = 2832
TOTAL_SIZE = TRAIN_SIZE+TEST_SIZE
img_channels_HSI = 144
img_channels_HSIEPLBP = 815
img_channels_LiDAREPLBP = 134
CATEGORY = 15
# Total pixel count of the scene; every pixel gets classified below.
ALL_SIZE = data_Houston_HSI.shape[0] * data_Houston_HSI.shape[1]
######### Data normalization ########
# Each cube is flattened to (pixels, bands), standardized per band, then
# reshaped back and zero-padded so border pixels get full patches.
data = data_Houston_HSI.reshape(np.prod(data_Houston_HSI.shape[:2]),np.prod(data_Houston_HSI.shape[2:]))# 3D to 2D
data = preprocessing.scale(data) #normalization
whole_data_HSI = data.reshape(data_Houston_HSI.shape[0], data_Houston_HSI.shape[1],data_Houston_HSI.shape[2])
padded_data_HSI = zeroPadding.zeroPadding_3D(whole_data_HSI, PATCH_LENGTH)
del data,data_Houston_HSI
data = data_Houston_HSIEPLBP.reshape(np.prod(data_Houston_HSIEPLBP.shape[:2]),np.prod(data_Houston_HSIEPLBP.shape[2:]))# 3-D matrix flattened to 2-D
data = preprocessing.scale(data) #normalization
whole_data_HSIEPLBP = data.reshape(data_Houston_HSIEPLBP.shape[0], data_Houston_HSIEPLBP.shape[1],data_Houston_HSIEPLBP.shape[2])
padded_data_HSIEPLBP = zeroPadding.zeroPadding_3D(whole_data_HSIEPLBP, PATCH_LENGTH)
del data,data_Houston_HSIEPLBP
data = data_Houston_LiDAREPLBP.reshape(np.prod(data_Houston_LiDAREPLBP.shape[:2]),np.prod(data_Houston_LiDAREPLBP.shape[2:]))# 3-D matrix flattened to 2-D
data = preprocessing.scale(data) #normalization
whole_data_LiDAREPLBP = data.reshape(data_Houston_LiDAREPLBP.shape[0], data_Houston_LiDAREPLBP.shape[1],data_Houston_LiDAREPLBP.shape[2])
padded_data_LiDAREPLBP = zeroPadding.zeroPadding_3D(whole_data_LiDAREPLBP, PATCH_LENGTH)
del data,data_Houston_LiDAREPLBP
############ Full image mapping and model reading ############
best_weights_RES_path_ss4 = ('/home/amax/Documents/GCR/RNPRF-RNDFF-RNPMF/models/Houston/3DFF/Houston_3FF_7-7_2-2_24_0.0003.hdf5')
model=load_model(best_weights_RES_path_ss4)
##Grouping the testing samples
# All pixels are classified in n+1 chunks to bound memory use:
# n chunks of size ALL_SIZE//n plus one remainder chunk.
n=60
group=ALL_SIZE//n
group_last=ALL_SIZE%n
Group=[]   # per-chunk ranges of flat pixel indices
for i in range(n+1):
    if i==0:
        Group.append(range(group))
    elif i!= n and i > 0:
        Group.append(range(group*i,group*(i+1)))
    elif i==n:
        Group.append(range(group*i,group*i+group_last))
GROUP=[]   # per-chunk sizes, parallel to Group
for i in range(n+1):
    if i!= n:
        GROUP.append(group)
    elif i==n:
        GROUP.append(group_last)
##Predict each set of test samples. imagemap is the final map.
imagemap=[]    # predicted class index per pixel, chunk by chunk
imageprob=[]   # raw softmax probabilities per pixel, chunk by chunk
for i in range(len(Group)):
    print(i)
    # Allocate one patch tensor per modality for the current chunk.
    all_data_HSI = np.zeros((GROUP[i], 2*PATCH_LENGTH + 1, 2*PATCH_LENGTH + 1, img_channels_HSI))
    all_data_HSIEPLBP = np.zeros((GROUP[i], 2*PATCH_LENGTH + 1, 2*PATCH_LENGTH + 1, img_channels_HSIEPLBP))
    all_data_LiDAREPLBP = np.zeros((GROUP[i], 2*PATCH_LENGTH + 1, 2*PATCH_LENGTH + 1, img_channels_LiDAREPLBP))
    # Flat index -> (row, col) in the padded cubes, then cut one patch
    # per pixel from each modality.
    all_assign = indexToAssignment(Group[i], whole_data_HSI.shape[0], whole_data_HSI.shape[1], PATCH_LENGTH)
    for j in range(len(all_assign)):
        all_data_HSI[j] = selectNeighboringPatch(padded_data_HSI, all_assign[j][0], all_assign[j][1], PATCH_LENGTH)
        all_data_HSIEPLBP[j] = selectNeighboringPatch(padded_data_HSIEPLBP, all_assign[j][0], all_assign[j][1], PATCH_LENGTH)
        all_data_LiDAREPLBP[j] = selectNeighboringPatch(padded_data_LiDAREPLBP, all_assign[j][0], all_assign[j][1], PATCH_LENGTH)
    # The network takes three 5-D inputs (batch, H, W, bands, 1).
    prob_image = model.predict(
        [all_data_HSI.reshape(all_data_HSI.shape[0], all_data_HSI.shape[1], all_data_HSI.shape[2], all_data_HSI.shape[3], 1),
         all_data_HSIEPLBP.reshape(all_data_HSIEPLBP.shape[0], all_data_HSIEPLBP.shape[1], all_data_HSIEPLBP.shape[2], all_data_HSIEPLBP.shape[3], 1),
         all_data_LiDAREPLBP.reshape(all_data_LiDAREPLBP.shape[0], all_data_LiDAREPLBP.shape[1], all_data_LiDAREPLBP.shape[2], all_data_LiDAREPLBP.shape[3], 1)])
    pred_image=prob_image.argmax(axis=1)
    imageprob=imageprob+[prob_image]
    imagemap=imagemap+[pred_image]
# Persist both the hard class map and the per-class probabilities.
adict={}
adict['imageprob']=imageprob
adict['imagemap']=imagemap
sio.savemat('/home/amax/Documents/GCR/RNPRF-RNDFF-RNPMF/records/Houston/map/3FF_map.mat',adict)
| [
"numpy.prod",
"keras.models.load_model",
"scipy.io.savemat",
"Utils.zeroPadding.zeroPadding_3D",
"tensorflow.Session",
"scipy.io.loadmat",
"matplotlib.pyplot.Axes",
"h5py.File",
"numpy.max",
"Utils.ssrn_SS_Houston_3FF_F1.ResnetBuilder.build_resnet_2_2",
"matplotlib.pyplot.figure",
"numpy.zeros... | [((151, 167), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (165, 167), True, 'import tensorflow as tf\n'), ((211, 236), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (221, 236), True, 'import tensorflow as tf\n'), ((3432, 3519), 'scipy.io.loadmat', 'sio.loadmat', (['"""/home/amax/Documents/GCR/RNPRF-RNDFF-RNPMF/datasets/Houston/HSI.mat"""'], {}), "(\n '/home/amax/Documents/GCR/RNPRF-RNDFF-RNPMF/datasets/Houston/HSI.mat')\n", (3443, 3519), True, 'import scipy.io as sio\n'), ((3562, 3661), 'scipy.io.loadmat', 'sio.loadmat', (['"""/home/amax/Documents/GCR/RNPRF-RNDFF-RNPMF/datasets/Houston/Houston_gt.mat"""'], {}), "(\n '/home/amax/Documents/GCR/RNPRF-RNDFF-RNPMF/datasets/Houston/Houston_gt.mat'\n )\n", (3573, 3661), True, 'import scipy.io as sio\n'), ((3795, 3896), 'h5py.File', 'h5py.File', (['"""/home/amax/Documents/GCR/RNPRF-RNDFF-RNPMF/datasets/Houston/HSI_EPLBP.mat"""', '"""r"""'], {}), "(\n '/home/amax/Documents/GCR/RNPRF-RNDFF-RNPMF/datasets/Houston/HSI_EPLBP.mat'\n , 'r')\n", (3804, 3896), False, 'import h5py\n'), ((3995, 4094), 'scipy.io.loadmat', 'sio.loadmat', (['"""/home/amax/Documents/GCR/RNPRF-RNDFF-RNPMF/datasets/Houston/Houston_gt.mat"""'], {}), "(\n '/home/amax/Documents/GCR/RNPRF-RNDFF-RNPMF/datasets/Houston/Houston_gt.mat'\n )\n", (4006, 4094), True, 'import scipy.io as sio\n'), ((4232, 4336), 'scipy.io.loadmat', 'sio.loadmat', (['"""/home/amax/Documents/GCR/RNPRF-RNDFF-RNPMF/datasets/Houston/LiDAR_DSM_EPLBP.mat"""'], {}), "(\n '/home/amax/Documents/GCR/RNPRF-RNDFF-RNPMF/datasets/Houston/LiDAR_DSM_EPLBP.mat'\n )\n", (4243, 4336), True, 'import scipy.io as sio\n'), ((4393, 4492), 'scipy.io.loadmat', 'sio.loadmat', (['"""/home/amax/Documents/GCR/RNPRF-RNDFF-RNPMF/datasets/Houston/Houston_gt.mat"""'], {}), "(\n '/home/amax/Documents/GCR/RNPRF-RNDFF-RNPMF/datasets/Houston/Houston_gt.mat'\n )\n", (4404, 4492), True, 'import scipy.io as sio\n'), ((5213, 5238), 
'sklearn.preprocessing.scale', 'preprocessing.scale', (['data'], {}), '(data)\n', (5232, 5238), False, 'from sklearn import metrics, preprocessing\n'), ((5382, 5438), 'Utils.zeroPadding.zeroPadding_3D', 'zeroPadding.zeroPadding_3D', (['whole_data_HSI', 'PATCH_LENGTH'], {}), '(whole_data_HSI, PATCH_LENGTH)\n', (5408, 5438), False, 'from Utils import zeroPadding, normalization, doPCA, modelStatsRecord, averageAccuracy, ssrn_SS_Houston_3FF_F1\n'), ((5606, 5631), 'sklearn.preprocessing.scale', 'preprocessing.scale', (['data'], {}), '(data)\n', (5625, 5631), False, 'from sklearn import metrics, preprocessing\n'), ((5800, 5861), 'Utils.zeroPadding.zeroPadding_3D', 'zeroPadding.zeroPadding_3D', (['whole_data_HSIEPLBP', 'PATCH_LENGTH'], {}), '(whole_data_HSIEPLBP, PATCH_LENGTH)\n', (5826, 5861), False, 'from Utils import zeroPadding, normalization, doPCA, modelStatsRecord, averageAccuracy, ssrn_SS_Houston_3FF_F1\n'), ((6040, 6065), 'sklearn.preprocessing.scale', 'preprocessing.scale', (['data'], {}), '(data)\n', (6059, 6065), False, 'from sklearn import metrics, preprocessing\n'), ((6244, 6307), 'Utils.zeroPadding.zeroPadding_3D', 'zeroPadding.zeroPadding_3D', (['whole_data_LiDAREPLBP', 'PATCH_LENGTH'], {}), '(whole_data_LiDAREPLBP, PATCH_LENGTH)\n', (6270, 6307), False, 'from Utils import zeroPadding, normalization, doPCA, modelStatsRecord, averageAccuracy, ssrn_SS_Houston_3FF_F1\n'), ((6543, 6580), 'keras.models.load_model', 'load_model', (['best_weights_RES_path_ss4'], {}), '(best_weights_RES_path_ss4)\n', (6553, 6580), False, 'from keras.models import load_model\n'), ((8623, 8729), 'scipy.io.savemat', 'sio.savemat', (['"""/home/amax/Documents/GCR/RNPRF-RNDFF-RNPMF/records/Houston/map/3FF_map.mat"""', 'adict'], {}), "(\n '/home/amax/Documents/GCR/RNPRF-RNDFF-RNPMF/records/Houston/map/3FF_map.mat'\n , adict)\n", (8634, 8729), True, 'import scipy.io as sio\n'), ((789, 811), 'numpy.max', 'np.max', (['trainlabels[:]'], {}), '(trainlabels[:])\n', (795, 811), True, 'import 
numpy as np\n'), ((1894, 1926), 'numpy.random.shuffle', 'np.random.shuffle', (['train_indices'], {}), '(train_indices)\n', (1911, 1926), True, 'import numpy as np\n'), ((1931, 1962), 'numpy.random.shuffle', 'np.random.shuffle', (['test_indices'], {}), '(test_indices)\n', (1948, 1962), True, 'import numpy as np\n'), ((2708, 2733), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'frameon': '(False)'}), '(frameon=False)\n', (2718, 2733), True, 'import matplotlib.pyplot as plt\n'), ((2828, 2863), 'matplotlib.pyplot.Axes', 'plt.Axes', (['fig', '[0.0, 0.0, 1.0, 1.0]'], {}), '(fig, [0.0, 0.0, 1.0, 1.0])\n', (2836, 2863), True, 'import matplotlib.pyplot as plt\n'), ((3093, 3201), 'Utils.ssrn_SS_Houston_3FF_F1.ResnetBuilder.build_resnet_2_2', 'ssrn_SS_Houston_3FF_F1.ResnetBuilder.build_resnet_2_2', (['(1, img_rows, img_cols, img_channels)', 'nb_classes'], {}), '((1, img_rows,\n img_cols, img_channels), nb_classes)\n', (3146, 3201), False, 'from Utils import zeroPadding, normalization, doPCA, modelStatsRecord, averageAccuracy, ssrn_SS_Houston_3FF_F1\n'), ((3209, 3227), 'keras.optimizers.RMSprop', 'RMSprop', ([], {'lr': '(0.0003)'}), '(lr=0.0003)\n', (3216, 3227), False, 'from keras.optimizers import Adam, SGD, Adadelta, RMSprop, Nadam\n'), ((5123, 5158), 'numpy.prod', 'np.prod', (['data_Houston_HSI.shape[:2]'], {}), '(data_Houston_HSI.shape[:2])\n', (5130, 5158), True, 'import numpy as np\n'), ((5159, 5194), 'numpy.prod', 'np.prod', (['data_Houston_HSI.shape[2:]'], {}), '(data_Houston_HSI.shape[2:])\n', (5166, 5194), True, 'import numpy as np\n'), ((5503, 5543), 'numpy.prod', 'np.prod', (['data_Houston_HSIEPLBP.shape[:2]'], {}), '(data_Houston_HSIEPLBP.shape[:2])\n', (5510, 5543), True, 'import numpy as np\n'), ((5544, 5584), 'numpy.prod', 'np.prod', (['data_Houston_HSIEPLBP.shape[2:]'], {}), '(data_Houston_HSIEPLBP.shape[2:])\n', (5551, 5584), True, 'import numpy as np\n'), ((5933, 5975), 'numpy.prod', 'np.prod', (['data_Houston_LiDAREPLBP.shape[:2]'], {}), 
'(data_Houston_LiDAREPLBP.shape[:2])\n', (5940, 5975), True, 'import numpy as np\n'), ((5976, 6018), 'numpy.prod', 'np.prod', (['data_Houston_LiDAREPLBP.shape[2:]'], {}), '(data_Houston_LiDAREPLBP.shape[2:])\n', (5983, 6018), True, 'import numpy as np\n'), ((7152, 7238), 'numpy.zeros', 'np.zeros', (['(GROUP[i], 2 * PATCH_LENGTH + 1, 2 * PATCH_LENGTH + 1, img_channels_HSI)'], {}), '((GROUP[i], 2 * PATCH_LENGTH + 1, 2 * PATCH_LENGTH + 1,\n img_channels_HSI))\n', (7160, 7238), True, 'import numpy as np\n'), ((7254, 7345), 'numpy.zeros', 'np.zeros', (['(GROUP[i], 2 * PATCH_LENGTH + 1, 2 * PATCH_LENGTH + 1, img_channels_HSIEPLBP)'], {}), '((GROUP[i], 2 * PATCH_LENGTH + 1, 2 * PATCH_LENGTH + 1,\n img_channels_HSIEPLBP))\n', (7262, 7345), True, 'import numpy as np\n'), ((7363, 7456), 'numpy.zeros', 'np.zeros', (['(GROUP[i], 2 * PATCH_LENGTH + 1, 2 * PATCH_LENGTH + 1, img_channels_LiDAREPLBP)'], {}), '((GROUP[i], 2 * PATCH_LENGTH + 1, 2 * PATCH_LENGTH + 1,\n img_channels_LiDAREPLBP))\n', (7371, 7456), True, 'import numpy as np\n'), ((1511, 1537), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (1528, 1537), True, 'import numpy as np\n')] |
"""
This file to define Estimate various parameters
"""
import numpy as np
import pandas as pd
from scipy.spatial import distance
import matplotlib.pyplot as plt
import networkx as nx
from pyproj import Proj
from pyproj import Proj, transform
# trasforming latlong into mercetor coordinates
def tran(data):
    """Project lat/long coordinates (EPSG:4326) to world Mercator (EPSG:3395).

    Parameters
    ----------
    data : pandas.DataFrame
        Must carry ``x`` (longitude) and ``y`` (latitude) columns.

    Returns
    -------
    (numpy.ndarray, numpy.ndarray)
        Projected x and y coordinates, in row order of ``data``.

    Fixes: the two ``Proj`` objects are now built once outside the loop
    (they are loop-invariant and expensive to construct), and the dead
    ``utmxy = {}`` local was removed.
    """
    inProj = Proj(init='epsg:4326')
    outProj = Proj(init='epsg:3395')
    rx = []
    ry = []
    for index, row in data.iterrows():
        cx, cy = transform(inProj, outProj, row['x'], row['y'])
        rx.append(cx)
        ry.append(cy)
    return np.array(rx), np.array(ry)
# data1 and data2 should be node and link, respectively
def Length(data1, data2):
    """Euclidean length of each link, computed from its endpoint nodes.

    Parameters
    ----------
    data1 : pandas.DataFrame
        Node table with ``id``, ``x`` and ``y`` columns.
    data2 : pandas.DataFrame
        Link table with ``start_node`` and ``end_node`` columns referring
        to node ids.

    Returns
    -------
    numpy.ndarray
        One length per link, in row order of ``data2``.

    Fix: endpoint coordinates are now extracted as scalars.  The original
    passed length-1 *lists*, which makes the arguments to
    ``scipy.spatial.distance.euclidean`` 2-D — modern SciPy rejects
    non-1-D input with "Input vector should be 1-D."
    """
    dist = []
    for index, row in data2.iterrows():
        sp = data1[data1.id == row['start_node']]
        start_x, start_y = sp.x.iloc[0], sp.y.iloc[0]
        ep = data1[data1.id == row['end_node']]
        end_x, end_y = ep.x.iloc[0], ep.y.iloc[0]
        dist.append(distance.euclidean((start_x, start_y), (end_x, end_y)))
    return np.array(dist)
# data1 and data2 should be node and link, respectively
def C_Check(data):
    """Classify required maintenance per row: 2 = replace, 1 = repair, 0 = none.

    A row needs action only when ``dl`` > 0; an ``ind`` value of at least
    0.5 escalates the action from repair (1) to replacement (2).
    """
    def _action(row):
        # Conditions mirror the original exactly (including how NaN in
        # 'ind' falls through to 0).
        if (row['ind'] >= 0.5) & (row['dl'] > 0):
            return 2
        if (row['ind'] < 0.5) & (row['dl'] > 0):
            return 1
        return 0

    return np.array([_action(row) for _, row in data.iterrows()])
def C_Est(data):
    """Per-row maintenance cost estimate.

    MA == 1 (repair): unit repair cost times number of breaks (``nNoB``);
    MA == 2 (replace): full replacement cost; anything else costs 0.
    """
    costs = []
    for _, row in data.iterrows():
        if row['MA'] == 1:
            costs.append(row['repair_C'] * row['nNoB'])
        elif row['MA'] == 2:
            costs.append(row['replace_C'])
        else:
            costs.append(0)
    return np.array(costs)
def T_cost(data,pipe_cost=None):
    """Per-link repair and replacement cost from diameter lookup tables.

    For each link, the diameter class with the nearest tabulated diameter
    is chosen; repair cost is the per-unit table value, replacement cost
    is the table value times the link length (``link_m``).

    NOTE(review): ``pipe_cost`` is accepted but never used — when it is
    not None, ``repair_cost``/``replace_cost`` are never defined and the
    loop raises NameError.  Confirm the intended format of a custom cost
    table before wiring it in.  ``network_cost`` is also always 0, so the
    additions below are currently no-ops.

    Returns
    -------
    (numpy.ndarray, numpy.ndarray)
        Repair costs and replacement costs, in row order of ``data``.
    """
    nrpr = []
    nrpl = []
    network_cost = 0
    if pipe_cost is None:
        diameter = [4, 6, 8, 10, 12, 14, 16, 18, 20, 24, 28, 30, 32, 34, 36] # inch
        rpl = [600, 630, 675, 750, 825, 1200, 1950, 2400, 2700, 3450, 4350, 4650, 5250, 5700, 6300] # replace cost/m
        rpr = [400, 420, 450, 500, 550, 800, 1300, 1600, 1800, 2300, 2900, 3100, 3500, 3800, 4200] # repair cost
        # diameter = np.array(diameter)*0.0254 # m
        repair_cost = pd.Series(rpr,diameter)
        replace_cost = pd.Series(rpl,diameter)
    # Pipe construction cost
    for index, row in data.iterrows():
        dia = row['dia']
        length=row['link_m']
        # argmin over |table diameters - dia| picks the nearest size class.
        idxrpr = np.argmin([np.abs(repair_cost.index - dia)])
        idxrpl = np.argmin([np.abs(replace_cost.index - dia)])
        #print(link_name, pipe_cost.iloc[idx], link.length)
        repair_C = network_cost + repair_cost.iloc[idxrpr]
        replace_C = network_cost + replace_cost.iloc[idxrpl]*length
        nrpr.append(repair_C)
        nrpl.append(replace_C)
    nrpr = np.array(nrpr)
    nrpl = np.array(nrpl)
    return nrpr,nrpl
| [
"pandas.Series",
"numpy.abs",
"pyproj.transform",
"numpy.array",
"pyproj.Proj",
"scipy.spatial.distance.euclidean"
] | [((639, 651), 'numpy.array', 'np.array', (['rx'], {}), '(rx)\n', (647, 651), True, 'import numpy as np\n'), ((659, 671), 'numpy.array', 'np.array', (['ry'], {}), '(ry)\n', (667, 671), True, 'import numpy as np\n'), ((1112, 1126), 'numpy.array', 'np.array', (['dist'], {}), '(dist)\n', (1120, 1126), True, 'import numpy as np\n'), ((1466, 1481), 'numpy.array', 'np.array', (['maint'], {}), '(maint)\n', (1474, 1481), True, 'import numpy as np\n'), ((1765, 1779), 'numpy.array', 'np.array', (['cost'], {}), '(cost)\n', (1773, 1779), True, 'import numpy as np\n'), ((2922, 2936), 'numpy.array', 'np.array', (['nrpr'], {}), '(nrpr)\n', (2930, 2936), True, 'import numpy as np\n'), ((2948, 2962), 'numpy.array', 'np.array', (['nrpl'], {}), '(nrpl)\n', (2956, 2962), True, 'import numpy as np\n'), ((447, 469), 'pyproj.Proj', 'Proj', ([], {'init': '"""epsg:4326"""'}), "(init='epsg:4326')\n", (451, 469), False, 'from pyproj import Proj, transform\n'), ((488, 510), 'pyproj.Proj', 'Proj', ([], {'init': '"""epsg:3395"""'}), "(init='epsg:3395')\n", (492, 510), False, 'from pyproj import Proj, transform\n'), ((524, 556), 'pyproj.transform', 'transform', (['inProj', 'outProj', 'x', 'y'], {}), '(inProj, outProj, x, y)\n', (533, 556), False, 'from pyproj import Proj, transform\n'), ((1025, 1079), 'scipy.spatial.distance.euclidean', 'distance.euclidean', (['(start_x, start_y)', '(end_x, end_y)'], {}), '((start_x, start_y), (end_x, end_y))\n', (1043, 1079), False, 'from scipy.spatial import distance\n'), ((2328, 2352), 'pandas.Series', 'pd.Series', (['rpr', 'diameter'], {}), '(rpr, diameter)\n', (2337, 2352), True, 'import pandas as pd\n'), ((2383, 2407), 'pandas.Series', 'pd.Series', (['rpl', 'diameter'], {}), '(rpl, diameter)\n', (2392, 2407), True, 'import pandas as pd\n'), ((2566, 2597), 'numpy.abs', 'np.abs', (['(repair_cost.index - dia)'], {}), '(repair_cost.index - dia)\n', (2572, 2597), True, 'import numpy as np\n'), ((2628, 2660), 'numpy.abs', 'np.abs', (['(replace_cost.index - 
dia)'], {}), '(replace_cost.index - dia)\n', (2634, 2660), True, 'import numpy as np\n')] |
"""
Operative functions to be run into the SA_algorithm.
Creation of initial value, definition of 2-D movements and dedicated
domain boundaries conditions, optimization methods and stopping criteria
are listed below.
"""
import numpy as np
from numpy import random as rnd
#-------Neighbour generation----------#
def boltz_move(state, temp, interval):
    """Propose a Boltzmann-annealing move from ``state``.

    Both coordinates shift by sqrt(temp) in the same direction, chosen at
    random (forward with probability 0.5, backward otherwise); each
    coordinate is then confined to ``interval`` via ``clip``.

    Returns
    -------
    tuple
        The clipped (x, y) candidate state.
    """
    step = np.sqrt(temp)
    if rnd.random() >= 0.5:
        step = -step
    candidate_x = state[0] + step
    candidate_y = state[1] + step
    return (clip(candidate_x, interval, state[0]),
            clip(candidate_y, interval, state[1]))
def clip(x, interval, state):
    """Confine a proposed coordinate to the search domain.

    When ``x`` falls below the lower bound, a replacement is drawn
    uniformly between that bound and the previous position ``state``;
    symmetrically for the upper bound.  In-range values pass through
    unchanged.
    """
    lower, upper = interval
    if x < lower:
        return rnd.uniform(lower, state)
    if x > upper:
        return rnd.uniform(state, upper)
    return x
#-----------Acceptance function--------------#
def boltz_acceptance_prob(energy, new_energy, temperature):
    """Metropolis acceptance probability for Boltzmann annealing.

    Downhill moves (new energy strictly lower) are always accepted
    (probability 1); other moves are accepted with probability
    exp(-delta_e / T), which lies in ]0, 1].
    """
    delta_e = new_energy - energy
    return 1 if delta_e < 0 else np.exp(-delta_e / temperature)
#-----------Cooling Procedure---------------#
#Other cooling methods exist and can be found in references [1], [2]
def geom_cooling(temp, alpha = 0.95):
    """Geometric cooling schedule: T(k+1) = alpha * T(k).

    Parameters
    ----------
    temp : float
        Current temperature.
    alpha : float
        Fixed geometric ratio (defaults to 0.95).
    """
    next_temp = temp * alpha
    return next_temp
#-----------Stopping Conditions------------#
def tolerance(energies, tolerance, tolerance_iter):
    """Stopping criterion based on recent energy changes.

    Returns True once the mean absolute change over the last
    ``tolerance_iter`` steps drops below ``tolerance``; always False
    until more than ``tolerance_iter`` energies have been recorded.
    """
    if len(energies) <= tolerance_iter:
        return False
    return avg_last_k_value(energies, tolerance_iter) < tolerance
def objective_limit(energy, limit):
    """Stopping criterion: True once the current energy reaches ``limit`` or below."""
    return energy <= limit
def avg_last_k_value(energies, k):
    """Mean absolute difference over the last ``k`` consecutive pairs of ``energies``.

    Fix: the original indexed with ``range(L-1, L-(k+1), -1)``, which goes
    negative when ``k >= len(energies)`` and wraps around to the start of
    the list, mixing first and last elements into the average.  Slicing
    the tail instead uses at most the ``len(energies) - 1`` real pairs.

    Parameters
    ----------
    energies : sequence of float
        Energy history, oldest first.
    k : int
        Number of most recent differences to average.
    """
    tail = energies[-(k + 1):]
    diffs = [abs(b - a) for a, b in zip(tail, tail[1:])]
    return np.mean(diffs)
| [
"numpy.mean",
"numpy.sqrt",
"numpy.random.random",
"numpy.exp",
"numpy.random.uniform"
] | [((1106, 1118), 'numpy.random.random', 'rnd.random', ([], {}), '()\n', (1116, 1118), True, 'from numpy import random as rnd\n'), ((4595, 4608), 'numpy.mean', 'np.mean', (['diff'], {}), '(diff)\n', (4602, 4608), True, 'import numpy as np\n'), ((2132, 2153), 'numpy.random.uniform', 'rnd.uniform', (['a', 'state'], {}), '(a, state)\n', (2143, 2153), True, 'from numpy import random as rnd\n'), ((2188, 2209), 'numpy.random.uniform', 'rnd.uniform', (['state', 'b'], {}), '(state, b)\n', (2199, 2209), True, 'from numpy import random as rnd\n'), ((3103, 3133), 'numpy.exp', 'np.exp', (['(-delta_e / temperature)'], {}), '(-delta_e / temperature)\n', (3109, 3133), True, 'import numpy as np\n'), ((1170, 1183), 'numpy.sqrt', 'np.sqrt', (['temp'], {}), '(temp)\n', (1177, 1183), True, 'import numpy as np\n'), ((1218, 1231), 'numpy.sqrt', 'np.sqrt', (['temp'], {}), '(temp)\n', (1225, 1231), True, 'import numpy as np\n'), ((1390, 1403), 'numpy.sqrt', 'np.sqrt', (['temp'], {}), '(temp)\n', (1397, 1403), True, 'import numpy as np\n'), ((1438, 1451), 'numpy.sqrt', 'np.sqrt', (['temp'], {}), '(temp)\n', (1445, 1451), True, 'import numpy as np\n')] |
import os
import sys
from typing import List, Tuple
import kaggle
import zipfile
import cv2
from matplotlib import pyplot as plt
import numpy as np
from pandas import DataFrame
from torch.tensor import Tensor
from torchvision import transforms
# from cn.protect import Protect
# from cn.protect.privacy import KAnonymity
from logger import logPrint
from datasetLoaders.DatasetLoader import DatasetLoader
from datasetLoaders.DatasetInterface import DatasetInterface
import cv2
import os
###############
######## GO HERE: https://www.kaggle.com/madz2000/pneumonia-detection-using-cnn-92-6-accuracy
###############
class DatasetLoaderPneumonia(DatasetLoader):
    """Loader for the Kaggle chest-xray pneumonia dataset.

    Downloads and unzips the dataset on first use, reads the train/test
    image folders into DataFrames of (grayscale array, label) rows, and
    splits the train set across federated clients.
    """
    def __init__(self, dim=(224, 224), assembleDatasets=True):
        """Record dataset paths/labels and trigger the download if data is missing."""
        self.assembleDatasets = assembleDatasets
        self.dim = dim
        self.dataPath = "./data/Pneumonia"
        self.fullPath = self.dataPath + "/chest_xray"
        self.testPath = self.fullPath + "/test"
        self.trainPath = self.fullPath + "/train"
        # Folder-name -> integer label used when scanning the image dirs.
        self.labels = {"PNEUMONIA": 0, "NORMAL": 1}
        # Every image is resized to img_size x img_size (grayscale).
        self.img_size = 150
        self.__download_data()
    def getDatasets(self, percUsers, labels, size=None):
        """Load train/test data, filter by ``labels``, and split train across clients.

        Returns a (clientDatasets, testDataset) pair; ``size`` is accepted
        for interface compatibility but not used here.
        """
        logPrint("Loading Pneumonia Dataset...")
        self._setRandomSeeds()
        data = self.__loadPneumoniaData()
        trainDataframe, testDataframe = self._filterDataByLabel(labels, *data)
        logPrint("Splitting datasets over clients...")
        clientDatasets = self._splitTrainDataIntoClientDatasets(
            percUsers, trainDataframe, self.PneumoniaDataset
        )
        testDataset = self.PneumoniaDataset(testDataframe, isTestDataset=True)
        return clientDatasets, testDataset
    def __loadPneumoniaData(self) -> Tuple[DataFrame, DataFrame]:
        """Read the train and test image folders into two DataFrames.

        Exits the process when the on-disk layout is missing.
        """
        if self.__datasetNotFound():
            logPrint(
                "Can't find train|test split files or "
                "/train, /test files not populated accordingly."
            )
            sys.exit(0)
        logPrint("Loading training images from files...")
        trainDataframe = self.__readDataframe(self.trainPath)
        logPrint("Loading testing images from files...")
        testDataframe = self.__readDataframe(self.testPath)
        logPrint("Finished loading files")
        return trainDataframe, testDataframe
    def get_img_data(self, data_dir) -> np.ndarray:
        """Load every image under data_dir/<label> as a resized grayscale array.

        Returns an object array of [image_array, class_num] pairs.
        Unreadable files are reported and skipped rather than aborting the scan.
        """
        data = []
        for label, class_num in self.labels.items():
            path = os.path.join(data_dir, label)
            for img in os.listdir(path):
                try:
                    img_arr = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)
                    resized_arr = cv2.resize(
                        img_arr, (self.img_size, self.img_size)
                    ) # Reshaping images to preferred size
                    data.append([resized_arr, class_num])
                except Exception as e:
                    print(e)
        return np.array(data, dtype=object)
    def __datasetNotFound(self) -> bool:
        """True when the chest_xray directory or its train/test subdirs are absent."""
        return (
            not os.path.exists(self.fullPath)
            or not os.path.exists(self.fullPath + "/test")
            or not os.path.exists(self.fullPath + "/train")
        )
        # Might also want to check the number of files or subfolders
    def __readDataframe(self, path: str) -> DataFrame:
        """Wrap the images under ``path`` in a DataFrame with img/labels columns."""
        img = self.get_img_data(path)
        dataFrame = DataFrame(img, columns=["img", "labels"])
        return dataFrame
    def __download_data(self) -> None:
        """Download and extract the Kaggle dataset when it is not on disk.

        NOTE(review): the bare ``except`` below swallows every failure
        (including KeyboardInterrupt) and exits; narrowing it to
        ``Exception`` would be safer — left unchanged here.
        """
        if self.__datasetNotFound():
            try:
                logPrint("Need to download the KAGGLE Pneumonia Detection Dataset")
                os.makedirs(self.dataPath)
                # Get json file from kaggle account or just manually download
                kaggle.api.authenticate()
                kaggle.api.dataset_download_files(
                    "paultimothymooney/chest-xray-pneumonia", self.dataPath
                )
                with zipfile.ZipFile(self.dataPath + "/chest-xray-pneumonia.zip", "r") as zip_ref:
                    zip_ref.extractall(self.dataPath)
            except:
                logPrint("Failed to get files.")
                logPrint(
                    "You need to unzip (https://www.kaggle.com/c/rsna-pneumonia-detection-challenge) dataset to {}."
                    "".format(self.dataPath)
                )
                exit(0)
    class PneumoniaDataset(DatasetInterface):
        """Torch-style dataset over one split's (img, labels) DataFrame."""
        def __init__(self, dataframe: DataFrame, isTestDataset=False):
            self.root = "./data/Pneumonia/" + ("test/" if isTestDataset else "train/")
            self.imgs: List[np.ndarray] = dataframe["img"]
            super().__init__(dataframe["labels"].values.tolist())
        def __getitem__(self, index: int) -> Tuple[np.ndarray, Tensor]:
            """Return the (augmented image tensor, label) pair at ``index``."""
            imageTensor = self.__load_image(self.imgs[index])
            labelTensor = self.labels[index]
            return imageTensor, labelTensor
        @staticmethod
        def __load_image(image: np.ndarray) -> np.ndarray:
            """Convert an image array to a tensor with random rotation/flip augmentation."""
            transform = transforms.Compose(
                [
                    transforms.ToTensor(),
                    transforms.RandomRotation(30),
                    transforms.RandomHorizontalFlip(),
                    # transforms.Normalize(mean=[0.5], std=[0.5]),
                ]
            )
            imageTensor = transform(image)
            return imageTensor
| [
"os.path.exists",
"os.listdir",
"os.makedirs",
"zipfile.ZipFile",
"torchvision.transforms.RandomRotation",
"kaggle.api.authenticate",
"logger.logPrint",
"kaggle.api.dataset_download_files",
"os.path.join",
"torchvision.transforms.RandomHorizontalFlip",
"numpy.array",
"sys.exit",
"pandas.Data... | [((1172, 1212), 'logger.logPrint', 'logPrint', (['"""Loading Pneumonia Dataset..."""'], {}), "('Loading Pneumonia Dataset...')\n", (1180, 1212), False, 'from logger import logPrint\n'), ((1373, 1419), 'logger.logPrint', 'logPrint', (['"""Splitting datasets over clients..."""'], {}), "('Splitting datasets over clients...')\n", (1381, 1419), False, 'from logger import logPrint\n'), ((1972, 2021), 'logger.logPrint', 'logPrint', (['"""Loading training images from files..."""'], {}), "('Loading training images from files...')\n", (1980, 2021), False, 'from logger import logPrint\n'), ((2092, 2140), 'logger.logPrint', 'logPrint', (['"""Loading testing images from files..."""'], {}), "('Loading testing images from files...')\n", (2100, 2140), False, 'from logger import logPrint\n'), ((2209, 2243), 'logger.logPrint', 'logPrint', (['"""Finished loading files"""'], {}), "('Finished loading files')\n", (2217, 2243), False, 'from logger import logPrint\n'), ((2924, 2952), 'numpy.array', 'np.array', (['data'], {'dtype': 'object'}), '(data, dtype=object)\n', (2932, 2952), True, 'import numpy as np\n'), ((3370, 3411), 'pandas.DataFrame', 'DataFrame', (['img'], {'columns': "['img', 'labels']"}), "(img, columns=['img', 'labels'])\n", (3379, 3411), False, 'from pandas import DataFrame\n'), ((1794, 1899), 'logger.logPrint', 'logPrint', (['"""Can\'t find train|test split files or /train, /test files not populated accordingly."""'], {}), '(\n "Can\'t find train|test split files or /train, /test files not populated accordingly."\n )\n', (1802, 1899), False, 'from logger import logPrint\n'), ((1951, 1962), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1959, 1962), False, 'import sys\n'), ((2433, 2462), 'os.path.join', 'os.path.join', (['data_dir', 'label'], {}), '(data_dir, label)\n', (2445, 2462), False, 'import os\n'), ((2486, 2502), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (2496, 2502), False, 'import os\n'), ((3028, 3057), 'os.path.exists', 
'os.path.exists', (['self.fullPath'], {}), '(self.fullPath)\n', (3042, 3057), False, 'import os\n'), ((3077, 3116), 'os.path.exists', 'os.path.exists', (["(self.fullPath + '/test')"], {}), "(self.fullPath + '/test')\n", (3091, 3116), False, 'import os\n'), ((3136, 3176), 'os.path.exists', 'os.path.exists', (["(self.fullPath + '/train')"], {}), "(self.fullPath + '/train')\n", (3150, 3176), False, 'import os\n'), ((3548, 3615), 'logger.logPrint', 'logPrint', (['"""Need to download the KAGGLE Pneumonia Detection Dataset"""'], {}), "('Need to download the KAGGLE Pneumonia Detection Dataset')\n", (3556, 3615), False, 'from logger import logPrint\n'), ((3632, 3658), 'os.makedirs', 'os.makedirs', (['self.dataPath'], {}), '(self.dataPath)\n', (3643, 3658), False, 'import os\n'), ((3754, 3779), 'kaggle.api.authenticate', 'kaggle.api.authenticate', ([], {}), '()\n', (3777, 3779), False, 'import kaggle\n'), ((3796, 3890), 'kaggle.api.dataset_download_files', 'kaggle.api.dataset_download_files', (['"""paultimothymooney/chest-xray-pneumonia"""', 'self.dataPath'], {}), "('paultimothymooney/chest-xray-pneumonia',\n self.dataPath)\n", (3829, 3890), False, 'import kaggle\n'), ((2647, 2698), 'cv2.resize', 'cv2.resize', (['img_arr', '(self.img_size, self.img_size)'], {}), '(img_arr, (self.img_size, self.img_size))\n', (2657, 2698), False, 'import cv2\n'), ((3946, 4011), 'zipfile.ZipFile', 'zipfile.ZipFile', (["(self.dataPath + '/chest-xray-pneumonia.zip')", '"""r"""'], {}), "(self.dataPath + '/chest-xray-pneumonia.zip', 'r')\n", (3961, 4011), False, 'import zipfile\n'), ((4114, 4146), 'logger.logPrint', 'logPrint', (['"""Failed to get files."""'], {}), "('Failed to get files.')\n", (4122, 4146), False, 'from logger import logPrint\n'), ((5095, 5116), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (5114, 5116), False, 'from torchvision import transforms\n'), ((5138, 5167), 'torchvision.transforms.RandomRotation', 'transforms.RandomRotation', (['(30)'], 
{}), '(30)\n', (5163, 5167), False, 'from torchvision import transforms\n'), ((5189, 5222), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (5220, 5222), False, 'from torchvision import transforms\n'), ((2566, 2589), 'os.path.join', 'os.path.join', (['path', 'img'], {}), '(path, img)\n', (2578, 2589), False, 'import os\n')] |
#!/usr/bin/env python
"""Tests for `rawtools` package."""
import numpy as np
import pytest
from numpy import uint8, uint16
from rawtools import rawtools
DIMS = (4, 5)
@pytest.fixture
def slice_uint8():
"""Sample uint8 slice"""
return np.rint(np.arange(0, 20, dtype=uint8).reshape(DIMS))
@pytest.fixture
def slice_uint16():
"""Sample uint16 slice"""
return np.rint(np.arange(0, 20, dtype=uint16).reshape(DIMS))
@pytest.fixture
def slice_uint16_high_variance():
"""Sample uint16 slice with variable values"""
return np.array([-1, 0, 100, 1000, 5000, 14830, 50321, 65535, 65536], dtype=uint16)
def test_scale_uint8(slice_uint8):
"""Test scaling a unsigned 8-bit integer array to own bounds."""
from rawtools.convert import scale
xs = np.arange(0, 20, dtype=uint8).reshape(DIMS)
lbound = np.iinfo(uint8).min
ubound = np.iinfo(uint8).max
scaled_slice = scale(xs, lbound, ubound, lbound, ubound)
np.testing.assert_array_equal(scaled_slice, slice_uint8)
def test_scale_uint16_to_uint8(slice_uint16):
"""Test scaling an unsigned 16-bit integer array to an unsigned 8-bit array's bounds."""
from rawtools.convert import scale
lbound = np.iinfo(uint16).min
ubound = np.iinfo(uint16).max
new_lbound = np.iinfo(uint8).min
new_ubound = np.iinfo(uint8).max
slice_uint8 = np.zeros(DIMS, dtype=uint8)
scaled_slice = np.rint(
scale(slice_uint16, lbound, ubound, new_lbound, new_ubound))
np.testing.assert_array_equal(scaled_slice, slice_uint8)
def test_scale_uint16_to_uint8_large_variance(slice_uint16_high_variance):
"""Test scaling an unsigned 16-bit integer array with high variance to an unsigned 8-bit array's bounds."""
from rawtools.convert import scale
lbound = np.iinfo(uint16).min
ubound = np.iinfo(uint16).max
new_lbound = np.iinfo(uint8).min
new_ubound = np.iinfo(uint8).max
# Mapped values should wrap as they exceed target bit depth
slice_uint8 = np.array([255, 0, 0, 4, 19, 58, 196, 255, 0], dtype=uint8)
scaled_slice = np.rint(
scale(slice_uint16_high_variance, lbound, ubound, new_lbound, new_ubound))
np.testing.assert_array_equal(scaled_slice, slice_uint8)
| [
"rawtools.convert.scale",
"numpy.arange",
"numpy.iinfo",
"numpy.array",
"numpy.zeros",
"numpy.testing.assert_array_equal"
] | [((547, 623), 'numpy.array', 'np.array', (['[-1, 0, 100, 1000, 5000, 14830, 50321, 65535, 65536]'], {'dtype': 'uint16'}), '([-1, 0, 100, 1000, 5000, 14830, 50321, 65535, 65536], dtype=uint16)\n', (555, 623), True, 'import numpy as np\n'), ((907, 948), 'rawtools.convert.scale', 'scale', (['xs', 'lbound', 'ubound', 'lbound', 'ubound'], {}), '(xs, lbound, ubound, lbound, ubound)\n', (912, 948), False, 'from rawtools.convert import scale\n'), ((953, 1009), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['scaled_slice', 'slice_uint8'], {}), '(scaled_slice, slice_uint8)\n', (982, 1009), True, 'import numpy as np\n'), ((1350, 1377), 'numpy.zeros', 'np.zeros', (['DIMS'], {'dtype': 'uint8'}), '(DIMS, dtype=uint8)\n', (1358, 1377), True, 'import numpy as np\n'), ((1480, 1536), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['scaled_slice', 'slice_uint8'], {}), '(scaled_slice, slice_uint8)\n', (1509, 1536), True, 'import numpy as np\n'), ((1989, 2047), 'numpy.array', 'np.array', (['[255, 0, 0, 4, 19, 58, 196, 255, 0]'], {'dtype': 'uint8'}), '([255, 0, 0, 4, 19, 58, 196, 255, 0], dtype=uint8)\n', (1997, 2047), True, 'import numpy as np\n'), ((2164, 2220), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['scaled_slice', 'slice_uint8'], {}), '(scaled_slice, slice_uint8)\n', (2193, 2220), True, 'import numpy as np\n'), ((835, 850), 'numpy.iinfo', 'np.iinfo', (['uint8'], {}), '(uint8)\n', (843, 850), True, 'import numpy as np\n'), ((868, 883), 'numpy.iinfo', 'np.iinfo', (['uint8'], {}), '(uint8)\n', (876, 883), True, 'import numpy as np\n'), ((1203, 1219), 'numpy.iinfo', 'np.iinfo', (['uint16'], {}), '(uint16)\n', (1211, 1219), True, 'import numpy as np\n'), ((1237, 1253), 'numpy.iinfo', 'np.iinfo', (['uint16'], {}), '(uint16)\n', (1245, 1253), True, 'import numpy as np\n'), ((1275, 1290), 'numpy.iinfo', 'np.iinfo', (['uint8'], {}), '(uint8)\n', (1283, 1290), True, 'import numpy as np\n'), ((1312, 1327), 
'numpy.iinfo', 'np.iinfo', (['uint8'], {}), '(uint8)\n', (1320, 1327), True, 'import numpy as np\n'), ((1414, 1473), 'rawtools.convert.scale', 'scale', (['slice_uint16', 'lbound', 'ubound', 'new_lbound', 'new_ubound'], {}), '(slice_uint16, lbound, ubound, new_lbound, new_ubound)\n', (1419, 1473), False, 'from rawtools.convert import scale\n'), ((1778, 1794), 'numpy.iinfo', 'np.iinfo', (['uint16'], {}), '(uint16)\n', (1786, 1794), True, 'import numpy as np\n'), ((1812, 1828), 'numpy.iinfo', 'np.iinfo', (['uint16'], {}), '(uint16)\n', (1820, 1828), True, 'import numpy as np\n'), ((1850, 1865), 'numpy.iinfo', 'np.iinfo', (['uint8'], {}), '(uint8)\n', (1858, 1865), True, 'import numpy as np\n'), ((1887, 1902), 'numpy.iinfo', 'np.iinfo', (['uint8'], {}), '(uint8)\n', (1895, 1902), True, 'import numpy as np\n'), ((2084, 2157), 'rawtools.convert.scale', 'scale', (['slice_uint16_high_variance', 'lbound', 'ubound', 'new_lbound', 'new_ubound'], {}), '(slice_uint16_high_variance, lbound, ubound, new_lbound, new_ubound)\n', (2089, 2157), False, 'from rawtools.convert import scale\n'), ((778, 807), 'numpy.arange', 'np.arange', (['(0)', '(20)'], {'dtype': 'uint8'}), '(0, 20, dtype=uint8)\n', (787, 807), True, 'import numpy as np\n'), ((255, 284), 'numpy.arange', 'np.arange', (['(0)', '(20)'], {'dtype': 'uint8'}), '(0, 20, dtype=uint8)\n', (264, 284), True, 'import numpy as np\n'), ((387, 417), 'numpy.arange', 'np.arange', (['(0)', '(20)'], {'dtype': 'uint16'}), '(0, 20, dtype=uint16)\n', (396, 417), True, 'import numpy as np\n')] |
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME> and <NAME>
#
# Modified by <NAME>
# --------------------------------------------------------
import numpy as np
def generate_anchors(stride, base_size, ratios, scales, feat_size):
"""
Pre-generate all anchors
:param stride:
:param base_size:
:param ratios:
:param scales:
:param feat_size:
:return:
"""
anchor_bases = generate_anchor_bases(base_size, ratios, scales) # gluon style
anchors = shift_anchor_bases(anchor_bases, stride, feat_size)
return anchors
def generate_anchor_bases(base_size, ratios, scales):
"""
Generate all anchors bases
:param base_size:
:param ratios:
:param scales:
:return:
"""
# generate same shapes on every location
px, py = (base_size - 1) * 0.5, (base_size - 1) * 0.5
anchor_bases = []
for r in ratios:
for s in scales:
size = base_size * base_size / r
ws = np.round(np.sqrt(size))
w = (ws * s - 1) * 0.5
h = (np.round(ws * r) * s - 1) * 0.5
anchor_bases.append([px - w, py - h, px + w, py + h])
anchor_bases = np.array(anchor_bases) # (N, 4)
return anchor_bases
def shift_anchor_bases(anchor_bases,stride, feat_size):
"""
Shift anchor bases with the strides across the feat size
:param anchor_bases:
:param stride:
:param feat_size:
:return:
"""
# propagete to all locations by shifting offsets
height, width = feat_size
offset_x = np.arange(0, width * stride, stride)
offset_y = np.arange(0, height * stride, stride)
offset_x, offset_y = np.meshgrid(offset_x, offset_y)
offsets = np.stack((offset_x.ravel(), offset_y.ravel(),
offset_x.ravel(), offset_y.ravel()), axis=1)
# broadcast_add (1, N, 4) + (M, 1, 4)
anchors = (anchor_bases.reshape((1, -1, 4)) + offsets.reshape((-1, 1, 4)))
anchors = anchors.reshape((1, anchors.shape[0]*anchors.shape[1], -1)).astype(np.float32)
return anchors
| [
"numpy.sqrt",
"numpy.arange",
"numpy.array",
"numpy.meshgrid",
"numpy.round"
] | [((1300, 1322), 'numpy.array', 'np.array', (['anchor_bases'], {}), '(anchor_bases)\n', (1308, 1322), True, 'import numpy as np\n'), ((1671, 1707), 'numpy.arange', 'np.arange', (['(0)', '(width * stride)', 'stride'], {}), '(0, width * stride, stride)\n', (1680, 1707), True, 'import numpy as np\n'), ((1723, 1760), 'numpy.arange', 'np.arange', (['(0)', '(height * stride)', 'stride'], {}), '(0, height * stride, stride)\n', (1732, 1760), True, 'import numpy as np\n'), ((1786, 1817), 'numpy.meshgrid', 'np.meshgrid', (['offset_x', 'offset_y'], {}), '(offset_x, offset_y)\n', (1797, 1817), True, 'import numpy as np\n'), ((1116, 1129), 'numpy.sqrt', 'np.sqrt', (['size'], {}), '(size)\n', (1123, 1129), True, 'import numpy as np\n'), ((1183, 1199), 'numpy.round', 'np.round', (['(ws * r)'], {}), '(ws * r)\n', (1191, 1199), True, 'import numpy as np\n')] |
import os
import logging
import math
from functools import reduce
from collections import defaultdict
import json
from timeit import default_timer
from tqdm import trange, tqdm
import numpy as np
import torch
import sklearn.metrics
import sklearn.svm as svm
import multiprocessing
import time
def generator(mus, mus_test, ys, ys_test, num_latents, num_factors):
for i in range(num_latents):
for j in range(num_factors):
mu_i = mus[i, :]
y_j = ys[j, :]
mu_i_test = mus_test[i, :]
y_j_test = ys_test[j, :]
yield mu_i, mu_i_test, y_j, y_j_test
def process_latent(arg):
mu_i, mu_i_test, y_j, y_j_test = arg
# Attribute is considered discrete.
classifier = svm.LinearSVC(C=0.01, class_weight="balanced")
classifier.fit(mu_i[:, np.newaxis], y_j)
pred = classifier.predict(mu_i_test[:, np.newaxis])
return np.mean(pred == y_j_test)
def compute_mig(mus_train, ys_train):
from lib.disentanglement_lib.disentanglement_lib.evaluation import evaluate
import lib.disentanglement_lib.disentanglement_lib.evaluation.metrics as metrics
import gin
gin_bindings = [
# "evaluation.evaluation_fn = @mig",
"dataset.name='auto'",
"evaluation.random_seed = 0",
"mig.num_train=1000",
"discretizer.discretizer_fn = @histogram_discretizer",
"discretizer.num_bins = 20"
]
gin.parse_config_files_and_bindings([], gin_bindings)
return metrics.mig._compute_mig(mus_train, ys_train)
def compute_sap(mus, ys):
var = np.var(mus, 0)
limit = np.quantile(var, 0.05)
mus = mus[:, var > limit]
mus = mus[:, :, None].repeat(ys.shape[1], 2)
ys = ys[:, None, :].repeat(mus.shape[1], 1)
c1 = (mus * (1 - ys)).sum(0, keepdims=True) / \
(1 - ys).sum(0, keepdims=True)
c2 = (mus * ys).mean(0, keepdims=True) / ys.sum(0, keepdims=True)
d1 = np.abs(mus - c1)
d2 = np.abs(mus - c2)
score = ((d1 > d2).astype(float) == ys).astype(float).mean(0)
ret = 0
for factor in range(mus.shape[2]):
sscore = np.sort(score[:, factor])
try:
ret += sscore[-1] - sscore[-2]
except:
return -1
return ret / mus.shape[2]
def _compute_sap(mus, ys, continuous_factors=False):
"""Computes score based on both training and testing codes and factors."""
mus = mus.transpose()
ys = ys[:, None].transpose()
mus_test = mus[:, -5000:]
ys_test = ys[:, -5000:]
mus = mus[:, :10000]
ys = ys[:, :10000]
score_matrix = compute_score_matrix(mus, ys, mus_test,
ys_test, continuous_factors)
# Score matrix should have shape [num_latents, num_factors].
assert score_matrix.shape[0] == mus.shape[0]
assert score_matrix.shape[1] == ys.shape[0]
return compute_avg_diff_top_two(score_matrix)
def compute_score_matrix(mus, ys, mus_test, ys_test, continuous_factors):
"""Compute score matrix as described in Section 3."""
num_latents = mus.shape[0]
num_factors = ys.shape[0]
if False:
score_matrix = np.zeros([num_latents, num_factors])
for i in range(num_latents):
for j in range(num_factors):
mu_i = mus[i, :]
y_j = ys[j, :]
if continuous_factors:
# Attribute is considered continuous.
cov_mu_i_y_j = np.cov(mu_i, y_j, ddof=1)
cov_mu_y = cov_mu_i_y_j[0, 1]**2
var_mu = cov_mu_i_y_j[0, 0]
var_y = cov_mu_i_y_j[1, 1]
if var_mu > 1e-12:
score_matrix[i, j] = cov_mu_y * 1. / (var_mu * var_y)
else:
score_matrix[i, j] = 0.
else:
# Attribute is considered discrete.
mu_i_test = mus_test[i, :]
y_j_test = ys_test[j, :]
classifier = svm.LinearSVC(C=0.01, class_weight="balanced")
classifier.fit(mu_i[:, np.newaxis], y_j)
pred = classifier.predict(mu_i_test[:, np.newaxis])
score_matrix[i, j] = np.mean(pred == y_j_test)
else:
with multiprocessing.Pool(16) as pool:
score_matrix = np.array(pool.map(process_latent, generator(mus, mus_test, ys, ys_test, num_latents, num_factors)))
return score_matrix.reshape((num_latents, num_factors))
def compute_avg_diff_top_two(matrix):
sorted_matrix = np.sort(matrix, axis=0)
return np.mean(sorted_matrix[-1, :] - sorted_matrix[-2, :])
| [
"numpy.mean",
"numpy.abs",
"numpy.sort",
"sklearn.svm.LinearSVC",
"gin.parse_config_files_and_bindings",
"numpy.quantile",
"numpy.zeros",
"multiprocessing.Pool",
"numpy.cov",
"lib.disentanglement_lib.disentanglement_lib.evaluation.metrics.mig._compute_mig",
"numpy.var"
] | [((738, 784), 'sklearn.svm.LinearSVC', 'svm.LinearSVC', ([], {'C': '(0.01)', 'class_weight': '"""balanced"""'}), "(C=0.01, class_weight='balanced')\n", (751, 784), True, 'import sklearn.svm as svm\n'), ((897, 922), 'numpy.mean', 'np.mean', (['(pred == y_j_test)'], {}), '(pred == y_j_test)\n', (904, 922), True, 'import numpy as np\n'), ((1416, 1469), 'gin.parse_config_files_and_bindings', 'gin.parse_config_files_and_bindings', (['[]', 'gin_bindings'], {}), '([], gin_bindings)\n', (1451, 1469), False, 'import gin\n'), ((1481, 1526), 'lib.disentanglement_lib.disentanglement_lib.evaluation.metrics.mig._compute_mig', 'metrics.mig._compute_mig', (['mus_train', 'ys_train'], {}), '(mus_train, ys_train)\n', (1505, 1526), True, 'import lib.disentanglement_lib.disentanglement_lib.evaluation.metrics as metrics\n'), ((1565, 1579), 'numpy.var', 'np.var', (['mus', '(0)'], {}), '(mus, 0)\n', (1571, 1579), True, 'import numpy as np\n'), ((1592, 1614), 'numpy.quantile', 'np.quantile', (['var', '(0.05)'], {}), '(var, 0.05)\n', (1603, 1614), True, 'import numpy as np\n'), ((1912, 1928), 'numpy.abs', 'np.abs', (['(mus - c1)'], {}), '(mus - c1)\n', (1918, 1928), True, 'import numpy as np\n'), ((1938, 1954), 'numpy.abs', 'np.abs', (['(mus - c2)'], {}), '(mus - c2)\n', (1944, 1954), True, 'import numpy as np\n'), ((4540, 4563), 'numpy.sort', 'np.sort', (['matrix'], {'axis': '(0)'}), '(matrix, axis=0)\n', (4547, 4563), True, 'import numpy as np\n'), ((4575, 4627), 'numpy.mean', 'np.mean', (['(sorted_matrix[-1, :] - sorted_matrix[-2, :])'], {}), '(sorted_matrix[-1, :] - sorted_matrix[-2, :])\n', (4582, 4627), True, 'import numpy as np\n'), ((2089, 2114), 'numpy.sort', 'np.sort', (['score[:, factor]'], {}), '(score[:, factor])\n', (2096, 2114), True, 'import numpy as np\n'), ((3110, 3146), 'numpy.zeros', 'np.zeros', (['[num_latents, num_factors]'], {}), '([num_latents, num_factors])\n', (3118, 3146), True, 'import numpy as np\n'), ((4259, 4283), 'multiprocessing.Pool', 
'multiprocessing.Pool', (['(16)'], {}), '(16)\n', (4279, 4283), False, 'import multiprocessing\n'), ((3421, 3446), 'numpy.cov', 'np.cov', (['mu_i', 'y_j'], {'ddof': '(1)'}), '(mu_i, y_j, ddof=1)\n', (3427, 3446), True, 'import numpy as np\n'), ((3989, 4035), 'sklearn.svm.LinearSVC', 'svm.LinearSVC', ([], {'C': '(0.01)', 'class_weight': '"""balanced"""'}), "(C=0.01, class_weight='balanced')\n", (4002, 4035), True, 'import sklearn.svm as svm\n'), ((4210, 4235), 'numpy.mean', 'np.mean', (['(pred == y_j_test)'], {}), '(pred == y_j_test)\n', (4217, 4235), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # S_ToeplitzMatrix [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=S_ToeplitzMatrix&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=EigToepStruct).
# ## Prepare the environment
# +
import os
import os.path as path
import sys
sys.path.append(path.abspath('../../functions-legacy'))
from numpy import ones, sort, argsort, diagflat, eye
from numpy.linalg import eig
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure, plot
plt.style.use('seaborn')
from ARPM_utils import save_plot
# Inputs
n_ = 200 # dimension of the matrix
rho = 0.9 # decay factor
# -
# ## Build Toeplitz matrix
t = eye(n_)
for n in range(n_ - 1):
t = t + rho ** n * (diagflat(ones((n_ - n, 1)), n) + diagflat(ones((n_ - n, 1)), -n))
# ## Perform spectral decomposition
Diag_lambda2, e = eig(t)
lambda2, index = sort(Diag_lambda2)[::-1], argsort(Diag_lambda2)[::-1]
e = e[:, index]
# ## Plot first eigenvectors
figure()
color = [[0, 0.4470, 0.7410], [0.8500, 0.3250, 0.0980],[0.9290, 0.6940, 0.1250]]
for n in range(3):
h = plot(e[:, n], color=color[n])
plt.grid(True);
# save_plot(ax=plt.gca(), extension='png', scriptname=os.path.basename('.')[:-3], count=plt.get_fignums()[-1])
| [
"numpy.eye",
"matplotlib.pyplot.grid",
"numpy.linalg.eig",
"numpy.ones",
"numpy.sort",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.style.use",
"numpy.argsort",
"matplotlib.pyplot.figure",
"os.path.abspath"
] | [((901, 925), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn"""'], {}), "('seaborn')\n", (914, 925), True, 'import matplotlib.pyplot as plt\n'), ((1069, 1076), 'numpy.eye', 'eye', (['n_'], {}), '(n_)\n', (1072, 1076), False, 'from numpy import ones, sort, argsort, diagflat, eye\n'), ((1247, 1253), 'numpy.linalg.eig', 'eig', (['t'], {}), '(t)\n', (1250, 1253), False, 'from numpy.linalg import eig\n'), ((1372, 1380), 'matplotlib.pyplot.figure', 'figure', ([], {}), '()\n', (1378, 1380), False, 'from matplotlib.pyplot import figure, plot\n'), ((1519, 1533), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (1527, 1533), True, 'import matplotlib.pyplot as plt\n'), ((701, 739), 'os.path.abspath', 'path.abspath', (['"""../../functions-legacy"""'], {}), "('../../functions-legacy')\n", (713, 739), True, 'import os.path as path\n'), ((1489, 1518), 'matplotlib.pyplot.plot', 'plot', (['e[:, n]'], {'color': 'color[n]'}), '(e[:, n], color=color[n])\n', (1493, 1518), False, 'from matplotlib.pyplot import figure, plot\n'), ((1271, 1289), 'numpy.sort', 'sort', (['Diag_lambda2'], {}), '(Diag_lambda2)\n', (1275, 1289), False, 'from numpy import ones, sort, argsort, diagflat, eye\n'), ((1297, 1318), 'numpy.argsort', 'argsort', (['Diag_lambda2'], {}), '(Diag_lambda2)\n', (1304, 1318), False, 'from numpy import ones, sort, argsort, diagflat, eye\n'), ((1134, 1151), 'numpy.ones', 'ones', (['(n_ - n, 1)'], {}), '((n_ - n, 1))\n', (1138, 1151), False, 'from numpy import ones, sort, argsort, diagflat, eye\n'), ((1167, 1184), 'numpy.ones', 'ones', (['(n_ - n, 1)'], {}), '((n_ - n, 1))\n', (1171, 1184), False, 'from numpy import ones, sort, argsort, diagflat, eye\n')] |
import cv2
import os
import numpy as np
from sys import argv
from lib import sauvola, linelocalization, pathfinder
from WordSegmentation import wordSegmentation, prepareImg
from time import time as timer
from SamplePreprocessor import preprocess
from DataLoader import Batch
from Model import Model
def draw_line(im, path):
for p in path:
im[p[0], p[1]] = 0
def draw_box(im, path, prev):
curr=(path[0][1], path[0][0])
cv2.rectangle(im, prev, curr,0,3)
def draw_map(im, mapy):
for m in mapy:
im[m[0], m[1]] = 255
def print_path(path):
print('\t# path: ' + str(path[::-1]))
def save(filename, imbw, immap):
imbw_filename = str.replace(filename, '.', '_bw.')
#imbw_filename = str.replace(imbw_filename, 'data', 'data/bw')
print('Saving image "' + imbw_filename + '"..\n')
cv2.imwrite(imbw_filename, imbw)
immap_filename = str.replace(imbw_filename, '_bw', '_map')
cv2.imwrite(immap_filename, immap)
def infer(model, fnImg):
"recognize text in image provided by file path"
img = preprocess(cv2.imread(fnImg, cv2.IMREAD_GRAYSCALE), Model.imgSize)
batch = Batch(None, [img])
(recognized, probability) = model.inferBatch(batch, True)
#print('Recognized:', '"' + recognized[0] + '"')
#print('Probability:', probability[0])
return (recognized[0], probability[0])
######################
# ------ MAIN ------ #
######################
def identify_words(filepath, filenames, model):
begin = timer()
out_dict={}
out_path='../data/out/'
print('############################')
print('#Line and Word Segmentation#')
print('############################')
for filename in filenames:
fullpath=os.path.join(filepath,filename)
f=filename.split('.')[0]
ext=filename.split('.')[1]
if ext=='pdf':
continue
out_dict[f] = []
print('Reading image "' + filename + '"..')
im = cv2.imread(fullpath, 0)
print('- Thresholding image..')
#imbw = sauvola.binarize(im, [20, 20], 128, 0.3)
pxmin = np.min(im)
pxmax = np.max(im)
im = (im - pxmin) / (pxmax - pxmin) * 255
im = im.astype('uint8')
#binarize
_ , imbw = cv2.threshold(im, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
#cv2.imshow('tempb', imbw)
# increase line width
kernel = np.ones((3, 3), np.uint8)
imbw = cv2.erode(imbw, kernel, iterations = 1)
print('- Localizing lines..')
lines = linelocalization.localize(imbw)
lines.append(imbw.shape[0])
print(' => ' + str(len(lines)) + ' lines detected.')
print('- Path planning with ')
immap = np.zeros((imbw.shape), dtype=np.int32)
# for i in range(0, 1):
prev=(0,0)
n_line=1
for line in lines:
# line = lines[i]
rline = imbw[int(prev[1]):int(line),:]
img = prepareImg(rline, 50)
# execute segmentation with given parameters
# -kernelSize: size of filter kernel (odd integer)
# -sigma: standard deviation of Gaussian function used for filter kernel
# -theta: approximated width/height ratio of words, filter function is distorted by this factor
# - minArea: ignore word candidates smaller than specified area
res = wordSegmentation(img, kernelSize=25, sigma=11, theta=7, minArea=100, increase_dim=10)
# write output to 'out/inputFileName' directory
if not os.path.exists(out_path+'%s'%f):
os.mkdir(out_path+'%s'%f)
# iterate over all segmented words
#print('Segmented into %d words'%len(res))
for (j, w) in enumerate(res):
(wordBox, wordImg) = w
(x, y, w, h) = wordBox
imgloc=out_path+'%s/%d.png'%(f, j)
# increase contrast
cv2.imwrite(imgloc, wordImg) # save word
#FilePaths.fnInfer = 'out/%s/%d.png'%(f,j)
try:
result, prob = infer(model, imgloc)
except:
print("Couldn't infer: image%d"%j)
result=""
#updating output dictionary
out_dict[f].append(result)
#deleting intermediate file
##os.remove(imgloc)
cv2.rectangle(img,(x,y),(x+w,y+h),0,1) # draw bounding box in summary image
# output summary image with bounding boxes around words
cv2.imwrite(out_path+'%s/summary%d.png'%(f,n_line), img)
#path, mapy = pathfinder.search(imbw, 'A', line)
#path = [[int(line),0]]
path = [[int(line),rline.shape[1]]]
#print(rline.shape[1])
#print('path[0][0]: ',path[0][0], ' path[0][1]: ', path[0][1])
draw_box(im, path, prev)
#draw_map(immap, mapy)
prev=(0, path[0][0])
n_line+=1
# print_path(path)
#save(filename, imbw, immap)
cv2.imwrite(out_path+'%s/summary.png'%f, im)
return out_dict
print(' - Elapsed time: ' + str((timer() - begin)) + ' s') | [
"cv2.rectangle",
"cv2.imwrite",
"WordSegmentation.prepareImg",
"os.path.exists",
"numpy.ones",
"cv2.threshold",
"cv2.erode",
"lib.linelocalization.localize",
"os.path.join",
"numpy.max",
"numpy.zeros",
"WordSegmentation.wordSegmentation",
"os.mkdir",
"numpy.min",
"DataLoader.Batch",
"t... | [((426, 461), 'cv2.rectangle', 'cv2.rectangle', (['im', 'prev', 'curr', '(0)', '(3)'], {}), '(im, prev, curr, 0, 3)\n', (439, 461), False, 'import cv2\n'), ((791, 823), 'cv2.imwrite', 'cv2.imwrite', (['imbw_filename', 'imbw'], {}), '(imbw_filename, imbw)\n', (802, 823), False, 'import cv2\n'), ((885, 919), 'cv2.imwrite', 'cv2.imwrite', (['immap_filename', 'immap'], {}), '(immap_filename, immap)\n', (896, 919), False, 'import cv2\n'), ((1078, 1096), 'DataLoader.Batch', 'Batch', (['None', '[img]'], {}), '(None, [img])\n', (1083, 1096), False, 'from DataLoader import Batch\n'), ((1414, 1421), 'time.time', 'timer', ([], {}), '()\n', (1419, 1421), True, 'from time import time as timer\n'), ((1013, 1052), 'cv2.imread', 'cv2.imread', (['fnImg', 'cv2.IMREAD_GRAYSCALE'], {}), '(fnImg, cv2.IMREAD_GRAYSCALE)\n', (1023, 1052), False, 'import cv2\n'), ((1620, 1652), 'os.path.join', 'os.path.join', (['filepath', 'filename'], {}), '(filepath, filename)\n', (1632, 1652), False, 'import os\n'), ((1809, 1832), 'cv2.imread', 'cv2.imread', (['fullpath', '(0)'], {}), '(fullpath, 0)\n', (1819, 1832), False, 'import cv2\n'), ((1929, 1939), 'numpy.min', 'np.min', (['im'], {}), '(im)\n', (1935, 1939), True, 'import numpy as np\n'), ((1950, 1960), 'numpy.max', 'np.max', (['im'], {}), '(im)\n', (1956, 1960), True, 'import numpy as np\n'), ((2056, 2118), 'cv2.threshold', 'cv2.threshold', (['im', '(0)', '(255)', '(cv2.THRESH_BINARY + cv2.THRESH_OTSU)'], {}), '(im, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n', (2069, 2118), False, 'import cv2\n'), ((2182, 2207), 'numpy.ones', 'np.ones', (['(3, 3)', 'np.uint8'], {}), '((3, 3), np.uint8)\n', (2189, 2207), True, 'import numpy as np\n'), ((2217, 2254), 'cv2.erode', 'cv2.erode', (['imbw', 'kernel'], {'iterations': '(1)'}), '(imbw, kernel, iterations=1)\n', (2226, 2254), False, 'import cv2\n'), ((2300, 2331), 'lib.linelocalization.localize', 'linelocalization.localize', (['imbw'], {}), '(imbw)\n', (2325, 2331), False, 'from lib import 
sauvola, linelocalization, pathfinder\n'), ((2462, 2498), 'numpy.zeros', 'np.zeros', (['imbw.shape'], {'dtype': 'np.int32'}), '(imbw.shape, dtype=np.int32)\n', (2470, 2498), True, 'import numpy as np\n'), ((4360, 4408), 'cv2.imwrite', 'cv2.imwrite', (["(out_path + '%s/summary.png' % f)", 'im'], {}), "(out_path + '%s/summary.png' % f, im)\n", (4371, 4408), False, 'import cv2\n'), ((2644, 2665), 'WordSegmentation.prepareImg', 'prepareImg', (['rline', '(50)'], {}), '(rline, 50)\n', (2654, 2665), False, 'from WordSegmentation import wordSegmentation, prepareImg\n'), ((3019, 3108), 'WordSegmentation.wordSegmentation', 'wordSegmentation', (['img'], {'kernelSize': '(25)', 'sigma': '(11)', 'theta': '(7)', 'minArea': '(100)', 'increase_dim': '(10)'}), '(img, kernelSize=25, sigma=11, theta=7, minArea=100,\n increase_dim=10)\n', (3035, 3108), False, 'from WordSegmentation import wordSegmentation, prepareImg\n'), ((3945, 4006), 'cv2.imwrite', 'cv2.imwrite', (["(out_path + '%s/summary%d.png' % (f, n_line))", 'img'], {}), "(out_path + '%s/summary%d.png' % (f, n_line), img)\n", (3956, 4006), False, 'import cv2\n'), ((3170, 3205), 'os.path.exists', 'os.path.exists', (["(out_path + '%s' % f)"], {}), "(out_path + '%s' % f)\n", (3184, 3205), False, 'import os\n'), ((3207, 3236), 'os.mkdir', 'os.mkdir', (["(out_path + '%s' % f)"], {}), "(out_path + '%s' % f)\n", (3215, 3236), False, 'import os\n'), ((3475, 3503), 'cv2.imwrite', 'cv2.imwrite', (['imgloc', 'wordImg'], {}), '(imgloc, wordImg)\n', (3486, 3503), False, 'import cv2\n'), ((3803, 3851), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x, y)', '(x + w, y + h)', '(0)', '(1)'], {}), '(img, (x, y), (x + w, y + h), 0, 1)\n', (3816, 3851), False, 'import cv2\n'), ((4457, 4464), 'time.time', 'timer', ([], {}), '()\n', (4462, 4464), True, 'from time import time as timer\n')] |
#!/usr/bin/env python3
import sqlite3
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from contextlib import closing as ctx_closing
from argparse import ArgumentParser
def read_daily_stats(sqc):
    """Load per-day keystroke delay statistics from an open sqlite connection.

    Keystrokes slower than 5 s or typed incorrectly are excluded.  The inner
    query aggregates each calendar day; the outer query derives the population
    variance from the raw sums (E[x^2] - E[x]^2).
    """
    query = (
        "SELECT day, avg, (xx/n - avg*avg) AS var, min, max, n AS count FROM ("
        "SELECT strftime('%Y-%m-%d',timestamp,'unixepoch') AS day,"
        " AVG(delay_ms) AS avg,"
        " MIN(delay_ms) AS min,"
        " MAX(delay_ms) AS max,"
        " SUM(delay_ms*delay_ms) AS xx,"
        " COUNT(*) AS n"
        " FROM keystrokes WHERE (delay_ms <= 5000 AND pressed == expected)"
        " GROUP BY day ORDER BY day)"
    )
    stats = pd.read_sql_query(query, sqc)
    stats['day'] = pd.to_datetime(stats['day'])
    return stats
def main():
    """Plot the daily average keystroke delay (with a std-dev band) read from
    one or more sqlite databases produced by the keystroke logger."""
    p = ArgumentParser()
    p.add_argument('-d', '--db', nargs='+', help='sqlite database paths', default=['keystrokes.db'])
    args = p.parse_args()
    # Read each day from each database
    frames = []
    for path in args.db:
        with ctx_closing(sqlite3.connect(path)) as sqc:
            frames.append(read_daily_stats(sqc))
    # BUG FIX: DataFrame.append was removed in pandas 2.0 -- concatenate instead.
    df = pd.concat(frames) if frames else pd.DataFrame()
    # Combine duplicate day rows (same day present in several databases)
    g = df.groupby('day')
    df = pd.DataFrame({
        'avg' : g['avg'].mean(),
        # NOTE(review): pooling daily variances as sum/count**2 assumes equal
        # per-day sample sizes -- confirm this approximation is intended.
        'var' : g['var'].sum() / (g['var'].count()**2),
        'count' : g['count'].sum(),
        'min' : g['min'].min(),
        'max' : g['max'].max(),
    }, index=g.groups)
    df = df.sort_index()
    df['std'] = np.sqrt(df['var'])
    print(df)
    print("Total keystrokes sampled:", sum(df['count']))
    x = df.index.strftime('%d/%m')
    y = df['avg']
    ye = df['std']
    # Band is avg +/- std, clamped to the observed daily extremes.
    # (the original also had a dead `y0 = df['min']` assignment, removed)
    y0 = np.maximum(df['min'], y-ye)
    y1 = np.minimum(df['max'], y+ye)
    with plt.style.context('Solarize_Light2'):
        fig,ax = plt.subplots()
        ax.fill_between(x, y0, y1, alpha=0.2)
        ax.plot(x, y, label='ms/keystroke', lw=5)
        plt.ylim((0, max(y1)*1.1))
        plt.legend(loc='upper left')
        plt.show()

if __name__ == "__main__":
    main()
| [
"pandas.read_sql_query",
"numpy.sqrt",
"numpy.minimum",
"argparse.ArgumentParser",
"sqlite3.connect",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.style.context",
"pandas.DataFrame",
"numpy.maximum",
"matplotlib.pyplot.subplots",
"pandas.to_datetime",
"matplotlib.pyplot.show"
] | [((225, 648), 'pandas.read_sql_query', 'pd.read_sql_query', (['(\'SELECT day, avg, (xx/n - avg*avg) AS var, min, max, n AS count FROM (\' +\n "SELECT strftime(\'%Y-%m-%d\',timestamp,\'unixepoch\') AS day," +\n \' AVG(delay_ms) AS avg,\' + \' MIN(delay_ms) AS min,\' +\n \' MAX(delay_ms) AS max,\' + \' SUM(delay_ms*delay_ms) AS xx,\' +\n \' COUNT(*) AS n\' +\n \' FROM keystrokes WHERE (delay_ms <= 5000 AND pressed == expected)\' +\n \' GROUP BY day ORDER BY day)\')', 'sqc'], {}), '(\n \'SELECT day, avg, (xx/n - avg*avg) AS var, min, max, n AS count FROM (\' +\n "SELECT strftime(\'%Y-%m-%d\',timestamp,\'unixepoch\') AS day," +\n \' AVG(delay_ms) AS avg,\' + \' MIN(delay_ms) AS min,\' +\n \' MAX(delay_ms) AS max,\' + \' SUM(delay_ms*delay_ms) AS xx,\' +\n \' COUNT(*) AS n\' +\n \' FROM keystrokes WHERE (delay_ms <= 5000 AND pressed == expected)\' +\n \' GROUP BY day ORDER BY day)\', sqc)\n', (242, 648), True, 'import pandas as pd\n'), ((668, 693), 'pandas.to_datetime', 'pd.to_datetime', (["df['day']"], {}), "(df['day'])\n", (682, 693), True, 'import pandas as pd\n'), ((723, 739), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (737, 739), False, 'from argparse import ArgumentParser\n'), ((867, 881), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (879, 881), True, 'import pandas as pd\n'), ((1337, 1355), 'numpy.sqrt', 'np.sqrt', (["df['var']"], {}), "(df['var'])\n", (1344, 1355), True, 'import numpy as np\n'), ((1509, 1538), 'numpy.maximum', 'np.maximum', (["df['min']", '(y - ye)'], {}), "(df['min'], y - ye)\n", (1519, 1538), True, 'import numpy as np\n'), ((1543, 1572), 'numpy.minimum', 'np.minimum', (["df['max']", '(y + ye)'], {}), "(df['max'], y + ye)\n", (1553, 1572), True, 'import numpy as np\n'), ((1578, 1614), 'matplotlib.pyplot.style.context', 'plt.style.context', (['"""Solarize_Light2"""'], {}), "('Solarize_Light2')\n", (1595, 1614), True, 'import matplotlib.pyplot as plt\n'), ((1627, 1641), 'matplotlib.pyplot.subplots', 
'plt.subplots', ([], {}), '()\n', (1639, 1641), True, 'import matplotlib.pyplot as plt\n'), ((1805, 1833), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (1815, 1833), True, 'import matplotlib.pyplot as plt\n'), ((1836, 1846), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1844, 1846), True, 'import matplotlib.pyplot as plt\n'), ((960, 981), 'sqlite3.connect', 'sqlite3.connect', (['path'], {}), '(path)\n', (975, 981), False, 'import sqlite3\n')] |
# -*- coding: utf-8 -*-
import tensorflow as tf
import numpy as np
import skimage.io as io
import skimage.transform as transform
from os.path import join
import vfn.network as nw
import argparse
import json
import time
global_dtype = tf.float32  # dtype used for all network tensors
global_dtype_np = np.float32  # numpy counterpart of global_dtype
batch_size = 200  # number of crop candidates scored per forward pass
def overlap_ratio(x1, y1, w1, h1, x2, y2, w2, h2):
    """Return the intersection-over-union of two axis-aligned (x, y, w, h) boxes."""
    overlap_w = min(x1 + w1, x2 + w2) - max(x1, x2)
    overlap_h = min(y1 + h1, y2 + h2) - max(y1, y2)
    intersection = max(0, overlap_w) * max(0, overlap_h)
    union = (w1 * h1) + (w2 * h2) - intersection
    return float(intersection) / float(union)
def evaluate_sliding_window(img_filename, crops):
    """Score every candidate crop of one image and return the best window.

    crops -- list of (x, y, w, h) candidate windows in pixel coordinates.
    Relies on the module-level `sess`, `score_func` and `image_placeholder`.
    Returns the highest-scoring crop as an (x, y, w, h) tuple.
    """
    img = io.imread(img_filename).astype(np.float32)/255
    if img.ndim == 2: # Handle B/W images
        img = np.expand_dims(img, axis=-1)
        img = np.repeat(img, 3, 2)

    img_crops = np.zeros((batch_size, 227, 227, 3))
    # BUG FIX: `xrange` does not exist in Python 3 -- use range().
    for i in range(len(crops)):
        crop = crops[i]
        img_crop = transform.resize(img[crop[1]:crop[1]+crop[3],crop[0]:crop[0]+crop[2]], (227, 227))-0.5
        img_crop = np.expand_dims(img_crop, axis=0)
        img_crops[i,:,:,:] = img_crop

    # compute ranking scores; sess.run returns a one-element list holding the
    # (batch_size,) score vector, so unwrap it before slicing.
    # BUG FIX: the original sliced the outer list, so argmax ran over the whole
    # padded batch and could index past len(crops).
    scores = sess.run([score_func], feed_dict={image_placeholder: img_crops})[0]

    # find the optimal crop among the real candidates (the tail of the batch
    # is zero padding)
    idx = np.argmax(scores[:len(crops)])
    best_window = crops[idx]

    # return the best crop
    return (best_window[0], best_window[1], best_window[2], best_window[3])
def evaluate_FCDB():
    """Evaluate the cropping model on the FCDB data set.

    Reads pre-computed candidate windows from ./sliding_window.json (the
    first crop of each item is the ground truth), picks the best-scoring
    window per image via evaluate_sliding_window(), and prints average IoU,
    average boundary displacement and alpha-recall.
    """
    slidling_windows_string = open('./sliding_window.json', 'r').read()
    sliding_windows = json.loads(slidling_windows_string)
    cnt = 0
    alpha = 0.75  # IoU threshold for the recall metric
    alpha_cnt = 0
    accum_boundary_displacement = 0
    accum_overlap_ratio = 0
    crop_cnt = 0
    for item in sliding_windows:
        # print 'processing', item['filename']
        crops = item['crops']
        img_filename = join('FCDB', item['filename'])
        img = io.imread(img_filename)
        height = img.shape[0]
        width = img.shape[1]
        # ground truth (the first listed crop)
        x = crops[0][0]
        y = crops[0][1]
        w = crops[0][2]
        h = crops[0][3]
        best_x, best_y, best_w, best_h = evaluate_sliding_window(img_filename, crops)
        # mean absolute displacement of the two edges on each axis, normalised
        # by the image size
        boundary_displacement = (abs(best_x - x) + abs(best_x + best_w - x - w))/float(width) + (abs(best_y - y) + abs(best_y + best_h - y - h))/float(height)
        accum_boundary_displacement += boundary_displacement
        ratio = overlap_ratio(x, y, w, h, best_x, best_y, best_w, best_h)
        if ratio >= alpha:
            alpha_cnt += 1
        accum_overlap_ratio += ratio
        cnt += 1
        crop_cnt += len(crops)
    print('Average overlap ratio: {:.4f}'.format(accum_overlap_ratio / cnt))
    print('Average boundary displacement: {:.4f}'.format(accum_boundary_displacement / (cnt * 4.0)))
    print('Alpha recall: {:.4f}'.format(100 * float(alpha_cnt) / cnt))
    print('Total image evaluated:', cnt)
    print('Average crops per image:', float(crop_cnt) / cnt)
def evaluate_aesthetics_score(sess, score_func, image_placeholder, images):
    """Run the ranking network on each image and return one score per image.

    Each image is normalised to [0, 1], resized to 227x227, shifted to
    [-0.5, 0.5] and fed through `score_func` one at a time.
    """
    scores = np.zeros(shape=(len(images),))
    for idx, image in enumerate(images):
        normed = image.astype(np.float32) / 255
        resized = transform.resize(normed, (227, 227)) - 0.5
        batch = np.expand_dims(resized, axis=0)
        scores[idx] = sess.run([score_func], feed_dict={image_placeholder: batch})[0]
    return scores
def str2bool(v):
    """argparse-friendly conversion of a textual flag to a bool.

    Raises ArgumentTypeError for anything that is not a recognised
    yes/no spelling.
    """
    truthy = ('yes', 'true', 't', 'y', '1')
    falsy = ('no', 'false', 'f', 'n', '0')
    lowered = v.lower()
    if lowered in truthy:
        return True
    if lowered in falsy:
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
if __name__ == "__main__":
    # Command-line entry point: build the ranking network, restore a
    # checkpoint and run the FCDB evaluation.
    parser = argparse.ArgumentParser()
    parser.add_argument("--embedding_dim", help="Embedding dimension before mapping to one-dimensional score", type=int, default = 1000)
    parser.add_argument("--initial_parameters", help="Path to initial parameter file", type=str, default="alexnet.npy")
    parser.add_argument("--ranking_loss", help="Type of ranking loss", type=str, choices=['ranknet', 'svm'], default='svm')
    parser.add_argument("--snapshot", help="Name of the checkpoint files", type=str, default='./snapshots/model-spp-max')
    parser.add_argument("--spp", help="Whether to use spatial pyramid pooling in the last layer or not", type=str2bool, default=True)
    parser.add_argument("--pooling", help="Which pooling function to use", type=str, choices=['max', 'avg'], default='max')
    args = parser.parse_args()
    embedding_dim = args.embedding_dim
    ranking_loss = args.ranking_loss
    snapshot = args.snapshot
    # NOTE(review): on NumPy >= 1.16.3 loading a pickled .npy requires
    # allow_pickle=True -- confirm against the numpy version in use.
    net_data = np.load(args.initial_parameters).item()
    image_placeholder = tf.placeholder(dtype=global_dtype, shape=[batch_size,227,227,3])
    var_dict = nw.get_variable_dict(net_data)
    SPP = args.spp
    pooling = args.pooling
    # build the AlexNet-based feature extractor and the scoring head
    with tf.variable_scope("ranker") as scope:
        feature_vec = nw.build_alexconvnet(image_placeholder, var_dict, embedding_dim, SPP=SPP, pooling=pooling)
        score_func = nw.score(feature_vec)
    # load pre-trained model
    saver = tf.train.Saver(tf.global_variables())
    sess = tf.Session(config=tf.ConfigProto())
    sess.run(tf.global_variables_initializer())
    saver.restore(sess, snapshot)
    print('Snapshot: {}'.format(snapshot))
    start_time = time.time()
    evaluate_FCDB()
    print('--- %s seconds ---' % (time.time() - start_time))
| [
"vfn.network.score",
"numpy.repeat",
"argparse.ArgumentParser",
"tensorflow.placeholder",
"vfn.network.get_variable_dict",
"tensorflow.ConfigProto",
"vfn.network.build_alexconvnet",
"json.loads",
"tensorflow.variable_scope",
"tensorflow.global_variables",
"argparse.ArgumentTypeError",
"skimage... | [((795, 830), 'numpy.zeros', 'np.zeros', (['(batch_size, 227, 227, 3)'], {}), '((batch_size, 227, 227, 3))\n', (803, 830), True, 'import numpy as np\n'), ((1512, 1547), 'json.loads', 'json.loads', (['slidling_windows_string'], {}), '(slidling_windows_string)\n', (1522, 1547), False, 'import json\n'), ((3641, 3666), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3664, 3666), False, 'import argparse\n'), ((4645, 4712), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'global_dtype', 'shape': '[batch_size, 227, 227, 3]'}), '(dtype=global_dtype, shape=[batch_size, 227, 227, 3])\n', (4659, 4712), True, 'import tensorflow as tf\n'), ((4725, 4755), 'vfn.network.get_variable_dict', 'nw.get_variable_dict', (['net_data'], {}), '(net_data)\n', (4745, 4755), True, 'import vfn.network as nw\n'), ((5275, 5286), 'time.time', 'time.time', ([], {}), '()\n', (5284, 5286), False, 'import time\n'), ((714, 742), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(-1)'}), '(img, axis=-1)\n', (728, 742), True, 'import numpy as np\n'), ((757, 777), 'numpy.repeat', 'np.repeat', (['img', '(3)', '(2)'], {}), '(img, 3, 2)\n', (766, 777), True, 'import numpy as np\n'), ((1013, 1045), 'numpy.expand_dims', 'np.expand_dims', (['img_crop'], {'axis': '(0)'}), '(img_crop, axis=0)\n', (1027, 1045), True, 'import numpy as np\n'), ((1811, 1841), 'os.path.join', 'join', (['"""FCDB"""', "item['filename']"], {}), "('FCDB', item['filename'])\n", (1815, 1841), False, 'from os.path import join\n'), ((1856, 1879), 'skimage.io.imread', 'io.imread', (['img_filename'], {}), '(img_filename)\n', (1865, 1879), True, 'import skimage.io as io\n'), ((3213, 3247), 'numpy.expand_dims', 'np.expand_dims', (['img_resize'], {'axis': '(0)'}), '(img_resize, axis=0)\n', (3227, 3247), True, 'import numpy as np\n'), ((4811, 4838), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""ranker"""'], {}), "('ranker')\n", (4828, 4838), True, 'import tensorflow as tf\n'), 
((4871, 4965), 'vfn.network.build_alexconvnet', 'nw.build_alexconvnet', (['image_placeholder', 'var_dict', 'embedding_dim'], {'SPP': 'SPP', 'pooling': 'pooling'}), '(image_placeholder, var_dict, embedding_dim, SPP=SPP,\n pooling=pooling)\n', (4891, 4965), True, 'import vfn.network as nw\n'), ((4983, 5004), 'vfn.network.score', 'nw.score', (['feature_vec'], {}), '(feature_vec)\n', (4991, 5004), True, 'import vfn.network as nw\n'), ((5062, 5083), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (5081, 5083), True, 'import tensorflow as tf\n'), ((5145, 5178), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (5176, 5178), True, 'import tensorflow as tf\n'), ((907, 998), 'skimage.transform.resize', 'transform.resize', (['img[crop[1]:crop[1] + crop[3], crop[0]:crop[0] + crop[2]]', '(227, 227)'], {}), '(img[crop[1]:crop[1] + crop[3], crop[0]:crop[0] + crop[2]],\n (227, 227))\n', (923, 998), True, 'import skimage.transform as transform\n'), ((3154, 3187), 'skimage.transform.resize', 'transform.resize', (['img', '(227, 227)'], {}), '(img, (227, 227))\n', (3170, 3187), True, 'import skimage.transform as transform\n'), ((3545, 3598), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['"""Boolean value expected."""'], {}), "('Boolean value expected.')\n", (3571, 3598), False, 'import argparse\n'), ((4581, 4613), 'numpy.load', 'np.load', (['args.initial_parameters'], {}), '(args.initial_parameters)\n', (4588, 4613), True, 'import numpy as np\n'), ((5114, 5130), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (5128, 5130), True, 'import tensorflow as tf\n'), ((611, 634), 'skimage.io.imread', 'io.imread', (['img_filename'], {}), '(img_filename)\n', (620, 634), True, 'import skimage.io as io\n'), ((5341, 5352), 'time.time', 'time.time', ([], {}), '()\n', (5350, 5352), False, 'import time\n')] |
from collections import Counter
import numpy as np
import sklearn
from pandas import DataFrame
from sklearn.impute import SimpleImputer
import data.utils.df_loader as dl
import data.utils.web_scrappers as ws
def process_data_for_labels(ticker):
    """
    Computes new columns needed for label generation for specific ticker.

    Shifts the ticker's price column up by 1..7 days so each row carries the
    relative price change i days into the future.
    :param ticker: Company symbol
    :return: Dataframe with new columns
    """
    hm_days = 7
    df = dl.get_dax__as_df()
    df.fillna(0, inplace=True)
    for day in range(1, hm_days + 1):
        future_pct = (df[ticker].shift(-day) - df[ticker]) / df[ticker]
        df["{}_{}d".format(ticker, day)] = future_pct
    df.fillna(0, inplace=True)
    return df
def buy_sell_hold(*args):
    """Map future percent changes to a trading signal.

    Scans the changes in order and returns 1 (buy) on the first change above
    +2%, -1 (sell) on the first change below -2%, and 0 (hold) otherwise.
    """
    threshold = 0.02
    for change in args:
        if change > threshold:
            return 1
        if change < -threshold:
            return -1
    return 0
def get_cls_data(ticker):
    """
    Builds the classification data set for one ticker.

    Firstly computes buy/sell/hold labels from the 1..7 day future-change
    columns, then uses the daily percent change of every ticker's price as
    the feature matrix.
    :param ticker: Company symbol
    :return: Features, labels, DataFrame
    """
    tickers = ws.get_tickers()
    df = process_data_for_labels(ticker)
    # label: the first future day whose move exceeds +-2% decides buy/sell
    df["{}_target".format(ticker)] = list(map(buy_sell_hold,
                                              df["{}_1d".format(ticker)],
                                              df["{}_2d".format(ticker)],
                                              df["{}_3d".format(ticker)],
                                              df["{}_4d".format(ticker)],
                                              df["{}_5d".format(ticker)],
                                              df["{}_6d".format(ticker)],
                                              df["{}_7d".format(ticker)]))
    vals = df["{}_target".format(ticker)].values.tolist()
    str_vals = [str(i) for i in vals]
    print("Dataspread:", Counter(str_vals))
    df.fillna(0, inplace=True)
    df = df.replace([np.inf, -np.inf], np.nan)
    df.dropna(inplace=True)
    # features: day-over-day percent change of every ticker's price
    df_vals = df[[ticker for ticker in tickers]].pct_change()
    df_vals = df_vals.replace([np.inf, -np.inf], 0)
    df_vals.fillna(0, inplace=True)
    X = df_vals.values
    y = df["{}_target".format(ticker)].values
    return X, y, df
def add_new_features(df_org, forecast_out):
    """Derive technical-indicator features from an OHLCV price frame.

    :param df_org: DataFrame with Open/High/Low/Close/Adj Close/Volume columns
    :param forecast_out: how many rows ahead the regression label looks
    :return: new DataFrame of features plus a 'label' column holding the
             Adj Close `forecast_out` rows in the future (rounded to 3 dp)
    """
    df = df_org.loc[:, ["Adj Close", "Volume"]]
    df["high_low_pct"] = (df_org["High"] - df_org["Low"]) / df_org[
        "Close"] * 100.0
    # daily log-return of the adjusted close
    # (an unused duplicate lambda `lbd` was removed)
    df["change"] = np.log(df_org["Adj Close"]) - np.log(df_org["Adj "
                                                              "Close"].shift(
        1))
    df["pct_change"] = (df_org["Close"] - df_org["Open"]) / df_org[
        "Open"] * 100.0
    df["daily_return"] = (df_org["Close"] / df_org["Open"]) - 1
    # BUG FIX: fillna() is not in-place -- the original discarded the result.
    # ffill() is the non-deprecated spelling of fillna(method="pad").
    df = df.ffill()
    df["Volume"] = np.log(df_org["Volume"])
    # log of 5 day moving average of volume
    df["5d_mean_log"] = df_org["Volume"].rolling(5).mean().apply(
        np.log)
    # daily volume vs. 200 day moving average
    df["volume_mov_avg"] = (df_org["Volume"] / df_org["Volume"].rolling(
        200).mean()) - 1
    # daily closing price vs. 50 day exponential moving avg
    df["close_vs_moving"] = (df_org["Close"] / df_org["Close"].ewm(
        span=50).mean()) - 1
    # z-score of the close over a 200 day window
    df["z_score"] = (df_org["Close"] - df_org["Close"].rolling(window=200,
                                                               min_periods=20).mean()) / \
                    df_org["Close"].rolling(window=200, min_periods=20).std()
    # running count of up-days minus down-days over the last 20 sessions
    df["signing"] = df["pct_change"].apply(np.sign)
    df["plus_minus"] = df["signing"].rolling(20).sum()
    df["label"] = df["Adj Close"].shift(-forecast_out).round(3)
    df["label"] = df["label"].interpolate(limit=3, limit_direction="both")
    df = df.replace([np.inf, -np.inf], np.nan)
    # fill any remaining gaps with the column mean
    df.fillna(df.mean(), inplace=True)
    return df
def missing_values_transformer(df):
    """Impute NaNs with the per-column mean via sklearn's SimpleImputer.

    BUG FIX: the original rebuilt the frame with DataFrame.from_records,
    which dropped the original column names and index; keep them so callers
    can still address columns by name.
    """
    imp_mean = SimpleImputer(missing_values=np.nan, strategy="mean")
    np_array = imp_mean.fit_transform(df)
    # NOTE(review): SimpleImputer silently drops all-NaN columns, which would
    # misalign the column labels -- assumes the input has none.
    df = DataFrame(np_array, columns=df.columns, index=df.index)
    return df
def get_reg_data(ticker, forecast):
    """Build the regression data set for one ticker.

    :param ticker: Company symbol
    :param forecast: number of days to predict into the future
    :return: (X, y, df, X_data, data) -- scaled features and labels for the
             available history, plus the trailing `forecast` rows (features
             and frame) to predict on.
    """
    df = dl.get_com_as_df(ticker)
    forecast_out = int(forecast)  # predict int days into future
    df = add_new_features(df, forecast_out)
    print("Description of data set: \n {}".format(df.describe()))
    # BUG FIX: positional `axis` was removed from DataFrame.drop in pandas 2.0.
    X = np.array(df.drop(columns=["label"]))
    scaler = sklearn.preprocessing.MinMaxScaler()
    X = scaler.fit_transform(X)
    # the last forecast_out rows have no label -- hold them out for prediction
    X_data = X[-forecast_out:]
    X = X[:-forecast_out]
    data = df[-forecast_out:]
    df = df[:-forecast_out]
    y = np.array(df["label"])
    return X, y, df, X_data, data
| [
"pandas.DataFrame.from_records",
"data.utils.df_loader.get_dax__as_df",
"numpy.log",
"data.utils.web_scrappers.get_tickers",
"collections.Counter",
"numpy.array",
"sklearn.impute.SimpleImputer",
"data.utils.df_loader.get_com_as_df",
"sklearn.preprocessing.MinMaxScaler"
] | [((532, 551), 'data.utils.df_loader.get_dax__as_df', 'dl.get_dax__as_df', ([], {}), '()\n', (549, 551), True, 'import data.utils.df_loader as dl\n'), ((1180, 1196), 'data.utils.web_scrappers.get_tickers', 'ws.get_tickers', ([], {}), '()\n', (1194, 1196), True, 'import data.utils.web_scrappers as ws\n'), ((2911, 2935), 'numpy.log', 'np.log', (["df_org['Volume']"], {}), "(df_org['Volume'])\n", (2917, 2935), True, 'import numpy as np\n'), ((4064, 4117), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {'missing_values': 'np.nan', 'strategy': '"""mean"""'}), "(missing_values=np.nan, strategy='mean')\n", (4077, 4117), False, 'from sklearn.impute import SimpleImputer\n'), ((4169, 4201), 'pandas.DataFrame.from_records', 'DataFrame.from_records', (['np_array'], {}), '(np_array)\n', (4191, 4201), False, 'from pandas import DataFrame\n'), ((4263, 4287), 'data.utils.df_loader.get_com_as_df', 'dl.get_com_as_df', (['ticker'], {}), '(ticker)\n', (4279, 4287), True, 'import data.utils.df_loader as dl\n'), ((4516, 4552), 'sklearn.preprocessing.MinMaxScaler', 'sklearn.preprocessing.MinMaxScaler', ([], {}), '()\n', (4550, 4552), False, 'import sklearn\n'), ((4709, 4730), 'numpy.array', 'np.array', (["df['label']"], {}), "(df['label'])\n", (4717, 4730), True, 'import numpy as np\n'), ((1942, 1959), 'collections.Counter', 'Counter', (['str_vals'], {}), '(str_vals)\n', (1949, 1959), False, 'from collections import Counter\n'), ((2566, 2593), 'numpy.log', 'np.log', (["df_org['Adj Close']"], {}), "(df_org['Adj Close'])\n", (2572, 2593), True, 'import numpy as np\n'), ((2516, 2525), 'numpy.log', 'np.log', (['x'], {}), '(x)\n', (2522, 2525), True, 'import numpy as np\n')] |
# 导入包
import zipfile
import paddle
import paddle.fluid as fluid
import matplotlib.pyplot as plt
import matplotlib.image as mping
from PIL import Image
import json
import numpy as np
import cv2
import sys
import time
import h5py
# import scipy.io as io
from matplotlib import pyplot as plt
from scipy.ndimage.filters import gaussian_filter
import scipy
from matplotlib import cm as CM
from paddle.utils.plot import Ploter
start = time.time()
# Load the image annotations into a dict
f = open('data/data1917/train.json',encoding='utf-8')
content = json.load(f)
print(content.keys())
print('info:',content['info'])
print('stage:',content['stage'])
print('split:',content['split'])
print(content['annotations'][0].keys())
print(content['annotations'][0]['type'])
print(content['annotations'][0][ 'id'])
print(content['annotations'][0]['ignore_region'])
print(content['annotations'][0]['name'])
print(content['annotations'][0]['num'])
# Strip the leading 'stage1/' from every image path.
# NOTE(review): str.lstrip removes a *character set*, not a prefix; this only
# works here because the '/' stops it before the file name proper.
for j in range(len(content['annotations'])):
    content['annotations'][j]['name'] = content['annotations'][j]['name'].lstrip('stage1').lstrip('/')
print(content['annotations'][1]['name'])
# Read the file listing inside the zip archive
zfile = zipfile.ZipFile("data/train_new.zip")
l = [] # l stores every image path found under train/
for fname in zfile.namelist()[1:]:
    # print(fname)
    l.append(fname)
print(l[3])
name = l[3]
im = Image.open(name)
plt.imshow(im)
# Inspect the annotations of this image
for j in range(len(content['annotations'])):
    if content['annotations'][j]['name'] == name:
        print('id = ',content['annotations'][j]['id']) # image id
        ann = content['annotations'][j]['annotation']
        print(ann) # annotations are x,y,w,h boxes; some have only x,y
        print('有标注的个数:',len(ann))
# Visualise the second annotation as a crop
lab = 1
box = (ann[lab]['x'],ann[lab]['y'],ann[lab]['x']+ann[lab]['w'],ann[lab]['y']+ann[lab]['h'])
new_img = im.crop(box=box)
plt.imshow(new_img)
# Visualise all annotations on the image (slow per-pixel box drawing)
width = im.size[0] # image width
height = im.size[1] # image height
print(width,height)
for a in range(len(ann)): # iterate over every annotation
    for x in range(width):
        for y in range(height):
            # r,g,b = im.getpixel((x,y))
            if(x > (ann[a]['x']-5) and x < (ann[a]['x']+5) and y > ann[a]['y'] and y < (ann[a]['y']+ann[a]['h'])):
                im.putpixel((x,y),(255,0,0)) # red line from (x,y) to (x,y+h), 5 px wide each side
            if(x > (ann[a]['x']+ann[a]['w']-5) and x < (ann[a]['x']+ann[a]['w']+5) and y > ann[a]['y'] and y < (ann[a]['y']+ann[a]['h'])):
                im.putpixel((x,y),(255,0,0)) # red line from (x+w,y) to (x+w,y+h), 5 px wide each side
            if(y > (ann[a]['y']-5) and y < (ann[a]['y']+5) and x > ann[a]['x'] and x < (ann[a]['x']+ann[a]['w'])):
                im.putpixel((x,y),(255,0,0)) # red line from (x,y) to (x+w,y), 5 px wide each side
            if(y > (ann[a]['y']+ann[a]['h']-5) and y < (ann[a]['y']+ann[a]['h']+5) and x > ann[a]['x'] and x < (ann[a]['x']+ann[a]['w'])):
                im.putpixel((x,y),(255,0,0)) # red line from (x,y+h) to (x+w,y+h), 5 px wide each side
plt.imshow(im)
# Classify the images' source based on the image size
l_set = []
s_2560_1920 = [] # boxes, fisheye elevator, 63 images
s_928_576 = [] # points, vending machine, 248 images
s_1024_768 = [] # points, street shots, 302
s_640_480 = [] # points, indoor shots, 92
s_2048_2048 =[] # boxes, fisheye elevator, 41
s_1080_1618 =[] # filtered out, 1
s_1920_1080 = [] # boxes, supermarket, 1240
s_1440_1080 =[] # filtered out, 1
s_1920_1200 =[] # boxes, street shots, 12
for inde in range(2000):
    imm = Image.open(content['annotations'][inde]['name'])
    l_set.append(imm.size)
    if imm.size == (2560, 1920):s_2560_1920.append(content['annotations'][inde]['name'])
    elif imm.size == (928, 576):s_928_576.append(content['annotations'][inde]['name'])
    elif imm.size == (1024, 768):s_1024_768.append(content['annotations'][inde]['name'])
    elif imm.size == (640, 480):s_640_480.append(content['annotations'][inde]['name'])
    elif imm.size == (2048, 2048):s_2048_2048.append(content['annotations'][inde]['name'])
    elif imm.size == (1080, 1618):s_1080_1618.append(content['annotations'][inde]['name'])
    elif imm.size == (1920, 1080):s_1920_1080.append(content['annotations'][inde]['name'])
    elif imm.size == (1440, 1080):s_1440_1080.append(content['annotations'][inde]['name'])
    elif imm.size == (1920, 1200):s_1920_1200.append(content['annotations'][inde]['name'])
print(len(l_set))
sett = set(l_set)
print(sett)
print(len(s_2560_1920),len(s_928_576),len(s_1024_768),len(s_640_480),len(s_2048_2048),len(s_1080_1618),len(s_1920_1080),len(s_1440_1080),len(s_1920_1200))
print(s_1440_1080)
print(s_1080_1618)
# print(s_1024_768)
# Collect every sample whose people are annotated with points (no 'w' key)
point_l = []
for f in range(2000):
    if 'w' not in content['annotations'][f]['annotation'][0]:
        point_l.append(content['annotations'][f]['name'])
# for p_name in point_l:
#     print(p_name)
print(len(point_l))
# If the annotation is a coordinate rather than a region, show how one image
# marks each person with a single point
# name1 = 'train/b179764112252559b76a59db9fa18021.jpg'
name1 = point_l[1]
im1 = Image.open(name1)
for j in range(len(content['annotations'])):
    if content['annotations'][j]['name'] == name1:
        print('id = ',content['annotations'][j]['id'])
        ann1 = content['annotations'][j]['annotation']
        # print(ann1)
        print('有标注的个数:',len(ann1))
for a in range(len(ann1)):
    for x in range(im1.size[0]):
        for y in range(im1.size[1]):
            if(x > (ann1[a]['x']-10) and x < (ann1[a]['x']+10) and y > ann1[a]['y']-10 and y < (ann1[a]['y']+10)): # pixels within +-10 of the annotated point
                im1.putpixel((x,y),(255,0,0)) # turn the selected pixels red
plt.imshow(im1)
# ground-truth (x, y) points for the annotations shown above
gt = []
for a in range(len(ann1)):
    gt.append([ann1[a]['x'],ann1[a]['y']])
print(gt)
gt = np.array(gt)
print(gt.shape)
# Generate a density map from a point map via Gaussian filtering
def gaussian_filter_density(gt):
    """Convert a binary head-location map into a crowd density map.

    Each nonzero pixel of `gt` is replaced by a normalised Gaussian kernel,
    so the sum of the returned map approximates the person count (minus any
    mass blurred past the border, since mode='constant' is used).

    :param gt: 2-D array with 1 at every annotated head position
    :return: float32 density map of the same shape
    """
    density = np.zeros(gt.shape, dtype=np.float32)
    gt_count = np.count_nonzero(gt)
    # nothing annotated -> all-zero density map
    if gt_count == 0:
        return density
    # (col, row) coordinates of every annotated point
    pts = np.array(list(zip(np.nonzero(gt)[1].ravel(), np.nonzero(gt)[0].ravel())))
    for pt in pts:
        pt2d = np.zeros(gt.shape, dtype=np.float32)
        pt2d[pt[1], pt[0]] = 1.
        if gt_count > 1:
            # fixed bandwidth (the k-NN adaptive sigma of CSRNet was never
            # enabled here; its dead commented-out code has been removed)
            sigma = 25
        else:
            sigma = np.average(np.array(gt.shape)) / 2. / 2.  # case: 1 point
        # Convolve with the gaussian filter.  BUG FIX: call the module-level
        # `gaussian_filter` import -- the scipy.ndimage.filters namespace the
        # original referenced was removed in modern SciPy.
        density += gaussian_filter(pt2d, sigma, mode='constant')
    return density
print(gt.shape)
img = plt.imread(name1)
# impulse map: 1 at every annotated head position inside the image
k = np.zeros((img.shape[0], img.shape[1]))
for i in range(0, len(gt)):
    if int(gt[i][1]) < img.shape[0] and int(gt[i][0]) < img.shape[1]:
        k[int(gt[i][1]), int(gt[i][0])] = 1
# generate density map
k = gaussian_filter_density(k)
# visualise the density map; its sum approximates the person count
print(k.shape)
groundtruth = np.asarray(k)
# groundtruth = groundtruth.resize((80,60))
print(groundtruth.shape)
plt.imshow(groundtruth,cmap=CM.jet)
print("Sum = " ,np.sum(groundtruth))
# print(groundtruth[0][59:100])
# Image preprocessing
def picture_opt(img,ann):
    """Resize a PIL image to 640x480 and rescale its annotations to match.

    Box annotations (x, y, w, h) are collapsed to a single head point at the
    horizontal centre of the box, 20 px below its top; point annotations are
    used as-is.  Coordinates are then mapped into the 80x60 grid (resized
    image divided by the network stride of 8).

    :param img: PIL image
    :param ann: list of annotation dicts with 'x', 'y' and optionally 'w', 'h'
    :return: (image array normalised to [0, 1], list of (x, y) grid points)
    """
    size_x,size_y = img.size
    train_img_size = (640,480)
    # BUG FIX: Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the
    # long-standing alias for the same filter.
    img = img.resize(train_img_size,Image.LANCZOS)
    img = np.array(img)
    img = img / 255.0

    gt = []
    for b_l in range(len(ann)):
        if 'w' in ann[b_l].keys():
            # box annotation: reduce the frame to a head point
            x = (ann[b_l]['x']+(ann[b_l]['x']+ann[b_l]['w']))/2
            y = ann[b_l]['y']+20
        else:
            x = ann[b_l]['x']
            y = ann[b_l]['y']
        # rescale to the 640x480 image, then downsample by the stride of 8
        # (the duplicated scaling code of the two branches was merged)
        x = (x*640/size_x)/8
        y = (y*480/size_y)/8
        gt.append((x,y))

    # return the resized image and grid-space ground-truth points
    return img,gt
# Density-map construction
def ground(img, gt):
    """Build the stride-8 downsampled density map for one resized image.

    :param img: image array whose spatial shape fixes the grid size
    :param gt: list of (x, y) points already expressed in grid coordinates
    :return: density map produced by gaussian_filter_density
    """
    rows = int(img.shape[0] / 8)
    cols = int(img.shape[1] / 8)
    k = np.zeros((rows, cols))
    for pt in gt:
        col = int(pt[0])
        row = int(pt[1])
        if row < rows and col < cols:
            k[row, col] = 1
    # blur the impulse map into a density map
    return gaussian_filter_density(k)
# Convert box annotations to points and visualise them on a small preview
qt = []
img = Image.open(content['annotations'][2]['name'])
ann = content['annotations'][2]['annotation']
print(img.size)
# NOTE(review): Image.ANTIALIAS was removed in Pillow 10 -- Image.LANCZOS is
# the drop-in replacement; confirm the Pillow version in use.
temp = img.resize((80, 60),Image.ANTIALIAS)
im,qt = picture_opt(img,ann)
print(im.shape)
print(qt)
for a in range(len(qt)):
    for x in range(temp.size[0]):
        for y in range(temp.size[1]):
            if(x > (qt[a][0]-1) and x < (qt[a][0]+1) and y > qt[a][1]-1 and y < (qt[a][1]+1)): # pixels within +-1 of the point
                temp.putpixel((x,y),(255,0,0)) # turn the selected pixels red
plt.imshow(temp)
k = ground(im,qt)
# Define the training-data generator
def train_set():
    """Return a generator factory yielding (image, density_map) pairs.

    Images are CHW float32 arrays; density maps are the transposed stride-8
    ground truth.  Annotated ignore regions are painted black before the
    density map is built.
    """
    def inner():
        for ig_index in range(2000):  # iterate over all images
            # skip known-bad samples
            if len(content['annotations'][ig_index]['annotation']) == 2: continue
            if len(content['annotations'][ig_index]['annotation']) == 3: continue
            if content['annotations'][ig_index]['name'] == 'train/8538edb45aaf7df78336aa5b49001be6.jpg': continue
            if content['annotations'][ig_index]['name'] == 'train/377df0a7a9abc44e840e938521df3b54.jpg': continue
            if content['annotations'][ig_index]['ignore_region']:  # blank out ignore regions with zero pixels
                ig_list = []  # vertices of ignore region 1
                ig_list1 = []  # vertices of ignore region 2
                # print(content['annotations'][ig_index]['ignore_region'])
                if len(content['annotations'][ig_index]['ignore_region']) == 1:  # each image has at most 2 ignore regions; this handles 1
                    # print('ig1',ig_index)
                    ign_rge = content['annotations'][ig_index]['ignore_region'][0]  # take the first ignore region
                    for ig_len in range(len(ign_rge)):  # collect the polygon vertices
                        ig_list.append([ign_rge[ig_len]['x'], ign_rge[ig_len]['y']])  # each vertex as an [x, y] pair
                    ig_cv_img = cv2.imread(content['annotations'][ig_index]['name'])  # read the image with cv2
                    pts = np.array(ig_list, np.int32)  # to ndarray, as fillPoly requires
                    cv2.fillPoly(ig_cv_img, [pts], (0, 0, 0), cv2.LINE_AA)  # paint the ignore polygon black
                    ig_img = Image.fromarray(cv2.cvtColor(ig_cv_img, cv2.COLOR_BGR2RGB))  # cv2 -> PIL
                    ann = content['annotations'][ig_index]['annotation']  # read all annotations
                    ig_im, gt = picture_opt(ig_img, ann)
                    k = ground(ig_im, gt)
                    groundtruth = np.asarray(k)
                    groundtruth = groundtruth.T.astype('float32')
                    ig_im = ig_im.transpose().astype('float32')
                    yield ig_im, groundtruth
                if len(content['annotations'][ig_index]['ignore_region']) == 2:  # two ignore regions
                    # print('ig2',ig_index)
                    ign_rge = content['annotations'][ig_index]['ignore_region'][0]
                    ign_rge1 = content['annotations'][ig_index]['ignore_region'][1]
                    for ig_len in range(len(ign_rge)):
                        ig_list.append([ign_rge[ig_len]['x'], ign_rge[ig_len]['y']])
                    for ig_len1 in range(len(ign_rge1)):
                        ig_list1.append([ign_rge1[ig_len1]['x'], ign_rge1[ig_len1]['y']])
                    ig_cv_img2 = cv2.imread(content['annotations'][ig_index]['name'])
                    pts = np.array(ig_list, np.int32)
                    pts1 = np.array(ig_list1, np.int32)
                    cv2.fillPoly(ig_cv_img2, [pts], (0, 0, 0), cv2.LINE_AA)
                    cv2.fillPoly(ig_cv_img2, [pts1], (0, 0, 0), cv2.LINE_AA)
                    ig_img2 = Image.fromarray(cv2.cvtColor(ig_cv_img2, cv2.COLOR_BGR2RGB))  # cv2 -> PIL
                    ann = content['annotations'][ig_index]['annotation']  # read all annotations
                    ig_im, gt = picture_opt(ig_img2, ann)
                    k = ground(ig_im, gt)
                    # NOTE(review): the next line overwrites the density map
                    # just computed with all zeros, so two-ignore-region images
                    # train against an empty ground truth -- this looks
                    # unintentional; confirm before relying on it.
                    k = np.zeros((int(ig_im.shape[0] / 8), int(ig_im.shape[1] / 8)))
                    groundtruth = np.asarray(k)
                    groundtruth = groundtruth.T.astype('float32')
                    ig_im = ig_im.transpose().astype('float32')
                    yield ig_im, groundtruth
            else:
                # no ignore region: use the image as-is
                # print('else',ig_index,content['annotations'][ig_index]['name'])
                img = Image.open(content['annotations'][ig_index]['name'])
                ann = content['annotations'][ig_index]['annotation']  # read all annotations
                im, gt = picture_opt(img, ann)
                k = ground(im, gt)
                groundtruth = np.asarray(k)
                groundtruth = groundtruth.T.astype('float32')
                im = im.transpose().astype('float32')
                yield im, groundtruth
    return inner
BATCH_SIZE= 2 # samples per batch (the original comment claimed 10)
# Build the training reader: shuffle with a small buffer, then batch
train_reader = paddle.batch(
    paddle.reader.shuffle(
        train_set(), buf_size=5),
    batch_size=BATCH_SIZE)
def crowd_deconv_without_bn(img):
    """VGG-16-style front end of the crowd-counting network.

    Three conv blocks separated by 2x2 max pooling (total stride 8), followed
    by a 512-channel block, producing the feature map fed to dilations_cnn.
    NOTE(review): several conv layers use act='relu' immediately followed by
    batch_norm(..., act='relu'); the doubled activation looks accidental --
    confirm before retraining.
    """
    x = img
    # block 1: two 3x3 convs, 64 channels
    x = fluid.layers.conv2d(input=x, num_filters=64, filter_size=3, padding=1, act='relu')
    x = fluid.layers.batch_norm(input=x, act='relu')
    x = fluid.layers.conv2d(input=x, num_filters=64, filter_size=3, padding=1, act='relu')
    print('3-64-2', x.shape)
    x = fluid.layers.pool2d(input=x, pool_size=2, pool_stride=2)
    x = fluid.layers.dropout(x=x, dropout_prob=0.25)
    print('pool', x.shape)
    # block 2: two 3x3 convs, 128 channels
    x = fluid.layers.conv2d(input=x, num_filters=128, filter_size=3, padding=1, act=None)
    x = fluid.layers.batch_norm(input=x, act='relu')
    x = fluid.layers.conv2d(input=x, num_filters=128, filter_size=3, padding=1, act='relu')
    print('3-128-2', x.shape)
    x = fluid.layers.pool2d(input=x, pool_size=2, pool_stride=2)
    x = fluid.layers.dropout(x=x, dropout_prob=0.25)
    # block 3: three 3x3 convs, 256 channels
    x = fluid.layers.conv2d(input=x, num_filters=256, filter_size=3, padding=1, act='relu')
    x = fluid.layers.batch_norm(input=x, act='relu')
    x = fluid.layers.conv2d(input=x, num_filters=256, filter_size=3, padding=1, act=None)
    x = fluid.layers.batch_norm(input=x, act='relu')
    x = fluid.layers.conv2d(input=x, num_filters=256, filter_size=3, padding=1, act='relu')
    print('3-256-3', x.shape)
    x = fluid.layers.pool2d(input=x, pool_size=2, pool_stride=2)
    x = fluid.layers.dropout(x=x, dropout_prob=0.5)
    # block 4: three 3x3 convs, 512 channels (no further pooling)
    x = fluid.layers.conv2d(input=x, num_filters=512, filter_size=3, padding=1, act='relu')
    x = fluid.layers.dropout(x=x, dropout_prob=0.5)
    x = fluid.layers.conv2d(input=x, num_filters=512, filter_size=3, padding=1, act='relu')
    x = fluid.layers.dropout(x=x, dropout_prob=0.5)
    x = fluid.layers.conv2d(input=x, num_filters=512, filter_size=3, padding=1)
    x = fluid.layers.batch_norm(input=x, act=None)
    print('3-512-3', x.shape)
    print('clowd_net output shape:', x.shape)
    return x
def dilations_cnn(VGG_16_net):
x = VGG_16_net
print(x.shape)
x = fluid.layers.conv2d(input=x, num_filters=512, filter_size=3, padding=2, dilation=2, act='relu')
x = fluid.layers.dropout(x=x, dropout_prob=0.5)
x = fluid.layers.conv2d(input=x, num_filters=512, filter_size=3, padding=2, dilation=2, act='relu')
x = fluid.layers.dropout(x=x, dropout_prob=0.5)
x = fluid.layers.conv2d(input=x, num_filters=512, filter_size=3, padding=2, dilation=2, act='relu')
x = fluid.layers.dropout(x=x, dropout_prob=0.5)
x = fluid.layers.conv2d(input=x, num_filters=256, filter_size=3, padding=2, dilation=2, act='relu')
x = fluid.layers.dropout(x=x, dropout_prob=0.5)
x = fluid.layers.conv2d(input=x, num_filters=128, filter_size=3, padding=2, dilation=2, act='relu')
x = fluid.layers.dropout(x=x, dropout_prob=0.5)
x = fluid.layers.conv2d(input=x, num_filters=64, filter_size=3, padding=2, dilation=2, act='relu')
x = fluid.layers.conv2d(input=x, num_filters=1, filter_size=1, act=None)
print(x.shape)
return x
img_size = [3,640,480]
images = fluid.layers.data(name='images',shape=img_size,dtype='float32')
label = fluid.layers.data(name='label',shape=[1,80,60],dtype='float32')
VGG = crowd_deconv_without_bn(images)
predict = dilations_cnn(VGG)
squar = fluid.layers.square_error_cost(input=predict, label=label)
cost = fluid.layers.sqrt(squar, name=None)
print(cost.shape)
avg_cost = fluid.layers.mean(cost)
print(avg_cost.shape)
# 创建优化器optimizer,下面列举了2种常用的优化器,不同类型优化器选一即可
# 创建Momentum优化器,并设置学习率(learning_rate)、动量(momentum)
# optimizer = fluid.optimizer.Momentum(
# learning_rate=0.001,
# momentum=0.8)
optimizer = fluid.optimizer.AdamOptimizer(learning_rate=1e-6)
# optimizer = fluid.optimizer.SGD(learning_rate=1e-5)
optimizer.minimize(avg_cost)
print('优化')
startup_program = fluid.default_startup_program()
main_program = fluid.default_main_program()
# test_program = fluid.default_main_program().clone(for_test=True)
#optimized = fluid.transpiler.memory_optimize(input_program=fluid.default_main_program(), print_log=False)
# 设置训练场所
use_cuda = False
# use_cuda = True
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
# 创建执行器,palce在程序初始化时设定
exe = fluid.Executor(place)
# 初始化执行器
exe.run(startup_program)
feeder = fluid.DataFeeder(feed_list=[images, label],place=place)
#训练保存
model_save_dir = 'renliuyuce_model6'
train_prompt = "Train cost"
cost_ploter = Ploter(train_prompt)
def event_handler_plot(ploter_title, step, cost):
cost_ploter.append(ploter_title, step, cost)
cost_ploter.plot()
# 只训练1个EPOCH,仅仅是跑通流程
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
EPOCH_NUM = 1
# 开始训练
lists = []
step = 0
for epochs in range(EPOCH_NUM):
# 开始训练
for batch_id, train_data in enumerate(train_reader()): # 遍历train_reader的迭代器,并为数据加上索引batch_id
train_cost, sult, lab, vgg = exe.run(program=main_program, # 运行主程序
feed=feeder.feed(train_data), # 喂入一个batch的数据
fetch_list=[avg_cost, predict, label, VGG]) # fetch均方误差和准确率
if step % 10 == 0:
event_handler_plot(train_prompt, step, train_cost[0])
# print(batch_id)
if batch_id % 100 == 0: # 每100次batch打印一次训练、进行一次测试
p = [np.sum(pre) for pre in sult]
l = [np.sum(pre) for pre in lab]
print(p, l, np.sum(sult), np.sum(lab))
print('Pass:%d, Batch:%d, Cost:%0.5f' % (epochs, batch_id, train_cost[0]))
step += 1
# 保存模型
if model_save_dir is not None:
fluid.io.save_inference_model(model_save_dir, ['images'], [predict], exe)
print('训练模型保存完成!')
end = time.time()
print(time.strftime('V100训练用时:%M分%S秒', time.localtime(end - start)))
# 测试图片
import numpy as np
from PIL import Image
import paddle.fluid as fluid
import matplotlib.pyplot as plt
import zipfile
test_zfile = zipfile.ZipFile("data/test_new.zip")
l_test = []
for test_fname in test_zfile.namelist()[1:]:
l_test.append(test_fname)
test_img = Image.open(l_test[0])
plt.imshow(test_img)
test_img = test_img.resize((640, 480))
test_im = np.array(test_img)
test_im = test_im / 255.0
test_im = test_im.transpose().reshape(1, 3, 640, 480).astype('float32')
use = True
place1 = fluid.CUDAPlace(0) if use else fluid.CPUPlace()
# 定义一个executor
infer_exe = fluid.Executor(place1)
inference_scope = fluid.core.Scope() # 要想运行一个网络,需要指明它运行所在的域,确切的说: exe.Run(&scope) 。
model_save_dir = 'renliuyuce_model6'
with fluid.scope_guard(inference_scope):
# 获取训练好的模型
# 从指定目录中加载 推理model(inference model)
[inference_program, # 预测用的program
feed_target_names, # 是一个str列表,它包含需要在推理 Program 中提供数据的变量的名称。
fetch_targets] = fluid.io.load_inference_model(model_save_dir, # fetch_targets:是一个 Variable 列表,从中我们可以得到推断结果。
infer_exe) # infer_exe: 运行 inference model的 executor
results = infer_exe.run(inference_program, # 运行预测程序
feed={feed_target_names[0]: test_im}, # 喂入要预测的img
fetch_list=fetch_targets) # 得到推测结果
result = results[0][0][0]
print(result)
plt.imshow(result, cmap=CM.jet)
print(np.sum(results[0]))
# 测试输出保存CSV,仅测试了100个样本,输出结果每行代表一个样本,分布为标号 样本名称 人流密度
import numpy as np
from PIL import Image
import paddle.fluid as fluid
import matplotlib.pyplot as plt
import zipfile
test_zfile = zipfile.ZipFile("data/data1917/test_new.zip")
l_test = []
for test_fname in test_zfile.namelist()[1:]:
# print(fname)
l_test.append(test_fname)
use = True
place1 = fluid.CUDAPlace(0) if use else fluid.CPUPlace()
infer_exe = fluid.Executor(place1)
inference_scope = fluid.core.Scope()
model_save_dir = 'renliuyuce_model6'
data_dict = {}
with fluid.scope_guard(inference_scope):
[inference_program,
feed_target_names,
fetch_targets] = fluid.io.load_inference_model(model_save_dir, infer_exe)
for index in range(100):
test_img = Image.open(l_test[index])
test_img = test_img.resize((640, 480))
test_im = np.array(test_img)
test_im = test_im / 255.0
test_im = test_im.transpose().reshape(1, 3, 640, 480).astype('float32')
l_test[index] = l_test[index].lstrip('test').lstrip('/')
results = infer_exe.run(inference_program, # 运行预测程序
feed={feed_target_names[0]: test_im}, # 喂入要预测的img
fetch_list=fetch_targets) # 得到推测结果
# print(people)
people = np.sum(results)
print(index, l_test[index], int(people))
data_dict[l_test[index]] = int(people)
import csv
with open('results7.csv', 'w') as csvfile:
fieldnames = ['id', 'predicted']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for k, v in data_dict.items():
writer.writerow({'id': k, 'predicted': v})
| [
"paddle.fluid.layers.sqrt",
"csv.DictWriter",
"paddle.fluid.DataFeeder",
"zipfile.ZipFile",
"scipy.ndimage.filters.gaussian_filter",
"paddle.fluid.layers.data",
"numpy.count_nonzero",
"numpy.array",
"paddle.fluid.Executor",
"matplotlib.pyplot.imshow",
"paddle.utils.plot.Ploter",
"paddle.fluid.... | [((431, 442), 'time.time', 'time.time', ([], {}), '()\n', (440, 442), False, 'import time\n'), ((521, 533), 'json.load', 'json.load', (['f'], {}), '(f)\n', (530, 533), False, 'import json\n'), ((1133, 1170), 'zipfile.ZipFile', 'zipfile.ZipFile', (['"""data/train_new.zip"""'], {}), "('data/train_new.zip')\n", (1148, 1170), False, 'import zipfile\n'), ((1303, 1319), 'PIL.Image.open', 'Image.open', (['name'], {}), '(name)\n', (1313, 1319), False, 'from PIL import Image\n'), ((1320, 1334), 'matplotlib.pyplot.imshow', 'plt.imshow', (['im'], {}), '(im)\n', (1330, 1334), True, 'import matplotlib.pyplot as plt\n'), ((1766, 1785), 'matplotlib.pyplot.imshow', 'plt.imshow', (['new_img'], {}), '(new_img)\n', (1776, 1785), True, 'import matplotlib.pyplot as plt\n'), ((2877, 2891), 'matplotlib.pyplot.imshow', 'plt.imshow', (['im'], {}), '(im)\n', (2887, 2891), True, 'import matplotlib.pyplot as plt\n'), ((4808, 4825), 'PIL.Image.open', 'Image.open', (['name1'], {}), '(name1)\n', (4818, 4825), False, 'from PIL import Image\n'), ((5370, 5385), 'matplotlib.pyplot.imshow', 'plt.imshow', (['im1'], {}), '(im1)\n', (5380, 5385), True, 'import matplotlib.pyplot as plt\n'), ((5496, 5508), 'numpy.array', 'np.array', (['gt'], {}), '(gt)\n', (5504, 5508), True, 'import numpy as np\n'), ((6590, 6607), 'matplotlib.pyplot.imread', 'plt.imread', (['name1'], {}), '(name1)\n', (6600, 6607), True, 'import matplotlib.pyplot as plt\n'), ((6612, 6650), 'numpy.zeros', 'np.zeros', (['(img.shape[0], img.shape[1])'], {}), '((img.shape[0], img.shape[1]))\n', (6620, 6650), True, 'import numpy as np\n'), ((6881, 6894), 'numpy.asarray', 'np.asarray', (['k'], {}), '(k)\n', (6891, 6894), True, 'import numpy as np\n'), ((6964, 7000), 'matplotlib.pyplot.imshow', 'plt.imshow', (['groundtruth'], {'cmap': 'CM.jet'}), '(groundtruth, cmap=CM.jet)\n', (6974, 7000), True, 'import matplotlib.pyplot as plt\n'), ((8116, 8161), 'PIL.Image.open', 'Image.open', (["content['annotations'][2]['name']"], {}), 
"(content['annotations'][2]['name'])\n", (8126, 8161), False, 'from PIL import Image\n'), ((8601, 8617), 'matplotlib.pyplot.imshow', 'plt.imshow', (['temp'], {}), '(temp)\n', (8611, 8617), True, 'import matplotlib.pyplot as plt\n'), ((15537, 15602), 'paddle.fluid.layers.data', 'fluid.layers.data', ([], {'name': '"""images"""', 'shape': 'img_size', 'dtype': '"""float32"""'}), "(name='images', shape=img_size, dtype='float32')\n", (15554, 15602), True, 'import paddle.fluid as fluid\n'), ((15609, 15676), 'paddle.fluid.layers.data', 'fluid.layers.data', ([], {'name': '"""label"""', 'shape': '[1, 80, 60]', 'dtype': '"""float32"""'}), "(name='label', shape=[1, 80, 60], dtype='float32')\n", (15626, 15676), True, 'import paddle.fluid as fluid\n'), ((15750, 15808), 'paddle.fluid.layers.square_error_cost', 'fluid.layers.square_error_cost', ([], {'input': 'predict', 'label': 'label'}), '(input=predict, label=label)\n', (15780, 15808), True, 'import paddle.fluid as fluid\n'), ((15816, 15851), 'paddle.fluid.layers.sqrt', 'fluid.layers.sqrt', (['squar'], {'name': 'None'}), '(squar, name=None)\n', (15833, 15851), True, 'import paddle.fluid as fluid\n'), ((15881, 15904), 'paddle.fluid.layers.mean', 'fluid.layers.mean', (['cost'], {}), '(cost)\n', (15898, 15904), True, 'import paddle.fluid as fluid\n'), ((16122, 16172), 'paddle.fluid.optimizer.AdamOptimizer', 'fluid.optimizer.AdamOptimizer', ([], {'learning_rate': '(1e-06)'}), '(learning_rate=1e-06)\n', (16151, 16172), True, 'import paddle.fluid as fluid\n'), ((16287, 16318), 'paddle.fluid.default_startup_program', 'fluid.default_startup_program', ([], {}), '()\n', (16316, 16318), True, 'import paddle.fluid as fluid\n'), ((16334, 16362), 'paddle.fluid.default_main_program', 'fluid.default_main_program', ([], {}), '()\n', (16360, 16362), True, 'import paddle.fluid as fluid\n'), ((16673, 16694), 'paddle.fluid.Executor', 'fluid.Executor', (['place'], {}), '(place)\n', (16687, 16694), True, 'import paddle.fluid as fluid\n'), ((16739, 
16795), 'paddle.fluid.DataFeeder', 'fluid.DataFeeder', ([], {'feed_list': '[images, label]', 'place': 'place'}), '(feed_list=[images, label], place=place)\n', (16755, 16795), True, 'import paddle.fluid as fluid\n'), ((16882, 16902), 'paddle.utils.plot.Ploter', 'Ploter', (['train_prompt'], {}), '(train_prompt)\n', (16888, 16902), False, 'from paddle.utils.plot import Ploter\n'), ((18063, 18074), 'time.time', 'time.time', ([], {}), '()\n', (18072, 18074), False, 'import time\n'), ((18284, 18320), 'zipfile.ZipFile', 'zipfile.ZipFile', (['"""data/test_new.zip"""'], {}), "('data/test_new.zip')\n", (18299, 18320), False, 'import zipfile\n'), ((18417, 18438), 'PIL.Image.open', 'Image.open', (['l_test[0]'], {}), '(l_test[0])\n', (18427, 18438), False, 'from PIL import Image\n'), ((18440, 18460), 'matplotlib.pyplot.imshow', 'plt.imshow', (['test_img'], {}), '(test_img)\n', (18450, 18460), True, 'import matplotlib.pyplot as plt\n'), ((18510, 18528), 'numpy.array', 'np.array', (['test_img'], {}), '(test_img)\n', (18518, 18528), True, 'import numpy as np\n'), ((18723, 18745), 'paddle.fluid.Executor', 'fluid.Executor', (['place1'], {}), '(place1)\n', (18737, 18745), True, 'import paddle.fluid as fluid\n'), ((18765, 18783), 'paddle.fluid.core.Scope', 'fluid.core.Scope', ([], {}), '()\n', (18781, 18783), True, 'import paddle.fluid as fluid\n'), ((19756, 19801), 'zipfile.ZipFile', 'zipfile.ZipFile', (['"""data/data1917/test_new.zip"""'], {}), "('data/data1917/test_new.zip')\n", (19771, 19801), False, 'import zipfile\n'), ((19983, 20005), 'paddle.fluid.Executor', 'fluid.Executor', (['place1'], {}), '(place1)\n', (19997, 20005), True, 'import paddle.fluid as fluid\n'), ((20025, 20043), 'paddle.fluid.core.Scope', 'fluid.core.Scope', ([], {}), '()\n', (20041, 20043), True, 'import paddle.fluid as fluid\n'), ((3297, 3345), 'PIL.Image.open', 'Image.open', (["content['annotations'][inde]['name']"], {}), "(content['annotations'][inde]['name'])\n", (3307, 3345), False, 'from PIL import 
Image\n'), ((5661, 5697), 'numpy.zeros', 'np.zeros', (['gt.shape'], {'dtype': 'np.float32'}), '(gt.shape, dtype=np.float32)\n', (5669, 5697), True, 'import numpy as np\n'), ((5729, 5749), 'numpy.count_nonzero', 'np.count_nonzero', (['gt'], {}), '(gt)\n', (5745, 5749), True, 'import numpy as np\n'), ((7016, 7035), 'numpy.sum', 'np.sum', (['groundtruth'], {}), '(groundtruth)\n', (7022, 7035), True, 'import numpy as np\n'), ((7226, 7239), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (7234, 7239), True, 'import numpy as np\n'), ((12150, 12237), 'paddle.fluid.layers.conv2d', 'fluid.layers.conv2d', ([], {'input': 'x', 'num_filters': '(64)', 'filter_size': '(3)', 'padding': '(1)', 'act': '"""relu"""'}), "(input=x, num_filters=64, filter_size=3, padding=1, act=\n 'relu')\n", (12169, 12237), True, 'import paddle.fluid as fluid\n'), ((12238, 12282), 'paddle.fluid.layers.batch_norm', 'fluid.layers.batch_norm', ([], {'input': 'x', 'act': '"""relu"""'}), "(input=x, act='relu')\n", (12261, 12282), True, 'import paddle.fluid as fluid\n'), ((12288, 12375), 'paddle.fluid.layers.conv2d', 'fluid.layers.conv2d', ([], {'input': 'x', 'num_filters': '(64)', 'filter_size': '(3)', 'padding': '(1)', 'act': '"""relu"""'}), "(input=x, num_filters=64, filter_size=3, padding=1, act=\n 'relu')\n", (12307, 12375), True, 'import paddle.fluid as fluid\n'), ((12402, 12458), 'paddle.fluid.layers.pool2d', 'fluid.layers.pool2d', ([], {'input': 'x', 'pool_size': '(2)', 'pool_stride': '(2)'}), '(input=x, pool_size=2, pool_stride=2)\n', (12421, 12458), True, 'import paddle.fluid as fluid\n'), ((12464, 12508), 'paddle.fluid.layers.dropout', 'fluid.layers.dropout', ([], {'x': 'x', 'dropout_prob': '(0.25)'}), '(x=x, dropout_prob=0.25)\n', (12484, 12508), True, 'import paddle.fluid as fluid\n'), ((12539, 12625), 'paddle.fluid.layers.conv2d', 'fluid.layers.conv2d', ([], {'input': 'x', 'num_filters': '(128)', 'filter_size': '(3)', 'padding': '(1)', 'act': 'None'}), '(input=x, num_filters=128, 
filter_size=3, padding=1, act\n =None)\n', (12558, 12625), True, 'import paddle.fluid as fluid\n'), ((12626, 12670), 'paddle.fluid.layers.batch_norm', 'fluid.layers.batch_norm', ([], {'input': 'x', 'act': '"""relu"""'}), "(input=x, act='relu')\n", (12649, 12670), True, 'import paddle.fluid as fluid\n'), ((12676, 12764), 'paddle.fluid.layers.conv2d', 'fluid.layers.conv2d', ([], {'input': 'x', 'num_filters': '(128)', 'filter_size': '(3)', 'padding': '(1)', 'act': '"""relu"""'}), "(input=x, num_filters=128, filter_size=3, padding=1, act\n ='relu')\n", (12695, 12764), True, 'import paddle.fluid as fluid\n'), ((12792, 12848), 'paddle.fluid.layers.pool2d', 'fluid.layers.pool2d', ([], {'input': 'x', 'pool_size': '(2)', 'pool_stride': '(2)'}), '(input=x, pool_size=2, pool_stride=2)\n', (12811, 12848), True, 'import paddle.fluid as fluid\n'), ((12854, 12898), 'paddle.fluid.layers.dropout', 'fluid.layers.dropout', ([], {'x': 'x', 'dropout_prob': '(0.25)'}), '(x=x, dropout_prob=0.25)\n', (12874, 12898), True, 'import paddle.fluid as fluid\n'), ((12905, 12993), 'paddle.fluid.layers.conv2d', 'fluid.layers.conv2d', ([], {'input': 'x', 'num_filters': '(256)', 'filter_size': '(3)', 'padding': '(1)', 'act': '"""relu"""'}), "(input=x, num_filters=256, filter_size=3, padding=1, act\n ='relu')\n", (12924, 12993), True, 'import paddle.fluid as fluid\n'), ((12994, 13038), 'paddle.fluid.layers.batch_norm', 'fluid.layers.batch_norm', ([], {'input': 'x', 'act': '"""relu"""'}), "(input=x, act='relu')\n", (13017, 13038), True, 'import paddle.fluid as fluid\n'), ((13044, 13130), 'paddle.fluid.layers.conv2d', 'fluid.layers.conv2d', ([], {'input': 'x', 'num_filters': '(256)', 'filter_size': '(3)', 'padding': '(1)', 'act': 'None'}), '(input=x, num_filters=256, filter_size=3, padding=1, act\n =None)\n', (13063, 13130), True, 'import paddle.fluid as fluid\n'), ((13131, 13175), 'paddle.fluid.layers.batch_norm', 'fluid.layers.batch_norm', ([], {'input': 'x', 'act': '"""relu"""'}), "(input=x, 
act='relu')\n", (13154, 13175), True, 'import paddle.fluid as fluid\n'), ((13181, 13269), 'paddle.fluid.layers.conv2d', 'fluid.layers.conv2d', ([], {'input': 'x', 'num_filters': '(256)', 'filter_size': '(3)', 'padding': '(1)', 'act': '"""relu"""'}), "(input=x, num_filters=256, filter_size=3, padding=1, act\n ='relu')\n", (13200, 13269), True, 'import paddle.fluid as fluid\n'), ((13297, 13353), 'paddle.fluid.layers.pool2d', 'fluid.layers.pool2d', ([], {'input': 'x', 'pool_size': '(2)', 'pool_stride': '(2)'}), '(input=x, pool_size=2, pool_stride=2)\n', (13316, 13353), True, 'import paddle.fluid as fluid\n'), ((13359, 13402), 'paddle.fluid.layers.dropout', 'fluid.layers.dropout', ([], {'x': 'x', 'dropout_prob': '(0.5)'}), '(x=x, dropout_prob=0.5)\n', (13379, 13402), True, 'import paddle.fluid as fluid\n'), ((13879, 13967), 'paddle.fluid.layers.conv2d', 'fluid.layers.conv2d', ([], {'input': 'x', 'num_filters': '(512)', 'filter_size': '(3)', 'padding': '(1)', 'act': '"""relu"""'}), "(input=x, num_filters=512, filter_size=3, padding=1, act\n ='relu')\n", (13898, 13967), True, 'import paddle.fluid as fluid\n'), ((13968, 14011), 'paddle.fluid.layers.dropout', 'fluid.layers.dropout', ([], {'x': 'x', 'dropout_prob': '(0.5)'}), '(x=x, dropout_prob=0.5)\n', (13988, 14011), True, 'import paddle.fluid as fluid\n'), ((14017, 14105), 'paddle.fluid.layers.conv2d', 'fluid.layers.conv2d', ([], {'input': 'x', 'num_filters': '(512)', 'filter_size': '(3)', 'padding': '(1)', 'act': '"""relu"""'}), "(input=x, num_filters=512, filter_size=3, padding=1, act\n ='relu')\n", (14036, 14105), True, 'import paddle.fluid as fluid\n'), ((14106, 14149), 'paddle.fluid.layers.dropout', 'fluid.layers.dropout', ([], {'x': 'x', 'dropout_prob': '(0.5)'}), '(x=x, dropout_prob=0.5)\n', (14126, 14149), True, 'import paddle.fluid as fluid\n'), ((14155, 14226), 'paddle.fluid.layers.conv2d', 'fluid.layers.conv2d', ([], {'input': 'x', 'num_filters': '(512)', 'filter_size': '(3)', 'padding': '(1)'}), '(input=x, 
num_filters=512, filter_size=3, padding=1)\n', (14174, 14226), True, 'import paddle.fluid as fluid\n'), ((14232, 14274), 'paddle.fluid.layers.batch_norm', 'fluid.layers.batch_norm', ([], {'input': 'x', 'act': 'None'}), '(input=x, act=None)\n', (14255, 14274), True, 'import paddle.fluid as fluid\n'), ((14557, 14656), 'paddle.fluid.layers.conv2d', 'fluid.layers.conv2d', ([], {'input': 'x', 'num_filters': '(512)', 'filter_size': '(3)', 'padding': '(2)', 'dilation': '(2)', 'act': '"""relu"""'}), "(input=x, num_filters=512, filter_size=3, padding=2,\n dilation=2, act='relu')\n", (14576, 14656), True, 'import paddle.fluid as fluid\n'), ((14658, 14701), 'paddle.fluid.layers.dropout', 'fluid.layers.dropout', ([], {'x': 'x', 'dropout_prob': '(0.5)'}), '(x=x, dropout_prob=0.5)\n', (14678, 14701), True, 'import paddle.fluid as fluid\n'), ((14707, 14806), 'paddle.fluid.layers.conv2d', 'fluid.layers.conv2d', ([], {'input': 'x', 'num_filters': '(512)', 'filter_size': '(3)', 'padding': '(2)', 'dilation': '(2)', 'act': '"""relu"""'}), "(input=x, num_filters=512, filter_size=3, padding=2,\n dilation=2, act='relu')\n", (14726, 14806), True, 'import paddle.fluid as fluid\n'), ((14808, 14851), 'paddle.fluid.layers.dropout', 'fluid.layers.dropout', ([], {'x': 'x', 'dropout_prob': '(0.5)'}), '(x=x, dropout_prob=0.5)\n', (14828, 14851), True, 'import paddle.fluid as fluid\n'), ((14857, 14956), 'paddle.fluid.layers.conv2d', 'fluid.layers.conv2d', ([], {'input': 'x', 'num_filters': '(512)', 'filter_size': '(3)', 'padding': '(2)', 'dilation': '(2)', 'act': '"""relu"""'}), "(input=x, num_filters=512, filter_size=3, padding=2,\n dilation=2, act='relu')\n", (14876, 14956), True, 'import paddle.fluid as fluid\n'), ((14958, 15001), 'paddle.fluid.layers.dropout', 'fluid.layers.dropout', ([], {'x': 'x', 'dropout_prob': '(0.5)'}), '(x=x, dropout_prob=0.5)\n', (14978, 15001), True, 'import paddle.fluid as fluid\n'), ((15007, 15106), 'paddle.fluid.layers.conv2d', 'fluid.layers.conv2d', ([], {'input': 
'x', 'num_filters': '(256)', 'filter_size': '(3)', 'padding': '(2)', 'dilation': '(2)', 'act': '"""relu"""'}), "(input=x, num_filters=256, filter_size=3, padding=2,\n dilation=2, act='relu')\n", (15026, 15106), True, 'import paddle.fluid as fluid\n'), ((15108, 15151), 'paddle.fluid.layers.dropout', 'fluid.layers.dropout', ([], {'x': 'x', 'dropout_prob': '(0.5)'}), '(x=x, dropout_prob=0.5)\n', (15128, 15151), True, 'import paddle.fluid as fluid\n'), ((15157, 15256), 'paddle.fluid.layers.conv2d', 'fluid.layers.conv2d', ([], {'input': 'x', 'num_filters': '(128)', 'filter_size': '(3)', 'padding': '(2)', 'dilation': '(2)', 'act': '"""relu"""'}), "(input=x, num_filters=128, filter_size=3, padding=2,\n dilation=2, act='relu')\n", (15176, 15256), True, 'import paddle.fluid as fluid\n'), ((15258, 15301), 'paddle.fluid.layers.dropout', 'fluid.layers.dropout', ([], {'x': 'x', 'dropout_prob': '(0.5)'}), '(x=x, dropout_prob=0.5)\n', (15278, 15301), True, 'import paddle.fluid as fluid\n'), ((15307, 15405), 'paddle.fluid.layers.conv2d', 'fluid.layers.conv2d', ([], {'input': 'x', 'num_filters': '(64)', 'filter_size': '(3)', 'padding': '(2)', 'dilation': '(2)', 'act': '"""relu"""'}), "(input=x, num_filters=64, filter_size=3, padding=2,\n dilation=2, act='relu')\n", (15326, 15405), True, 'import paddle.fluid as fluid\n'), ((15408, 15476), 'paddle.fluid.layers.conv2d', 'fluid.layers.conv2d', ([], {'input': 'x', 'num_filters': '(1)', 'filter_size': '(1)', 'act': 'None'}), '(input=x, num_filters=1, filter_size=1, act=None)\n', (15427, 15476), True, 'import paddle.fluid as fluid\n'), ((16591, 16609), 'paddle.fluid.CUDAPlace', 'fluid.CUDAPlace', (['(0)'], {}), '(0)\n', (16606, 16609), True, 'import paddle.fluid as fluid\n'), ((16627, 16643), 'paddle.fluid.CPUPlace', 'fluid.CPUPlace', ([], {}), '()\n', (16641, 16643), True, 'import paddle.fluid as fluid\n'), ((18648, 18666), 'paddle.fluid.CUDAPlace', 'fluid.CUDAPlace', (['(0)'], {}), '(0)\n', (18663, 18666), True, 'import paddle.fluid as 
fluid\n'), ((18679, 18695), 'paddle.fluid.CPUPlace', 'fluid.CPUPlace', ([], {}), '()\n', (18693, 18695), True, 'import paddle.fluid as fluid\n'), ((18876, 18910), 'paddle.fluid.scope_guard', 'fluid.scope_guard', (['inference_scope'], {}), '(inference_scope)\n', (18893, 18910), True, 'import paddle.fluid as fluid\n'), ((19079, 19135), 'paddle.fluid.io.load_inference_model', 'fluid.io.load_inference_model', (['model_save_dir', 'infer_exe'], {}), '(model_save_dir, infer_exe)\n', (19108, 19135), True, 'import paddle.fluid as fluid\n'), ((19511, 19542), 'matplotlib.pyplot.imshow', 'plt.imshow', (['result'], {'cmap': 'CM.jet'}), '(result, cmap=CM.jet)\n', (19521, 19542), True, 'import matplotlib.pyplot as plt\n'), ((19923, 19941), 'paddle.fluid.CUDAPlace', 'fluid.CUDAPlace', (['(0)'], {}), '(0)\n', (19938, 19941), True, 'import paddle.fluid as fluid\n'), ((19954, 19970), 'paddle.fluid.CPUPlace', 'fluid.CPUPlace', ([], {}), '()\n', (19968, 19970), True, 'import paddle.fluid as fluid\n'), ((20103, 20137), 'paddle.fluid.scope_guard', 'fluid.scope_guard', (['inference_scope'], {}), '(inference_scope)\n', (20120, 20137), True, 'import paddle.fluid as fluid\n'), ((20200, 20256), 'paddle.fluid.io.load_inference_model', 'fluid.io.load_inference_model', (['model_save_dir', 'infer_exe'], {}), '(model_save_dir, infer_exe)\n', (20229, 20256), True, 'import paddle.fluid as fluid\n'), ((20982, 21028), 'csv.DictWriter', 'csv.DictWriter', (['csvfile'], {'fieldnames': 'fieldnames'}), '(csvfile, fieldnames=fieldnames)\n', (20996, 21028), False, 'import csv\n'), ((6187, 6223), 'numpy.zeros', 'np.zeros', (['gt.shape'], {'dtype': 'np.float32'}), '(gt.shape, dtype=np.float32)\n', (6195, 6223), True, 'import numpy as np\n'), ((6480, 6547), 'scipy.ndimage.filters.gaussian_filter', 'scipy.ndimage.filters.gaussian_filter', (['pt2d', 'sigma'], {'mode': '"""constant"""'}), "(pt2d, sigma, mode='constant')\n", (6517, 6547), False, 'import scipy\n'), ((17962, 18035), 
'paddle.fluid.io.save_inference_model', 'fluid.io.save_inference_model', (['model_save_dir', "['images']", '[predict]', 'exe'], {}), "(model_save_dir, ['images'], [predict], exe)\n", (17991, 18035), True, 'import paddle.fluid as fluid\n'), ((18129, 18156), 'time.localtime', 'time.localtime', (['(end - start)'], {}), '(end - start)\n', (18143, 18156), False, 'import time\n'), ((19551, 19569), 'numpy.sum', 'np.sum', (['results[0]'], {}), '(results[0])\n', (19557, 19569), True, 'import numpy as np\n'), ((20297, 20322), 'PIL.Image.open', 'Image.open', (['l_test[index]'], {}), '(l_test[index])\n', (20307, 20322), False, 'from PIL import Image\n'), ((20376, 20394), 'numpy.array', 'np.array', (['test_img'], {}), '(test_img)\n', (20384, 20394), True, 'import numpy as np\n'), ((20781, 20796), 'numpy.sum', 'np.sum', (['results'], {}), '(results)\n', (20787, 20796), True, 'import numpy as np\n'), ((11589, 11641), 'PIL.Image.open', 'Image.open', (["content['annotations'][ig_index]['name']"], {}), "(content['annotations'][ig_index]['name'])\n", (11599, 11641), False, 'from PIL import Image\n'), ((11793, 11806), 'numpy.asarray', 'np.asarray', (['k'], {}), '(k)\n', (11803, 11806), True, 'import numpy as np\n'), ((17717, 17728), 'numpy.sum', 'np.sum', (['pre'], {}), '(pre)\n', (17723, 17728), True, 'import numpy as np\n'), ((17755, 17766), 'numpy.sum', 'np.sum', (['pre'], {}), '(pre)\n', (17761, 17766), True, 'import numpy as np\n'), ((17799, 17811), 'numpy.sum', 'np.sum', (['sult'], {}), '(sult)\n', (17805, 17811), True, 'import numpy as np\n'), ((17813, 17824), 'numpy.sum', 'np.sum', (['lab'], {}), '(lab)\n', (17819, 17824), True, 'import numpy as np\n'), ((9656, 9708), 'cv2.imread', 'cv2.imread', (["content['annotations'][ig_index]['name']"], {}), "(content['annotations'][ig_index]['name'])\n", (9666, 9708), False, 'import cv2\n'), ((9734, 9761), 'numpy.array', 'np.array', (['ig_list', 'np.int32'], {}), '(ig_list, np.int32)\n', (9742, 9761), True, 'import numpy as np\n'), 
((9805, 9859), 'cv2.fillPoly', 'cv2.fillPoly', (['ig_cv_img', '[pts]', '(0, 0, 0)', 'cv2.LINE_AA'], {}), '(ig_cv_img, [pts], (0, 0, 0), cv2.LINE_AA)\n', (9817, 9859), False, 'import cv2\n'), ((10146, 10159), 'numpy.asarray', 'np.asarray', (['k'], {}), '(k)\n', (10156, 10159), True, 'import numpy as np\n'), ((10775, 10827), 'cv2.imread', 'cv2.imread', (["content['annotations'][ig_index]['name']"], {}), "(content['annotations'][ig_index]['name'])\n", (10785, 10827), False, 'import cv2\n'), ((10839, 10866), 'numpy.array', 'np.array', (['ig_list', 'np.int32'], {}), '(ig_list, np.int32)\n', (10847, 10866), True, 'import numpy as np\n'), ((10879, 10907), 'numpy.array', 'np.array', (['ig_list1', 'np.int32'], {}), '(ig_list1, np.int32)\n', (10887, 10907), True, 'import numpy as np\n'), ((10913, 10968), 'cv2.fillPoly', 'cv2.fillPoly', (['ig_cv_img2', '[pts]', '(0, 0, 0)', 'cv2.LINE_AA'], {}), '(ig_cv_img2, [pts], (0, 0, 0), cv2.LINE_AA)\n', (10925, 10968), False, 'import cv2\n'), ((10974, 11030), 'cv2.fillPoly', 'cv2.fillPoly', (['ig_cv_img2', '[pts1]', '(0, 0, 0)', 'cv2.LINE_AA'], {}), '(ig_cv_img2, [pts1], (0, 0, 0), cv2.LINE_AA)\n', (10986, 11030), False, 'import cv2\n'), ((11355, 11368), 'numpy.asarray', 'np.asarray', (['k'], {}), '(k)\n', (11365, 11368), True, 'import numpy as np\n'), ((6380, 6398), 'numpy.array', 'np.array', (['gt.shape'], {}), '(gt.shape)\n', (6388, 6398), True, 'import numpy as np\n'), ((9926, 9968), 'cv2.cvtColor', 'cv2.cvtColor', (['ig_cv_img', 'cv2.COLOR_BGR2RGB'], {}), '(ig_cv_img, cv2.COLOR_BGR2RGB)\n', (9938, 9968), False, 'import cv2\n'), ((11063, 11106), 'cv2.cvtColor', 'cv2.cvtColor', (['ig_cv_img2', 'cv2.COLOR_BGR2RGB'], {}), '(ig_cv_img2, cv2.COLOR_BGR2RGB)\n', (11075, 11106), False, 'import cv2\n'), ((5887, 5901), 'numpy.nonzero', 'np.nonzero', (['gt'], {}), '(gt)\n', (5897, 5901), True, 'import numpy as np\n'), ((5914, 5928), 'numpy.nonzero', 'np.nonzero', (['gt'], {}), '(gt)\n', (5924, 5928), True, 'import numpy as np\n')] |
'''
FFT curves submodule for the SLab project
It requires and imports slab.py
History:
Version 1.0 : First version (7/4/2017)
Version 1.1 : Compatibility with Python 3.x (1/3/2018)
'''
from __future__ import print_function
import slab
import slab_ac as ac
import numpy as np # Numpy for math calculations
import pylab as pl # Pylab and Mathplotlib for plotting
import matplotlib.pyplot as plt
import math # Math module
import numbers # Numbers module
# Version information
version_major = 1
version_minor = 0
version_date = "7/4/2017"
###################### INFO FOR THE HELP FILE ##########################
'''
@fft@
FFT Submodule command topics:
ftransform
distortion
'''
###################### FFT COMMANDS ##################
'''
@ftransform@
ftransform(signal,time,ts)
Transforms from time to frequency domain
Uses the FFT of a signal but:
1) Only positive frequencies are provided
2) Factor 2/N applied except for DC that use 1/N
Parameters:
signal : Signal to transform
time : Time vector
ts : Sample time
If neither time nor ts is provided, the command
will use the current sample time
Returns a tuple with:
Complex amplitude vector
Frequency vector
Included in slab_fft.py
'''
def ftransform(signal,time=[],ts=-1):
if time != []:
ts = time[1]-time[0]
elif ts == -1:
ts = sampleTime
data = np.fft.fft(signal)
N = len(data)
FI = 1/(ts*N)
rv = []
fv = []
for i in range(0,N//2):
if i == 0:
rv.append(data[i]/N)
else:
rv.append(2*data[i]/N)
fv.append(i*FI)
return rv,fv
'''
@distortion@
distortion(v1,v2,freq,show)
Generates sine wave at DAC1
Reads circuit output adt ADC1
Calculates four values related to distortion
Noise floor limits measurements
Required parameters:
v1 : Minimum value of sine
v2 : Maximum value of sine
freq : Sine frequency
Optional parameters:
show : Select if plots and text are shown
(Defaults to True)
Returs a four element tuple:
1) THD (%)
2) THD+ N (%)
3) 2nd Harmonic (dBc)
4) 3rd Harmonic (dBc)
Included in slab_fft.py
'''
def distortion(v1,v2,freq,show=True):
points = int(slab.maxSFfresponse/freq)
if points > 100:
points = 100
if points < 50:
raise slab.SlabEx("Frequency too high")
cycles = 10
slab.waveCosine(v1,v2,points)
slab.setWaveFrequency(freq)
slab.tranStore(cycles*points)
t,s = slab.singleWaveResponse()
if show:
slab.plot11(t,s,"Time plot","time(s)","ADC1(V)")
c,f = ftransform(s,t)
if show:
ac.plotFreq(f,c)
# THD
base = np.abs(c[cycles])
tot = 0
for i in range(2,7):
tot = tot + np.abs(c[i*cycles])*np.abs(c[i*cycles])
tot = np.sqrt(tot)
print("tot: " +str(tot))
thd = 100.0 * tot/base
# THD+N
rms_total = std(s)
rms_signal = base/np.sqrt(2.0)
rms_no_signal = np.sqrt(rms_total*rms_total - rms_signal*rms_signal)
thdn = 100.0 * rms_no_signal/rms_signal
# Harmonic Distortion 2nd
h2 = dB(np.abs(c[2*cycles])/base)
# Harmonic Distortion 3rd
h3 = dB(np.abs(c[3*cycles])/base)
if show:
print()
print("THD : " + str(thd) + " %")
print("THD+N : " + str(thdn) + " %")
print("Harmonic distortion 2nd : " + str(h2) + " dBc")
print("Harmonic distortion 3rd : " + str(h3) + " dBc")
print()
return thd,thdn,h2,h3
################## CODE EXECUTED AT IMPORT ####################
# Show version information upon load
slab.message(1,"SLab FFT Submodule")
slab.message(1,"Version "+str(version_major)+"."+str(version_minor)+" ("+version_date+")")
slab.message(1,"")
| [
"slab.singleWaveResponse",
"numpy.abs",
"numpy.sqrt",
"slab.tranStore",
"numpy.fft.fft",
"slab.message",
"slab_ac.plotFreq",
"slab.plot11",
"slab.setWaveFrequency",
"slab.waveCosine",
"slab.SlabEx"
] | [((3856, 3893), 'slab.message', 'slab.message', (['(1)', '"""SLab FFT Submodule"""'], {}), "(1, 'SLab FFT Submodule')\n", (3868, 3893), False, 'import slab\n'), ((3986, 4005), 'slab.message', 'slab.message', (['(1)', '""""""'], {}), "(1, '')\n", (3998, 4005), False, 'import slab\n'), ((1501, 1519), 'numpy.fft.fft', 'np.fft.fft', (['signal'], {}), '(signal)\n', (1511, 1519), True, 'import numpy as np\n'), ((2582, 2613), 'slab.waveCosine', 'slab.waveCosine', (['v1', 'v2', 'points'], {}), '(v1, v2, points)\n', (2597, 2613), False, 'import slab\n'), ((2617, 2644), 'slab.setWaveFrequency', 'slab.setWaveFrequency', (['freq'], {}), '(freq)\n', (2638, 2644), False, 'import slab\n'), ((2650, 2681), 'slab.tranStore', 'slab.tranStore', (['(cycles * points)'], {}), '(cycles * points)\n', (2664, 2681), False, 'import slab\n'), ((2691, 2716), 'slab.singleWaveResponse', 'slab.singleWaveResponse', ([], {}), '()\n', (2714, 2716), False, 'import slab\n'), ((2886, 2903), 'numpy.abs', 'np.abs', (['c[cycles]'], {}), '(c[cycles])\n', (2892, 2903), True, 'import numpy as np\n'), ((3017, 3029), 'numpy.sqrt', 'np.sqrt', (['tot'], {}), '(tot)\n', (3024, 3029), True, 'import numpy as np\n'), ((3190, 3246), 'numpy.sqrt', 'np.sqrt', (['(rms_total * rms_total - rms_signal * rms_signal)'], {}), '(rms_total * rms_total - rms_signal * rms_signal)\n', (3197, 3246), True, 'import numpy as np\n'), ((2512, 2545), 'slab.SlabEx', 'slab.SlabEx', (['"""Frequency too high"""'], {}), "('Frequency too high')\n", (2523, 2545), False, 'import slab\n'), ((2740, 2792), 'slab.plot11', 'slab.plot11', (['t', 's', '"""Time plot"""', '"""time(s)"""', '"""ADC1(V)"""'], {}), "(t, s, 'Time plot', 'time(s)', 'ADC1(V)')\n", (2751, 2792), False, 'import slab\n'), ((2840, 2857), 'slab_ac.plotFreq', 'ac.plotFreq', (['f', 'c'], {}), '(f, c)\n', (2851, 2857), True, 'import slab_ac as ac\n'), ((3156, 3168), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (3163, 3168), True, 'import numpy as np\n'), ((3334, 3355), 
'numpy.abs', 'np.abs', (['c[2 * cycles]'], {}), '(c[2 * cycles])\n', (3340, 3355), True, 'import numpy as np\n'), ((3410, 3431), 'numpy.abs', 'np.abs', (['c[3 * cycles]'], {}), '(c[3 * cycles])\n', (3416, 3431), True, 'import numpy as np\n'), ((2965, 2986), 'numpy.abs', 'np.abs', (['c[i * cycles]'], {}), '(c[i * cycles])\n', (2971, 2986), True, 'import numpy as np\n'), ((2985, 3006), 'numpy.abs', 'np.abs', (['c[i * cycles]'], {}), '(c[i * cycles])\n', (2991, 3006), True, 'import numpy as np\n')] |
import numpy as np
import numba as nb
from dataclasses import dataclass
from numba import types
from numba.typed import Dict
from numba import njit
import pandas as pd
import time
import datetime
import csv
from openpyxl import load_workbook
from pyModbusTCP.client import ModbusClient
from pyModbusTCP import utils
@dataclass
class Data:
    """Common result container for the battery-system performance models.

    NOTE(review): the simulation classes in this module use their nested
    ``Real``/``Ideal`` subclasses of this dataclass as class-level
    namespaces rather than instantiating them -- confirm that this
    state sharing across instances is intended.
    """
    Ppv: np.ndarray  # DC power output of the PV generator in W
    Pbat: np.ndarray  # DC power of the battery in W
    Pperi: np.ndarray  # additional consumption of other system components in W
    soc: np.ndarray  # state of charge of the battery per time step
    soc0: int  # state of charge in the first/previous time step
    Pbs0: int  # AC power of the battery system in the previous time step
    E: dict  # energy sums of the simulation
class BatModDC(object):
    """Performance Simulation Class for DC-coupled PV-Battery systems
    :param parameter: PV battery system parameters
    :type parameter: dict
    :param d: array containing parameters
    :type d: numpy array
    :param ppv: normalized DC power output of the PV generator
    :type ppv: numpy array
    :param pl: AC load power
    :type pl: numpy array
    :param Pr: Residual power for battery charging
    :type Pr: numpy array
    :param Prpv: AC residual power
    :type Prpv: numpy array
    :param Ppv: DC power output of the PV generator
    :type Ppv: numpy array
    :param ppv2ac: Normalized AC output power of the PV2AC conversion pathway to cover the AC power demand
    :type ppv2ac: numpy array
    :param Ppv2ac_out: Target AC output power of the PV2AC conversion pathway
    :type Ppv2ac_out: numpy array
    :param dt: time step width in seconds
    :type dt: integer
    """
    _version = 0.1
    def __init__(self, parameter, d, ppv, pl, dt):
        """Constructor method
        """
        self.parameter = parameter
        self.d = d
        self.ppv = ppv
        self.pl = pl
        self.dt = dt
        self.th = False # Start threshold for the recharging of the battery
        self.spi = float()
        # Initialization and preallocation
        # NOTE(review): all results below are assigned on the nested
        # Real/Ideal classes, i.e. at class level, and are therefore shared
        # across instances of BatModDC -- confirm only one instance is used.
        self.Real.Pr, self.Real.Prpv, self.Real.Ppv, self.Real.ppv2ac, self.Real.Ppv2ac_out = max_self_consumption(parameter, ppv, pl, pvmod=True)
        self.Real.Ppv2ac_out0 = 0
        self.Real.Ppv2bat_in0 = 0
        self.Real.Pbat = np.zeros_like(self.ppv) # DC power of the battery in W
        self.Real.soc = np.zeros_like(self.ppv) # State of charge of the battery
        self.Real.soc0 = 0 # State of charge of the battery in the first time step
        # Input power of the PV2BAT conversion pathway in W
        self.Real.Ppv2bat_in = np.zeros_like(self.ppv)
        # Output power of the BAT2AC conversion pathway in W
        self.Real.Pbat2ac_out = np.zeros_like(self.ppv)
        self.Real.Pbat2ac_out0 = 0
        # AC power of the PV-battery system in W
        self.Real.Ppvbs = np.zeros_like(self.ppv)
        # Additional power consumption of other system components (e.g. AC power meter) in W
        self.Real.Pperi = np.ones(self.ppv.size) * self.parameter['P_PERI_AC']
        # Ideal (loss-free) system: PV output clipped at zero, scaled to W
        self.Ideal.Ppv = np.maximum(0, self.ppv) * self.parameter['P_PV'] * 1000
        self.Ideal.Pr = self.Ideal.Ppv - self.pl
        self.Ideal.Pbat = np.zeros_like(self.ppv)
        self.Ideal.soc = np.zeros_like(self.ppv)
        self.Ideal.Ppv2bat_in = np.zeros_like(self.ppv)
        # NOTE(review): duplicated assignment (kept as-is)
        self.Ideal.Ppv2bat_in = np.zeros_like(self.ppv)
        self.Ideal.Pbat2ac_out = np.zeros_like(self.ppv)
        self.Ideal.Ppvbs = np.zeros_like(self.ppv)
    @dataclass
    class Real(Data):
        # Power flows of the real (lossy) DC-coupled system.
        Pr : np.array
        Prpv : np.array
        ppv2ac : np.array
        Ppv2ac_out : np.array
        Ppv2ac_out0 : int
        Ppv2bat_in : np.array
        Pbat2ac_out : np.array
        Ppvbs : np.array
    @dataclass
    class Ideal(Real):
        # NOTE(review): Real's generated __init__ requires arguments, so this
        # zero-argument super().__init__() would fail if Ideal were ever
        # instantiated; the class is only used as a namespace here -- confirm.
        def __init__(self):
            super().__init__()
    def simulation(self, pvmod=True):
        """Manages the Performance Simulation Model for AC-coupled PV-Battery Systems
        """
        self.Real.Ppv2ac_out, self.Real.Ppv2bat_in, self.Real.Ppv2bat_in0, self.Real.Pbat2ac_out, self.Real.Pbat2ac_out0, self.Real.Ppvbs, self.Real.Pbat, self.Real.soc, self.Real.soc0 = batmod_dc(
            self.d, self.dt, self.Real.soc0, self.Real.soc, self.Real.Pr, self.Real.Prpv, self.Real.Ppv, self.Real.Ppv2bat_in0, self.Real.Ppv2bat_in,
            self.Real.Pbat2ac_out0, self.Real.Pbat2ac_out, self.Real.Ppv2ac_out, self.Real.Ppvbs, self.Real.Pbat)
        self.Ideal.Pbat, self.Ideal.soc, self.Ideal.soc0 = batmod_dc_ideal(self.d, self.dt, self.Ideal.soc0, self.Ideal.soc, self.Ideal.Pr, self.Ideal.Pbat)
        # Define missing parameters
        self.Real.Ppv2ac = self.Real.Ppv2ac_out # AC output power of the PV2AC conversion pathway
        self.Real.Ppv2bat = self.Real.Ppv2bat_in # DC input power of the PV2BAT conversion pathway
        self.Ideal.Ppvbs = self.Ideal.Ppv - np.maximum(0, self.Ideal.Pbat) - (np.minimum(0, self.Ideal.Pbat)) # Realized AC power of the PV-battery system
        self.Ideal.Ppv2ac = self.Ideal.Ppv - np.maximum(0, self.Ideal.Pbat) # AC output power of the PV2AC conversion pathway
        self.Ideal.Ppv2bat = np.maximum(0, self.Ideal.Pbat) # DC input power of the PV2BAT conversion pathway
        print()  # NOTE(review): stray debug print -- consider removing
    def bat_mod_res(self):
        """Function to calculate the power flows and energy sums including curtailment of PV power
        """
        self.Real.E = bat_res_mod(self.parameter, self.pl, self.Real.Ppv, self.Real.Pbat,
                                  self.dt, self.Real.Ppv2ac, self.Real.Ppv2bat, self.Real.Ppvbs, self.Real.Pperi)
        self.Ideal.E = bat_res_mod_ideal(self.parameter, self.pl, self.Ideal.Ppv, self.Ideal.Pbat,
                                         self.dt, self.Ideal.Ppv2ac, self.Ideal.Ppv2bat, self.Ideal.Ppvbs, self.Ideal.Pperi)
    def calculate_spi(self):
        """Computes the system performance index from the real and ideal
        energy sums by delegating to the module-level calculate_spi function.
        """
        self.spi = calculate_spi(_E_real=self.Real.E, _E_ideal=self.Ideal.E)
    def get_E(self):
        """Returns the energy sums of the simulation
        :return: Energy sums of the simulation in MWh
        :rtype: dict
        """
        return self.Real.E, self.Ideal.E
    def get_soc(self):
        """Returns the state of charge of the battery
        :return: state of charge of the battery
        :rtype: numpy array
        """
        # NOTE(review): self.soc is never assigned in this class (SOC lives
        # on Real/Ideal), so this would raise AttributeError -- confirm.
        return self.soc
    def get_Pbat(self):
        """Returns the DC power of the battery in W
        :return: DC power of the battery in W
        :rtype: numpy array
        """
        # NOTE(review): self.Pbat is never assigned in this class -- confirm.
        return self.Pbat
    def get_SPI(self):
        """Returns the system performance index computed by calculate_spi."""
        return self.spi
class BatModAC(object):
    """Performance Simulation Class for AC-coupled PV-Battery systems
    :param parameter: PV battery system parameters
    :type parameter: dict
    :param d: array containing parameters
    :type d: numpy array
    :param ppv: normalized DC power output of the PV generator
    :type ppv: numpy array
    :param pl: AC load power
    :type pl: numpy array
    :param Pr: AC residual power
    :type Pr: numpy array
    :param Ppv: DC power output of the PV generator
    :type Ppv: numpy array
    :param Ppvs: AC power output of the PV inverter taking into account the conversion losses and maximum output power of the PV inverter
    :type Ppvs: numpy array
    :param Pperi: Additional power consumption of other system components (e.g. AC power meter) in W
    :type Pperi: numpy array
    :param dt: time step width in seconds
    :type dt: integer
    """
    _version = '0.1'
    def __init__(self, parameter, d, ppv, pl, dt):
        """Constructor method
        """
        self.parameter = parameter
        self.d = d
        self.ppv = ppv
        self.pl = pl
        self.dt = dt
        self.spi = float()
        self.th = False # Start threshold for the recharging of the battery
        # Initialization and preallocation
        # NOTE(review): results are assigned on the nested Real/Ideal
        # classes, i.e. at class level, and are therefore shared across
        # instances of BatModAC -- confirm only one instance is used.
        self.Real.Pr, self.Real.Ppv, self.Real.Ppvs, self.Real.Pperi = max_self_consumption(parameter, ppv, pl, pvmod=True)
        self.Real.Pbat = np.zeros_like(self.ppv) # DC power of the battery in W
        self.Real.Pbs = np.zeros_like(self.ppv) # AC power of the battery system in W
        self.Real.soc = np.zeros_like(self.ppv) # State of charge of the battery
        self.Real.soc0 = 0 # State of charge of the battery in the first time step
        self.Real.Pbs0 = 0 # State of the battery storage in the previous time step
        # Ideal (loss-free) system: PV output clipped at zero, scaled to W
        self.Ideal.Ppv = np.maximum(0, ppv) * parameter['P_PV'] * 1000
        self.Ideal.Pr = self.Ideal.Ppv - pl
        self.Ideal.Pbat = np.zeros_like(self.ppv)
        self.Ideal.Pbs = np.zeros_like(self.ppv)
        self.Ideal.Pbs0 = 0
        self.Ideal.soc = np.zeros_like(self.ppv)
        self.Ideal.soc0 = 0
        self.Ideal.Ppvs = self.Ideal.Ppv
        self.Ideal.Pperi = np.zeros_like(self.ppv)
    @dataclass
    class Real(Data):
        # Power flows of the real (lossy) AC-coupled system.
        Pr : np.array
        Ppvs : np.array
        Pbs : np.array
    @dataclass
    class Ideal(Real):
        # NOTE(review): Real's generated __init__ requires arguments, so this
        # zero-argument super().__init__() would fail if Ideal were ever
        # instantiated; the class is only used as a namespace here -- confirm.
        def __init__(self):
            super().__init__()
    def simulation(self):
        """Manages the Performance Simulation Model for AC-coupled PV-Battery Systems
        """
        self.Real.Pbat, self.Real.Pbs, self.Real.soc, self.Real.soc0, self.Real.Pbs0 = batmod_ac(
            self.d, self.dt, self.Real.soc0, self.Real.soc, self.Real.Pr, self.Real.Pbs0, self.Real.Pbs, self.Real.Pbat)
        self.Ideal.Pbs, self.Ideal.Pbat, self.Ideal.soc0, self.Ideal.soc = batmod_ac_ideal(
            self.d, self.dt, self.Ideal.soc0, self.Ideal.soc, self.Ideal.Pr, self.Ideal.Pbat)
    def bat_mod_res(self):
        """Function to calculate the power flows and energy sums including curtailment of PV power
        """
        self.Real.E = bat_res_mod(
            self.parameter, self.pl, self.Real.Ppv, self.Real.Pbat, self.dt, self.Real.Ppvs, self.Real.Pbs, self.Real.Pperi)
        self.Ideal.E = bat_res_mod_ideal(
            self.parameter, self.pl, self.Ideal.Ppv, self.Ideal.Pbat, self.dt, self.Ideal.Ppvs, self.Ideal.Pbs, self.Ideal.Pperi)
    def calculate_spi(self):
        """Computes the system performance index from the real and ideal
        energy sums by delegating to the module-level calculate_spi function.
        """
        self.spi = calculate_spi(_E_real=self.Real.E, _E_ideal=self.Ideal.E)
    def get_E(self):
        """Returns the energy sums of the simulation
        :return: Energy sums of the simulation in MWh
        :rtype: dict
        """
        return self.Real.E, self.Ideal.E
    def get_soc(self):
        """Returns the state of charge of the battery
        :return: state of charge of the battery
        :rtype: numpy array
        """
        # NOTE(review): self.soc is never assigned in this class (SOC lives
        # on Real/Ideal), so this would raise AttributeError -- confirm.
        return self.soc
    def get_Pbat(self):
        """Returns the DC power of the battery in W
        :return: DC power of the battery in W
        :rtype: numpy array
        """
        # NOTE(review): self.Pbat is never assigned in this class -- confirm.
        return self.Pbat
    def get_Pbs(self):
        """Returns the AC power of the battery system in W
        :return: AC power of the battery system in W
        :rtype: numpy array
        """
        # NOTE(review): self.Pbs is never assigned in this class -- confirm.
        return self.Pbs
    def get_SPI(self):
        """Returns the system performance index computed by calculate_spi."""
        return self.spi
class BatModPV(object):
    """Performance Simulation Class for PV-coupled PV-Battery systems
    :param parameter: PV battery system parameters
    :type parameter: dict
    :param d: array containing parameters
    :type d: numpy array
    :param ppv: normalized DC power output of the PV generator
    :type ppv: numpy array
    :param pl: AC load power
    :type pl: numpy array
    :param Pac: Power demand on the AC side
    :type Pac: numpy array
    :param Ppv: DC power output of the PV generator
    :type Ppv: numpy array
    :param Pperi: Additional power consumption of other system components (e.g. AC power meter) in W
    :type Pperi: numpy array
    :param dt: time step width in seconds
    :type dt: integer
    """
    _version = '0.1'
    def __init__(self, parameter, d, ppv, pl, Pac, Ppv, Pperi, dt):
        """Constructor method.

        Unlike BatModDC/BatModAC, this constructor immediately runs the
        simulation and the result post-processing (see the last two calls).
        """
        self.parameter = parameter
        self.d = d
        self.ppv = ppv
        self.pl = pl
        self.Pac = Pac
        self.Ppv = Ppv
        self.Pperi = Pperi
        self.dt = dt
        # Initialization and preallocation
        self.Pbat = np.zeros_like(self.ppv) # DC power of the battery in W
        self.soc = np.zeros_like(self.ppv) # State of charge of the battery
        # Output power of the PV2AC conversion pathway in W
        self.Ppv2ac_out = np.zeros_like(self.ppv)
        # Input power of the PV2BAT conversion pathway in W
        self.Ppv2bat_in = np.zeros_like(self.ppv)
        self.Ppv2bat_in0 = 0
        # Output power of the BAT2PV conversion pathway in W
        self.Pbat2pv_out = np.zeros_like(self.ppv)
        self.Pbat2pv_out0 = 0
        # AC power of the PV-battery system in W
        self.Ppvbs = np.zeros_like(self.ppv)
        self.simulation()
        self.bat_mod_res()
    def simulation(self, pvmod=True):
        """Manages the Performance Simulation Model for AC-coupled PV-Battery Systems
        """
        self.th = 0 # Start threshold for the recharging of the battery
        self.soc0 = 0 # Initial state of charge of the battery in the first time step
        # Simulation of the battery system
        #start = time.process_time()
        self.soc, self.soc0, self.Ppv, self.Ppvbs, self.Pbat, self.Ppv2ac_out, self.Pbat2pv_out, self.Ppv2bat_in = batmod_pv(self.d, self.dt, self.soc0, self.soc, self.Ppv, self.Pac, self.Ppv2bat_in0, self.Ppv2bat_in, self.Ppv2ac_out, self.Pbat2pv_out0, self.Pbat2pv_out, self.Ppvbs, self.Pbat)
        #print(time.process_time()-start)
        # Define missing parameters
        self.Ppv2ac = self.Ppv2ac_out # AC output power of the PV2AC conversion pathway
        self.Ppv2bat = self.Ppv2bat_in # DC input power of the PV2BAT conversion pathway
    def bat_mod_res(self):
        """Function to calculate the power flows and energy sums including curtailment of PV power
        """
        self.E = bat_res_mod(self.parameter, self.pl, self.Ppv, self.Pbat, self.dt, self.Ppv2ac, self.Ppv2bat, self.Ppvbs, self.Pperi)
    def get_E(self):
        """Returns the energy sums of the simulation
        :return: Energy sums of the simulation in MWh
        :rtype: dict
        """
        return self.E
    def get_soc(self):
        """Returns the state of charge of the battery
        :return: state of charge of the battery
        :rtype: numpy array
        """
        return self.soc
    def get_Pbat(self):
        """Returns the DC power of the battery in W
        :return: DC power of the battery in W
        :rtype: numpy array
        """
        return self.Pbat
class ModBus(object):
    """Establishes connection to a battery system via ModBus protocol
    :param host: IP address of the host
    :type host: string
    :param port: Server port of the host
    :type port: integer
    :param unit_id: Unit-ID of the host
    :type unit_id: integer
    :param input_vals: battery power setpoints to write, one per second
    :type input_vals: numpy array
    :param dt: time step width in seconds
    :type dt: integer
    :param fname: path of the csv log file
    :type fname: string
    """
    def __init__(self, host, port, unit_id, input_vals, dt, fname):
        """Constructor method; opens the connection, creates the log file
        and immediately starts the blocking write/read loop.
        """
        self.host = host
        self.port = port
        self.unit_id = unit_id
        self.dt = dt
        self.input_vals = input_vals
        self.fname = fname
        self.open_connection()
        self.create_csv_file()
        self.start_loop()
    def open_connection(self):
        """Opens the connection to the host
        """
        # Open ModBus connection
        try:
            self.c = ModbusClient(host=self.host, port=self.port,
                                  unit_id=self.unit_id, auto_open=True, auto_close=True)
        except ValueError:
            print("Error with host: {}, port: {} or unit-ID: {} params".format(
                self.host, self.port, self.unit_id))
    def start_loop(self):
        """Starts the writing and reading process
        """
        # Transform the array to fit the 1 minute time duration
        #self.set_vals = np.repeat(self.input_vals, self.dt * 60)
        i = 0
        idx = pd.date_range(start=datetime.datetime.now(),
                            periods=(self.input_vals.size), freq='S')
        # NOTE(review): busy-wait loop polling the wall clock; i only
        # advances in an iteration whose second matches the schedule.
        while i < len(idx):
            if datetime.datetime.now().second == idx[i].second:
                # Set charging value
                self.set_val = int(self.input_vals[i])
                if self.set_val < 0:
                    # Write negative value to battery charge power (AC) setpoint register
                    # (masking with 0xFFFF encodes it as 16-bit two's complement)
                    self.c.write_single_register(1024, self.set_val & 0xFFFF)
                    # Log writing time
                    self.set_time = datetime.datetime.now()
                else:
                    # Write positive value to battery charge power (AC) setpoint to register
                    self.c.write_single_register(1024, self.set_val)
                    # Log writing time
                    self.set_time = datetime.datetime.now()
                try:
                    # Read total AC power value from register
                    _P_ac = self.c.read_holding_registers(172, 2)
                    self.read_time_P_ac = datetime.datetime.now()
                except:
                    print('Could not read register 172!')
                try:
                    # Read actual battery charge/discharge power value from register
                    _P_bat = self.c.read_holding_registers(582, 1)
                    self.read_time_P_bat = datetime.datetime.now()
                except:
                    print('Could not read register 582!')
                # NOTE(review): if either read above failed, _P_ac/_P_bat are
                # unbound and the lines below raise NameError -- confirm.
                # Load content of two registers into a single float value
                zregs = utils.word_list_to_long(_P_ac, big_endian=False)
                # Decode and store float value of the AC-power
                self.P_ac = utils.decode_ieee(*zregs)
                # Store the DC charging power
                self.P_bat = np.int16(*_P_bat)
                # Read actual soc
                self.soc0 = self.read_soc(210)
                try:
                    # Save the values to a csv file
                    self.save_to_csv()
                except:
                    print('Could not save to csv!')
                i += 1
    def read_soc(self, reg):
        """Reads the state of charge of the battery
        :param reg: first of the two registers holding the SOC float
        :type reg: integer
        :return: state of charge
        :rtype: float
        """
        # Load the actual state of charge of the battery
        regs = self.c.read_holding_registers(reg, 2)
        # Load content of two registers into a single float value
        zregs = utils.word_list_to_long(regs, big_endian=False)
        return utils.decode_ieee(*zregs)
    def create_csv_file(self):
        """Creates a csv file from set and read values
        """
        # Create a new csv-file
        with open(self.fname, 'w') as f:
            writer = csv.writer(f, dialect='excel')
            writer.writerow(['set_time',
                             'read_time_P_ac',
                             'read_time_P_bat',
                             'soc',
                             'set_value',
                             'P_ac',
                             'P_bat'])
    def save_to_csv(self):
        """Saves the set and read values to a csv file
        """
        # Save the read values to a csv file
        with open(self.fname, "a") as f:
            wr = csv.writer(f, dialect='excel')
            wr.writerow([self.set_time, self.read_time_P_ac, self.read_time_P_bat,
                         self.soc0, self.set_val, self.P_ac, self.P_bat])
def max_self_consumption(parameter, ppv, pl, pvmod=True, ideal=False):
    """Function for maximizing self consumption
    :param parameter: PV battery system parameters
    :type parameter: dict
    :param ppv: normalized DC power output of the PV generator (kW/kWp) when
        ``pvmod`` is True, otherwise DC power in W
    :type ppv: numpy array
    :param pl: AC load power
    :type pl: numpy array
    :param pvmod: True if ``ppv`` is normalized (kW/kWp)
    :type pvmod: bool
    :param ideal: neglect converter limits and losses (used by the AC branch)
    :type ideal: bool
    :return: topology-dependent tuple of power flows:
        AC -> (Pr, Ppv, Ppvs, Pperi); DC -> (Pr, Prpv, Ppv, ppv2ac,
        Ppv2ac_out); PV -> (Pac, Ppv, Pperi)
    """
    topology = parameter['Top']
    # ----- AC-coupled systems -------------------------------------------
    if topology == 'AC':
        p_rated_in = parameter['P_PV2AC_in']
        # DC power output of the PV generator in W
        if pvmod:  # ppv is normalized to kW/kWp
            if ideal:
                Ppv = np.maximum(0, ppv) * parameter['P_PV'] * 1000
            else:
                Ppv = np.minimum(ppv * parameter['P_PV'], p_rated_in) * 1000
        elif ideal:  # ppv already in W
            Ppv = np.maximum(0, ppv)
        else:
            Ppv = np.minimum(ppv, p_rated_in * 1000)
        # Normalized input power of the PV inverter
        p_norm = Ppv / p_rated_in / 1000
        # Conversion loss polynomial of the PV inverter
        inv_loss = (parameter['PV2AC_a_in'] * p_norm * p_norm
                    + parameter['PV2AC_b_in'] * p_norm
                    + parameter['PV2AC_c_in'])
        # AC output of the PV inverter, clipped at its rated output power
        Ppvs = np.minimum(np.maximum(0, Ppv - inv_loss),
                          parameter['P_PV2AC_out'] * 1000)
        # Additional power consumption of other system components in W
        Pperi = np.ones_like(ppv) * parameter['P_PERI_AC']
        # Add the PV inverter standby draw wherever no AC power is produced
        Pperi[Ppvs == 0] += parameter['P_PVINV_AC']
        # Residual power
        Pr = (Ppv - pl) if ideal else (Ppvs - pl - Pperi)
        return Pr, Ppv, Ppvs, Pperi
    # ----- DC-coupled systems -------------------------------------------
    if topology == 'DC':
        # DC power output of the PV generator in W
        Ppv = ppv * parameter['P_PV'] * 1000 if pvmod else ppv
        # Clip at the maximum DC input power of the PV2AC conversion pathway
        Ppv = np.minimum(Ppv, parameter['P_PV2AC_in'] * 1000)
        # Power demand on the AC side
        Pac = pl + parameter['P_PERI_AC']
        p_rated_out = parameter['P_PV2AC_out'] * 1000
        # Normalized AC output power needed to cover the AC demand
        ppv2ac = np.minimum(Pac, p_rated_out) / parameter['P_PV2AC_out'] / 1000
        # Target DC input power of the PV2AC conversion pathway
        Ppv2ac_in_ac = np.minimum(Pac, p_rated_out) + (
            parameter['PV2AC_a_out'] * ppv2ac**2
            + parameter['PV2AC_b_out'] * ppv2ac
            + parameter['PV2AC_c_out'])
        # Normalized DC input power of the PV2AC conversion pathway
        ppv2ac = Ppv / parameter['P_PV2AC_in'] / 1000
        # Target AC output power of the PV2AC conversion pathway
        Ppv2ac_out = np.maximum(0, Ppv - (
            parameter['PV2AC_a_in'] * ppv2ac**2
            + parameter['PV2AC_b_in'] * ppv2ac
            + parameter['PV2AC_c_in']))
        Prpv = Ppv - Ppv2ac_in_ac  # residual power for battery charging
        Pr = Ppv2ac_out - Pac      # residual power for battery discharging
        return Pr, Prpv, Ppv, ppv2ac, Ppv2ac_out
    # ----- PV-coupled systems -------------------------------------------
    if topology == 'PV':
        # Additional power consumption of other system components in W
        Pperi = np.ones_like(ppv) * parameter['P_PERI_AC']
        # DC power output of the PV generator in W
        Ppv = ppv * parameter['P_PV'] * 1000 if pvmod else ppv
        # Power demand on the AC side
        Pac = pl + Pperi
        return Pac, Ppv, Pperi
@nb.jit(nopython=True)
def batmod_ac(d, _dt, _soc0, _soc, _Pr, _Pbs0, _Pbs, _Pbat):
    """Performance Simulation function for AC-coupled battery systems
    :param d: array containing parameters
    :type d: numpy array
    :param dt: time step width
    :type dt: integer
    :param soc0: state of charge in the previous time step
    :type soc0: float
    :param soc: state of charge of the battery (filled in place)
    :type soc: numpy array
    :param Pr: residual power
    :type Pr: numpy array
    :param Pbs0: AC-power of the battery system in the previous time step
    :type Pbs0: float
    :param Pbs: AC-power of the battery system (filled in place)
    :type Pbs: numpy array
    :param Pbat: DC-power of the battery (filled in place)
    :type Pbat: numpy array
    :return: Pbat, Pbs, soc, soc0, Pbs0
    """
    # Loading of particular variables
    _E_BAT = d[0]
    _eta_BAT = d[1]
    _t_CONSTANT = d[2]
    _P_SYS_SOC0_DC = d[3]
    _P_SYS_SOC0_AC = d[4]
    _P_SYS_SOC1_DC = d[5]
    _P_SYS_SOC1_AC = d[6]
    _AC2BAT_a_in = d[7]
    _AC2BAT_b_in = d[8]
    _AC2BAT_c_in = d[9]
    _BAT2AC_a_out = d[10]
    _BAT2AC_b_out = d[11]
    _BAT2AC_c_out = d[12]
    _P_AC2BAT_DEV = d[13]
    _P_BAT2AC_DEV = d[14]
    _P_BAT2AC_out = d[15]
    _P_AC2BAT_in = d[16]
    _t_DEAD = int(round(d[17]))
    _SOC_h = d[18]
    # Minimum charging/discharging powers (constant terms of the loss polynomials)
    _P_AC2BAT_min = _AC2BAT_c_in
    _P_BAT2AC_min = _BAT2AC_c_out
    # Correction factor to avoid over charge and discharge the battery
    corr = 0.1
    # Initialization of particular variables
    _tde = _t_CONSTANT > 0 # Binary variable to activate the first-order time delay element
    # Factor of the first-order time delay element
    _ftde = 1 - np.exp(-_dt / _t_CONSTANT)
    # First time step with regard to the dead time of the system control
    _tstart = np.maximum(2, 1 + _t_DEAD)
    _tend = int(_Pr.size)
    _th = 0
    # Capacity of the battery, conversion from kWh to Wh
    _E_BAT *= 1000
    # Efficiency of the battery in percent
    _eta_BAT /= 100
    # Check if the dead or settling time can be ignored and set flags accordingly
    if _dt >= (3 * _t_CONSTANT) or _tend == 1:
        _tstart = 1
        T_DEAD = False
    else:
        T_DEAD = True
    if _dt >= _t_DEAD + 3 * _t_CONSTANT:
        SETTLING = False
    else:
        SETTLING = True
    for t in range(_tstart - 1, _tend):
        # Energy content of the battery in the previous time step
        E_b0 = _soc0 * _E_BAT
        # Calculate the AC power of the battery system from the residual power
        # with regard to the dead time of the system control
        if T_DEAD:
            P_bs = _Pr[t - _t_DEAD]
        else:
            P_bs = _Pr[t]
        # Check if the battery holds enough unused capacity for charging or discharging
        # Estimated amount of energy in Wh that is supplied to or discharged from the storage unit.
        E_bs_est = P_bs * _dt / 3600
        # Reduce P_bs to avoid over charging of the battery
        if E_bs_est > 0 and E_bs_est > (_E_BAT - E_b0):
            P_bs = (_E_BAT - E_b0) * 3600 / _dt
        # When discharging take the correction factor into account
        elif E_bs_est < 0 and np.abs(E_bs_est) > (E_b0):
            P_bs = (E_b0 * 3600 / _dt) * (1-corr)
        # Adjust the AC power of the battery system due to the stationary
        # deviations taking the minimum charging and discharging power into
        # account
        if P_bs > _P_AC2BAT_min:
            P_bs = np.maximum(_P_AC2BAT_min, P_bs + _P_AC2BAT_DEV)
        elif P_bs < -_P_BAT2AC_min:
            P_bs = np.minimum(-_P_BAT2AC_min, P_bs - _P_BAT2AC_DEV)
        else:
            P_bs = 0
        # Limit the AC power of the battery system to the rated power of the
        # battery converter
        P_bs = np.maximum(-_P_BAT2AC_out * 1000,
                          np.minimum(_P_AC2BAT_in * 1000, P_bs))
        # Adjust the AC power of the battery system due to the settling time
        # (modeled by a first-order time delay element)
        # NOTE(review): the original comments asked whether the previous value
        # must be passed in -- _Pbs0 is used for t == 0 below.
        if SETTLING:
            if t > 0:
                P_bs = _tde * _Pbs[t-1] + _tde * (P_bs - _Pbs[t-1]) * _ftde + P_bs * (not _tde)
            else:
                P_bs = _tde * _Pbs0 + _tde * (P_bs - _Pbs0) * _ftde + P_bs * (not _tde)
        # Decision if the battery should be charged or discharged
        if P_bs > 0 and _soc0 < 1 - _th * (1 - _SOC_h):
            # The last term th*(1-SOC_h) avoids the alternation between
            # charging and standby mode due to the DC power consumption of the
            # battery converter when the battery is fully charged. The battery
            # will not be recharged until the SOC falls below the SOC-threshold
            # (SOC_h) for recharging from PV.
            # Normalized AC power of the battery system
            p_bs = P_bs / _P_AC2BAT_in / 1000
            # DC power of the battery affected by the AC2BAT conversion losses
            # of the battery converter
            P_bat = np.maximum(
                0, P_bs - (_AC2BAT_a_in * p_bs * p_bs + _AC2BAT_b_in * p_bs + _AC2BAT_c_in))
        elif P_bs < 0 and _soc0 > 0:
            # Normalized AC power of the battery system
            p_bs = np.abs(P_bs / _P_BAT2AC_out / 1000)
            # DC power of the battery affected by the BAT2AC conversion losses
            # of the battery converter
            P_bat = P_bs - (_BAT2AC_a_out * p_bs * p_bs +
                            _BAT2AC_b_out * p_bs + _BAT2AC_c_out)
        else: # Neither charging nor discharging of the battery
            # Set the DC power of the battery to zero
            P_bat = 0
        # Decision if the standby mode is active
        if P_bat == 0 and _soc0 <= 0: # Standby mode in discharged state
            # DC and AC power consumption of the battery converter
            P_bat = -np.maximum(0, _P_SYS_SOC0_DC)
            P_bs = _P_SYS_SOC0_AC
        elif P_bat == 0 and _soc0 > 0: # Standby mode in fully charged state
            # DC and AC power consumption of the battery converter
            P_bat = -np.maximum(0, _P_SYS_SOC1_DC)
            P_bs = _P_SYS_SOC1_AC
        # Transfer the realized AC power of the battery system and
        # the DC power of the battery
        _Pbs0 = P_bs
        _Pbs[t] = P_bs
        _Pbat[t] = P_bat
        # Change the energy content of the battery from Ws to Wh conversion
        if P_bat > 0:
            E_b = E_b0 + P_bat * np.sqrt(_eta_BAT) * _dt / 3600
        elif P_bat < 0:
            E_b = E_b0 + P_bat / np.sqrt(_eta_BAT) * _dt / 3600
        else:
            E_b = E_b0
        # Calculate the state of charge of the battery
        _soc0 = E_b / (_E_BAT)
        _soc[t] = _soc0
        # Adjust the hysteresis threshold to avoid alternation
        # between charging and standby mode due to the DC power
        # consumption of the battery converter.
        # Precedence: (_th and _soc[t] > _SOC_h) or (_soc[t] > 1)
        if _th and _soc[t] > _SOC_h or _soc[t] > 1:
            _th = True
        else:
            _th = False
    return _Pbat, _Pbs, _soc, _soc0, _Pbs0
@nb.jit(nopython=True)
def batmod_ac_ideal(d, _dt, _soc0, _soc, _Pr, _Pbat):
    """Loss-free performance simulation for AC-coupled battery systems.

    :param d: parameter array; only d[0] (battery capacity in kWh) is used
    :param _dt: time step width in seconds
    :param _soc0: state of charge at the start of the simulation
    :param _soc: state-of-charge array, filled in place
    :param _Pr: residual power in W
    :param _Pbat: battery DC power array, filled in place
    :return: (Pbs, Pbat, soc0, soc) -- Pbs equals Pbat since an ideal
        system has no conversion losses
    """
    e_bat = d[0]
    for t in range(_Pr.size):
        # Energy content of the battery at the start of the step in Wh
        e_prev = _soc0 * e_bat * 1000
        # The battery absorbs the full residual power
        p_bat = _Pr[t]
        if (p_bat > 0 and _soc0 < 1) or (p_bat < 0 and _soc0 > 0):
            # Charge or discharge: update the energy content
            e_now = e_prev + p_bat * _dt / 3600
        else:
            # Battery full (charging) or empty (discharging): stay idle
            p_bat = 0
            e_now = e_prev
        _Pbat[t] = p_bat
        # New state of charge of the battery
        _soc0 = e_now / (e_bat * 1000)
        _soc[t] = _soc0
    # Ideal system: AC power of the battery system equals the DC power
    return _Pbat, _Pbat, _soc0, _soc
@nb.jit(nopython=True)
def batmod_dc(d, _dt, _soc0, _soc, _Pr, _Prpv, _Ppv, _Ppv2bat_in0, _Ppv2bat_in, _Pbat2ac_out0, _Pbat2ac_out, _Ppv2ac_out, _Ppvbs, _Pbat):
    """Performance simulation function for DC-coupled battery systems
    :param d: array containing parameters
    :type d: numpy array
    :param dt: time step width
    :type dt: integer
    :param soc0: state of charge in the previous time step
    :type soc0: float
    :param soc: state of charge of the battery (filled in place)
    :type soc: numpy array
    :param Pr: residual power
    :type Pr: numpy array
    :param Prpv: residual power of the PV-system
    :type Prpv: numpy array
    :param Ppv: PV-power
    :type Ppv: numpy array
    :param Ppv2bat_in0: AC input power of the battery system in the previous time step
    :type Ppv2bat_in0: float
    :param Ppv2bat_in: AC input power of the battery system
    :type Ppv2bat_in: numpy array
    :param Pbat2ac_out0: AC output power of the battery system in the previous time step
    :type Pbat2ac_out0: float
    :param Pbat2ac_out: AC output power of the battery system
    :type Pbat2ac_out: numpy array
    :param Ppv2ac_out: AC output power of the PV inverter (filled in place)
    :type Ppv2ac_out: numpy array
    :param Ppvbs: AC power from the PV system to the battery system
    :type Ppvbs: numpy array
    :param Pbat: DC power of the battery
    :type Pbat: float
    :return: Ppv2ac_out, Ppv2bat_in, Ppv2bat_in0, Pbat2ac_out,
        Pbat2ac_out0, Ppvbs, Pbat, soc, soc0
    """
    _E_BAT = d[0]
    _P_PV2AC_in = d[1]
    _P_PV2AC_out = d[2]
    _P_PV2BAT_in = d[3]
    _P_BAT2AC_out = d[4]
    _PV2AC_a_in = d[5]
    _PV2AC_b_in = d[6]
    _PV2AC_c_in = d[7]
    _PV2BAT_a_in = d[8]
    _PV2BAT_b_in = d[9]
    _BAT2AC_a_out = d[10]
    _BAT2AC_b_out = d[11]
    _BAT2AC_c_out = d[12]
    _eta_BAT = d[13]
    _SOC_h = d[14]
    _P_PV2BAT_DEV = d[15]
    _P_BAT2AC_DEV = d[16]
    _t_DEAD = int(round(d[17]))
    _t_CONSTANT = d[18]
    _P_SYS_SOC1_DC = d[19]
    _P_SYS_SOC0_AC = d[20]
    _P_SYS_SOC0_DC = d[21]
    # Minimum input power of the PV2AC conversion pathway (constant loss term)
    _P_PV2AC_min = _PV2AC_c_in
    # Capacity of the battery, conversion from kWh to Wh
    _E_BAT *= 1000
    # Efficiency of the battery in percent
    _eta_BAT /= 100
    # Initialization of particular variables
    # _P_PV2AC_min = _parameter['PV2AC_c_in'] # Minimum input power of the PV2AC conversion pathway
    _tde = _t_CONSTANT > 0 # Binary variable to activate the first-order time delay element
    # Factor of the first-order time delay element
    _ftde = 1 - np.exp(-_dt / _t_CONSTANT)
    # First time step with regard to the dead time of the system control
    _tstart = np.maximum(2, 1 + _t_DEAD)
    _tend = int(_Pr.size)
    _th = 0
    # Correction factor to avoid over charge and discharge the battery
    corr = 0.1
    # Check if the dead or settling time can be ignored and set flags accordingly
    if _dt >= (3 * _t_CONSTANT) or _tend == 1:
        _tstart = 1
        T_DEAD = False
    else:
        T_DEAD = True
    if _dt >= _t_DEAD + 3 * _t_CONSTANT:
        SETTLING = False
    else:
        SETTLING = True
    for t in range(_tstart - 1, _tend):
        # Energy content of the battery in the previous time step
        E_b0 = _soc0 * _E_BAT
        # Residual power with regard to the dead time of the system control
        if T_DEAD:
            P_rpv = _Prpv[t - _t_DEAD]
            P_r = _Pr[t - _t_DEAD]
        else:
            P_rpv = _Prpv[t]
            P_r = _Pr[t]
        # Check if the battery holds enough unused capacity for charging or discharging
        # Estimated amount of energy that is supplied to or discharged from the storage unit.
        E_bs_rpv = P_rpv * _dt / 3600
        E_bs_r = P_r * _dt / 3600
        # Reduce P_bs to avoid over charging of the battery
        if E_bs_rpv > 0 and E_bs_rpv > (_E_BAT - E_b0):
            P_rpv = (_E_BAT - E_b0) * 3600 / _dt
        # When discharging take the correction factor into account
        elif E_bs_r < 0 and np.abs(E_bs_r) > (E_b0):
            P_r = ((E_b0) * 3600 / _dt) * (1-corr)
        # Decision if the battery should be charged or discharged
        if P_rpv > 0 and _soc0 < 1 - _th * (1 - _SOC_h):
            '''
            The last term th*(1-SOC_h) avoids the alternation between
            charging and standby mode due to the DC power consumption of the
            battery converter when the battery is fully charged. The battery
            will not be recharged until the SOC falls below the SOC-threshold
            (SOC_h) for recharging from PV.
            '''
            # Charging power
            P_pv2bat_in = P_rpv
            # Adjust the charging power due to the stationary deviations
            P_pv2bat_in = np.maximum(0, P_pv2bat_in + _P_PV2BAT_DEV)
            # Limit the charging power to the maximum charging power
            P_pv2bat_in = np.minimum(P_pv2bat_in, _P_PV2BAT_in * 1000)
            # Adjust the charging power due to the settling time
            # (modeled by a first-order time delay element)
            if SETTLING:
                if t > 0:
                    P_pv2bat_in = _tde * _Ppv2bat_in[(t-1)] + _tde * (
                        P_pv2bat_in - _Ppv2bat_in[(t-1)]) * _ftde + P_pv2bat_in * (not _tde)
                else:
                    P_pv2bat_in = _tde * _Ppv2bat_in0 + _tde * \
                        (P_pv2bat_in - _Ppv2bat_in0) * \
                        _ftde + P_pv2bat_in * (not _tde)
            # Limit the charging power to the current power output of the PV generator
            P_pv2bat_in = np.minimum(P_pv2bat_in, _Ppv[t])
            # Normalized charging power
            ppv2bat = P_pv2bat_in / _P_PV2BAT_in / 1000
            # DC power of the battery affected by the PV2BAT conversion losses
            # (the idle losses of the PV2BAT conversion pathway are not taken
            # into account)
            P_bat = np.maximum(
                0, P_pv2bat_in - (_PV2BAT_a_in * ppv2bat**2 + _PV2BAT_b_in * ppv2bat))
            # Realized DC input power of the PV2AC conversion pathway
            P_pv2ac_in = _Ppv[t] - P_pv2bat_in
            # Normalized DC input power of the PV2AC conversion pathway
            _ppv2ac = P_pv2ac_in / _P_PV2AC_in / 1000
            # Realized AC power of the PV-battery system
            P_pv2ac_out = np.maximum(
                0, P_pv2ac_in - (_PV2AC_a_in * _ppv2ac**2 + _PV2AC_b_in * _ppv2ac + _PV2AC_c_in))
            P_pvbs = P_pv2ac_out
            # Transfer the final values
            _Ppv2ac_out[t] = P_pv2ac_out
            _Ppv2bat_in0 = P_pv2bat_in
            _Ppv2bat_in[t] = P_pv2bat_in
        elif P_rpv < 0 and _soc0 > 0:
            # Discharging power
            P_bat2ac_out = P_r * -1
            # Adjust the discharging power due to the stationary deviations
            P_bat2ac_out = np.maximum(0, P_bat2ac_out + _P_BAT2AC_DEV)
            # Adjust the discharging power to the maximum discharging power
            P_bat2ac_out = np.minimum(P_bat2ac_out, _P_BAT2AC_out * 1000)
            # Adjust the discharging power due to the settling time
            # (modeled by a first-order time delay element)
            if SETTLING:
                if t > 0:
                    P_bat2ac_out = _tde * _Pbat2ac_out[t-1] + _tde * (
                        P_bat2ac_out - _Pbat2ac_out[t-1]) * _ftde + P_bat2ac_out * (not _tde)
                else:
                    P_bat2ac_out = _tde * _Pbat2ac_out0 + _tde * \
                        (P_bat2ac_out - _Pbat2ac_out0) * \
                        _ftde + P_bat2ac_out * (not _tde)
            # Limit the discharging power to the maximum AC power output of the PV-battery system
            P_bat2ac_out = np.minimum(
                _P_PV2AC_out * 1000 - _Ppv2ac_out[t], P_bat2ac_out)
            # Normalized discharging power
            ppv2bat = P_bat2ac_out / _P_BAT2AC_out / 1000
            # DC power of the battery affected by the BAT2AC conversion losses
            # (if the idle losses of the PV2AC conversion pathway are covered by
            # the PV generator, the idle losses of the BAT2AC conversion pathway
            # are not taken into account)
            if _Ppv[t] > _P_PV2AC_min:
                P_bat = -1 * (P_bat2ac_out + (_BAT2AC_a_out *
                                              ppv2bat**2 + _BAT2AC_b_out * ppv2bat))
            else:
                P_bat = -1 * (P_bat2ac_out + (_BAT2AC_a_out * ppv2bat **
                                              2 + _BAT2AC_b_out * ppv2bat + _BAT2AC_c_out)) + _Ppv[t]
            # Realized AC power of the PV-battery system
            P_pvbs = _Ppv2ac_out[t] + P_bat2ac_out
            # Transfer the final values
            _Pbat2ac_out0 = P_bat2ac_out
            _Pbat2ac_out[t] = P_bat2ac_out
        else: # Neither charging nor discharging of the battery
            # Set the DC power of the battery to zero
            P_bat = 0
            # Realized AC power of the PV-battery system
            P_pvbs = _Ppv2ac_out[t]
        # Decision if the standby mode is active
        if P_bat == 0 and P_pvbs == 0 and _soc0 <= 0: # Standby mode in discharged state
            # DC and AC power consumption of the PV-battery inverter
            P_bat = -np.maximum(0, _P_SYS_SOC0_DC)
            P_pvbs = -_P_SYS_SOC0_AC
        elif P_bat == 0 and P_pvbs > 0 and _soc0 > 0: # Standby mode in fully charged state
            # DC power consumption of the PV-battery inverter
            P_bat = -np.maximum(0, _P_SYS_SOC1_DC)
        # Transfer the realized AC power of the PV-battery system and the DC power of the battery
        _Ppvbs[t] = P_pvbs
        _Pbat[t] = P_bat
        # Change the energy content of the battery Wx to Wh conversion
        if P_bat > 0:
            E_b = E_b0 + P_bat * np.sqrt(_eta_BAT) * _dt / 3600
        elif P_bat < 0:
            E_b = E_b0 + P_bat / np.sqrt(_eta_BAT) * _dt / 3600
        else:
            E_b = E_b0
        # Calculate the state of charge of the battery
        _soc0 = E_b / _E_BAT
        _soc[t] = _soc0
        # Adjust the hysteresis threshold to avoid alternation between charging
        # and standby mode due to the DC power consumption of the
        # PV-battery inverter
        # Precedence: (_th and _soc[t] > _SOC_h) or (_soc[t] > 1)
        if _th and _soc[t] > _SOC_h or _soc[t] > 1:
            _th = True
        else:
            _th = False
    return _Ppv2ac_out, _Ppv2bat_in, _Ppv2bat_in0, _Pbat2ac_out, _Pbat2ac_out0, _Ppvbs, _Pbat, _soc, _soc0
@nb.jit(nopython=True)
def batmod_dc_ideal(d, _dt, _soc0, _soc, _Pr, _Pbat):
    """Ideal (lossless) performance simulation for DC-coupled battery systems.

    :param d: array of system parameters; only d[0], the battery
        capacity in kWh, is used
    :param _dt: time step width in seconds
    :param _soc0: state of charge at the start of the simulation (0..1)
    :param _soc: output array, state of charge per time step
    :param _Pr: residual power per time step in W (positive = surplus)
    :param _Pbat: output array, realized DC power of the battery in W
    :return: tuple (_Pbat, _soc, _soc0)
    """
    _E_BAT = d[0]
    for t in range(_Pr.size):
        # Battery energy content at the start of the time step in Wh
        E_b0 = _soc0 * _E_BAT * 1000
        P_bat = _Pr[t]
        # Charge on surplus while not full, discharge on demand while not empty
        charging = P_bat > 0 and _soc0 < 1
        discharging = P_bat < 0 and _soc0 > 0
        if charging or discharging:
            # Lossless energy balance (Ws -> Wh via division by 3600)
            E_b = E_b0 + P_bat * _dt / 3600
        else:
            # Battery stays idle in this time step
            P_bat = 0
            E_b = E_b0
        _Pbat[t] = P_bat
        _soc0 = E_b / (_E_BAT * 1000)
        _soc[t] = _soc0
    return _Pbat, _soc, _soc0
@nb.jit(nopython=True)
def batmod_pv(d, _dt, _soc0, _soc, _Ppv, _Pac, _Ppv2bat_in0, _Ppv2bat_in, _Ppv2ac_out, _Pbat2pv_out0, _Pbat2pv_out, _Ppvbs, _Pbat):
    """Performance simulation function for PV-coupled battery systems
    :param d: array containing the system parameters (unpacked by index below)
    :type d: numpy array
    :param _dt: time step width in seconds
    :type _dt: integer
    :param _soc0: state of charge of the battery in the previous time step
    :type _soc0: float
    :param _soc: state of charge of the battery
    :type _soc: numpy array
    :param _Ppv: DC power output of the PV generator
    :type _Ppv: numpy array
    :param _Pac: target AC output power of the PV-battery system
    :type _Pac: numpy array
    :param _Ppv2bat_in0: input power of the PV2BAT conversion pathway in the previous time step
    :type _Ppv2bat_in0: float
    :param _Ppv2bat_in: input power of the PV2BAT conversion pathway
    :type _Ppv2bat_in: numpy array
    :param _Ppv2ac_out: AC output power of the PV2AC conversion pathway
    :type _Ppv2ac_out: numpy array
    :param _Pbat2pv_out0: output power of the BAT2PV conversion pathway in the previous time step
    :type _Pbat2pv_out0: float
    :param _Pbat2pv_out: output power of the BAT2PV conversion pathway
    :type _Pbat2pv_out: numpy array
    :param _Ppvbs: realized AC power of the PV-battery system
    :type _Ppvbs: numpy array
    :param _Pbat: DC power of the battery
    :type _Pbat: numpy array
    :return: tuple (_soc, _soc0, _Ppv, _Ppvbs, _Pbat, _Ppv2ac_out,
        _Pbat2pv_out, _Ppv2bat_in)
    """
    # Initialization of particular variables
    _E_BAT = d[0]
    _P_PV2AC_in = d[1]
    _P_PV2AC_out = d[2]
    _P_PV2BAT_in = d[3]
    _P_BAT2PV_out = d[4]
    _PV2AC_a_in = d[5]
    _PV2AC_b_in = d[6]
    _PV2AC_c_in = d[7]
    _PV2BAT_a_in = d[8]
    _PV2BAT_b_in = d[9]
    _PV2BAT_c_in = d[10]
    _PV2AC_a_out = d[11]
    _PV2AC_b_out = d[12]
    _PV2AC_c_out = d[13]
    _BAT2PV_a_out = d[14]
    _BAT2PV_b_out = d[15]
    _BAT2PV_c_out = d[16]
    _eta_BAT = d[17]
    _SOC_h = d[18]
    _P_PV2BAT_DEV = d[19]
    _P_BAT2AC_DEV = d[20]
    _P_SYS_SOC1_DC = d[21]
    _P_SYS_SOC0_AC = d[22]
    _P_SYS_SOC0_DC = d[23]
    _t_DEAD = int(round(d[24]))
    _t_CONSTANT = d[25]
    # Correction factor to avoid over charge and discharge the battery
    corr = 0.1
    _P_PV2BAT_min = _PV2BAT_c_in # Minimum DC charging power
    _P_BAT2PV_min = _BAT2PV_c_out # Minimum DC discharging power
    # Initialization of particular variables
    _tde = _t_CONSTANT > 0 # Binary variable to activate the first-order time delay element
    # Factor of the first-order time delay element
    _ftde = 1 - np.exp(-_dt / _t_CONSTANT)
    # First time step with regard to the dead time of the system control
    _tstart = np.maximum(2, 1 + _t_DEAD)
    _tend = int(_Ppv.size)
    _th = 0
    _E_BAT *= 1000 # Conversion from kWh to Wh
    _eta_BAT /= 100
    # Check if the dead or settling time can be ignored and set flags accordingly
    if _dt >= (3 * _t_CONSTANT) or _tend == 1:
        _tstart = 1
        T_DEAD = False
    else:
        T_DEAD = True
    if _dt >= _t_DEAD + 3 * _t_CONSTANT:
        SETTLING = False
    else:
        SETTLING = True
    for t in range(_tstart - 1, _tend):
        # Energy content of the battery in the previous time step
        E_b0 = _soc0 * _E_BAT
        # Target AC output power of the PV-battery system to cover the AC power demand
        if T_DEAD:
            P_pvbs = np.minimum(_Pac[t - _t_DEAD], _P_PV2AC_out * 1000)
        else:
            P_pvbs = np.minimum(_Pac[t], _P_PV2AC_out * 1000)
        # Normalized AC output power of the PV2AC conversion pathway
        ppv2ac = P_pvbs / _P_PV2AC_out / 1000
        # Target DC input power of the PV2AC conversion pathway
        P_pv2ac_in = P_pvbs + (_PV2AC_a_out * ppv2ac **
                               2 + _PV2AC_b_out * ppv2ac + _PV2AC_c_out)
        # Residual power
        if T_DEAD:
            P_rpv = _Ppv[t - _t_DEAD] - P_pv2ac_in
        else:
            P_rpv = _Ppv[t] - P_pv2ac_in
        # Check if the battery holds enough unused capacity for charging or discharging
        # Estimated amount of energy that is supplied to or discharged from the storage unit.
        E_bs_rpv = P_rpv * _dt / 3600
        # Reduce P_rpv to avoid over charging of the battery
        if E_bs_rpv > 0 and E_bs_rpv > (_E_BAT - E_b0):
            P_rpv = ((_E_BAT - E_b0) * 3600) / _dt
        # When discharging take the correction factor into account
        elif E_bs_rpv < 0 and np.abs(E_bs_rpv) > (E_b0):
            P_rpv = ((E_b0) * 3600 / _dt) * (1-corr)
        # Decision if the battery should be charged or discharged
        if P_rpv > _P_PV2BAT_min and _soc0 < 1 - _th * (1 - _SOC_h):
            '''
            The last term th*(1-SOC_h) avoids the alternation between
            charging and standby mode due to the DC power consumption of the
            battery converter when the battery is fully charged. The battery
            will not be recharged until the SOC falls below the SOC-threshold
            (SOC_h) for recharging from PV.
            '''
            # Charging power
            P_pv2bat_in = P_rpv
            # Adjust the charging power due to stationary deviations
            P_pv2bat_in = np.maximum(0, P_pv2bat_in + _P_PV2BAT_DEV)
            # Limit the charging power to the maximum charging power
            P_pv2bat_in = np.minimum(P_pv2bat_in, _P_PV2BAT_in * 1000)
            # Adjust the charging power due to the settling time
            # (modeled by a first-order time delay element)
            if SETTLING:
                if t > 0:
                    P_pv2bat_in = _tde * _Ppv2bat_in[t-1] + _tde * (
                        P_pv2bat_in - _Ppv2bat_in[t-1]) * _ftde + P_pv2bat_in * (not _tde)
                else:
                    P_pv2bat_in = _tde * _Ppv2bat_in0 + _tde * \
                        (P_pv2bat_in - _Ppv2bat_in0) * \
                        _ftde + P_pv2bat_in * (not _tde)
            # Limit the charging power to the current power output of the PV generator
            P_pv2bat_in = np.minimum(P_pv2bat_in, _Ppv[t])
            # Normalized charging power
            ppv2bat = P_pv2bat_in / _P_PV2BAT_in / 1000
            # DC power of the battery
            P_bat = np.maximum(0, P_pv2bat_in - (_PV2BAT_a_in *
                                                 ppv2bat**2 + _PV2BAT_b_in * ppv2bat + _PV2BAT_c_in))
            # Realized DC input power of the PV2AC conversion pathway
            P_pv2ac_in = _Ppv[t] - P_pv2bat_in
            # Limit the DC input power of the PV2AC conversion pathway
            P_pv2ac_in = np.minimum(P_pv2ac_in, _P_PV2AC_in * 1000)
            # Recalculate Ppv(t) with limited PV2AC input power
            _Ppv[t] = P_pv2ac_in + P_pv2bat_in
            # Normalized DC input power of the PV2AC conversion pathway
            ppv2ac = P_pv2ac_in / _P_PV2AC_in / 1000
            # Realized AC power of the PV-battery system
            P_pv2ac_out = np.maximum(
                0, P_pv2ac_in - (_PV2AC_a_in * ppv2ac**2 + _PV2AC_b_in * ppv2ac + _PV2AC_c_in))
            P_pvbs = P_pv2ac_out
            # Transfer the final values
            _Ppv2ac_out[t] = P_pv2ac_out
            _Ppv2bat_in0 = P_pv2bat_in
            _Ppv2bat_in[t] = P_pv2bat_in
        elif P_rpv < -_P_BAT2PV_min and _soc0 > 0:
            # Target discharging power of the battery
            P_bat2pv_out = np.abs(P_rpv)
            # Adjust the discharging power due to the stationary deviations
            P_bat2pv_out = np.maximum(0, P_bat2pv_out + _P_BAT2AC_DEV)
            # Adjust the discharging power to the maximum discharging power
            P_bat2pv_out = np.minimum(P_bat2pv_out, _P_BAT2PV_out * 1000)
            # Adjust the discharging power due to the settling time
            # (modeled by a first-order time delay element)
            if SETTLING:
                if t > 0:
                    P_bat2pv_out = _tde * _Pbat2pv_out[t-1] + _tde * (P_bat2pv_out - _Pbat2pv_out[t-1]) * _ftde + P_bat2pv_out * (not _tde)
                else:
                    P_bat2pv_out = _tde * _Pbat2pv_out0 + _tde * (P_bat2pv_out - _Pbat2pv_out0) * _ftde + P_bat2pv_out * (not _tde)
            # Recalculate Ppv(t) with limited PV2AC input power
            _Ppv[t] = np.minimum(_P_PV2AC_in * 1000, _Ppv[t])
            # Limit the discharging power to the maximum AC power output of the PV-battery system
            P_bat2pv_out = np.minimum(_P_PV2AC_in * 1000 - _Ppv[t], P_bat2pv_out)
            # Normalized discharging power
            pbat2pv = P_bat2pv_out / _P_BAT2PV_out / 1000
            # DC power of the battery affected by the BAT2PV conversion losses
            P_bat = -1*(P_bat2pv_out+(_BAT2PV_a_out * pbat2pv**2 + _BAT2PV_b_out * pbat2pv + _BAT2PV_c_out))
            # Realized DC input power of the PV2AC conversion pathway
            P_pv2ac_in = _Ppv[t] + P_bat2pv_out
            # Normalized DC input power of the PV2AC conversion pathway
            ppv2ac = P_pv2ac_in / _P_PV2AC_in / 1000
            # AC power of the PV-battery system
            P_pvbs = np.maximum(0, P_pv2ac_in-(_PV2AC_a_in * ppv2ac**2 + _PV2AC_b_in * ppv2ac + _PV2AC_c_in))
            P_pv2ac_out = P_pvbs
            # Transfer the final values
            _Ppv2ac_out[t] = P_pv2ac_out
            _Pbat2pv_out0 = P_bat2pv_out
            _Pbat2pv_out[t] = P_bat2pv_out
        else: # Neither charging nor discharging of the battery
            # Set the DC power of the battery to zero
            P_bat = 0
            # Limit the power output of the PV generator to the maximum input power
            # of the PV inverter
            _Ppv[t] = np.minimum(_Ppv[t], _P_PV2AC_in * 1000)
            # Normalized DC input power of the PV2AC conversion pathway
            ppv2ac = _Ppv[t] / _P_PV2AC_in / 1000
            # Realized AC power of the PV-battery system
            P_pvbs = np.maximum(0, _Ppv[t] - (_PV2AC_a_in * ppv2ac**2 + _PV2AC_b_in * ppv2ac + _PV2AC_c_in))
            # Transfer the final values
            _Ppv2ac_out[t] = P_pvbs
        # Decision if the standby mode is active
        if P_bat == 0 and _soc0 <= 0: # Standby mode in discharged state
            # DC power consumption of the battery converter
            P_bat = -np.maximum(0, _P_SYS_SOC0_DC)
            if P_pvbs == 0:
                P_pvbs = -_P_SYS_SOC0_AC
        elif P_bat == 0 and P_pvbs > 0 and _soc0 > 0: # Standby mode in fully charged state
            # DC power consumption of the battery converter
            P_bat = -np.maximum(0, _P_SYS_SOC1_DC)
        # Transfer the realized AC power of the battery system and
        # the DC power of the battery
        _Ppvbs[t] = P_pvbs
        _Pbat[t] = P_bat
        # Change the energy content of the battery; Ws to Wh conversion
        if P_bat > 0:
            E_b = E_b0 + P_bat * np.sqrt(_eta_BAT) * _dt / 3600
        elif P_bat < 0:
            E_b = E_b0 + P_bat / np.sqrt(_eta_BAT) * _dt / 3600
        else:
            E_b = E_b0
        # Calculate the state of charge of the battery
        _soc0 = E_b / (_E_BAT)
        _soc[t] = _soc0
        # Adjust the hysteresis threshold to avoid alternation
        # between charging and standby mode due to the DC power
        # consumption of the battery converter.
        if _th and _soc[t] > _SOC_h or _soc[t] > 1:
            _th = True
        else:
            _th = False
    return _soc, _soc0, _Ppv, _Ppvbs, _Pbat, _Ppv2ac_out, _Pbat2pv_out, _Ppv2bat_in
def bat_res_mod(_parameter, _Pl, _Ppv, _Pbat, _dt, *args):
    """Function for calculating energy sums
    :param _parameter: parameter of the system
    :type _parameter: dict
    :param _Pl: load power
    :type _Pl: numpy array
    :param _Ppv: output power of the PV generator
    :type _Ppv: numpy array
    :param _Pbat: DC power of the battery
    :type _Pbat: numpy array
    :param _dt: time step width in seconds
    :type _dt: integer
    :param args: topology dependent extra arrays.
        For AC-coupled systems ('Top' == 'AC'): Ppvs, Pbs, Pperi.
        For DC- and PV-coupled systems ('Top' == 'DC'/'PV'):
        Ppv2ac, Ppv2bat_in, Ppvbs, Pperi.
    :return: energy sums in MWh
    :rtype: dict
    """
    _E = dict()
    if _parameter['Top'] == 'AC': # AC-coupled systems
        _Ppvs = args[0] # AC output power of the PV system
        _Pbs = args[1] # AC power of the battery system
        # Additional power consumption of the other system components
        _Pperi = args[2]
    elif _parameter['Top'] == 'DC' or _parameter['Top'] == 'PV': # DC- and PV-coupled systems
        _Ppv2ac = args[0] # AC output power of the PV2AC conversion pathway
        _Ppv2bat_in = args[1] # Input power of the PV2BAT conversion pathway
        _Ppvbs = args[2] # AC power of the PV-battery system
        # Additional power consumption of the other system components
        _Pperi = args[3]
        _Ppv2ac_in = _Ppv - _Ppv2bat_in # Input power of the PV2AC conversion pathway
    # Total load including the power consumption of the other system components
    _Plt = _Pl + _Pperi
    # DC input power of the battery (charged)
    _Pbatin = np.maximum(0, _Pbat)
    # DC output power of the battery (discharged)
    _Pbatout = np.minimum(0, _Pbat)
    # Maximum PV feed-in power
    _P_ac2g_max = _parameter['p_ac2g_max'] * _parameter['P_PV'] * 1000
    if _parameter['Top'] == 'AC': # AC-coupled systems
        # Residual power without curtailment
        _Pr = _Ppvs - _Plt
        # AC input power of the battery system
        _Pac2bs = np.maximum(0, _Pbs)
        # AC output power of the battery system
        _Pbs2ac = np.minimum(0, _Pbs)
        # Negative residual power (residual load demand)
        _Prn = np.minimum(0, _Pr)
        # Positive residual power (surplus PV power)
        _Prp = np.maximum(0, _Pr)
        # Direct use of PV power by the load
        _Ppvs2l = np.minimum(_Ppvs, _Plt)
        # PV charging power
        _Ppvs2bs = np.minimum(_Prp, _Pac2bs)
        # Grid charging power
        _Pg2bs = np.maximum(_Pac2bs - _Prp, 0)
        # Grid supply power of the load
        _Pg2l = np.minimum(_Prn - _Pbs2ac, 0)
        # Battery supply power of the load
        _Pbs2l = np.maximum(_Prn, _Pbs2ac)
        # Battery feed-in power
        _Pbs2g = np.minimum(_Pbs2ac - _Prn, 0)
        # PV feed-in power including curtailment
        _Ppvs2g = np.minimum(np.maximum(_Prp - _Pac2bs, 0), _P_ac2g_max)
        # Power demand from the grid
        _Pg2ac = _Pg2l - _Pg2bs
        # Feed-in power to the grid
        _Pac2g = _Ppvs2g - _Pbs2g
        # Grid power
        _Pg = _Pac2g + _Pg2ac
        # Curtailed PV power (AC output power)
        _Pct = np.maximum(_Prp - _Pac2bs, 0) - _Ppvs2g
        # AC output power of the PV system including curtailment
        _Ppvs = _Ppvs - _Pct
        # Residual power including curtailment
        _Pr = _Ppvs - _Plt
        # Index for PV curtailment
        _idx = np.where(_Pct > 0)[0]
        for i in range(len(_idx)):
            _tct = _idx[i]
            # Normalized output power of the PV inverter
            _ppvinvout = _Ppvs[_tct] / _parameter['P_PV2AC_out'] / 1000
            # DC output power of the PV generator taking into account the
            # conversion and curtailment losses
            _Ppv[_tct] = _Ppvs[_tct] + (_parameter['PV2AC_a_out'] * _ppvinvout **
                                        2 + _parameter['PV2AC_b_out'] * _ppvinvout + _parameter['PV2AC_c_out'])
    elif _parameter['Top'] == 'DC' or _parameter['Top'] == 'PV': # DC- and PV-coupled systems
        # Grid power demand of the PV-battery system
        _Pg2pvbs = np.minimum(0, _Ppvbs)
        # AC input power of the PV-battery system
        _Pac2pvbs = _Pg2pvbs
        # AC output power of the PV-battery system
        _Ppvbs2ac = np.maximum(0, _Ppvbs)
        # Load supply power by the PV-battery system
        _Ppvbs2l = np.minimum(_Plt, _Ppvbs2ac)
        # Load supply power by the grid
        _Pg2l = _Plt - _Ppvbs2l
        # Direct use of PV power by the load
        _Ppv2l = np.minimum(_Plt, _Ppv2ac)
        # PV feed-in power including curtailment
        _Ppv2g = np.minimum(_Ppv2ac - _Ppv2l, _P_ac2g_max)
        # Curtailed PV power (AC output power)
        _Pct = _Ppv2ac - _Ppv2l - _Ppv2g
        if np.sum(_Pct) > 0:
            # Power of the PV-battery system including curtailment
            _Ppvbs = _Ppvbs - _Pct
            # AC output power of the PV-battery system including curtailment
            _Ppvbs2ac = np.maximum(0, _Ppvbs)
            # AC output power of the PV2AC conversion pathway including curtailment
            _Ppv2ac = _Ppv2ac - _Pct
            # Index for PV curtailment
            _idx = np.where(_Pct > 0)[0]
            for i in range(len(_idx)):
                _tct = _idx[i]
                # Specific AC output power of the PV2AC conversion pathway
                _ppv2ac = _Ppv2ac[_tct] / _parameter['P_PV2AC_out'] / 1000
                # DC input power of the PV2AC conversion pathway including curtailment
                _Ppv2ac_in[_tct] = _Ppv2ac[_tct] + (_parameter['PV2AC_a_out'] * _ppv2ac **
                                                    2 + _parameter['PV2AC_b_out'] * _ppv2ac + _parameter['PV2AC_c_out'])
            # DC output power of the PV generator including curtailment
            _Ppv = _Ppv2ac_in + _Ppv2bat_in
        # Grid power including curtailment
        _Pg = _Ppvbs-_Plt
        # Feed-in power to the grid including curtailment
        _Pac2g = np.maximum(0, _Pg)
        # Power demand from the grid
        _Pg2ac = np.minimum(0, _Pg)
    # Energy sums in MWH
    # Electrical demand including the energy consumption of the other system components
    _E['El'] = np.sum(np.abs(_Plt)) * _dt / 3.6e9
    # DC output of the PV generator including curtailment
    _E['Epv'] = np.sum(np.abs(_Ppv)) * _dt / 3.6e9
    # DC input of the battery (charged)
    _E['Ebatin'] = np.sum(np.abs(_Pbatin)) * _dt / 3.6e9
    # DC output of the battery (discharged)
    _E['Ebatout'] = np.sum(np.abs(_Pbatout)) * _dt / 3.6e9
    # Grid feed-in
    _E['Eac2g'] = np.sum(np.abs(_Pac2g)) * _dt / 3.6e9
    # Grid demand
    _E['Eg2ac'] = np.sum(np.abs(_Pg2ac)) * _dt / 3.6e9
    # Load supply by the grid
    _E['Eg2l'] = np.sum(np.abs(_Pg2l)) * _dt / 3.6e9
    # Demand of the other system components
    _E['Eperi'] = np.sum(np.abs(_Pperi)) * _dt / 3.6e9
    # Curtailed PV energy
    _E['Ect'] = np.sum(np.abs(_Pct)) * _dt / 3.6e9
    if _parameter['Top'] == 'AC': # AC-coupled systems
        # AC output of the PV system including curtailment
        _E['Epvs'] = np.sum(np.abs(_Ppvs)) * _dt / 3.6e9
        # AC input of the battery system
        _E['Eac2bs'] = np.sum(np.abs(_Pac2bs)) * _dt / 3.6e9
        # AC output of the battery system
        _E['Ebs2ac'] = np.sum(np.abs(_Pbs2ac)) * _dt / 3.6e9
        # Direct use of PV energy
        _E['Epvs2l'] = np.sum(np.abs(_Ppvs2l)) * _dt / 3.6e9
        # PV charging
        _E['Epvs2bs'] = np.sum(np.abs(_Ppvs2bs)) * _dt / 3.6e9
        # Grid charging
        _E['Eg2bs'] = np.sum(np.abs(_Pg2bs)) * _dt / 3.6e9
        # PV feed-in
        _E['Epvs2g'] = np.sum(np.abs(_Ppvs2g)) * _dt / 3.6e9
        # Load supply by the battery system
        _E['Ebs2l'] = np.sum(np.abs(_Pbs2l)) * _dt / 3.6e9
        # Battery feed-in
        _E['Ebs2g'] = np.sum(np.abs(_Pbs2g)) * _dt / 3.6e9
    elif _parameter['Top'] == 'DC' or _parameter['Top'] == 'PV': # DC- and PV-coupled systems
        # Grid demand of the PV-battery system
        _E['Eg2pvbs'] = np.sum(np.abs(_Pg2pvbs)) * _dt / 3.6e9
        # AC input of the PV-battery system
        _E['Eac2pvbs'] = np.sum(np.abs(_Pac2pvbs)) * _dt / 3.6e9
        # AC output of the PV-battery system
        _E['Epvbs2ac'] = np.sum(np.abs(_Ppvbs2ac)) * _dt / 3.6e9
        # Load supply by the PV-battery system
        _E['Epvbs2l'] = np.sum(np.abs(_Ppvbs2l)) * _dt / 3.6e9
    return _E
def bat_res_mod_ideal(_parameter, _Pl, _Ppv, _Pbat, _dt, *args):
    """Calculate the energy sums for the ideal (lossless) reference case.

    :param _parameter: parameter of the system
    :type _parameter: dict
    :param _Pl: load power
    :type _Pl: numpy array
    :param _Ppv: output power of the PV generator
    :type _Ppv: numpy array
    :param _Pbat: DC power of the battery
    :type _Pbat: numpy array
    :param _dt: time step width in seconds
    :type _dt: integer
    :param args: topology dependent extra arrays.
        For 'Top' == 'AC': Ppvs, Pbs, Pperi.
        For 'Top' == 'DC': Ppv2ac, Ppv2bat_in, Ppvbs, Pperi.
    :return: dictionary of energy sums
    :rtype: dict

    NOTE(review): unlike bat_res_mod, the energy sums below do not
    multiply by _dt — this looks like it assumes 1-second resolution
    data; confirm against the reference-case inputs.
    """
    E = dict() # Dictionary to store energy sums
    if _parameter['Top'] == 'AC':
        Ppvs = args[0] # AC output power of the PV system
        Pbs = args[1] # AC power of the battery system
        Pperi = args[2] # Additional power consumption of the other system components
    elif _parameter['Top'] == 'DC':
        Ppv2ac = args[0]
        Ppv2bat_in = args[1]
        Ppvbs = args[2]
        Pperi = args[3]
        Ppv2ac_in = _Ppv - Ppv2bat_in
    # Additional power consumption of the other system components
    # NOTE(review): this overrides the Pperi taken from args above — the
    # ideal case apparently assumes no peripheral consumption; confirm.
    Pperi = np.zeros_like(_Ppv)
    # Total load including the power consumption of the other system components
    Plt = _Pl
    # DC input power of the battery (charged)
    Pbatin = np.maximum(0, _Pbat)
    # DC output power of the battery (discharged)
    Pbatout = np.minimum(0, _Pbat)
    if _parameter['Top'] == 'AC':
        # Grid power
        Pg = Ppvs - _Pl - Pbs
        # Residual power
        Pr = Ppvs - Plt
        # AC input power of the battery system
        Pac2bs = np.maximum(0, Pbs)
        # AC output power of the battery system
        Pbs2ac = np.minimum(0, Pbs)
        # Negative residual power (residual load demand)
        Prn = np.minimum(0, Pr)
        # Positive residual power (surplus PV power)
        Prp = np.maximum(0, Pr)
        # Direct use of PV power by the load
        Ppvs2l = np.minimum(Ppvs, Plt)
        # PV charging power
        Ppvs2bs=np.minimum(Prp, Pac2bs)
        # Grid charging power
        Pg2bs=np.maximum(Pac2bs - Prp, 0)
        # Grid supply power of the load
        Pg2l=np.minimum(Prn - Pbs2ac, 0)
        # Battery supply power of the load
        Pbs2l=np.maximum(Prn, Pbs2ac)
        # Battery feed-in power
        Pbs2g=np.minimum(Pbs2ac - Prn, 0)
        # PV feed-in power
        Ppvs2g=np.maximum(Prp - Pac2bs, 0)
    elif _parameter['Top'] == 'DC':
        # Grid power
        Pg = Ppvbs - _Pl
        # Grid power demand of the PV-battery system
        Pg2pvbs = np.minimum(0, Ppvbs)
        # AC input power of the PV-battery system
        Pac2pvbs = Pg2pvbs
        # AC output power of the PV-battery system
        Ppvbs2ac = np.maximum(0, Ppvbs)
        # Load supply power by the PV-battery system
        Ppvbs2l = np.minimum(_Pl, Ppvbs2ac)
        # Load supply power by the grid
        Pg2l = (Plt - Ppvbs2l)
        # Curtailed PV power (AC output power); the ideal case has no curtailment
        Pct = np.zeros_like(_Ppv)
    # Power demand from the grid
    Pg2ac = np.minimum(0, Pg)
    # Feed-in power to the grid
    Pac2g=np.maximum(0, Pg)
    # Energy sums
    # Electrical demand including the energy consumption of the other system components
    E['El'] = np.sum(np.abs(Plt)) / 3.6e9
    # DC output of the PV generator including curtailment
    E['Epv'] = np.sum(np.abs(_Ppv)) / 3.6e9
    # DC input of the battery (charged)
    E['Ebatin'] = np.sum(np.abs(Pbatin)) / 3.6e9
    # DC output of the battery (discharged)
    E['Ebatout'] = np.sum(np.abs(Pbatout)) / 3.6e9
    # Grid feed-in
    E['Eac2g'] = np.sum(np.abs(Pac2g)) / 3.6e9
    # Grid demand
    E['Eg2ac'] = np.sum(np.abs(Pg2ac)) / 3.6e9
    # Load supply by the grid
    E['Eg2l'] = np.sum(np.abs(Pg2l)) / 3.6e9
    # Demand of the other system components
    E['Eperi'] = np.sum(np.abs(Pperi)) / 3.6e9
    # Curtailed PV energy
    E['Ect'] = np.sum(np.abs(Pct)) / 3.6e9
    if _parameter['Top'] == 'AC':
        # AC output of the PV system including curtailment
        E['Epvs']=np.sum(np.abs(Ppvs)) / 3.6e9
        # AC input of the battery system
        E['Eac2bs']=np.sum(np.abs(Pac2bs)) / 3.6e9
        # AC output of the battery system
        E['Ebs2ac']=np.sum(np.abs(Pbs2ac)) / 3.6e9
        # Direct use of PV energy
        E['Epvs2l']=np.sum(np.abs(Ppvs2l)) / 3.6e9
        # PV charging
        E['Epvs2bs']=np.sum(np.abs(Ppvs2bs)) / 3.6e9
        # Grid charging
        E['Eg2bs']=np.sum(np.abs(Pg2bs)) / 3.6e9
        # PV feed-in
        E['Epvs2g']=np.sum(np.abs(Ppvs2g)) / 3.6e9
        # Load supply by the battery system
        E['Ebs2l']=np.sum(np.abs(Pbs2l)) / 3.6e9
        # Battery feed-in
        E['Ebs2g']=np.sum(np.abs(Pbs2g)) / 3.6e9
    elif _parameter['Top'] == 'DC':
        # Grid demand of the PV-battery system
        E['Eg2pvbs'] = np.sum(np.abs(Pg2pvbs)) / 3.6e9
        # AC input of the PV-battery system
        E['Eac2pvbs'] = np.sum(np.abs(Pac2pvbs)) / 3.6e9
        # AC output of the PV-battery system
        E['Epvbs2ac'] = np.sum(np.abs(Ppvbs2ac)) / 3.6e9
        # Load supply by the PV-battery system
        E['Epvbs2l'] = np.sum(np.abs(Ppvbs2l)) / 3.6e9
    return E
def load_parameter(fname, col_name):
    """Load the system parameters from an Excel file.

    :param fname: path to the Excel file
    :type fname: string
    :param col_name: column to read the data from
    :type col_name: string
    :return: dictionary holding the parameters from the Excel sheet
    :rtype: dict
    """
    workbook = load_workbook(fname, data_only=True)
    sheet = workbook['Data']  # parameters live on the 'Data' sheet
    # Keys come from column E, values from the requested column;
    # 'ns' (not specified) entries become None.
    keys = (cell.value for cell in sheet['E'][1:])
    values = (cell.value if cell.value != 'ns' else None
              for cell in sheet[col_name][1:])
    parameter = dict(zip(keys, values))
    # Drop the rows that have no key in column E
    del parameter[None]
    # Parameters taken from fixed cells of the requested column
    fixed_cells = {
        'P_PV2AC_out_PVINV': 15,
        'P_PV2AC_out': 24,
        'P_AC2BAT_in_DCC': 25,
        'P_AC2BAT_in': 26,
        'P_BAT2AC_out': 27,
        'P_BAT2AC_out_DCC': 28,
    }
    for key, row in fixed_cells.items():
        parameter[key] = sheet[col_name][row].value
    # Convert the reference-case flags to booleans
    for ref in ('ref_1', 'ref_2'):
        if parameter[ref] == 'yes':
            parameter[ref] = True
        elif parameter[ref] == 'no':
            parameter[ref] = False
    # Specific parameters of DC-coupled systems
    if parameter['Top'] == 'DC':
        parameter['P_AC2BAT_in'] = parameter['P_AC2BAT_in_DCC']  # Nominal charging power (AC) in kW
        parameter['P_BAT2AC_out'] = parameter['P_BAT2AC_out_DCC']
    # Specific parameters of PV inverters and AC-coupled systems
    # NOTE(review): due to operator precedence the None-check only guards the
    # 'AC' case (parentheses below make the original behavior explicit);
    # confirm that 'PVINV' is meant to bypass the check.
    if parameter['Top'] == 'PVINV' or (parameter['Top'] == 'AC' and parameter['P_PV2AC_out_PVINV'] is not None):
        parameter['P_PV2AC_out'] = parameter['P_PV2AC_out_PVINV']
    # Specific parameters of PV-coupled systems
    if parameter['Top'] == 'PV':
        parameter['P_BAT2PV_in'] = parameter['P_BAT2AC_in']
        parameter['P_BAT2AC_out'] = parameter['P_BAT2AC_out_DCC']
    # Replace placeholder entries ('ns', 'o', 'c', ' ') with None
    for key, value in parameter.items():
        if value in ('ns', 'o', 'c', ' '):
            parameter[key] = None
    # Convert the power ratings from W to kW
    for par in ('P_PV2AC_in', 'P_PV2AC_out_PVINV', 'P_PV2AC_out',
                'P_AC2BAT_in_DCC', 'P_AC2BAT_in', 'P_BAT2AC_out',
                'P_BAT2AC_out_DCC', 'P_PV2BAT_in', 'P_BAT2PV_out',
                'P_PV2BAT_out', 'P_BAT2AC_in'):
        if parameter[par]:
            parameter[par] /= 1000
    return parameter
def _loss_coefficients(p_norm, eta, p_out_nominal_kw, p_in_nominal_kw):
    """Fit quadratic power-loss curves for one conversion pathway.

    :param p_norm: normalized output power sampling points (0..1)
    :param eta: conversion efficiencies at the sampling points (0..1)
    :param p_out_nominal_kw: nominal output power of the pathway in kW
    :param p_in_nominal_kw: nominal input power of the pathway in kW,
        used to normalize the input power for the input-referred fit
    :return: tuple (coeff_in, coeff_out) of quadratic coefficients
        (a, b, c) of the power loss in W, referred to the normalized
        input and output power respectively
    """
    # Absolute input and output power in W
    p_out = p_out_nominal_kw * p_norm * 1000
    p_in = p_out / eta
    # Absolute power loss in W
    loss_in = (1 - eta) * p_in
    loss_out = (1 / eta - 1) * p_out
    # Polynomial curve fitting parameters of the power loss functions in W
    coeff_in = np.polyfit(p_in / p_in_nominal_kw / 1000, loss_in, 2)
    coeff_out = np.polyfit(p_norm, loss_out, 2)
    return coeff_in, coeff_out


def eta2abc(parameter):
    """Calculate the parameters of the power loss functions (quadratic
    equations) from the path efficiencies.

    For every conversion pathway present in the given topology, fits the
    absolute power loss in W as a quadratic function of the normalized
    input and output power and stores the coefficients under
    '<PATH>_a_in' .. '<PATH>_c_out' in the parameter dict.

    :param parameter: Holds parameters of the system
    :type parameter: dict
    :return: the same dict, extended with the fitted coefficients and
        the derived parameters (E_BAT, *_DEV, t_CONSTANT, SOC_h,
        p_ac2g_max)
    :rtype: dict
    """
    # PV2AC conversion pathway
    # NOTE(review): Python precedence makes the None-checks apply only to the
    # 'PV'/'AC' clauses (parentheses added to make that explicit) — confirm
    # this matches the intended topology handling.
    if (parameter['Top'] == 'DC' or parameter['Top'] == 'PVINV'
            or (parameter['Top'] == 'PV' and parameter['P_PV2AC_out'] is not None)
            or (parameter['Top'] == 'AC' and parameter['P_PV2AC_out'] is not None)):
        # Sampling points and corresponding efficiencies
        p_pv2ac = np.fromiter((value for key, value in parameter.items() if 'p_PV2AC_' in key and value is not None), float)
        eta_pv2ac = np.fromiter((value / 100 for key, value in parameter.items() if 'eta_PV2AC_' in key and value is not None), float)
        c_in, c_out = _loss_coefficients(p_pv2ac, eta_pv2ac, parameter['P_PV2AC_out'], parameter['P_PV2AC_in'])
        parameter['PV2AC_a_in'], parameter['PV2AC_b_in'], parameter['PV2AC_c_in'] = c_in
        parameter['PV2AC_a_out'], parameter['PV2AC_b_out'], parameter['PV2AC_c_out'] = c_out
    # PV2BAT conversion pathway
    if parameter['Top'] == 'DC' or parameter['Top'] == 'PV':
        # Sampling points and corresponding efficiencies
        p_pv2bat = np.array([value for key, value in parameter.items() if 'p_PV2BAT_' in key])
        eta_pv2bat = np.array([value / 100 for key, value in parameter.items() if 'eta_PV2BAT_' in key])
        # Derive the nominal input power of the PV2BAT pathway if missing
        if parameter['P_PV2BAT_in'] is None:
            parameter['P_PV2BAT_in'] = parameter['P_PV2BAT_out'] / (parameter['eta_PV2BAT_100'] / 100)
        c_in, c_out = _loss_coefficients(p_pv2bat, eta_pv2bat, parameter['P_PV2BAT_out'], parameter['P_PV2BAT_in'])
        parameter['PV2BAT_a_in'], parameter['PV2BAT_b_in'], parameter['PV2BAT_c_in'] = c_in
        parameter['PV2BAT_a_out'], parameter['PV2BAT_b_out'], parameter['PV2BAT_c_out'] = c_out
    # AC2BAT conversion pathway
    if parameter['Top'] == 'AC' or (parameter['Top'] == 'DC' and parameter['P_AC2BAT_in'] is not None):
        # Sampling points and corresponding efficiencies
        p_ac2bat = np.fromiter((value for key, value in parameter.items() if 'p_AC2BAT_' in key), float)
        eta_ac2bat = np.fromiter((value / 100 for key, value in parameter.items() if 'eta_AC2BAT_' in key), float)
        # NOTE(review): the output power is scaled with P_PV2BAT_out rather
        # than an AC2BAT rating — kept as in the original; verify against the
        # data sheet definitions.
        c_in, c_out = _loss_coefficients(p_ac2bat, eta_ac2bat, parameter['P_PV2BAT_out'], parameter['P_AC2BAT_in'])
        parameter['AC2BAT_a_in'], parameter['AC2BAT_b_in'], parameter['AC2BAT_c_in'] = c_in
        parameter['AC2BAT_a_out'], parameter['AC2BAT_b_out'], parameter['AC2BAT_c_out'] = c_out
    # BAT2AC conversion pathway
    if (parameter['Top'] == 'AC' or parameter['Top'] == 'DC'
            or (parameter['Top'] == 'PV' and parameter['P_BAT2AC_out'] is not None)):
        # Sampling points and corresponding efficiencies
        p_bat2ac = np.fromiter((value for key, value in parameter.items() if 'p_BAT2AC_' in key), float)
        eta_bat2ac = np.fromiter((value / 100 for key, value in parameter.items() if 'eta_BAT2AC_' in key), float)
        c_in, c_out = _loss_coefficients(p_bat2ac, eta_bat2ac, parameter['P_BAT2AC_out'], parameter['P_BAT2AC_in'])
        parameter['BAT2AC_a_in'], parameter['BAT2AC_b_in'], parameter['BAT2AC_c_in'] = c_in
        parameter['BAT2AC_a_out'], parameter['BAT2AC_b_out'], parameter['BAT2AC_c_out'] = c_out
    # BAT2PV conversion pathway
    if parameter['Top'] == 'PV':
        # Sampling points and corresponding efficiencies
        p_bat2pv = np.fromiter((value for key, value in parameter.items() if 'p_BAT2PV_' in key), float)
        eta_bat2pv = np.fromiter((value / 100 for key, value in parameter.items() if 'eta_BAT2PV_' in key), float)
        # NOTE(review): the input normalization uses P_BAT2AC_in (original had
        # a TODO here) — possibly should be a BAT2PV rating; kept unchanged.
        c_in, c_out = _loss_coefficients(p_bat2pv, eta_bat2pv, parameter['P_BAT2PV_out'], parameter['P_BAT2AC_in'])
        parameter['BAT2PV_a_in'], parameter['BAT2PV_b_in'], parameter['BAT2PV_c_in'] = c_in
        parameter['BAT2PV_a_out'], parameter['BAT2PV_b_out'], parameter['BAT2PV_c_out'] = c_out
    # Additional parameters
    # Mean battery capacity in kWh; best-effort, None if inputs are missing
    try:
        parameter['E_BAT'] = (parameter['E_BAT_usable'] / parameter['eta_BAT'] * 100 + parameter['E_BAT_usable']) / 2
    except (KeyError, TypeError):
        parameter['E_BAT'] = None
    # Mean stationary deviation of the charging power in W
    try:
        parameter['P_PV2BAT_DEV'] = parameter['P_PV2BAT_DEV_IMPORT'] - parameter['P_PV2BAT_DEV_EXPORT']
    except (KeyError, TypeError):
        parameter['P_PV2BAT_DEV'] = None
    if parameter['Top'] == 'AC':
        parameter['P_AC2BAT_DEV'] = parameter['P_PV2BAT_DEV']
    # Mean stationary deviation of the discharging power in W
    try:
        parameter['P_BAT2AC_DEV'] = parameter['P_BAT2AC_DEV_EXPORT'] - parameter['P_BAT2AC_DEV_IMPORT']
    except (KeyError, TypeError):
        parameter['P_BAT2AC_DEV'] = None
    # Time constant for the first-order time delay element in s
    try:
        parameter['t_CONSTANT'] = (parameter['t_SETTLING'] - round(parameter['t_DEAD'])) / 3
    except (KeyError, TypeError):
        parameter['t_CONSTANT'] = None
    # Hysteresis threshold for the recharging of the battery
    parameter['SOC_h'] = 0.98
    # Feed-in power limit in kW/kWp
    parameter['p_ac2g_max'] = 0.7
    return parameter
def load_ref_case(fname, name):
    """Load PV power or load profiles from the reference cases.

    :param fname: path to the archive file holding the reference data
    :type fname: string
    :param name: identifier of the stored array (PV power or load)
    :type name: string
    :return: the requested array from the reference case
    :rtype: numpy array
    """
    with open(fname, 'rb') as file_handle:
        # Index the archive while the file is still open
        return np.load(file_handle)[name]
def resample_data_frame(df):
    """Resample a time-indexed data frame to 15-minute mean values.

    :param df: data frame
    :type df: pandas data frame
    :return: data frame resampled to 15-minute means
    :rtype: pandas data frame
    """
    return df.resample('15min').mean()
def transform_dict_to_array(parameter):
    """Transform a dict of system parameters into a numpy array.

    The order of the values in the returned array depends on the system
    topology (``parameter['Top']``) and matches the positional layout the
    simulation functions expect.

    :param parameter: dict of system parameters (must contain key 'Top'
        with value 'AC', 'DC' or 'PV')
    :type parameter: dict
    :return: array of system parameters
    :rtype: numpy array
    :raises ValueError: if ``parameter['Top']`` is not a known topology
    """
    # Ordered key lists per topology; the index of each key is the
    # position of its value in the resulting array.
    keys_by_topology = {
        'AC': (
            'E_BAT', 'eta_BAT', 't_CONSTANT', 'P_SYS_SOC0_DC',
            'P_SYS_SOC0_AC', 'P_SYS_SOC1_DC', 'P_SYS_SOC1_AC',
            'AC2BAT_a_in', 'AC2BAT_b_in', 'AC2BAT_c_in',
            'BAT2AC_a_out', 'BAT2AC_b_out', 'BAT2AC_c_out',
            'P_AC2BAT_DEV', 'P_BAT2AC_DEV', 'P_BAT2AC_out',
            'P_AC2BAT_in', 't_DEAD', 'SOC_h',
        ),
        'DC': (
            'E_BAT', 'P_PV2AC_in', 'P_PV2AC_out', 'P_PV2BAT_in',
            'P_BAT2AC_out', 'PV2AC_a_in', 'PV2AC_b_in', 'PV2AC_c_in',
            'PV2BAT_a_in', 'PV2BAT_b_in',
            'BAT2AC_a_out', 'BAT2AC_b_out', 'BAT2AC_c_out',
            'eta_BAT', 'SOC_h', 'P_PV2BAT_DEV', 'P_BAT2AC_DEV',
            't_DEAD', 't_CONSTANT',
            'P_SYS_SOC1_DC', 'P_SYS_SOC0_AC', 'P_SYS_SOC0_DC',
        ),
        'PV': (
            'E_BAT', 'P_PV2AC_in', 'P_PV2AC_out', 'P_PV2BAT_in',
            'P_BAT2PV_out', 'PV2AC_a_in', 'PV2AC_b_in', 'PV2AC_c_in',
            'PV2BAT_a_in', 'PV2BAT_b_in', 'PV2BAT_c_in',
            'PV2AC_a_out', 'PV2AC_b_out', 'PV2AC_c_out',
            'BAT2PV_a_out', 'BAT2PV_b_out', 'BAT2PV_c_out',
            'eta_BAT', 'SOC_h', 'P_PV2BAT_DEV', 'P_BAT2AC_DEV',
            'P_SYS_SOC1_DC', 'P_SYS_SOC0_AC', 'P_SYS_SOC0_DC',
            't_DEAD', 't_CONSTANT',
        ),
    }
    try:
        keys = keys_by_topology[parameter['Top']]
    except KeyError:
        # Previously this fell through to `return d` with `d` undefined,
        # raising an opaque NameError; fail with a clear message instead.
        raise ValueError(
            "Unknown topology %r; expected 'AC', 'DC' or 'PV'"
            % (parameter.get('Top'),))
    # Build the array in one pass instead of repeated np.append calls.
    return np.array([parameter[k] for k in keys])
def calculate_spi(_E_real, _E_ideal):
    """Calculate the System Performance Index (SPI) of a PV-battery system.

    The SPI relates the grid-cost reduction achieved by the real (lossy)
    system to the reduction a lossless system would achieve.

    :param _E_real: energy sums of the real system (keys 'Eg2ac', 'Eac2g') in MWh/a
    :param _E_ideal: energy sums of the ideal system (keys 'El', 'Eg2ac', 'Eac2g') in MWh/a
    :return: System Performance Index (SPI)
    """
    # Reference-case tariffs in Euro/kWh.
    price_grid = 0.30      # grid electricity price
    tariff_feed_in = 0.12  # grid feed-in tariff
    # Annual grid electricity costs without any PV-battery system in Euro/a.
    cost_reference = _E_ideal['El'] * price_grid * 1000
    # Net annual grid costs with the lossless and the real system in Euro/a.
    cost_ideal = _E_ideal['Eg2ac'] * price_grid * 1000 - _E_ideal['Eac2g'] * tariff_feed_in * 1000
    cost_real = _E_real['Eg2ac'] * price_grid * 1000 - _E_real['Eac2g'] * tariff_feed_in * 1000
    # SPI: realized cost reduction relative to the ideal cost reduction.
    return (cost_reference - cost_real) / (cost_reference - cost_ideal)
| [
"numpy.sqrt",
"numpy.polyfit",
"numpy.array",
"numpy.where",
"numpy.exp",
"numpy.maximum",
"numpy.abs",
"numpy.ones",
"pyModbusTCP.utils.word_list_to_long",
"numpy.int16",
"csv.writer",
"pyModbusTCP.utils.decode_ieee",
"numba.jit",
"pyModbusTCP.client.ModbusClient",
"numpy.ones_like",
... | [((25096, 25117), 'numba.jit', 'nb.jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (25102, 25117), True, 'import numba as nb\n'), ((32273, 32294), 'numba.jit', 'nb.jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (32279, 32294), True, 'import numba as nb\n'), ((33578, 33599), 'numba.jit', 'nb.jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (33584, 33599), True, 'import numba as nb\n'), ((44207, 44228), 'numba.jit', 'nb.jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (44213, 44228), True, 'import numba as nb\n'), ((45042, 45063), 'numba.jit', 'nb.jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (45048, 45063), True, 'import numba as nb\n'), ((26791, 26817), 'numpy.maximum', 'np.maximum', (['(2)', '(1 + _t_DEAD)'], {}), '(2, 1 + _t_DEAD)\n', (26801, 26817), True, 'import numpy as np\n'), ((36195, 36221), 'numpy.maximum', 'np.maximum', (['(2)', '(1 + _t_DEAD)'], {}), '(2, 1 + _t_DEAD)\n', (36205, 36221), True, 'import numpy as np\n'), ((47654, 47680), 'numpy.maximum', 'np.maximum', (['(2)', '(1 + _t_DEAD)'], {}), '(2, 1 + _t_DEAD)\n', (47664, 47680), True, 'import numpy as np\n'), ((58210, 58230), 'numpy.maximum', 'np.maximum', (['(0)', '_Pbat'], {}), '(0, _Pbat)\n', (58220, 58230), True, 'import numpy as np\n'), ((58298, 58318), 'numpy.minimum', 'np.minimum', (['(0)', '_Pbat'], {}), '(0, _Pbat)\n', (58308, 58318), True, 'import numpy as np\n'), ((65829, 65848), 'numpy.zeros_like', 'np.zeros_like', (['_Ppv'], {}), '(_Ppv)\n', (65842, 65848), True, 'import numpy as np\n'), ((66006, 66026), 'numpy.maximum', 'np.maximum', (['(0)', '_Pbat'], {}), '(0, _Pbat)\n', (66016, 66026), True, 'import numpy as np\n'), ((66093, 66113), 'numpy.minimum', 'np.minimum', (['(0)', '_Pbat'], {}), '(0, _Pbat)\n', (66103, 66113), True, 'import numpy as np\n'), ((67730, 67749), 'numpy.zeros_like', 'np.zeros_like', (['_Ppv'], {}), '(_Ppv)\n', (67743, 67749), True, 'import numpy as np\n'), ((67797, 67814), 'numpy.minimum', 'np.minimum', (['(0)', 
'Pg'], {}), '(0, Pg)\n', (67807, 67814), True, 'import numpy as np\n'), ((67859, 67876), 'numpy.maximum', 'np.maximum', (['(0)', 'Pg'], {}), '(0, Pg)\n', (67869, 67876), True, 'import numpy as np\n'), ((70332, 70368), 'openpyxl.load_workbook', 'load_workbook', (['fname'], {'data_only': '(True)'}), '(fname, data_only=True)\n', (70345, 70368), False, 'from openpyxl import load_workbook\n'), ((2091, 2114), 'numpy.zeros_like', 'np.zeros_like', (['self.ppv'], {}), '(self.ppv)\n', (2104, 2114), True, 'import numpy as np\n'), ((2172, 2195), 'numpy.zeros_like', 'np.zeros_like', (['self.ppv'], {}), '(self.ppv)\n', (2185, 2195), True, 'import numpy as np\n'), ((2408, 2431), 'numpy.zeros_like', 'np.zeros_like', (['self.ppv'], {}), '(self.ppv)\n', (2421, 2431), True, 'import numpy as np\n'), ((2527, 2550), 'numpy.zeros_like', 'np.zeros_like', (['self.ppv'], {}), '(self.ppv)\n', (2540, 2550), True, 'import numpy as np\n'), ((2664, 2687), 'numpy.zeros_like', 'np.zeros_like', (['self.ppv'], {}), '(self.ppv)\n', (2677, 2687), True, 'import numpy as np\n'), ((3025, 3048), 'numpy.zeros_like', 'np.zeros_like', (['self.ppv'], {}), '(self.ppv)\n', (3038, 3048), True, 'import numpy as np\n'), ((3075, 3098), 'numpy.zeros_like', 'np.zeros_like', (['self.ppv'], {}), '(self.ppv)\n', (3088, 3098), True, 'import numpy as np\n'), ((3132, 3155), 'numpy.zeros_like', 'np.zeros_like', (['self.ppv'], {}), '(self.ppv)\n', (3145, 3155), True, 'import numpy as np\n'), ((3189, 3212), 'numpy.zeros_like', 'np.zeros_like', (['self.ppv'], {}), '(self.ppv)\n', (3202, 3212), True, 'import numpy as np\n'), ((3247, 3270), 'numpy.zeros_like', 'np.zeros_like', (['self.ppv'], {}), '(self.ppv)\n', (3260, 3270), True, 'import numpy as np\n'), ((3299, 3322), 'numpy.zeros_like', 'np.zeros_like', (['self.ppv'], {}), '(self.ppv)\n', (3312, 3322), True, 'import numpy as np\n'), ((5187, 5217), 'numpy.maximum', 'np.maximum', (['(0)', 'self.Ideal.Pbat'], {}), '(0, self.Ideal.Pbat)\n', (5197, 5217), True, 'import numpy as 
np\n'), ((8129, 8152), 'numpy.zeros_like', 'np.zeros_like', (['self.ppv'], {}), '(self.ppv)\n', (8142, 8152), True, 'import numpy as np\n'), ((8210, 8233), 'numpy.zeros_like', 'np.zeros_like', (['self.ppv'], {}), '(self.ppv)\n', (8223, 8233), True, 'import numpy as np\n'), ((8298, 8321), 'numpy.zeros_like', 'np.zeros_like', (['self.ppv'], {}), '(self.ppv)\n', (8311, 8321), True, 'import numpy as np\n'), ((8673, 8696), 'numpy.zeros_like', 'np.zeros_like', (['self.ppv'], {}), '(self.ppv)\n', (8686, 8696), True, 'import numpy as np\n'), ((8723, 8746), 'numpy.zeros_like', 'np.zeros_like', (['self.ppv'], {}), '(self.ppv)\n', (8736, 8746), True, 'import numpy as np\n'), ((8802, 8825), 'numpy.zeros_like', 'np.zeros_like', (['self.ppv'], {}), '(self.ppv)\n', (8815, 8825), True, 'import numpy as np\n'), ((8925, 8948), 'numpy.zeros_like', 'np.zeros_like', (['self.ppv'], {}), '(self.ppv)\n', (8938, 8948), True, 'import numpy as np\n'), ((12392, 12415), 'numpy.zeros_like', 'np.zeros_like', (['self.ppv'], {}), '(self.ppv)\n', (12405, 12415), True, 'import numpy as np\n'), ((12468, 12491), 'numpy.zeros_like', 'np.zeros_like', (['self.ppv'], {}), '(self.ppv)\n', (12481, 12491), True, 'import numpy as np\n'), ((12614, 12637), 'numpy.zeros_like', 'np.zeros_like', (['self.ppv'], {}), '(self.ppv)\n', (12627, 12637), True, 'import numpy as np\n'), ((12726, 12749), 'numpy.zeros_like', 'np.zeros_like', (['self.ppv'], {}), '(self.ppv)\n', (12739, 12749), True, 'import numpy as np\n'), ((12870, 12893), 'numpy.zeros_like', 'np.zeros_like', (['self.ppv'], {}), '(self.ppv)\n', (12883, 12893), True, 'import numpy as np\n'), ((12997, 13020), 'numpy.zeros_like', 'np.zeros_like', (['self.ppv'], {}), '(self.ppv)\n', (13010, 13020), True, 'import numpy as np\n'), ((18811, 18858), 'pyModbusTCP.utils.word_list_to_long', 'utils.word_list_to_long', (['regs'], {'big_endian': '(False)'}), '(regs, big_endian=False)\n', (18834, 18858), False, 'from pyModbusTCP import utils\n'), ((18877, 18902), 
'pyModbusTCP.utils.decode_ieee', 'utils.decode_ieee', (['*zregs'], {}), '(*zregs)\n', (18894, 18902), False, 'from pyModbusTCP import utils\n'), ((26673, 26699), 'numpy.exp', 'np.exp', (['(-_dt / _t_CONSTANT)'], {}), '(-_dt / _t_CONSTANT)\n', (26679, 26699), True, 'import numpy as np\n'), ((36079, 36105), 'numpy.exp', 'np.exp', (['(-_dt / _t_CONSTANT)'], {}), '(-_dt / _t_CONSTANT)\n', (36085, 36105), True, 'import numpy as np\n'), ((47538, 47564), 'numpy.exp', 'np.exp', (['(-_dt / _t_CONSTANT)'], {}), '(-_dt / _t_CONSTANT)\n', (47544, 47564), True, 'import numpy as np\n'), ((58625, 58644), 'numpy.maximum', 'np.maximum', (['(0)', '_Pbs'], {}), '(0, _Pbs)\n', (58635, 58644), True, 'import numpy as np\n'), ((58713, 58732), 'numpy.minimum', 'np.minimum', (['(0)', '_Pbs'], {}), '(0, _Pbs)\n', (58723, 58732), True, 'import numpy as np\n'), ((58807, 58825), 'numpy.minimum', 'np.minimum', (['(0)', '_Pr'], {}), '(0, _Pr)\n', (58817, 58825), True, 'import numpy as np\n'), ((58896, 58914), 'numpy.maximum', 'np.maximum', (['(0)', '_Pr'], {}), '(0, _Pr)\n', (58906, 58914), True, 'import numpy as np\n'), ((58980, 59003), 'numpy.minimum', 'np.minimum', (['_Ppvs', '_Plt'], {}), '(_Ppvs, _Plt)\n', (58990, 59003), True, 'import numpy as np\n'), ((59053, 59078), 'numpy.minimum', 'np.minimum', (['_Prp', '_Pac2bs'], {}), '(_Prp, _Pac2bs)\n', (59063, 59078), True, 'import numpy as np\n'), ((59128, 59157), 'numpy.maximum', 'np.maximum', (['(_Pac2bs - _Prp)', '(0)'], {}), '(_Pac2bs - _Prp, 0)\n', (59138, 59157), True, 'import numpy as np\n'), ((59216, 59245), 'numpy.minimum', 'np.minimum', (['(_Prn - _Pbs2ac)', '(0)'], {}), '(_Prn - _Pbs2ac, 0)\n', (59226, 59245), True, 'import numpy as np\n'), ((59308, 59333), 'numpy.maximum', 'np.maximum', (['_Prn', '_Pbs2ac'], {}), '(_Prn, _Pbs2ac)\n', (59318, 59333), True, 'import numpy as np\n'), ((59385, 59414), 'numpy.minimum', 'np.minimum', (['(_Pbs2ac - _Prn)', '(0)'], {}), '(_Pbs2ac - _Prn, 0)\n', (59395, 59414), True, 'import numpy as np\n'), 
((66321, 66339), 'numpy.maximum', 'np.maximum', (['(0)', 'Pbs'], {}), '(0, Pbs)\n', (66331, 66339), True, 'import numpy as np\n'), ((66407, 66425), 'numpy.minimum', 'np.minimum', (['(0)', 'Pbs'], {}), '(0, Pbs)\n', (66417, 66425), True, 'import numpy as np\n'), ((66499, 66516), 'numpy.minimum', 'np.minimum', (['(0)', 'Pr'], {}), '(0, Pr)\n', (66509, 66516), True, 'import numpy as np\n'), ((66586, 66603), 'numpy.maximum', 'np.maximum', (['(0)', 'Pr'], {}), '(0, Pr)\n', (66596, 66603), True, 'import numpy as np\n'), ((66668, 66689), 'numpy.minimum', 'np.minimum', (['Ppvs', 'Plt'], {}), '(Ppvs, Plt)\n', (66678, 66689), True, 'import numpy as np\n'), ((66736, 66759), 'numpy.minimum', 'np.minimum', (['Prp', 'Pac2bs'], {}), '(Prp, Pac2bs)\n', (66746, 66759), True, 'import numpy as np\n'), ((66806, 66833), 'numpy.maximum', 'np.maximum', (['(Pac2bs - Prp)', '(0)'], {}), '(Pac2bs - Prp, 0)\n', (66816, 66833), True, 'import numpy as np\n'), ((66889, 66916), 'numpy.minimum', 'np.minimum', (['(Prn - Pbs2ac)', '(0)'], {}), '(Prn - Pbs2ac, 0)\n', (66899, 66916), True, 'import numpy as np\n'), ((66976, 66999), 'numpy.maximum', 'np.maximum', (['Prn', 'Pbs2ac'], {}), '(Prn, Pbs2ac)\n', (66986, 66999), True, 'import numpy as np\n'), ((67048, 67075), 'numpy.minimum', 'np.minimum', (['(Pbs2ac - Prn)', '(0)'], {}), '(Pbs2ac - Prn, 0)\n', (67058, 67075), True, 'import numpy as np\n'), ((67120, 67147), 'numpy.maximum', 'np.maximum', (['(Prp - Pac2bs)', '(0)'], {}), '(Prp - Pac2bs, 0)\n', (67130, 67147), True, 'import numpy as np\n'), ((74091, 74163), 'numpy.polyfit', 'np.polyfit', (["(p_pv2ac_in / parameter['P_PV2AC_in'] / 1000)", 'P_l_pv2ac_in', '(2)'], {}), "(p_pv2ac_in / parameter['P_PV2AC_in'] / 1000, P_l_pv2ac_in, 2)\n", (74101, 74163), True, 'import numpy as np\n'), ((74332, 74369), 'numpy.polyfit', 'np.polyfit', (['p_pv2ac', 'P_l_pv2ac_out', '(2)'], {}), '(p_pv2ac, P_l_pv2ac_out, 2)\n', (74342, 74369), True, 'import numpy as np\n'), ((75634, 75709), 'numpy.polyfit', 'np.polyfit', 
(["(p_pv2bat_in / parameter['P_PV2BAT_in'] / 1000)", 'P_l_pv2bat_in', '(2)'], {}), "(p_pv2bat_in / parameter['P_PV2BAT_in'] / 1000, P_l_pv2bat_in, 2)\n", (75644, 75709), True, 'import numpy as np\n'), ((75881, 75920), 'numpy.polyfit', 'np.polyfit', (['p_pv2bat', 'P_l_pv2bat_out', '(2)'], {}), '(p_pv2bat, P_l_pv2bat_out, 2)\n', (75891, 75920), True, 'import numpy as np\n'), ((76961, 77036), 'numpy.polyfit', 'np.polyfit', (["(p_ac2bat_in / parameter['P_AC2BAT_in'] / 1000)", 'P_l_ac2bat_in', '(2)'], {}), "(p_ac2bat_in / parameter['P_AC2BAT_in'] / 1000, P_l_ac2bat_in, 2)\n", (76971, 77036), True, 'import numpy as np\n'), ((77208, 77247), 'numpy.polyfit', 'np.polyfit', (['p_ac2bat', 'P_l_ac2bat_out', '(2)'], {}), '(p_ac2bat, P_l_ac2bat_out, 2)\n', (77218, 77247), True, 'import numpy as np\n'), ((78314, 78389), 'numpy.polyfit', 'np.polyfit', (["(p_bat2ac_in / parameter['P_BAT2AC_in'] / 1000)", 'P_l_bat2ac_in', '(2)'], {}), "(p_bat2ac_in / parameter['P_BAT2AC_in'] / 1000, P_l_bat2ac_in, 2)\n", (78324, 78389), True, 'import numpy as np\n'), ((78561, 78600), 'numpy.polyfit', 'np.polyfit', (['p_bat2ac', 'P_l_bat2ac_out', '(2)'], {}), '(p_bat2ac, P_l_bat2ac_out, 2)\n', (78571, 78600), True, 'import numpy as np\n'), ((79576, 79651), 'numpy.polyfit', 'np.polyfit', (["(p_bat2pv_in / parameter['P_BAT2AC_in'] / 1000)", 'P_l_bat2pv_in', '(2)'], {}), "(p_bat2pv_in / parameter['P_BAT2AC_in'] / 1000, P_l_bat2pv_in, 2)\n", (79586, 79651), True, 'import numpy as np\n'), ((79823, 79862), 'numpy.polyfit', 'np.polyfit', (['p_bat2pv', 'P_l_bat2pv_out', '(2)'], {}), '(p_bat2pv, P_l_bat2pv_out, 2)\n', (79833, 79862), True, 'import numpy as np\n'), ((81612, 81622), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (81619, 81622), True, 'import numpy as np\n'), ((82242, 82270), 'numpy.array', 'np.array', (["parameter['E_BAT']"], {}), "(parameter['E_BAT'])\n", (82250, 82270), True, 'import numpy as np\n'), ((82303, 82337), 'numpy.append', 'np.append', (['d', "parameter['eta_BAT']"], {}), "(d, 
parameter['eta_BAT'])\n", (82312, 82337), True, 'import numpy as np\n'), ((82364, 82401), 'numpy.append', 'np.append', (['d', "parameter['t_CONSTANT']"], {}), "(d, parameter['t_CONSTANT'])\n", (82373, 82401), True, 'import numpy as np\n'), ((82420, 82460), 'numpy.append', 'np.append', (['d', "parameter['P_SYS_SOC0_DC']"], {}), "(d, parameter['P_SYS_SOC0_DC'])\n", (82429, 82460), True, 'import numpy as np\n'), ((82479, 82519), 'numpy.append', 'np.append', (['d', "parameter['P_SYS_SOC0_AC']"], {}), "(d, parameter['P_SYS_SOC0_AC'])\n", (82488, 82519), True, 'import numpy as np\n'), ((82538, 82578), 'numpy.append', 'np.append', (['d', "parameter['P_SYS_SOC1_DC']"], {}), "(d, parameter['P_SYS_SOC1_DC'])\n", (82547, 82578), True, 'import numpy as np\n'), ((82597, 82637), 'numpy.append', 'np.append', (['d', "parameter['P_SYS_SOC1_AC']"], {}), "(d, parameter['P_SYS_SOC1_AC'])\n", (82606, 82637), True, 'import numpy as np\n'), ((82656, 82694), 'numpy.append', 'np.append', (['d', "parameter['AC2BAT_a_in']"], {}), "(d, parameter['AC2BAT_a_in'])\n", (82665, 82694), True, 'import numpy as np\n'), ((82713, 82751), 'numpy.append', 'np.append', (['d', "parameter['AC2BAT_b_in']"], {}), "(d, parameter['AC2BAT_b_in'])\n", (82722, 82751), True, 'import numpy as np\n'), ((82770, 82808), 'numpy.append', 'np.append', (['d', "parameter['AC2BAT_c_in']"], {}), "(d, parameter['AC2BAT_c_in'])\n", (82779, 82808), True, 'import numpy as np\n'), ((82827, 82866), 'numpy.append', 'np.append', (['d', "parameter['BAT2AC_a_out']"], {}), "(d, parameter['BAT2AC_a_out'])\n", (82836, 82866), True, 'import numpy as np\n'), ((82886, 82925), 'numpy.append', 'np.append', (['d', "parameter['BAT2AC_b_out']"], {}), "(d, parameter['BAT2AC_b_out'])\n", (82895, 82925), True, 'import numpy as np\n'), ((82945, 82984), 'numpy.append', 'np.append', (['d', "parameter['BAT2AC_c_out']"], {}), "(d, parameter['BAT2AC_c_out'])\n", (82954, 82984), True, 'import numpy as np\n'), ((83004, 83043), 'numpy.append', 'np.append', 
(['d', "parameter['P_AC2BAT_DEV']"], {}), "(d, parameter['P_AC2BAT_DEV'])\n", (83013, 83043), True, 'import numpy as np\n'), ((83063, 83102), 'numpy.append', 'np.append', (['d', "parameter['P_BAT2AC_DEV']"], {}), "(d, parameter['P_BAT2AC_DEV'])\n", (83072, 83102), True, 'import numpy as np\n'), ((83122, 83161), 'numpy.append', 'np.append', (['d', "parameter['P_BAT2AC_out']"], {}), "(d, parameter['P_BAT2AC_out'])\n", (83131, 83161), True, 'import numpy as np\n'), ((83181, 83219), 'numpy.append', 'np.append', (['d', "parameter['P_AC2BAT_in']"], {}), "(d, parameter['P_AC2BAT_in'])\n", (83190, 83219), True, 'import numpy as np\n'), ((83239, 83272), 'numpy.append', 'np.append', (['d', "parameter['t_DEAD']"], {}), "(d, parameter['t_DEAD'])\n", (83248, 83272), True, 'import numpy as np\n'), ((83292, 83324), 'numpy.append', 'np.append', (['d', "parameter['SOC_h']"], {}), "(d, parameter['SOC_h'])\n", (83301, 83324), True, 'import numpy as np\n'), ((83380, 83408), 'numpy.array', 'np.array', (["parameter['E_BAT']"], {}), "(parameter['E_BAT'])\n", (83388, 83408), True, 'import numpy as np\n'), ((83427, 83464), 'numpy.append', 'np.append', (['d', "parameter['P_PV2AC_in']"], {}), "(d, parameter['P_PV2AC_in'])\n", (83436, 83464), True, 'import numpy as np\n'), ((83483, 83521), 'numpy.append', 'np.append', (['d', "parameter['P_PV2AC_out']"], {}), "(d, parameter['P_PV2AC_out'])\n", (83492, 83521), True, 'import numpy as np\n'), ((83540, 83578), 'numpy.append', 'np.append', (['d', "parameter['P_PV2BAT_in']"], {}), "(d, parameter['P_PV2BAT_in'])\n", (83549, 83578), True, 'import numpy as np\n'), ((83602, 83641), 'numpy.append', 'np.append', (['d', "parameter['P_BAT2AC_out']"], {}), "(d, parameter['P_BAT2AC_out'])\n", (83611, 83641), True, 'import numpy as np\n'), ((83665, 83702), 'numpy.append', 'np.append', (['d', "parameter['PV2AC_a_in']"], {}), "(d, parameter['PV2AC_a_in'])\n", (83674, 83702), True, 'import numpy as np\n'), ((83729, 83766), 'numpy.append', 'np.append', (['d', 
"parameter['PV2AC_b_in']"], {}), "(d, parameter['PV2AC_b_in'])\n", (83738, 83766), True, 'import numpy as np\n'), ((83794, 83831), 'numpy.append', 'np.append', (['d', "parameter['PV2AC_c_in']"], {}), "(d, parameter['PV2AC_c_in'])\n", (83803, 83831), True, 'import numpy as np\n'), ((83860, 83898), 'numpy.append', 'np.append', (['d', "parameter['PV2BAT_a_in']"], {}), "(d, parameter['PV2BAT_a_in'])\n", (83869, 83898), True, 'import numpy as np\n'), ((83927, 83965), 'numpy.append', 'np.append', (['d', "parameter['PV2BAT_b_in']"], {}), "(d, parameter['PV2BAT_b_in'])\n", (83936, 83965), True, 'import numpy as np\n'), ((83985, 84024), 'numpy.append', 'np.append', (['d', "parameter['BAT2AC_a_out']"], {}), "(d, parameter['BAT2AC_a_out'])\n", (83994, 84024), True, 'import numpy as np\n'), ((84044, 84083), 'numpy.append', 'np.append', (['d', "parameter['BAT2AC_b_out']"], {}), "(d, parameter['BAT2AC_b_out'])\n", (84053, 84083), True, 'import numpy as np\n'), ((84103, 84142), 'numpy.append', 'np.append', (['d', "parameter['BAT2AC_c_out']"], {}), "(d, parameter['BAT2AC_c_out'])\n", (84112, 84142), True, 'import numpy as np\n'), ((84162, 84196), 'numpy.append', 'np.append', (['d', "parameter['eta_BAT']"], {}), "(d, parameter['eta_BAT'])\n", (84171, 84196), True, 'import numpy as np\n'), ((84216, 84248), 'numpy.append', 'np.append', (['d', "parameter['SOC_h']"], {}), "(d, parameter['SOC_h'])\n", (84225, 84248), True, 'import numpy as np\n'), ((84268, 84307), 'numpy.append', 'np.append', (['d', "parameter['P_PV2BAT_DEV']"], {}), "(d, parameter['P_PV2BAT_DEV'])\n", (84277, 84307), True, 'import numpy as np\n'), ((84332, 84371), 'numpy.append', 'np.append', (['d', "parameter['P_BAT2AC_DEV']"], {}), "(d, parameter['P_BAT2AC_DEV'])\n", (84341, 84371), True, 'import numpy as np\n'), ((84391, 84424), 'numpy.append', 'np.append', (['d', "parameter['t_DEAD']"], {}), "(d, parameter['t_DEAD'])\n", (84400, 84424), True, 'import numpy as np\n'), ((84444, 84481), 'numpy.append', 'np.append', 
(['d', "parameter['t_CONSTANT']"], {}), "(d, parameter['t_CONSTANT'])\n", (84453, 84481), True, 'import numpy as np\n'), ((84509, 84549), 'numpy.append', 'np.append', (['d', "parameter['P_SYS_SOC1_DC']"], {}), "(d, parameter['P_SYS_SOC1_DC'])\n", (84518, 84549), True, 'import numpy as np\n'), ((84569, 84609), 'numpy.append', 'np.append', (['d', "parameter['P_SYS_SOC0_AC']"], {}), "(d, parameter['P_SYS_SOC0_AC'])\n", (84578, 84609), True, 'import numpy as np\n'), ((84629, 84669), 'numpy.append', 'np.append', (['d', "parameter['P_SYS_SOC0_DC']"], {}), "(d, parameter['P_SYS_SOC0_DC'])\n", (84638, 84669), True, 'import numpy as np\n'), ((84725, 84753), 'numpy.array', 'np.array', (["parameter['E_BAT']"], {}), "(parameter['E_BAT'])\n", (84733, 84753), True, 'import numpy as np\n'), ((84767, 84804), 'numpy.append', 'np.append', (['d', "parameter['P_PV2AC_in']"], {}), "(d, parameter['P_PV2AC_in'])\n", (84776, 84804), True, 'import numpy as np\n'), ((84818, 84856), 'numpy.append', 'np.append', (['d', "parameter['P_PV2AC_out']"], {}), "(d, parameter['P_PV2AC_out'])\n", (84827, 84856), True, 'import numpy as np\n'), ((84870, 84908), 'numpy.append', 'np.append', (['d', "parameter['P_PV2BAT_in']"], {}), "(d, parameter['P_PV2BAT_in'])\n", (84879, 84908), True, 'import numpy as np\n'), ((84922, 84961), 'numpy.append', 'np.append', (['d', "parameter['P_BAT2PV_out']"], {}), "(d, parameter['P_BAT2PV_out'])\n", (84931, 84961), True, 'import numpy as np\n'), ((84975, 85012), 'numpy.append', 'np.append', (['d', "parameter['PV2AC_a_in']"], {}), "(d, parameter['PV2AC_a_in'])\n", (84984, 85012), True, 'import numpy as np\n'), ((85026, 85063), 'numpy.append', 'np.append', (['d', "parameter['PV2AC_b_in']"], {}), "(d, parameter['PV2AC_b_in'])\n", (85035, 85063), True, 'import numpy as np\n'), ((85077, 85114), 'numpy.append', 'np.append', (['d', "parameter['PV2AC_c_in']"], {}), "(d, parameter['PV2AC_c_in'])\n", (85086, 85114), True, 'import numpy as np\n'), ((85128, 85166), 'numpy.append', 
'np.append', (['d', "parameter['PV2BAT_a_in']"], {}), "(d, parameter['PV2BAT_a_in'])\n", (85137, 85166), True, 'import numpy as np\n'), ((85180, 85218), 'numpy.append', 'np.append', (['d', "parameter['PV2BAT_b_in']"], {}), "(d, parameter['PV2BAT_b_in'])\n", (85189, 85218), True, 'import numpy as np\n'), ((85232, 85270), 'numpy.append', 'np.append', (['d', "parameter['PV2BAT_c_in']"], {}), "(d, parameter['PV2BAT_c_in'])\n", (85241, 85270), True, 'import numpy as np\n'), ((85284, 85322), 'numpy.append', 'np.append', (['d', "parameter['PV2AC_a_out']"], {}), "(d, parameter['PV2AC_a_out'])\n", (85293, 85322), True, 'import numpy as np\n'), ((85336, 85374), 'numpy.append', 'np.append', (['d', "parameter['PV2AC_b_out']"], {}), "(d, parameter['PV2AC_b_out'])\n", (85345, 85374), True, 'import numpy as np\n'), ((85388, 85426), 'numpy.append', 'np.append', (['d', "parameter['PV2AC_c_out']"], {}), "(d, parameter['PV2AC_c_out'])\n", (85397, 85426), True, 'import numpy as np\n'), ((85440, 85479), 'numpy.append', 'np.append', (['d', "parameter['BAT2PV_a_out']"], {}), "(d, parameter['BAT2PV_a_out'])\n", (85449, 85479), True, 'import numpy as np\n'), ((85493, 85532), 'numpy.append', 'np.append', (['d', "parameter['BAT2PV_b_out']"], {}), "(d, parameter['BAT2PV_b_out'])\n", (85502, 85532), True, 'import numpy as np\n'), ((85546, 85585), 'numpy.append', 'np.append', (['d', "parameter['BAT2PV_c_out']"], {}), "(d, parameter['BAT2PV_c_out'])\n", (85555, 85585), True, 'import numpy as np\n'), ((85599, 85633), 'numpy.append', 'np.append', (['d', "parameter['eta_BAT']"], {}), "(d, parameter['eta_BAT'])\n", (85608, 85633), True, 'import numpy as np\n'), ((85647, 85679), 'numpy.append', 'np.append', (['d', "parameter['SOC_h']"], {}), "(d, parameter['SOC_h'])\n", (85656, 85679), True, 'import numpy as np\n'), ((85693, 85732), 'numpy.append', 'np.append', (['d', "parameter['P_PV2BAT_DEV']"], {}), "(d, parameter['P_PV2BAT_DEV'])\n", (85702, 85732), True, 'import numpy as np\n'), ((85746, 85785), 
'numpy.append', 'np.append', (['d', "parameter['P_BAT2AC_DEV']"], {}), "(d, parameter['P_BAT2AC_DEV'])\n", (85755, 85785), True, 'import numpy as np\n'), ((85799, 85839), 'numpy.append', 'np.append', (['d', "parameter['P_SYS_SOC1_DC']"], {}), "(d, parameter['P_SYS_SOC1_DC'])\n", (85808, 85839), True, 'import numpy as np\n'), ((85853, 85893), 'numpy.append', 'np.append', (['d', "parameter['P_SYS_SOC0_AC']"], {}), "(d, parameter['P_SYS_SOC0_AC'])\n", (85862, 85893), True, 'import numpy as np\n'), ((85907, 85947), 'numpy.append', 'np.append', (['d', "parameter['P_SYS_SOC0_DC']"], {}), "(d, parameter['P_SYS_SOC0_DC'])\n", (85916, 85947), True, 'import numpy as np\n'), ((85961, 85994), 'numpy.append', 'np.append', (['d', "parameter['t_DEAD']"], {}), "(d, parameter['t_DEAD'])\n", (85970, 85994), True, 'import numpy as np\n'), ((86008, 86045), 'numpy.append', 'np.append', (['d', "parameter['t_CONSTANT']"], {}), "(d, parameter['t_CONSTANT'])\n", (86017, 86045), True, 'import numpy as np\n'), ((2811, 2833), 'numpy.ones', 'np.ones', (['self.ppv.size'], {}), '(self.ppv.size)\n', (2818, 2833), True, 'import numpy as np\n'), ((4951, 4981), 'numpy.minimum', 'np.minimum', (['(0)', 'self.Ideal.Pbat'], {}), '(0, self.Ideal.Pbat)\n', (4961, 4981), True, 'import numpy as np\n'), ((5075, 5105), 'numpy.maximum', 'np.maximum', (['(0)', 'self.Ideal.Pbat'], {}), '(0, self.Ideal.Pbat)\n', (5085, 5105), True, 'import numpy as np\n'), ((15758, 15861), 'pyModbusTCP.client.ModbusClient', 'ModbusClient', ([], {'host': 'self.host', 'port': 'self.port', 'unit_id': 'self.unit_id', 'auto_open': '(True)', 'auto_close': '(True)'}), '(host=self.host, port=self.port, unit_id=self.unit_id,\n auto_open=True, auto_close=True)\n', (15770, 15861), False, 'from pyModbusTCP.client import ModbusClient\n'), ((19103, 19133), 'csv.writer', 'csv.writer', (['f'], {'dialect': '"""excel"""'}), "(f, dialect='excel')\n", (19113, 19133), False, 'import csv\n'), ((19636, 19666), 'csv.writer', 'csv.writer', (['f'], 
{'dialect': '"""excel"""'}), "(f, dialect='excel')\n", (19646, 19666), False, 'import csv\n'), ((21146, 21282), 'numpy.maximum', 'np.maximum', (['(0)', "(Ppv - (parameter['PV2AC_a_in'] * ppvinvin * ppvinvin + parameter[\n 'PV2AC_b_in'] * ppvinvin + parameter['PV2AC_c_in']))"], {}), "(0, Ppv - (parameter['PV2AC_a_in'] * ppvinvin * ppvinvin + \n parameter['PV2AC_b_in'] * ppvinvin + parameter['PV2AC_c_in']))\n", (21156, 21282), True, 'import numpy as np\n'), ((21455, 21472), 'numpy.ones_like', 'np.ones_like', (['ppv'], {}), '(ppv)\n', (21467, 21472), True, 'import numpy as np\n'), ((22041, 22059), 'numpy.zeros_like', 'np.zeros_like', (['ppv'], {}), '(ppv)\n', (22054, 22059), True, 'import numpy as np\n'), ((22075, 22093), 'numpy.empty_like', 'np.empty_like', (['ppv'], {}), '(ppv)\n', (22088, 22093), True, 'import numpy as np\n'), ((22461, 22508), 'numpy.minimum', 'np.minimum', (['Ppv', "(parameter['P_PV2AC_in'] * 1000)"], {}), "(Ppv, parameter['P_PV2AC_in'] * 1000)\n", (22471, 22508), True, 'import numpy as np\n'), ((23329, 23455), 'numpy.maximum', 'np.maximum', (['(0)', "(Ppv - (parameter['PV2AC_a_in'] * ppv2ac ** 2 + parameter['PV2AC_b_in'] *\n ppv2ac + parameter['PV2AC_c_in']))"], {}), "(0, Ppv - (parameter['PV2AC_a_in'] * ppv2ac ** 2 + parameter[\n 'PV2AC_b_in'] * ppv2ac + parameter['PV2AC_c_in']))\n", (23339, 23455), True, 'import numpy as np\n'), ((28510, 28557), 'numpy.maximum', 'np.maximum', (['_P_AC2BAT_min', '(P_bs + _P_AC2BAT_DEV)'], {}), '(_P_AC2BAT_min, P_bs + _P_AC2BAT_DEV)\n', (28520, 28557), True, 'import numpy as np\n'), ((28891, 28928), 'numpy.minimum', 'np.minimum', (['(_P_AC2BAT_in * 1000)', 'P_bs'], {}), '(_P_AC2BAT_in * 1000, P_bs)\n', (28901, 28928), True, 'import numpy as np\n'), ((30149, 30240), 'numpy.maximum', 'np.maximum', (['(0)', '(P_bs - (_AC2BAT_a_in * p_bs * p_bs + _AC2BAT_b_in * p_bs + _AC2BAT_c_in))'], {}), '(0, P_bs - (_AC2BAT_a_in * p_bs * p_bs + _AC2BAT_b_in * p_bs +\n _AC2BAT_c_in))\n', (30159, 30240), True, 'import numpy as 
np\n'), ((38286, 38328), 'numpy.maximum', 'np.maximum', (['(0)', '(P_pv2bat_in + _P_PV2BAT_DEV)'], {}), '(0, P_pv2bat_in + _P_PV2BAT_DEV)\n', (38296, 38328), True, 'import numpy as np\n'), ((38428, 38472), 'numpy.minimum', 'np.minimum', (['P_pv2bat_in', '(_P_PV2BAT_in * 1000)'], {}), '(P_pv2bat_in, _P_PV2BAT_in * 1000)\n', (38438, 38472), True, 'import numpy as np\n'), ((39167, 39199), 'numpy.minimum', 'np.minimum', (['P_pv2bat_in', '_Ppv[t]'], {}), '(P_pv2bat_in, _Ppv[t])\n', (39177, 39199), True, 'import numpy as np\n'), ((39511, 39598), 'numpy.maximum', 'np.maximum', (['(0)', '(P_pv2bat_in - (_PV2BAT_a_in * ppv2bat ** 2 + _PV2BAT_b_in * ppv2bat))'], {}), '(0, P_pv2bat_in - (_PV2BAT_a_in * ppv2bat ** 2 + _PV2BAT_b_in *\n ppv2bat))\n', (39521, 39598), True, 'import numpy as np\n'), ((39949, 40047), 'numpy.maximum', 'np.maximum', (['(0)', '(P_pv2ac_in - (_PV2AC_a_in * _ppv2ac ** 2 + _PV2AC_b_in * _ppv2ac +\n _PV2AC_c_in))'], {}), '(0, P_pv2ac_in - (_PV2AC_a_in * _ppv2ac ** 2 + _PV2AC_b_in *\n _ppv2ac + _PV2AC_c_in))\n', (39959, 40047), True, 'import numpy as np\n'), ((48387, 48437), 'numpy.minimum', 'np.minimum', (['_Pac[t - _t_DEAD]', '(_P_PV2AC_out * 1000)'], {}), '(_Pac[t - _t_DEAD], _P_PV2AC_out * 1000)\n', (48397, 48437), True, 'import numpy as np\n'), ((48475, 48515), 'numpy.minimum', 'np.minimum', (['_Pac[t]', '(_P_PV2AC_out * 1000)'], {}), '(_Pac[t], _P_PV2AC_out * 1000)\n', (48485, 48515), True, 'import numpy as np\n'), ((50266, 50308), 'numpy.maximum', 'np.maximum', (['(0)', '(P_pv2bat_in + _P_PV2BAT_DEV)'], {}), '(0, P_pv2bat_in + _P_PV2BAT_DEV)\n', (50276, 50308), True, 'import numpy as np\n'), ((50408, 50452), 'numpy.minimum', 'np.minimum', (['P_pv2bat_in', '(_P_PV2BAT_in * 1000)'], {}), '(P_pv2bat_in, _P_PV2BAT_in * 1000)\n', (50418, 50452), True, 'import numpy as np\n'), ((51119, 51151), 'numpy.minimum', 'np.minimum', (['P_pv2bat_in', '_Ppv[t]'], {}), '(P_pv2bat_in, _Ppv[t])\n', (51129, 51151), True, 'import numpy as np\n'), ((51314, 51416), 
'numpy.maximum', 'np.maximum', (['(0)', '(P_pv2bat_in - (_PV2BAT_a_in * ppv2bat ** 2 + _PV2BAT_b_in * ppv2bat +\n _PV2BAT_c_in))'], {}), '(0, P_pv2bat_in - (_PV2BAT_a_in * ppv2bat ** 2 + _PV2BAT_b_in *\n ppv2bat + _PV2BAT_c_in))\n', (51324, 51416), True, 'import numpy as np\n'), ((51682, 51724), 'numpy.minimum', 'np.minimum', (['P_pv2ac_in', '(_P_PV2AC_in * 1000)'], {}), '(P_pv2ac_in, _P_PV2AC_in * 1000)\n', (51692, 51724), True, 'import numpy as np\n'), ((52056, 52152), 'numpy.maximum', 'np.maximum', (['(0)', '(P_pv2ac_in - (_PV2AC_a_in * ppv2ac ** 2 + _PV2AC_b_in * ppv2ac + _PV2AC_c_in))'], {}), '(0, P_pv2ac_in - (_PV2AC_a_in * ppv2ac ** 2 + _PV2AC_b_in *\n ppv2ac + _PV2AC_c_in))\n', (52066, 52152), True, 'import numpy as np\n'), ((59495, 59524), 'numpy.maximum', 'np.maximum', (['(_Prp - _Pac2bs)', '(0)'], {}), '(_Prp - _Pac2bs, 0)\n', (59505, 59524), True, 'import numpy as np\n'), ((59799, 59828), 'numpy.maximum', 'np.maximum', (['(_Prp - _Pac2bs)', '(0)'], {}), '(_Prp - _Pac2bs, 0)\n', (59809, 59828), True, 'import numpy as np\n'), ((60063, 60081), 'numpy.where', 'np.where', (['(_Pct > 0)'], {}), '(_Pct > 0)\n', (60071, 60081), True, 'import numpy as np\n'), ((60778, 60799), 'numpy.minimum', 'np.minimum', (['(0)', '_Ppvbs'], {}), '(0, _Ppvbs)\n', (60788, 60799), True, 'import numpy as np\n'), ((60954, 60975), 'numpy.maximum', 'np.maximum', (['(0)', '_Ppvbs'], {}), '(0, _Ppvbs)\n', (60964, 60975), True, 'import numpy as np\n'), ((61050, 61077), 'numpy.minimum', 'np.minimum', (['_Plt', '_Ppvbs2ac'], {}), '(_Plt, _Ppvbs2ac)\n', (61060, 61077), True, 'import numpy as np\n'), ((61216, 61241), 'numpy.minimum', 'np.minimum', (['_Plt', '_Ppv2ac'], {}), '(_Plt, _Ppv2ac)\n', (61226, 61241), True, 'import numpy as np\n'), ((61310, 61351), 'numpy.minimum', 'np.minimum', (['(_Ppv2ac - _Ppv2l)', '_P_ac2g_max'], {}), '(_Ppv2ac - _Ppv2l, _P_ac2g_max)\n', (61320, 61351), True, 'import numpy as np\n'), ((62708, 62726), 'numpy.maximum', 'np.maximum', (['(0)', '_Pg'], {}), '(0, 
_Pg)\n', (62718, 62726), True, 'import numpy as np\n'), ((62783, 62801), 'numpy.minimum', 'np.minimum', (['(0)', '_Pg'], {}), '(0, _Pg)\n', (62793, 62801), True, 'import numpy as np\n'), ((67308, 67328), 'numpy.minimum', 'np.minimum', (['(0)', 'Ppvbs'], {}), '(0, Ppvbs)\n', (67318, 67328), True, 'import numpy as np\n'), ((67480, 67500), 'numpy.maximum', 'np.maximum', (['(0)', 'Ppvbs'], {}), '(0, Ppvbs)\n', (67490, 67500), True, 'import numpy as np\n'), ((67574, 67599), 'numpy.minimum', 'np.minimum', (['_Pl', 'Ppvbs2ac'], {}), '(_Pl, Ppvbs2ac)\n', (67584, 67599), True, 'import numpy as np\n'), ((68011, 68022), 'numpy.abs', 'np.abs', (['Plt'], {}), '(Plt)\n', (68017, 68022), True, 'import numpy as np\n'), ((68114, 68126), 'numpy.abs', 'np.abs', (['_Ppv'], {}), '(_Ppv)\n', (68120, 68126), True, 'import numpy as np\n'), ((68203, 68217), 'numpy.abs', 'np.abs', (['Pbatin'], {}), '(Pbatin)\n', (68209, 68217), True, 'import numpy as np\n'), ((68299, 68314), 'numpy.abs', 'np.abs', (['Pbatout'], {}), '(Pbatout)\n', (68305, 68314), True, 'import numpy as np\n'), ((68369, 68382), 'numpy.abs', 'np.abs', (['Pac2g'], {}), '(Pac2g)\n', (68375, 68382), True, 'import numpy as np\n'), ((68436, 68449), 'numpy.abs', 'np.abs', (['Pg2ac'], {}), '(Pg2ac)\n', (68442, 68449), True, 'import numpy as np\n'), ((68514, 68526), 'numpy.abs', 'np.abs', (['Pg2l'], {}), '(Pg2l)\n', (68520, 68526), True, 'import numpy as np\n'), ((68606, 68619), 'numpy.abs', 'np.abs', (['Pperi'], {}), '(Pperi)\n', (68612, 68619), True, 'import numpy as np\n'), ((68679, 68690), 'numpy.abs', 'np.abs', (['Pct'], {}), '(Pct)\n', (68685, 68690), True, 'import numpy as np\n'), ((2892, 2915), 'numpy.maximum', 'np.maximum', (['(0)', 'self.ppv'], {}), '(0, self.ppv)\n', (2902, 2915), True, 'import numpy as np\n'), ((4917, 4947), 'numpy.maximum', 'np.maximum', (['(0)', 'self.Ideal.Pbat'], {}), '(0, self.Ideal.Pbat)\n', (4927, 4947), True, 'import numpy as np\n'), ((8555, 8573), 'numpy.maximum', 'np.maximum', (['(0)', 'ppv'], 
{}), '(0, ppv)\n', (8565, 8573), True, 'import numpy as np\n'), ((16333, 16356), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (16354, 16356), False, 'import datetime\n'), ((17950, 17998), 'pyModbusTCP.utils.word_list_to_long', 'utils.word_list_to_long', (['_P_ac'], {'big_endian': '(False)'}), '(_P_ac, big_endian=False)\n', (17973, 17998), False, 'from pyModbusTCP import utils\n'), ((18092, 18117), 'pyModbusTCP.utils.decode_ieee', 'utils.decode_ieee', (['*zregs'], {}), '(*zregs)\n', (18109, 18117), False, 'from pyModbusTCP import utils\n'), ((18195, 18212), 'numpy.int16', 'np.int16', (['*_P_bat'], {}), '(*_P_bat)\n', (18203, 18212), True, 'import numpy as np\n'), ((20751, 20769), 'numpy.maximum', 'np.maximum', (['(0)', 'ppv'], {}), '(0, ppv)\n', (20761, 20769), True, 'import numpy as np\n'), ((20812, 20859), 'numpy.minimum', 'np.minimum', (['ppv', "(parameter['P_PV2AC_in'] * 1000)"], {}), "(ppv, parameter['P_PV2AC_in'] * 1000)\n", (20822, 20859), True, 'import numpy as np\n'), ((22939, 22987), 'numpy.minimum', 'np.minimum', (['Pac', "(parameter['P_PV2AC_out'] * 1000)"], {}), "(Pac, parameter['P_PV2AC_out'] * 1000)\n", (22949, 22987), True, 'import numpy as np\n'), ((24319, 24337), 'numpy.empty_like', 'np.empty_like', (['ppv'], {}), '(ppv)\n', (24332, 24337), True, 'import numpy as np\n'), ((28617, 28665), 'numpy.minimum', 'np.minimum', (['(-_P_BAT2AC_min)', '(P_bs - _P_BAT2AC_DEV)'], {}), '(-_P_BAT2AC_min, P_bs - _P_BAT2AC_DEV)\n', (28627, 28665), True, 'import numpy as np\n'), ((30374, 30409), 'numpy.abs', 'np.abs', (['(P_bs / _P_BAT2AC_out / 1000)'], {}), '(P_bs / _P_BAT2AC_out / 1000)\n', (30380, 30409), True, 'import numpy as np\n'), ((31025, 31054), 'numpy.maximum', 'np.maximum', (['(0)', '_P_SYS_SOC0_DC'], {}), '(0, _P_SYS_SOC0_DC)\n', (31035, 31054), True, 'import numpy as np\n'), ((40481, 40524), 'numpy.maximum', 'np.maximum', (['(0)', '(P_bat2ac_out + _P_BAT2AC_DEV)'], {}), '(0, P_bat2ac_out + _P_BAT2AC_DEV)\n', (40491, 40524), True, 
'import numpy as np\n'), ((40632, 40678), 'numpy.minimum', 'np.minimum', (['P_bat2ac_out', '(_P_BAT2AC_out * 1000)'], {}), '(P_bat2ac_out, _P_BAT2AC_out * 1000)\n', (40642, 40678), True, 'import numpy as np\n'), ((41370, 41432), 'numpy.minimum', 'np.minimum', (['(_P_PV2AC_out * 1000 - _Ppv2ac_out[t])', 'P_bat2ac_out'], {}), '(_P_PV2AC_out * 1000 - _Ppv2ac_out[t], P_bat2ac_out)\n', (41380, 41432), True, 'import numpy as np\n'), ((42953, 42982), 'numpy.maximum', 'np.maximum', (['(0)', '_P_SYS_SOC0_DC'], {}), '(0, _P_SYS_SOC0_DC)\n', (42963, 42982), True, 'import numpy as np\n'), ((52503, 52516), 'numpy.abs', 'np.abs', (['P_rpv'], {}), '(P_rpv)\n', (52509, 52516), True, 'import numpy as np\n'), ((52624, 52667), 'numpy.maximum', 'np.maximum', (['(0)', '(P_bat2pv_out + _P_BAT2AC_DEV)'], {}), '(0, P_bat2pv_out + _P_BAT2AC_DEV)\n', (52634, 52667), True, 'import numpy as np\n'), ((52775, 52821), 'numpy.minimum', 'np.minimum', (['P_bat2pv_out', '(_P_BAT2PV_out * 1000)'], {}), '(P_bat2pv_out, _P_BAT2PV_out * 1000)\n', (52785, 52821), True, 'import numpy as np\n'), ((53406, 53445), 'numpy.minimum', 'np.minimum', (['(_P_PV2AC_in * 1000)', '_Ppv[t]'], {}), '(_P_PV2AC_in * 1000, _Ppv[t])\n', (53416, 53445), True, 'import numpy as np\n'), ((53575, 53629), 'numpy.minimum', 'np.minimum', (['(_P_PV2AC_in * 1000 - _Ppv[t])', 'P_bat2pv_out'], {}), '(_P_PV2AC_in * 1000 - _Ppv[t], P_bat2pv_out)\n', (53585, 53629), True, 'import numpy as np\n'), ((54251, 54347), 'numpy.maximum', 'np.maximum', (['(0)', '(P_pv2ac_in - (_PV2AC_a_in * ppv2ac ** 2 + _PV2AC_b_in * ppv2ac + _PV2AC_c_in))'], {}), '(0, P_pv2ac_in - (_PV2AC_a_in * ppv2ac ** 2 + _PV2AC_b_in *\n ppv2ac + _PV2AC_c_in))\n', (54261, 54347), True, 'import numpy as np\n'), ((54837, 54876), 'numpy.minimum', 'np.minimum', (['_Ppv[t]', '(_P_PV2AC_in * 1000)'], {}), '(_Ppv[t], _P_PV2AC_in * 1000)\n', (54847, 54876), True, 'import numpy as np\n'), ((55085, 55178), 'numpy.maximum', 'np.maximum', (['(0)', '(_Ppv[t] - (_PV2AC_a_in * ppv2ac ** 2 
+ _PV2AC_b_in * ppv2ac + _PV2AC_c_in))'], {}), '(0, _Ppv[t] - (_PV2AC_a_in * ppv2ac ** 2 + _PV2AC_b_in * ppv2ac +\n _PV2AC_c_in))\n', (55095, 55178), True, 'import numpy as np\n'), ((55465, 55494), 'numpy.maximum', 'np.maximum', (['(0)', '_P_SYS_SOC0_DC'], {}), '(0, _P_SYS_SOC0_DC)\n', (55475, 55494), True, 'import numpy as np\n'), ((61456, 61468), 'numpy.sum', 'np.sum', (['_Pct'], {}), '(_Pct)\n', (61462, 61468), True, 'import numpy as np\n'), ((61681, 61702), 'numpy.maximum', 'np.maximum', (['(0)', '_Ppvbs'], {}), '(0, _Ppvbs)\n', (61691, 61702), True, 'import numpy as np\n'), ((62944, 62956), 'numpy.abs', 'np.abs', (['_Plt'], {}), '(_Plt)\n', (62950, 62956), True, 'import numpy as np\n'), ((63055, 63067), 'numpy.abs', 'np.abs', (['_Ppv'], {}), '(_Ppv)\n', (63061, 63067), True, 'import numpy as np\n'), ((63151, 63166), 'numpy.abs', 'np.abs', (['_Pbatin'], {}), '(_Pbatin)\n', (63157, 63166), True, 'import numpy as np\n'), ((63255, 63271), 'numpy.abs', 'np.abs', (['_Pbatout'], {}), '(_Pbatout)\n', (63261, 63271), True, 'import numpy as np\n'), ((63333, 63347), 'numpy.abs', 'np.abs', (['_Pac2g'], {}), '(_Pac2g)\n', (63339, 63347), True, 'import numpy as np\n'), ((63408, 63422), 'numpy.abs', 'np.abs', (['_Pg2ac'], {}), '(_Pg2ac)\n', (63414, 63422), True, 'import numpy as np\n'), ((63494, 63507), 'numpy.abs', 'np.abs', (['_Pg2l'], {}), '(_Pg2l)\n', (63500, 63507), True, 'import numpy as np\n'), ((63594, 63608), 'numpy.abs', 'np.abs', (['_Pperi'], {}), '(_Pperi)\n', (63600, 63608), True, 'import numpy as np\n'), ((63675, 63687), 'numpy.abs', 'np.abs', (['_Pct'], {}), '(_Pct)\n', (63681, 63687), True, 'import numpy as np\n'), ((68825, 68837), 'numpy.abs', 'np.abs', (['Ppvs'], {}), '(Ppvs)\n', (68831, 68837), True, 'import numpy as np\n'), ((68917, 68931), 'numpy.abs', 'np.abs', (['Pac2bs'], {}), '(Pac2bs)\n', (68923, 68931), True, 'import numpy as np\n'), ((69012, 69026), 'numpy.abs', 'np.abs', (['Pbs2ac'], {}), '(Pbs2ac)\n', (69018, 69026), True, 'import numpy as 
np\n'), ((69099, 69113), 'numpy.abs', 'np.abs', (['Ppvs2l'], {}), '(Ppvs2l)\n', (69105, 69113), True, 'import numpy as np\n'), ((69175, 69190), 'numpy.abs', 'np.abs', (['Ppvs2bs'], {}), '(Ppvs2bs)\n', (69181, 69190), True, 'import numpy as np\n'), ((69252, 69265), 'numpy.abs', 'np.abs', (['Pg2bs'], {}), '(Pg2bs)\n', (69258, 69265), True, 'import numpy as np\n'), ((69325, 69339), 'numpy.abs', 'np.abs', (['Ppvs2g'], {}), '(Ppvs2g)\n', (69331, 69339), True, 'import numpy as np\n'), ((69421, 69434), 'numpy.abs', 'np.abs', (['Pbs2l'], {}), '(Pbs2l)\n', (69427, 69434), True, 'import numpy as np\n'), ((69498, 69511), 'numpy.abs', 'np.abs', (['Pbs2g'], {}), '(Pbs2g)\n', (69504, 69511), True, 'import numpy as np\n'), ((16474, 16497), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (16495, 16497), False, 'import datetime\n'), ((16901, 16924), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (16922, 16924), False, 'import datetime\n'), ((17189, 17212), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (17210, 17212), False, 'import datetime\n'), ((17410, 17433), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (17431, 17433), False, 'import datetime\n'), ((17740, 17763), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (17761, 17763), False, 'import datetime\n'), ((20559, 20619), 'numpy.minimum', 'np.minimum', (["(ppv * parameter['P_PV'])", "parameter['P_PV2AC_in']"], {}), "(ppv * parameter['P_PV'], parameter['P_PV2AC_in'])\n", (20569, 20619), True, 'import numpy as np\n'), ((22751, 22799), 'numpy.minimum', 'np.minimum', (['Pac', "(parameter['P_PV2AC_out'] * 1000)"], {}), "(Pac, parameter['P_PV2AC_out'] * 1000)\n", (22761, 22799), True, 'import numpy as np\n'), ((24488, 24505), 'numpy.ones_like', 'np.ones_like', (['ppv'], {}), '(ppv)\n', (24500, 24505), True, 'import numpy as np\n'), ((28205, 28221), 'numpy.abs', 'np.abs', (['E_bs_est'], {}), '(E_bs_est)\n', (28211, 28221), True, 
'import numpy as np\n'), ((31263, 31292), 'numpy.maximum', 'np.maximum', (['(0)', '_P_SYS_SOC1_DC'], {}), '(0, _P_SYS_SOC1_DC)\n', (31273, 31292), True, 'import numpy as np\n'), ((37523, 37537), 'numpy.abs', 'np.abs', (['E_bs_r'], {}), '(E_bs_r)\n', (37529, 37537), True, 'import numpy as np\n'), ((43204, 43233), 'numpy.maximum', 'np.maximum', (['(0)', '_P_SYS_SOC1_DC'], {}), '(0, _P_SYS_SOC1_DC)\n', (43214, 43233), True, 'import numpy as np\n'), ((49491, 49507), 'numpy.abs', 'np.abs', (['E_bs_rpv'], {}), '(E_bs_rpv)\n', (49497, 49507), True, 'import numpy as np\n'), ((55747, 55776), 'numpy.maximum', 'np.maximum', (['(0)', '_P_SYS_SOC1_DC'], {}), '(0, _P_SYS_SOC1_DC)\n', (55757, 55776), True, 'import numpy as np\n'), ((61886, 61904), 'numpy.where', 'np.where', (['(_Pct > 0)'], {}), '(_Pct > 0)\n', (61894, 61904), True, 'import numpy as np\n'), ((63853, 63866), 'numpy.abs', 'np.abs', (['_Ppvs'], {}), '(_Ppvs)\n', (63859, 63866), True, 'import numpy as np\n'), ((63955, 63970), 'numpy.abs', 'np.abs', (['_Pac2bs'], {}), '(_Pac2bs)\n', (63961, 63970), True, 'import numpy as np\n'), ((64060, 64075), 'numpy.abs', 'np.abs', (['_Pbs2ac'], {}), '(_Pbs2ac)\n', (64066, 64075), True, 'import numpy as np\n'), ((64157, 64172), 'numpy.abs', 'np.abs', (['_Ppvs2l'], {}), '(_Ppvs2l)\n', (64163, 64172), True, 'import numpy as np\n'), ((64243, 64259), 'numpy.abs', 'np.abs', (['_Ppvs2bs'], {}), '(_Ppvs2bs)\n', (64249, 64259), True, 'import numpy as np\n'), ((64330, 64344), 'numpy.abs', 'np.abs', (['_Pg2bs'], {}), '(_Pg2bs)\n', (64336, 64344), True, 'import numpy as np\n'), ((64413, 64428), 'numpy.abs', 'np.abs', (['_Ppvs2g'], {}), '(_Ppvs2g)\n', (64419, 64428), True, 'import numpy as np\n'), ((64519, 64533), 'numpy.abs', 'np.abs', (['_Pbs2l'], {}), '(_Pbs2l)\n', (64525, 64533), True, 'import numpy as np\n'), ((64606, 64620), 'numpy.abs', 'np.abs', (['_Pbs2g'], {}), '(_Pbs2g)\n', (64612, 64620), True, 'import numpy as np\n'), ((69639, 69654), 'numpy.abs', 'np.abs', (['Pg2pvbs'], {}), 
'(Pg2pvbs)\n', (69645, 69654), True, 'import numpy as np\n'), ((69741, 69757), 'numpy.abs', 'np.abs', (['Pac2pvbs'], {}), '(Pac2pvbs)\n', (69747, 69757), True, 'import numpy as np\n'), ((69845, 69861), 'numpy.abs', 'np.abs', (['Ppvbs2ac'], {}), '(Ppvbs2ac)\n', (69851, 69861), True, 'import numpy as np\n'), ((69950, 69965), 'numpy.abs', 'np.abs', (['Ppvbs2l'], {}), '(Ppvbs2l)\n', (69956, 69965), True, 'import numpy as np\n'), ((20470, 20488), 'numpy.maximum', 'np.maximum', (['(0)', 'ppv'], {}), '(0, ppv)\n', (20480, 20488), True, 'import numpy as np\n'), ((64816, 64832), 'numpy.abs', 'np.abs', (['_Pg2pvbs'], {}), '(_Pg2pvbs)\n', (64822, 64832), True, 'import numpy as np\n'), ((64926, 64943), 'numpy.abs', 'np.abs', (['_Pac2pvbs'], {}), '(_Pac2pvbs)\n', (64932, 64943), True, 'import numpy as np\n'), ((65038, 65055), 'numpy.abs', 'np.abs', (['_Ppvbs2ac'], {}), '(_Ppvbs2ac)\n', (65044, 65055), True, 'import numpy as np\n'), ((65151, 65167), 'numpy.abs', 'np.abs', (['_Ppvbs2l'], {}), '(_Ppvbs2l)\n', (65157, 65167), True, 'import numpy as np\n'), ((31645, 31662), 'numpy.sqrt', 'np.sqrt', (['_eta_BAT'], {}), '(_eta_BAT)\n', (31652, 31662), True, 'import numpy as np\n'), ((43520, 43537), 'numpy.sqrt', 'np.sqrt', (['_eta_BAT'], {}), '(_eta_BAT)\n', (43527, 43537), True, 'import numpy as np\n'), ((56070, 56087), 'numpy.sqrt', 'np.sqrt', (['_eta_BAT'], {}), '(_eta_BAT)\n', (56077, 56087), True, 'import numpy as np\n'), ((31737, 31754), 'numpy.sqrt', 'np.sqrt', (['_eta_BAT'], {}), '(_eta_BAT)\n', (31744, 31754), True, 'import numpy as np\n'), ((43610, 43627), 'numpy.sqrt', 'np.sqrt', (['_eta_BAT'], {}), '(_eta_BAT)\n', (43617, 43627), True, 'import numpy as np\n'), ((56162, 56179), 'numpy.sqrt', 'np.sqrt', (['_eta_BAT'], {}), '(_eta_BAT)\n', (56169, 56179), True, 'import numpy as np\n')] |
import cv2
import numpy as np
def detect(image):
    """
    Detect character-like regions in an image.

    :param image: numpy.array, BGR input image
    :return coordinates: list of (x, y) tuples
        top-left corners of detected elements
    :return cropped_images: list of numpy.arrays
        30x30 normalized crops of detected elements
    """
    # convert the image to grayscale
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # adaptive threshold separates foreground symbols from the background
    thresh = cv2.adaptiveThreshold(gray_image, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                   cv2.THRESH_BINARY, 21, 9)
    # invert so symbols become white on black (what findContours expects)
    inverted_image = cv2.bitwise_not(thresh)
    # dilate to thicken symbols and merge broken strokes
    dilatation = cv2.dilate(inverted_image, np.ones(shape=(2, 2)), iterations=5)
    # external contours only: one contour per symbol candidate
    contours, hierarchy = cv2.findContours(dilatation, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    coordinates, cropped_images = list(), list()
    # BUGFIX: numpy shape is (rows, cols) == (height, width); the original
    # unpacked these swapped, which was harmless only because the two values
    # are used as a product below.
    image_h, image_w = image.shape[:2]
    for cnt in contours:
        if cv2.contourArea(cnt) >= image_w*image_h*0.0005:  # contours with area more than 0.05% of picture
            x, y, w, h = cv2.boundingRect(cnt)
            # save coordinates
            coordinates.append((x, y))
            # normalize cropped image to [0, 1] with symbol pixels bright
            normalized = (255 - gray_image[y:y + h, x:x + w]) / 255
            # push pixels toward extremes to reduce noise
            normalized[normalized < 0.4] = 0
            normalized[normalized >= 0.7] = 1
            # pad to a square so the 30x30 resize keeps the aspect ratio:
            # wider crops get top/bottom padding, taller ones left/right
            if w > h:
                # pad with a constant color equal to the minimal pixel value
                padded = cv2.copyMakeBorder(normalized, int((w - h) / 2), int((w - h) / 2), 0, 0,
                                            borderType=cv2.BORDER_CONSTANT, value=np.min(normalized))
            else:
                padded = cv2.copyMakeBorder(normalized, 0, 0, int((h - w) / 2), int((h - w) / 2),
                                            borderType=cv2.BORDER_CONSTANT, value=np.min(normalized))
            # erode picture because dataset glyphs are much thinner
            eroded = cv2.morphologyEx(padded, cv2.MORPH_ERODE, np.ones(shape=(2, 2)), iterations=3)
            # resize to the model input size
            resized = cv2.resize(eroded, (30, 30), interpolation=cv2.INTER_AREA)
            # save cropped images
            cropped_images.append(resized)
return coordinates, cropped_images | [
"numpy.ones",
"cv2.contourArea",
"cv2.adaptiveThreshold",
"cv2.cvtColor",
"numpy.min",
"cv2.findContours",
"cv2.bitwise_not",
"cv2.resize",
"cv2.boundingRect"
] | [((336, 375), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (348, 375), False, 'import cv2\n'), ((456, 557), 'cv2.adaptiveThreshold', 'cv2.adaptiveThreshold', (['gray_image', '(255)', 'cv2.ADAPTIVE_THRESH_GAUSSIAN_C', 'cv2.THRESH_BINARY', '(21)', '(9)'], {}), '(gray_image, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.\n THRESH_BINARY, 21, 9)\n', (477, 557), False, 'import cv2\n'), ((593, 616), 'cv2.bitwise_not', 'cv2.bitwise_not', (['thresh'], {}), '(thresh)\n', (608, 616), False, 'import cv2\n'), ((787, 859), 'cv2.findContours', 'cv2.findContours', (['dilatation', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(dilatation, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n', (803, 859), False, 'import cv2\n'), ((710, 731), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 2)'}), '(shape=(2, 2))\n', (717, 731), True, 'import numpy as np\n'), ((996, 1016), 'cv2.contourArea', 'cv2.contourArea', (['cnt'], {}), '(cnt)\n', (1011, 1016), False, 'import cv2\n'), ((1109, 1130), 'cv2.boundingRect', 'cv2.boundingRect', (['cnt'], {}), '(cnt)\n', (1125, 1130), False, 'import cv2\n'), ((2173, 2231), 'cv2.resize', 'cv2.resize', (['eroded', '(30, 30)'], {'interpolation': 'cv2.INTER_AREA'}), '(eroded, (30, 30), interpolation=cv2.INTER_AREA)\n', (2183, 2231), False, 'import cv2\n'), ((2110, 2131), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 2)'}), '(shape=(2, 2))\n', (2117, 2131), True, 'import numpy as np\n'), ((1791, 1809), 'numpy.min', 'np.min', (['normalized'], {}), '(normalized)\n', (1797, 1809), True, 'import numpy as np\n'), ((1976, 1994), 'numpy.min', 'np.min', (['normalized'], {}), '(normalized)\n', (1982, 1994), True, 'import numpy as np\n')] |
import math
import random
import numpy as np
from OpenGL.GL import *
from OpenGL.GL.ARB.framebuffer_object import *
from OpenGL.GL.EXT.framebuffer_object import *
from PyEngine3D.Utilities import *
from PyEngine3D.Common import logger, COLOR_BLACK
from PyEngine3D.OpenGLContext import Texture2D, Texture2DArray, Texture2DMultiSample, TextureCube, RenderBuffer, CreateTexture
from .Ocean import Constants as OceanConstants
class Option:
    # Bitmask options for render-target creation; combine with | and test with &.
    NONE = 0
    MSAA = 1 << 1  # request a multisampled texture when MSAA is enabled
    SSAA = 1 << 2  # request doubled width/height when SSAA is enabled
class RenderTargets:
    # Static namespace holding a reference to every render target the engine
    # uses. All attributes start as None and are assigned by
    # RenderTargetManager.create_rendertargets().
    # final output buffers
    SCREENBUFFER = None
    BACKBUFFER = None
    # depth buffers
    DEPTH = None
    DEPTH_STENCIL = None
    # object picking
    OBJECT_ID = None
    OBJECT_ID_DEPTH = None
    # HDR scene color and copies
    HDR = None
    HDR_TEMP = None
    HDR_BACKUP = None
    # bloom mip chain (progressively downscaled)
    BLOOM_0 = None
    BLOOM_1 = None
    BLOOM_2 = None
    BLOOM_3 = None
    BLOOM_4 = None
    LIGHT_SHAFT = None
    # atmosphere rendering
    LIGHT_PROBE_ATMOSPHERE = None
    ATMOSPHERE = None
    ATMOSPHERE_INSCATTER = None
    # temporal anti-aliasing
    TAA_RESOLVE = None
    # G-buffer
    DIFFUSE = None
    MATERIAL = None
    WORLD_NORMAL = None
    # shadow maps
    STATIC_SHADOWMAP = None
    DYNAMIC_SHADOWMAP = None
    COMPOSITE_SHADOWMAP = None
    LINEAR_DEPTH = None
    FOCUS_DISTANCE = None
    # screen-space reflection chain
    SCREEN_SPACE_REFLECTION = None
    SCREEN_SPACE_REFLECTION_RESOLVED_PREV = None
    SCREEN_SPACE_REFLECTION_RESOLVED = None
    SSAO = None
    VELOCITY = None
    # ocean FFT ping-pong buffers
    FFT_A = None
    FFT_B = None
    # scratch targets
    TEMP_RENDER_BUFFER_MULTISAMPLE = None
    TEMP_RGBA8 = None
    TEMP_2D_ARRAY = None
    TEMP_MULTISAMPLE_X4 = None
    TEMP_HEIGHT_MAP = None
class RenderTargetManager(Singleton):
name = "RenderTargetManager"
def __init__(self):
self.core_manager = None
self.viewport_manager = None
self.renderer = None
self.rendertargets = dict()
self.immutable_rendertarget_names = []
self.temp_rendertargets = dict()
self.first_time = True
self.texture_lod_in_ssao = 1.0
def initialize(self, core_manager):
logger.info("initialize " + GetClassName(self))
self.core_manager = core_manager
self.viewport_manager = core_manager.viewport_manager
self.renderer = core_manager.renderer
self.clear()
    def clear(self, force=False):
        """Release both the managed and the temporary render targets.

        :param force: forwarded to clear_rendertargets(); when True,
            render targets registered as immutable are deleted too.
        """
        self.clear_rendertargets(force)
        self.clear_temp_rendertargets()
def clear_rendertargets(self, force=False):
delete_list = []
for key, rendertarget in self.rendertargets.items():
if force or key not in self.immutable_rendertarget_names:
rendertarget.delete()
delete_list.append(key)
for key in delete_list:
self.rendertargets.pop(key)
if key in self.immutable_rendertarget_names:
self.immutable_rendertarget_names.pop(key)
self.core_manager.gc_collect()
def clear_temp_rendertargets(self):
for key, rendertarget in self.temp_rendertargets.items():
rendertarget.delete()
self.temp_rendertargets = dict()
self.core_manager.gc_collect()
def find_rendertarget(self, rendertarget_index, rendertarget_name):
if rendertarget_index < len(self.rendertargets) and rendertarget_name in self.rendertargets:
return self.rendertargets[rendertarget_name]
elif rendertarget_name in self.temp_rendertargets:
return self.temp_rendertargets[rendertarget_name]
return None
def get_rendertarget(self, rendertarget_name):
return self.rendertargets[rendertarget_name] if rendertarget_name in self.rendertargets else None
    def get_temporary(self, rendertarget_name, reference_rendertarget=None, scale=1.0):
        """Fetch (or lazily create) a temporary render target.

        :param rendertarget_name: key into the temp registry.
        :param reference_rendertarget: when the name is not registered yet, a
            render target whose texture settings are cloned for the new one.
        :param scale: size multiplier applied to the reference's width/height.
        :return: the temporary render target, or None if it could not be made.
        """
        temp_rendertarget = None
        if rendertarget_name in self.temp_rendertargets:
            temp_rendertarget = self.temp_rendertargets[rendertarget_name]
        elif reference_rendertarget:
            # clone the reference's texture settings at the requested scale
            rendertarget_datas = reference_rendertarget.get_texture_info()
            rendertarget_datas['width'] = int(rendertarget_datas['width'] * scale)
            rendertarget_datas['height'] = int(rendertarget_datas['height'] * scale)
            rendertarget_type = rendertarget_datas['texture_type']
            if type(rendertarget_type) is str:
                # NOTE(review): eval() turns a stored type name (e.g. "Texture2D")
                # back into a class; acceptable only while texture_type strings
                # come from trusted serialized data — confirm upstream source.
                rendertarget_type = eval(rendertarget_type)
            temp_rendertarget = rendertarget_type(name=rendertarget_name, **rendertarget_datas)
            if temp_rendertarget:
                self.temp_rendertargets[rendertarget_name] = temp_rendertarget
                # send rendertarget info to GUI
                self.core_manager.send_render_target_info(temp_rendertarget.name)
        if temp_rendertarget is None:
            logger.warn("Failed to get temporary %s render target." % rendertarget_name)
        return temp_rendertarget
def create_rendertarget(self, rendertarget_name, **datas):
option = datas.get('option', Option.NONE)
rendertarget_type = datas.get('texture_type', Texture2D)
if (Option.MSAA & option) and self.renderer.postprocess.enable_MSAA():
if rendertarget_type == Texture2D:
rendertarget_type = Texture2DMultiSample
datas['multisample_count'] = self.renderer.postprocess.get_msaa_multisample_count()
elif (Option.SSAA & option) and self.renderer.postprocess.is_SSAA():
datas['width'] = datas.get('width', 1) * 2
datas['height'] = datas.get('height', 1) * 2
immutable = datas.get('immutable', False)
rendertarget = None
if rendertarget_name in self.rendertargets:
rendertarget = self.rendertargets[rendertarget_name]
if not immutable or rendertarget_name not in self.rendertargets:
# Create RenderTarget
if rendertarget_type == RenderBuffer:
rendertarget = RenderBuffer(name=rendertarget_name, **datas)
else:
rendertarget = CreateTexture(name=rendertarget_name, **datas)
if rendertarget_name not in self.rendertargets:
self.rendertargets[rendertarget_name] = rendertarget
if immutable:
self.immutable_rendertarget_names.append(rendertarget_name)
# send rendertarget info to GUI
self.core_manager.send_render_target_info(rendertarget_name)
if rendertarget is None:
logger.error("Failed to crate a render target. %s" % rendertarget_name)
return rendertarget
def create_rendertargets(self):
self.clear()
# Note : # clear rendertarget infos in GUI
self.core_manager.clear_render_target_list()
screen_width = self.viewport_manager.root.width
screen_height = self.viewport_manager.root.height
width = self.viewport_manager.main_viewport.width
height = self.viewport_manager.main_viewport.height
fullsize_x = width
fullsize_y = height
halfsize_x = int(width / 2)
halfsize_y = int(height / 2)
quatersize_x = int(width / 4)
quatersize_y = int(height / 4)
hdr_internal_format = GL_RGBA16F
hdr_data_type = GL_FLOAT
RenderTargets.SCREENBUFFER = self.create_rendertarget(
"SCREENBUFFER",
texture_type=Texture2D,
width=screen_width,
height=screen_height,
internal_format=GL_RGBA8,
texture_format=GL_RGBA,
data_type=GL_UNSIGNED_BYTE,
min_filter=GL_LINEAR,
mag_filter=GL_LINEAR,
wrap=GL_CLAMP
)
RenderTargets.BACKBUFFER = self.create_rendertarget(
"BACKBUFFER",
texture_type=Texture2D,
width=fullsize_x,
height=fullsize_y,
internal_format=GL_RGBA8,
texture_format=GL_RGBA,
data_type=GL_UNSIGNED_BYTE,
min_filter=GL_LINEAR,
mag_filter=GL_LINEAR,
wrap=GL_CLAMP
)
# NOTE : bind render target
self.viewport_manager.main_viewport.bind_texture(RenderTargets.BACKBUFFER)
RenderTargets.DEPTH = self.create_rendertarget(
"DEPTH",
texture_type=Texture2D,
option=Option.SSAA,
width=fullsize_x,
height=fullsize_y,
internal_format=GL_DEPTH_COMPONENT32F,
texture_format=GL_DEPTH_COMPONENT,
data_type=GL_FLOAT,
min_filter=GL_NEAREST,
mag_filter=GL_NEAREST,
wrap=GL_CLAMP
)
# RenderTargets.DEPTH_STENCIL = self.create_rendertarget(
# "DEPTH_STENCIL",
# texture_type=Texture2D,
# option=Option.SSAA,
# width=fullsize_x,
# height=fullsize_y,
# internal_format=GL_DEPTH24_STENCIL8,
# texture_format=GL_DEPTH_STENCIL,
# data_type=GL_UNSIGNED_INT_24_8,
# min_filter=GL_NEAREST,
# mag_filter=GL_NEAREST,
# wrap=GL_CLAMP
# )
object_id_size = 512
RenderTargets.OBJECT_ID = self.create_rendertarget(
"OBJECT_ID",
texture_type=Texture2D,
option=Option.NONE,
width=object_id_size,
height=object_id_size,
internal_format=GL_R32F,
texture_format=GL_RED,
data_type=GL_FLOAT,
min_filter=GL_NEAREST,
mag_filter=GL_NEAREST,
wrap=GL_CLAMP
)
RenderTargets.OBJECT_ID_DEPTH = self.create_rendertarget(
"OBJECT_ID_DEPTH",
texture_type=Texture2D,
option=Option.NONE,
width=object_id_size,
height=object_id_size,
internal_format=GL_DEPTH_COMPONENT32F,
texture_format=GL_DEPTH_COMPONENT,
data_type=GL_FLOAT,
min_filter=GL_NEAREST,
mag_filter=GL_NEAREST,
wrap=GL_CLAMP
)
hdr_options = dict(
texture_type=Texture2D,
option=Option.MSAA | Option.SSAA,
width=fullsize_x,
height=fullsize_y,
internal_format=hdr_internal_format,
texture_format=GL_RGBA,
min_filter=GL_LINEAR_MIPMAP_LINEAR,
mag_filter=GL_LINEAR,
data_type=hdr_data_type,
clear_color=COLOR_BLACK,
wrap=GL_CLAMP
)
RenderTargets.HDR = self.create_rendertarget("HDR", **hdr_options)
RenderTargets.HDR_TEMP = self.create_rendertarget("HDR_TEMP", **hdr_options)
RenderTargets.HDR_BACKUP = self.create_rendertarget("HDR_BACKUP", **hdr_options)
bloom_options = dict(
texture_type=Texture2D,
option=Option.SSAA,
internal_format=hdr_internal_format,
texture_format=GL_RGBA,
min_filter=GL_LINEAR,
mag_filter=GL_LINEAR,
data_type=hdr_data_type,
wrap=GL_CLAMP
)
RenderTargets.BLOOM_0 = self.create_rendertarget(
"BLOOM_0",
width=fullsize_x / 2,
height=fullsize_y / 2,
**bloom_options
)
RenderTargets.BLOOM_1 = self.create_rendertarget(
"BLOOM_1",
width=fullsize_x / 4,
height=fullsize_y / 4,
**bloom_options
)
RenderTargets.BLOOM_2 = self.create_rendertarget(
"BLOOM_2",
width=fullsize_x / 8,
height=fullsize_y / 8,
**bloom_options
)
RenderTargets.BLOOM_3 = self.create_rendertarget(
"BLOOM_3",
width=fullsize_x / 16,
height=fullsize_y / 16,
**bloom_options
)
RenderTargets.BLOOM_4 = self.create_rendertarget(
"BLOOM_4",
width=fullsize_x / 32,
height=fullsize_y / 32,
**bloom_options
)
RenderTargets.LIGHT_SHAFT = self.create_rendertarget(
"LIGHT_SHAFT",
texture_type=Texture2D,
width=halfsize_x,
height=halfsize_y,
internal_format=hdr_internal_format,
texture_format=GL_RGBA,
min_filter=GL_LINEAR,
mag_filter=GL_LINEAR,
data_type=hdr_data_type,
wrap=GL_CLAMP
)
RenderTargets.LIGHT_PROBE_ATMOSPHERE = self.create_rendertarget(
"LIGHT_PROBE_ATMOSPHERE",
texture_type=TextureCube,
width=512,
height=512,
internal_format=hdr_internal_format,
texture_format=GL_RGBA,
min_filter=GL_LINEAR_MIPMAP_LINEAR,
mag_filter=GL_LINEAR,
data_type=hdr_data_type,
wrap=GL_CLAMP_TO_EDGE,
immutable=True
)
RenderTargets.ATMOSPHERE = self.create_rendertarget(
"ATMOSPHERE",
texture_type=Texture2D,
width=quatersize_x,
height=quatersize_y,
internal_format=hdr_internal_format,
texture_format=GL_RGBA,
min_filter=GL_LINEAR,
mag_filter=GL_LINEAR,
data_type=hdr_data_type,
wrap=GL_CLAMP_TO_EDGE,
immutable=True
)
RenderTargets.ATMOSPHERE_INSCATTER = self.create_rendertarget(
"ATMOSPHERE_INSCATTER",
texture_type=Texture2D,
width=quatersize_x,
height=quatersize_y,
internal_format=hdr_internal_format,
texture_format=GL_RGBA,
min_filter=GL_LINEAR,
mag_filter=GL_LINEAR,
data_type=hdr_data_type,
wrap=GL_CLAMP_TO_EDGE,
immutable=True
)
RenderTargets.TAA_RESOLVE = self.create_rendertarget(
"TAA Resolve",
texture_type=Texture2D,
option=Option.MSAA | Option.SSAA,
width=fullsize_x,
height=fullsize_y,
internal_format=hdr_internal_format,
texture_format=GL_RGBA,
min_filter=GL_LINEAR,
mag_filter=GL_LINEAR,
data_type=hdr_data_type,
wrap=GL_CLAMP
)
RenderTargets.DIFFUSE = self.create_rendertarget(
"DIFFUSE",
texture_type=Texture2D,
option=Option.SSAA,
width=fullsize_x,
height=fullsize_y,
internal_format=GL_RGBA8,
texture_format=GL_RGBA,
data_type=GL_UNSIGNED_BYTE,
min_filter=GL_LINEAR,
mag_filter=GL_LINEAR,
wrap=GL_CLAMP
)
RenderTargets.MATERIAL = self.create_rendertarget(
"MATERIAL",
texture_type=Texture2D,
option=Option.SSAA,
width=fullsize_x,
height=fullsize_y,
internal_format=GL_RGBA8,
texture_format=GL_RGBA,
data_type=GL_UNSIGNED_BYTE,
min_filter=GL_NEAREST,
mag_filter=GL_NEAREST,
wrap=GL_CLAMP
)
RenderTargets.WORLD_NORMAL = self.create_rendertarget(
"WORLD_NORMAL",
texture_type=Texture2D,
option=Option.SSAA,
width=fullsize_x,
height=fullsize_y,
internal_format=GL_RGBA8,
texture_format=GL_RGBA,
data_type=GL_UNSIGNED_BYTE,
min_filter=GL_NEAREST,
mag_filter=GL_NEAREST,
wrap=GL_CLAMP
)
# It must attach to depth render target
shadow_map_size = 2048
RenderTargets.STATIC_SHADOWMAP = self.create_rendertarget(
"STATIC_SHADOWMAP",
texture_type=Texture2D,
width=shadow_map_size,
height=shadow_map_size,
internal_format=GL_DEPTH_COMPONENT32,
texture_format=GL_DEPTH_COMPONENT,
data_type=GL_FLOAT,
min_filter=GL_NEAREST,
mag_filter=GL_NEAREST,
wrap=GL_CLAMP
)
RenderTargets.DYNAMIC_SHADOWMAP = self.create_rendertarget(
"DYNAMIC_SHADOWMAP",
texture_type=Texture2D,
width=shadow_map_size,
height=shadow_map_size,
internal_format=GL_DEPTH_COMPONENT32,
texture_format=GL_DEPTH_COMPONENT,
data_type=GL_FLOAT,
min_filter=GL_NEAREST,
mag_filter=GL_NEAREST,
wrap=GL_CLAMP
)
RenderTargets.COMPOSITE_SHADOWMAP = self.create_rendertarget(
"COMPOSITE_SHADOWMAP",
texture_type=Texture2D,
width=shadow_map_size,
height=shadow_map_size,
internal_format=GL_R32F,
texture_format=GL_RED,
data_type=GL_FLOAT,
min_filter=GL_NEAREST,
mag_filter=GL_NEAREST,
wrap=GL_CLAMP
)
# It must attach to color render target
RenderTargets.LINEAR_DEPTH = self.create_rendertarget(
"LINEAR_DEPTH",
texture_type=Texture2D,
width=fullsize_x,
height=fullsize_y,
internal_format=GL_R32F,
texture_format=GL_RED,
data_type=GL_FLOAT,
min_filter=GL_NEAREST_MIPMAP_NEAREST,
mag_filter=GL_NEAREST,
wrap=GL_CLAMP
)
data = np.zeros(1, dtype=np.float32)
RenderTargets.FOCUS_DISTANCE = self.create_rendertarget(
"FOCUS_DISTANCE",
texture_type=Texture2D,
width=1,
height=1,
internal_format=GL_R32F,
texture_format=GL_RED,
data_type=GL_FLOAT,
min_filter=GL_NEAREST,
mag_filter=GL_NEAREST,
wrap=GL_CLAMP,
data=data
)
ssr_options = dict(
texture_type=Texture2D,
width=halfsize_x,
height=halfsize_y,
internal_format=hdr_internal_format,
texture_format=GL_RGBA,
data_type=hdr_data_type,
min_filter=GL_LINEAR,
mag_filter=GL_LINEAR,
clear_color=COLOR_BLACK,
wrap=GL_CLAMP
)
RenderTargets.SCREEN_SPACE_REFLECTION = self.create_rendertarget("SCREEN_SPACE_REFLECTION", **ssr_options)
RenderTargets.SCREEN_SPACE_REFLECTION_RESOLVED_PREV = self.create_rendertarget("SCREEN_SPACE_REFLECTION_RESOLVED_PREV", **ssr_options)
RenderTargets.SCREEN_SPACE_REFLECTION_RESOLVED = self.create_rendertarget("SCREEN_SPACE_REFLECTION_RESOLVED", **ssr_options)
RenderTargets.SSAO = self.create_rendertarget(
"SSAO",
texture_type=Texture2D,
width=halfsize_x,
height=halfsize_y,
internal_format=GL_R16F,
texture_format=GL_RED,
data_type=GL_FLOAT,
min_filter=GL_LINEAR,
mag_filter=GL_LINEAR,
wrap=GL_CLAMP
)
RenderTargets.VELOCITY = self.create_rendertarget(
"VELOCITY",
texture_type=Texture2D,
option=Option.SSAA,
width=fullsize_x,
height=fullsize_y,
internal_format=GL_RG32F,
texture_format=GL_RG,
data_type=GL_FLOAT,
min_filter=GL_NEAREST,
mag_filter=GL_NEAREST,
wrap=GL_CLAMP
)
RenderTargets.FFT_A = self.create_rendertarget(
'FFT_A',
texture_type=Texture2DArray,
image_mode='RGBA',
width=OceanConstants.FFT_SIZE,
height=OceanConstants.FFT_SIZE,
depth=5,
internal_format=GL_RGBA16F,
texture_format=GL_RGBA,
min_filter=GL_LINEAR_MIPMAP_LINEAR,
mag_filter=GL_LINEAR,
data_type=GL_FLOAT,
wrap=GL_REPEAT,
immutable=True
)
RenderTargets.FFT_B = self.create_rendertarget(
'FFT_B',
texture_type=Texture2DArray,
image_mode='RGBA',
width=OceanConstants.FFT_SIZE,
height=OceanConstants.FFT_SIZE,
depth=5,
internal_format=GL_RGBA16F,
texture_format=GL_RGBA,
min_filter=GL_LINEAR_MIPMAP_LINEAR,
mag_filter=GL_LINEAR,
data_type=GL_FLOAT,
wrap=GL_REPEAT,
immutable=True
)
RenderTargets.TEMP_RGBA8 = self.create_rendertarget(
"TEMP_RGBA8",
texture_type=Texture2D,
width=fullsize_x,
height=fullsize_y,
internal_format=GL_RGBA8,
texture_format=GL_RGBA,
data_type=GL_UNSIGNED_BYTE,
min_filter=GL_LINEAR,
mag_filter=GL_LINEAR,
wrap=GL_CLAMP
)
RenderTargets.TEMP_2D_ARRAY = self.create_rendertarget(
"TEMP_2D_ARRAY",
texture_type=Texture2DArray,
width=fullsize_x,
height=fullsize_y,
depth=5,
internal_format=GL_RGBA8,
texture_format=GL_RGBA,
data_type=GL_UNSIGNED_BYTE,
min_filter=GL_LINEAR,
mag_filter=GL_LINEAR,
wrap=GL_CLAMP
)
RenderTargets.TEMP_MULTISAMPLE_X4 = self.create_rendertarget(
"TEMP_MULTISAMPLE_X4",
texture_type=Texture2DMultiSample,
multisample_count=4,
width=fullsize_x,
height=fullsize_y,
internal_format=GL_RGBA8,
texture_format=GL_RGBA,
data_type=GL_UNSIGNED_BYTE,
min_filter=GL_LINEAR,
mag_filter=GL_LINEAR,
wrap=GL_CLAMP
)
RenderTargets.TEMP_RENDER_BUFFER_MULTISAMPLE = self.create_rendertarget(
"TEMP_RENDER_BUFFER_MULTISAMPLE",
texture_type=RenderBuffer,
multisample_count=4,
width=fullsize_x,
height=fullsize_y,
internal_format=GL_RGBA8,
wrap=GL_CLAMP
)
RenderTargets.TEMP_HEIGHT_MAP = self.create_rendertarget(
"TEMP_HEIGHT_MAP",
texture_type=Texture2D,
width=1024,
height=1024,
internal_format=GL_R32F,
texture_format=GL_RED,
data_type=GL_FLOAT,
min_filter=GL_LINEAR_MIPMAP_LINEAR,
mag_filter=GL_LINEAR,
wrap=GL_CLAMP
)
self.texture_lod_in_ssao = math.log2(RenderTargets.LINEAR_DEPTH.width) - math.log2(RenderTargets.SSAO.width)
self.core_manager.gc_collect()
| [
"PyEngine3D.Common.logger.error",
"PyEngine3D.Common.logger.warn",
"math.log2",
"numpy.zeros",
"PyEngine3D.OpenGLContext.CreateTexture",
"PyEngine3D.OpenGLContext.RenderBuffer"
] | [((17294, 17323), 'numpy.zeros', 'np.zeros', (['(1)'], {'dtype': 'np.float32'}), '(1, dtype=np.float32)\n', (17302, 17323), True, 'import numpy as np\n'), ((4617, 4693), 'PyEngine3D.Common.logger.warn', 'logger.warn', (["('Failed to get temporary %s render target.' % rendertarget_name)"], {}), "('Failed to get temporary %s render target.' % rendertarget_name)\n", (4628, 4693), False, 'from PyEngine3D.Common import logger, COLOR_BLACK\n'), ((6302, 6373), 'PyEngine3D.Common.logger.error', 'logger.error', (["('Failed to crate a render target. %s' % rendertarget_name)"], {}), "('Failed to crate a render target. %s' % rendertarget_name)\n", (6314, 6373), False, 'from PyEngine3D.Common import logger, COLOR_BLACK\n'), ((22421, 22464), 'math.log2', 'math.log2', (['RenderTargets.LINEAR_DEPTH.width'], {}), '(RenderTargets.LINEAR_DEPTH.width)\n', (22430, 22464), False, 'import math\n'), ((22467, 22502), 'math.log2', 'math.log2', (['RenderTargets.SSAO.width'], {}), '(RenderTargets.SSAO.width)\n', (22476, 22502), False, 'import math\n'), ((5763, 5808), 'PyEngine3D.OpenGLContext.RenderBuffer', 'RenderBuffer', ([], {'name': 'rendertarget_name'}), '(name=rendertarget_name, **datas)\n', (5775, 5808), False, 'from PyEngine3D.OpenGLContext import Texture2D, Texture2DArray, Texture2DMultiSample, TextureCube, RenderBuffer, CreateTexture\n'), ((5858, 5904), 'PyEngine3D.OpenGLContext.CreateTexture', 'CreateTexture', ([], {'name': 'rendertarget_name'}), '(name=rendertarget_name, **datas)\n', (5871, 5904), False, 'from PyEngine3D.OpenGLContext import Texture2D, Texture2DArray, Texture2DMultiSample, TextureCube, RenderBuffer, CreateTexture\n')] |
import os
import json
import torch
import pickle
from torch.utils.data import DataLoader
from model import KGEModel
from dataloader import TrainDataset
from dataloader import BidirectionalOneShotIterator
from classifier import ClassifierTrainer, LTTrainer, NoiGANTrainer
import numpy as np
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, roc_auc_score
class ARGS(object):
    """Minimal argparse-style namespace.

    Only ``cuda`` is set here; the rest of the script attaches further
    fields (nentity, nrelation, mode, ...) to the instance as it goes.
    """

    def __init__(self):
        self.cuda = True  # run all tensors on the GPU


# Global configuration object shared by the whole script.
args = ARGS()
def read_triple(file_path, entity2id, relation2id):
    """Read tab-separated (head, relation, tail) triples and map them to ids.

    Parameters
    ----------
    file_path : str
        Path to a file with one "head<TAB>relation<TAB>tail" triple per line.
    entity2id : dict
        Maps entity names to integer ids.
    relation2id : dict
        Maps relation names to integer ids.

    Returns
    -------
    list of (int, int, int)
        Id triples; lines whose entity or relation is out of vocabulary
        are skipped.
    """
    triples = []
    with open(file_path) as fin:
        for line in fin:
            h, r, t = line.strip().split('\t')
            # Only skip out-of-vocabulary entities/relations; the original
            # bare ``except`` also hid unrelated errors (even KeyboardInterrupt).
            try:
                triples.append((entity2id[h], relation2id[r], entity2id[t]))
            except KeyError:
                pass
    return triples
# --- Experiment configuration -------------------------------------------
model = "TransE"
dataset = "YAGO3-10"
fake = 10  # identifier of the fake-triple file (fake10.pkl) — presumably a noise percentage; TODO confirm
data_path = "../data/%s" % dataset
save_path = "../models/%s_%s_CLF_soft%d" % (model, dataset, fake)

# Build entity <-> id lookup tables from the dataset dictionary file
# (one "<id>\t<entity>" pair per line).
with open(os.path.join(data_path, 'entities.dict')) as fin:
    entity2id = dict()
    id2entity = dict()
    for line in fin:
        eid, entity = line.strip().split('\t')
        entity2id[entity] = int(eid)
        id2entity[int(eid)] = entity

# Build relation <-> id lookup tables in the same format.
with open(os.path.join(data_path, 'relations.dict')) as fin:
    relation2id = dict()
    id2relation = dict()
    for line in fin:
        rid, relation = line.strip().split('\t')
        relation2id[relation] = int(rid)
        id2relation[int(rid)] = relation

args.nentity = len(entity2id)
args.nrelation = len(relation2id)

# Load train/valid/test splits, plus the injected fake triples which are
# mixed into the training set.
train_triples = read_triple(os.path.join(data_path, 'train.txt'), entity2id, relation2id)
valid_triples = read_triple(os.path.join(data_path, 'valid.txt'), entity2id, relation2id)
test_triples = read_triple(os.path.join(data_path, 'test.txt'), entity2id, relation2id)
fake_triples = pickle.load(open(os.path.join(data_path, "fake%s.pkl" % fake), "rb"))
train_triples += fake_triples
all_true_triples = train_triples + valid_triples + test_triples

# Saved hyper-parameters of the run being evaluated.
with open(os.path.join(save_path, 'config.json'), 'r') as fjson:
    argparse_dict = json.load(fjson)
def override_config(args):
    '''
    Override model and data configuration.

    Re-reads the run's saved config.json (NOTE: uses the module-level
    ``save_path``, not ``args.save_path``) and copies the stored
    hyper-parameters onto ``args`` in place. Returns None.
    '''
    with open(os.path.join(save_path, 'config.json'), 'r') as fjson:
        argparse_dict = json.load(fjson)
    args.data_path = argparse_dict['data_path']
    args.model = argparse_dict['model']
    args.double_entity_embedding = argparse_dict['double_entity_embedding']
    args.double_relation_embedding = argparse_dict['double_relation_embedding']
    args.hidden_dim = argparse_dict['hidden_dim']
    args.test_batch_size = argparse_dict['test_batch_size']
    args.fake = argparse_dict['fake']
    args.method = argparse_dict['method']
    args.save_path = argparse_dict['save_path']
override_config(args)
# Restore the trained KGE model and the NoiGAN confidence classifier
# from the saved checkpoint, then move them to the GPU.
checkpoint = torch.load(os.path.join(save_path, 'checkpoint'))
kge_model = KGEModel(
    model_name=model,
    nentity=args.nentity,
    nrelation=args.nrelation,
    hidden_dim=args.hidden_dim,
    gamma=argparse_dict["gamma"],
    double_entity_embedding=argparse_dict["double_entity_embedding"],
    double_relation_embedding=argparse_dict["double_relation_embedding"]
)
kge_model.load_state_dict(checkpoint['model_state_dict'])
kge_model = kge_model.cuda()
trainer = NoiGANTrainer(train_triples, fake_triples, args, kge_model, False)
trainer.classifier.load_state_dict(checkpoint['classifier'])
# trainer.generator.load_state_dict(checkpoint['generator'])
# Maps (relation, tail) -> known heads and (head, relation) -> known tails,
# used to exclude true triples from the negative candidates.
true_head, true_tail = TrainDataset.get_true_head_and_tail(all_true_triples)
# The single query triple to corrupt; "tail-batch" means the tail entity
# is replaced by negative candidates.
query_head, query_relation, query_tail, args.mode = "Joby_Talbot", "wroteMusicFor", "The_Hitchhiker's_Guide_to_the_Galaxy_(film)", "tail-batch"
head, relation, tail = entity2id[query_head], relation2id[query_relation], entity2id[query_tail]
args.negative_sample_size = 1024
negative_sample_list = []
negative_sample_size = 0
# Collect the negative (corrupted) candidates with the highest scores.
# Keep drawing random entity ids, filtering out those that form true
# triples, until enough negatives are accumulated.
while negative_sample_size < args.negative_sample_size:
    negative_sample = np.random.randint(args.nentity, size=args.negative_sample_size*2)
    if args.mode == 'head-batch':
        mask = np.in1d(
            negative_sample,
            true_head[(relation, tail)],
            assume_unique=True,
            invert=True
        )
    else:
        mask = np.in1d(
            negative_sample,
            true_tail[(head, relation)],
            assume_unique=True,
            invert=True
        )
    negative_sample = negative_sample[mask]
    negative_sample_list.append(negative_sample)
    negative_sample_size += negative_sample.size
negative_sample = np.concatenate(negative_sample_list)[:args.negative_sample_size]
negative_sample = negative_sample.tolist()
# Assemble the corrupted candidate triples around the query.
candidate_sample = []
if args.mode == "head-batch":
    for nhead in negative_sample:
        candidate_sample.append((nhead, relation, tail))
else:
    for ntail in negative_sample:
        candidate_sample.append((head, relation, ntail))
candidate_tensor = torch.LongTensor(candidate_sample).cuda()
# Classifier confidence for each candidate; higher = more plausible.
confidence_weight = trainer.classifier(trainer.get_vector(candidate_tensor)).cpu().view(-1)
# Write the 50 highest-confidence fake candidates to a report file.
max_score_fake50 = confidence_weight.argsort()[-50:]
with open("max_score_fake50.txt", "w") as fw:
    for index in max_score_fake50:
        h, r, t = candidate_sample[index]
        head, relation, tail = id2entity[h], id2relation[r], id2entity[t]
        print("%s\t%s\t%s\t%f" % (head, relation, tail, confidence_weight[index]))
        fw.write("%s\t%s\t%s\t%f\n" % (head, relation, tail, confidence_weight[index]))
| [
"classifier.NoiGANTrainer",
"numpy.in1d",
"torch.LongTensor",
"os.path.join",
"model.KGEModel",
"numpy.random.randint",
"numpy.concatenate",
"dataloader.TrainDataset.get_true_head_and_tail",
"json.load"
] | [((2913, 3193), 'model.KGEModel', 'KGEModel', ([], {'model_name': 'model', 'nentity': 'args.nentity', 'nrelation': 'args.nrelation', 'hidden_dim': 'args.hidden_dim', 'gamma': "argparse_dict['gamma']", 'double_entity_embedding': "argparse_dict['double_entity_embedding']", 'double_relation_embedding': "argparse_dict['double_relation_embedding']"}), "(model_name=model, nentity=args.nentity, nrelation=args.nrelation,\n hidden_dim=args.hidden_dim, gamma=argparse_dict['gamma'],\n double_entity_embedding=argparse_dict['double_entity_embedding'],\n double_relation_embedding=argparse_dict['double_relation_embedding'])\n", (2921, 3193), False, 'from model import KGEModel\n'), ((3337, 3403), 'classifier.NoiGANTrainer', 'NoiGANTrainer', (['train_triples', 'fake_triples', 'args', 'kge_model', '(False)'], {}), '(train_triples, fake_triples, args, kge_model, False)\n', (3350, 3403), False, 'from classifier import ClassifierTrainer, LTTrainer, NoiGANTrainer\n'), ((3549, 3602), 'dataloader.TrainDataset.get_true_head_and_tail', 'TrainDataset.get_true_head_and_tail', (['all_true_triples'], {}), '(all_true_triples)\n', (3584, 3602), False, 'from dataloader import TrainDataset\n'), ((1615, 1651), 'os.path.join', 'os.path.join', (['data_path', '"""train.txt"""'], {}), "(data_path, 'train.txt')\n", (1627, 1651), False, 'import os\n'), ((1705, 1741), 'os.path.join', 'os.path.join', (['data_path', '"""valid.txt"""'], {}), "(data_path, 'valid.txt')\n", (1717, 1741), False, 'import os\n'), ((1794, 1829), 'os.path.join', 'os.path.join', (['data_path', '"""test.txt"""'], {}), "(data_path, 'test.txt')\n", (1806, 1829), False, 'import os\n'), ((2119, 2135), 'json.load', 'json.load', (['fjson'], {}), '(fjson)\n', (2128, 2135), False, 'import json\n'), ((2862, 2899), 'os.path.join', 'os.path.join', (['save_path', '"""checkpoint"""'], {}), "(save_path, 'checkpoint')\n", (2874, 2899), False, 'import os\n'), ((4023, 4090), 'numpy.random.randint', 'np.random.randint', (['args.nentity'], {'size': 
'(args.negative_sample_size * 2)'}), '(args.nentity, size=args.negative_sample_size * 2)\n', (4040, 4090), True, 'import numpy as np\n'), ((4614, 4650), 'numpy.concatenate', 'np.concatenate', (['negative_sample_list'], {}), '(negative_sample_list)\n', (4628, 4650), True, 'import numpy as np\n'), ((1020, 1060), 'os.path.join', 'os.path.join', (['data_path', '"""entities.dict"""'], {}), "(data_path, 'entities.dict')\n", (1032, 1060), False, 'import os\n'), ((1269, 1310), 'os.path.join', 'os.path.join', (['data_path', '"""relations.dict"""'], {}), "(data_path, 'relations.dict')\n", (1281, 1310), False, 'import os\n'), ((1887, 1931), 'os.path.join', 'os.path.join', (['data_path', "('fake%s.pkl' % fake)"], {}), "(data_path, 'fake%s.pkl' % fake)\n", (1899, 1931), False, 'import os\n'), ((2044, 2082), 'os.path.join', 'os.path.join', (['save_path', '"""config.json"""'], {}), "(save_path, 'config.json')\n", (2056, 2082), False, 'import os\n'), ((2315, 2331), 'json.load', 'json.load', (['fjson'], {}), '(fjson)\n', (2324, 2331), False, 'import json\n'), ((4138, 4226), 'numpy.in1d', 'np.in1d', (['negative_sample', 'true_head[relation, tail]'], {'assume_unique': '(True)', 'invert': '(True)'}), '(negative_sample, true_head[relation, tail], assume_unique=True,\n invert=True)\n', (4145, 4226), True, 'import numpy as np\n'), ((4308, 4396), 'numpy.in1d', 'np.in1d', (['negative_sample', 'true_tail[head, relation]'], {'assume_unique': '(True)', 'invert': '(True)'}), '(negative_sample, true_tail[head, relation], assume_unique=True,\n invert=True)\n', (4315, 4396), True, 'import numpy as np\n'), ((4981, 5015), 'torch.LongTensor', 'torch.LongTensor', (['candidate_sample'], {}), '(candidate_sample)\n', (4997, 5015), False, 'import torch\n'), ((2236, 2274), 'os.path.join', 'os.path.join', (['save_path', '"""config.json"""'], {}), "(save_path, 'config.json')\n", (2248, 2274), False, 'import os\n')] |
"""
Binary Class Transformation
---------------------------
The Binary Class Transformation Approach (Influential Marketing, Response Transformation Approach).
Based on
<NAME>. (2006). “Influential marketing: A new direct marketing strategy addressing
the existence of voluntary buyers”. Master of Science thesis, Simon Fraser University School
of Computing Science, Burnaby, BC,Canada. URL: https://summit.sfu.ca/item/6629
<NAME>., <NAME>., and <NAME>. (2016). “Pessimistic Uplift Modeling”. ACM SIGKDD, August 2016, San Francisco, California USA, arXiv:1603.09738v1.
URL:https://pdfs.semanticscholar.org/a67e/401715014c7a9d6a6679df70175be01daf7c.pdf.
<NAME>. et al. (2018). A Literature Survey and Experimental Evaluation of the State-of-the-Art in Uplift Modeling:
A Stepping Stone Toward the Development of Prescriptive Analytics. Big Data, Vol. 6, No. 1, March 1, 2018, pp. 1-29. Codes found at: data-lab.be/downloads.php.
Contents
BinaryTransformation Class
_binary_transformation,
_binary_regularization,
fit,
predict (Not available at this time),
predict_proba
"""
import numpy as np
from causeinfer.standard_algorithms.base_models import TransformationModel
class BinaryTransformation(TransformationModel):
    """Binary Class Transformation uplift model.

    Outcomes and treatment assignments are folded into a single binary
    target (favorable vs unfavorable units) on which a standard
    classifier is trained.
    """

    def __init__(self, model=None, regularize=False):
        """
        Checks the attributes of the model before assignment.

        Parameters
        ----------
        model : object
            A classifier exposing both ``fit`` and ``predict`` methods.
        regularize : bool
            If True, ``predict_proba`` outputs are scaled by the class
            ratios computed during fitting.
        """
        # Validate the duck-typed classifier interface up front so a bad
        # model fails at construction time rather than inside fit().
        for required in ("fit", "predict"):
            if not hasattr(model, required):
                raise AttributeError(
                    "The passed model should contain both fit and predict methods."
                )

        self.model = model
        self.regularize = regularize

    def _binary_transformation(self, y, w):
        """
        Derives which of the unknown Affected Positive or Affected Negative
        classes the unit could fall into based on known outcomes.

        Parameters
        ----------
        y : numpy.ndarray : (num_units,) : int, float
            Vector of unit responses.
        w : numpy.ndarray : (num_units,) : int, float
            Vector of original treatment allocations across units.

        Returns
        -------
        numpy.ndarray
            Transformed unit classes: 1 for favorable units (treatment
            positives or control negatives), 0 for unfavorable units
            (treatment negatives or control positives).
        """
        y_transformed = []
        for i in range(y.shape[0]):
            # Favorable, possible Affected Positive units (TPs or CNs).
            if self.is_treatment_positive(y[i], w[i]) or self.is_control_negative(
                y[i], w[i]
            ):
                y_transformed.append(1)

            # Unfavorable, possible Affected Negative units (TNs or CPs).
            elif self.is_treatment_negative(y[i], w[i]) or self.is_control_positive(
                y[i], w[i]
            ):
                y_transformed.append(0)

        return np.array(y_transformed)

    def _binary_regularization(self, y=None, w=None):
        """
        Regularization of binary classes based on the favorable and
        unfavorable affectual class frequencies.

        Parameters
        ----------
        y : numpy.ndarray : (num_units,) : int, float
            Vector of unit responses.
        w : numpy.ndarray : (num_units,) : int, float
            Vector of original treatment allocations across units.

        Returns
        -------
        None
            Sets ``self.fav_ratio`` and ``self.unfav_ratio`` in place.
        """
        fav_count, unfav_count = 0, 0
        for i in range(y.shape[0]):
            # Favorable (TPs or CNs) - contains all APs.
            if self.is_treatment_positive(y[i], w[i]) or self.is_control_negative(
                y[i], w[i]
            ):
                fav_count += 1

            # Unfavorable (TNs or CPs) - contains all ANs.
            elif self.is_treatment_negative(y[i], w[i]) or self.is_control_positive(
                y[i], w[i]
            ):
                unfav_count += 1

        total = fav_count + unfav_count
        self.fav_ratio = fav_count / total
        self.unfav_ratio = unfav_count / total

    def fit(self, X, y, w):
        """
        Trains a model given covariates, responses and assignments.

        Parameters
        ----------
        X : numpy.ndarray : (num_units, num_features) : int, float
            Matrix of covariates.
        y : numpy.ndarray : (num_units,) : int, float
            Vector of unit responses.
        w : numpy.ndarray : (num_units,) : int, float
            Vector of original treatment allocations across units.

        Returns
        -------
        self : causeinfer.standard_algorithms.BinaryTransformation
            A trained model.
        """
        y_transformed = self._binary_transformation(y, w)
        if self.regularize:
            self._binary_regularization(y, w)

        self.model.fit(X, y_transformed)

        return self

    def predict_proba(self, X):
        """
        Predicts the probability that a subject will be a given class given covariates.

        Parameters
        ----------
        X : numpy.ndarray : (num_units, num_features) : int, float
            New data on which to make predictions.

        Returns
        -------
        probas : numpy.ndarray : (num_units, 2) : float
            Predicted probabilities for the favorable and unfavorable classes.
        """
        # Score the units once instead of calling the underlying model twice.
        probas = self.model.predict_proba(X)
        pred_fav = probas[:, 1]
        pred_unfav = probas[:, 0]

        if not self.regularize:
            return np.column_stack((pred_fav, pred_unfav))

        # Scale by the class frequencies observed during fitting.
        return np.column_stack(
            (pred_fav * self.fav_ratio, pred_unfav * self.unfav_ratio)
        )
| [
"numpy.array"
] | [((2938, 2961), 'numpy.array', 'np.array', (['y_transformed'], {}), '(y_transformed)\n', (2946, 2961), True, 'import numpy as np\n')] |
import numpy as np
from sequentia.classifiers import HMM
# Three sample observation sequences of 10, 20 and 30 frames,
# each frame carrying 3 features.
X = []
for i in range(1, 4):
    X.append(np.random.random((10 * i, 3)))

# Left-right HMM with 5 states; start/transition probabilities are
# randomly initialised before fitting on the sample sequences.
hmm = HMM(label='class1', n_states=5, topology='left-right')
hmm.set_random_initial()
hmm.set_random_transitions()
hmm.fit(X)
"numpy.random.random",
"sequentia.classifiers.HMM"
] | [((237, 291), 'sequentia.classifiers.HMM', 'HMM', ([], {'label': '"""class1"""', 'n_states': '(5)', 'topology': '"""left-right"""'}), "(label='class1', n_states=5, topology='left-right')\n", (240, 291), False, 'from sequentia.classifiers import HMM\n'), ((89, 118), 'numpy.random.random', 'np.random.random', (['(10 * i, 3)'], {}), '((10 * i, 3))\n', (105, 118), True, 'import numpy as np\n')] |
import numpy as np
# 3x2 matrix built directly from a nested row list.
a = np.array([[1, 4], [2, 5], [3, 6]])
print(a.shape)  # (3, 2)
print(a[0])     # first row: [1 4]
"numpy.array"
] | [((49, 60), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (57, 60), True, 'import numpy as np\n')] |
import torch
from torch.utils.data import DataLoader
import pathlib
import os
import numpy as np
import dataset
from criteria import cal_criteria, bbrebuild
def loss_from_log(train_name):
    """Parse a training log into per-epoch train losses and validation losses.

    Parameters
    ----------
    train_name : str
        Name of the run; the log is read from ``../logs/log_<name>.txt``.

    Returns
    -------
    (numpy.ndarray, numpy.ndarray)
        Per-epoch lists of iteration losses, and the mean validation
        loss recorded for each epoch.
    """
    train_loss = []
    val_loss = []
    with open('../logs/log_%s.txt' % train_name) as f:
        for line in f:
            if line.startswith('epoch'):
                tokens = line.split()
                if tokens[-1] == 'training':
                    # A new epoch starts: open a fresh bucket of iteration losses.
                    train_loss.append([])
                if tokens[-2] == 'mean_val_loss=':
                    val_loss.append(float(tokens[-1]))
            elif line.startswith('iters'):
                train_loss[-1].append(float(line.split('=')[-1]))
    return np.array(train_loss), np.array(val_loss)
class test(object):
    """Evaluates trained checkpoints of a structure-prediction model.

    Runs saved models over the test loader, rebuilds 3D backbones from
    the predicted outputs via ``bbrebuild``, and accumulates the
    RMSD / GDT / Ramachandran criteria, optionally ensembling the
    predictions of the top checkpoints.
    """

    def __init__(self, model, train_name, test_loader):
        # Purpose of ``data`` is unclear from this file — TODO confirm or remove.
        self.data = 1
        self.testset_path = './dataset/data/test'
        self.test_loader = test_loader
        self.model = model
        # Per-run output folders: raw predictions, rebuilt coordinates,
        # and rebuilt C-beta positions.
        self.output_folder = '../output/%s/tor_pred' % train_name
        self.coo_folder = '../output/%s/coo_pred' % train_name
        self.cb_folder = '../output/%s/cb_pred' % train_name
        self.model_dir = '../output/%s/models' % train_name
        self.train_name = train_name
        pathlib.Path(self.output_folder).mkdir(parents=True, exist_ok=True)
        pathlib.Path(self.coo_folder).mkdir(parents=True, exist_ok=True)
        pathlib.Path(self.cb_folder).mkdir(parents=True, exist_ok=True)
        # Criteria accumulated across all evaluated models and samples.
        self.rmsds = []
        self.gdts = []
        self.ramas = []

    def test_model(self, model_name):
        """Run one saved checkpoint over the test set and score its outputs.

        Parameters
        ----------
        model_name : str
            Checkpoint identifier passed to ``model.load_model``.
        """
        self.model.load_model(self.train_name, model_name)
        self.model.eval()
        self.model.training = False
        output_path = os.path.join(self.output_folder, model_name)
        coo_path = os.path.join(self.coo_folder, model_name)
        cb_path = os.path.join(self.cb_folder, model_name)
        pathlib.Path(output_path).mkdir(parents=True, exist_ok=True)
        pathlib.Path(coo_path).mkdir(parents=True, exist_ok=True)
        pathlib.Path(cb_path).mkdir(parents=True, exist_ok=True)
        with torch.no_grad():
            for inputs, _, filenames, lengths in self.test_loader:
                inputs = inputs[0].cuda(non_blocking=True)
                outputs = self.model(inputs, lengths).squeeze(
                    1).transpose(0, 1)
                outputs = outputs.data.cpu().numpy()
                # Batched samples are concatenated along axis 1; slice each
                # sample back out ((length - 1) columns per sample).
                last = 0
                for filename, l_ in zip(filenames, lengths):
                    filename = filename[0]
                    next_ = last + (l_-1)
                    out = outputs[:, last: next_]
                    np.save(os.path.join(output_path, filename), out)
                    last = next_
                    # Ground-truth coordinates, sequence and C-beta data.
                    coo = np.load(os.path.join(
                        self.testset_path, 'coo', '%s.npy' % filename))
                    with open(os.path.join(self.testset_path, 'seq', '%s.txt' % filename)) as f:
                        seq = f.read()
                    cb = np.load(os.path.join(
                        self.testset_path, 'cb', '%s.npy' % filename))
                    # Every 4th atom starting at index 1 — presumably the
                    # C-alpha trace of an N/CA/C/O backbone layout; TODO confirm.
                    ca = coo[1::4]
                    coo_, cb_ = bbrebuild(ca, out, seq)
                    np.save(os.path.join(coo_path, filename), coo_)
                    np.save(os.path.join(cb_path, filename), cb_)
                    rmsd, gdt, rama = cal_criteria(coo, cb, coo_, cb_, seq)
                    self.rmsds.append(rmsd)
                    self.gdts.append(gdt)
                    self.ramas.append(rama)

    def ensemble_output(self, model_names):
        """Average the saved predictions of several checkpoints and score them.

        Parameters
        ----------
        model_names : iterable
            Checkpoint identifiers whose outputs (written by
            ``test_model``) are averaged.
        """
        ensemble_path = os.path.join(self.output_folder, 'ensemble')
        coo_path = os.path.join(self.coo_folder, 'ensemble')
        cb_path = os.path.join(self.cb_folder, 'ensemble')
        pathlib.Path(ensemble_path).mkdir(parents=True, exist_ok=True)
        pathlib.Path(coo_path).mkdir(parents=True, exist_ok=True)
        pathlib.Path(cb_path).mkdir(parents=True, exist_ok=True)
        output_paths = [os.path.join(
            self.output_folder, str(model_name)) for model_name in model_names]
        filenames = os.listdir(output_paths[0])
        for filename in filenames:
            # Mean prediction across the selected checkpoints.
            output = [np.load(os.path.join(output_path, filename))
                      for output_path in output_paths]
            ensemble_output = np.mean(np.array(output), axis=0)
            np.save(os.path.join(ensemble_path, filename), ensemble_output)
            # Strip the '.npy' extension to recover the sample name.
            filename = filename[:-4]
            coo = np.load(os.path.join(self.testset_path,
                                        'coo', '%s.npy' % filename))
            with open(os.path.join(self.testset_path, 'seq', '%s.txt' % filename)) as f:
                seq = f.read()
            cb = np.load(os.path.join(
                self.testset_path, 'cb', '%s.npy' % filename))
            ca = coo[1::4]
            coo_, cb_ = bbrebuild(ca, ensemble_output, seq)
            np.save(os.path.join(coo_path, filename), coo_)
            np.save(os.path.join(cb_path, filename), cb_)
            rmsd, gdt, rama = cal_criteria(coo, cb, coo_, cb_, seq)
            self.rmsds.append(rmsd)
            self.gdts.append(gdt)
            self.ramas.append(rama)

    def test_top_models(self, top_num=3, ensemble=True):
        """Evaluate the checkpoints with the lowest validation loss.

        Parameters
        ----------
        top_num : int
            Number of best checkpoints (by validation loss) to evaluate.
        ensemble : bool
            If True, additionally score the averaged top-model outputs.

        Side effects: converts the accumulated criteria to arrays and
        saves them under ``../results/{rmsd,gdt,rama}/``.
        """
        _, val_loss = loss_from_log(self.train_name)
        top_models_index = np.argsort(val_loss)[:top_num]
        for i in top_models_index:
            self.test_model(str(i))
        if ensemble:
            self.ensemble_output(top_models_index)
        self.rmsds = np.array(self.rmsds)
        self.gdts = np.array(self.gdts)
        self.ramas = np.array(self.ramas)
        np.save('../results/rmsd/%s.npy' % self.train_name, self.rmsds)
        np.save('../results/gdt/%s.npy' % self.train_name, self.gdts)
        np.save('../results/rama/%s.npy' % self.train_name, self.ramas)
| [
"os.listdir",
"pathlib.Path",
"criteria.cal_criteria",
"criteria.bbrebuild",
"os.path.join",
"numpy.argsort",
"numpy.array",
"torch.no_grad",
"numpy.save"
] | [((705, 725), 'numpy.array', 'np.array', (['train_loss'], {}), '(train_loss)\n', (713, 725), True, 'import numpy as np\n'), ((742, 760), 'numpy.array', 'np.array', (['val_loss'], {}), '(val_loss)\n', (750, 760), True, 'import numpy as np\n'), ((1798, 1842), 'os.path.join', 'os.path.join', (['self.output_folder', 'model_name'], {}), '(self.output_folder, model_name)\n', (1810, 1842), False, 'import os\n'), ((1863, 1904), 'os.path.join', 'os.path.join', (['self.coo_folder', 'model_name'], {}), '(self.coo_folder, model_name)\n', (1875, 1904), False, 'import os\n'), ((1924, 1964), 'os.path.join', 'os.path.join', (['self.cb_folder', 'model_name'], {}), '(self.cb_folder, model_name)\n', (1936, 1964), False, 'import os\n'), ((3717, 3761), 'os.path.join', 'os.path.join', (['self.output_folder', '"""ensemble"""'], {}), "(self.output_folder, 'ensemble')\n", (3729, 3761), False, 'import os\n'), ((3782, 3823), 'os.path.join', 'os.path.join', (['self.coo_folder', '"""ensemble"""'], {}), "(self.coo_folder, 'ensemble')\n", (3794, 3823), False, 'import os\n'), ((3843, 3883), 'os.path.join', 'os.path.join', (['self.cb_folder', '"""ensemble"""'], {}), "(self.cb_folder, 'ensemble')\n", (3855, 3883), False, 'import os\n'), ((4232, 4259), 'os.listdir', 'os.listdir', (['output_paths[0]'], {}), '(output_paths[0])\n', (4242, 4259), False, 'import os\n'), ((5695, 5715), 'numpy.array', 'np.array', (['self.rmsds'], {}), '(self.rmsds)\n', (5703, 5715), True, 'import numpy as np\n'), ((5737, 5756), 'numpy.array', 'np.array', (['self.gdts'], {}), '(self.gdts)\n', (5745, 5756), True, 'import numpy as np\n'), ((5779, 5799), 'numpy.array', 'np.array', (['self.ramas'], {}), '(self.ramas)\n', (5787, 5799), True, 'import numpy as np\n'), ((5811, 5874), 'numpy.save', 'np.save', (["('../results/rmsd/%s.npy' % self.train_name)", 'self.rmsds'], {}), "('../results/rmsd/%s.npy' % self.train_name, self.rmsds)\n", (5818, 5874), True, 'import numpy as np\n'), ((5884, 5945), 'numpy.save', 'np.save', 
(["('../results/gdt/%s.npy' % self.train_name)", 'self.gdts'], {}), "('../results/gdt/%s.npy' % self.train_name, self.gdts)\n", (5891, 5945), True, 'import numpy as np\n'), ((5955, 6018), 'numpy.save', 'np.save', (["('../results/rama/%s.npy' % self.train_name)", 'self.ramas'], {}), "('../results/rama/%s.npy' % self.train_name, self.ramas)\n", (5962, 6018), True, 'import numpy as np\n'), ((2184, 2199), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2197, 2199), False, 'import torch\n'), ((5011, 5046), 'criteria.bbrebuild', 'bbrebuild', (['ca', 'ensemble_output', 'seq'], {}), '(ca, ensemble_output, seq)\n', (5020, 5046), False, 'from criteria import cal_criteria, bbrebuild\n'), ((5200, 5237), 'criteria.cal_criteria', 'cal_criteria', (['coo', 'cb', 'coo_', 'cb_', 'seq'], {}), '(coo, cb, coo_, cb_, seq)\n', (5212, 5237), False, 'from criteria import cal_criteria, bbrebuild\n'), ((5489, 5509), 'numpy.argsort', 'np.argsort', (['val_loss'], {}), '(val_loss)\n', (5499, 5509), True, 'import numpy as np\n'), ((1319, 1351), 'pathlib.Path', 'pathlib.Path', (['self.output_folder'], {}), '(self.output_folder)\n', (1331, 1351), False, 'import pathlib\n'), ((1396, 1425), 'pathlib.Path', 'pathlib.Path', (['self.coo_folder'], {}), '(self.coo_folder)\n', (1408, 1425), False, 'import pathlib\n'), ((1470, 1498), 'pathlib.Path', 'pathlib.Path', (['self.cb_folder'], {}), '(self.cb_folder)\n', (1482, 1498), False, 'import pathlib\n'), ((1974, 1999), 'pathlib.Path', 'pathlib.Path', (['output_path'], {}), '(output_path)\n', (1986, 1999), False, 'import pathlib\n'), ((2044, 2066), 'pathlib.Path', 'pathlib.Path', (['coo_path'], {}), '(coo_path)\n', (2056, 2066), False, 'import pathlib\n'), ((2111, 2132), 'pathlib.Path', 'pathlib.Path', (['cb_path'], {}), '(cb_path)\n', (2123, 2132), False, 'import pathlib\n'), ((3893, 3920), 'pathlib.Path', 'pathlib.Path', (['ensemble_path'], {}), '(ensemble_path)\n', (3905, 3920), False, 'import pathlib\n'), ((3965, 3987), 'pathlib.Path', 
'pathlib.Path', (['coo_path'], {}), '(coo_path)\n', (3977, 3987), False, 'import pathlib\n'), ((4032, 4053), 'pathlib.Path', 'pathlib.Path', (['cb_path'], {}), '(cb_path)\n', (4044, 4053), False, 'import pathlib\n'), ((4459, 4475), 'numpy.array', 'np.array', (['output'], {}), '(output)\n', (4467, 4475), True, 'import numpy as np\n'), ((4506, 4543), 'os.path.join', 'os.path.join', (['ensemble_path', 'filename'], {}), '(ensemble_path, filename)\n', (4518, 4543), False, 'import os\n'), ((4629, 4688), 'os.path.join', 'os.path.join', (['self.testset_path', '"""coo"""', "('%s.npy' % filename)"], {}), "(self.testset_path, 'coo', '%s.npy' % filename)\n", (4641, 4688), False, 'import os\n'), ((4878, 4936), 'os.path.join', 'os.path.join', (['self.testset_path', '"""cb"""', "('%s.npy' % filename)"], {}), "(self.testset_path, 'cb', '%s.npy' % filename)\n", (4890, 4936), False, 'import os\n'), ((5068, 5100), 'os.path.join', 'os.path.join', (['coo_path', 'filename'], {}), '(coo_path, filename)\n', (5080, 5100), False, 'import os\n'), ((5129, 5160), 'os.path.join', 'os.path.join', (['cb_path', 'filename'], {}), '(cb_path, filename)\n', (5141, 5160), False, 'import os\n'), ((3273, 3296), 'criteria.bbrebuild', 'bbrebuild', (['ca', 'out', 'seq'], {}), '(ca, out, seq)\n', (3282, 3296), False, 'from criteria import cal_criteria, bbrebuild\n'), ((3474, 3511), 'criteria.cal_criteria', 'cal_criteria', (['coo', 'cb', 'coo_', 'cb_', 'seq'], {}), '(coo, cb, coo_, cb_, seq)\n', (3486, 3511), False, 'from criteria import cal_criteria, bbrebuild\n'), ((4327, 4362), 'os.path.join', 'os.path.join', (['output_path', 'filename'], {}), '(output_path, filename)\n', (4339, 4362), False, 'import os\n'), ((4753, 4812), 'os.path.join', 'os.path.join', (['self.testset_path', '"""seq"""', "('%s.txt' % filename)"], {}), "(self.testset_path, 'seq', '%s.txt' % filename)\n", (4765, 4812), False, 'import os\n'), ((2744, 2779), 'os.path.join', 'os.path.join', (['output_path', 'filename'], {}), '(output_path, 
filename)\n', (2756, 2779), False, 'import os\n'), ((2857, 2916), 'os.path.join', 'os.path.join', (['self.testset_path', '"""coo"""', "('%s.npy' % filename)"], {}), "(self.testset_path, 'coo', '%s.npy' % filename)\n", (2869, 2916), False, 'import os\n'), ((3116, 3174), 'os.path.join', 'os.path.join', (['self.testset_path', '"""cb"""', "('%s.npy' % filename)"], {}), "(self.testset_path, 'cb', '%s.npy' % filename)\n", (3128, 3174), False, 'import os\n'), ((3326, 3358), 'os.path.join', 'os.path.join', (['coo_path', 'filename'], {}), '(coo_path, filename)\n', (3338, 3358), False, 'import os\n'), ((3395, 3426), 'os.path.join', 'os.path.join', (['cb_path', 'filename'], {}), '(cb_path, filename)\n', (3407, 3426), False, 'import os\n'), ((2975, 3034), 'os.path.join', 'os.path.join', (['self.testset_path', '"""seq"""', "('%s.txt' % filename)"], {}), "(self.testset_path, 'seq', '%s.txt' % filename)\n", (2987, 3034), False, 'import os\n')] |
import numpy as np
import cv2
from Feature_Matching import initial_guess
from velocity import velocity
def distance(velocity_estimate,frame_0_time,time_inc,base_frame,curr_frame):
    '''
    Compute the distance travelled between two camera frames.

    The velocity profile is sampled at the timestamps of the two frames,
    averaged over that interval, and multiplied by the elapsed time.

    Parameters:
    -----------
    velocity_estimate = Nx2 array of (time, velocity) rows
    frame_0_time = time at frame 0
    time_inc = time increment between consecutive frames
    base_frame = reference frame number
    curr_frame = current frame number

    Returns:
    --------
    Estimated distance travelled between the two frames
    '''
    t_base = frame_0_time + time_inc * base_frame
    t_curr = frame_0_time + time_inc * curr_frame
    # index of the last velocity sample recorded at or before each timestamp
    idx_base = np.nonzero(velocity_estimate[:, 0] <= t_base)[0][-1]
    idx_curr = np.nonzero(velocity_estimate[:, 0] <= t_curr)[0][-1]
    mean_speed = np.mean(velocity_estimate[idx_base:idx_curr + 1, 1])
    return mean_speed * (t_curr - t_base)
def observations(frame_0_time,nth_frame,input_dir):
    '''
    Retrieves:
    1. Feature correspondences across camera frames
    2. Initial guess for corresponding 3D points.
    3. Initial guess for camera transformation matrices
    All 3D points and transformations are defined w.r.t the first frame

    Parameters:
    -----------
    frame_0_time = timestamp of frame 0 (same time base as the velocity profile)
    nth_frame = stride: process every nth image of the hard-coded run-4 list
    input_dir = root directory holding the run4_base_hr data

    Ouptut:
    -------
    point_list = Nx4 numpy array containing the observation points in the format - <Camera Frame><Feature index><x,y feature coordinates in current Camera Frame>
    model_params = 1x4 list containing <total number of camera frames>,<total number of feature points>,<total number of observations>
    cam_transforms = Nx6 numpy array of camera extrensic parameters
    pt_3d_list = Nx3 numpy array of 3d points for matched features. The 3D points are defined w.r.t the first frame
    '''
    # list of run 4 image files (fixed capture session, frames 50..110)
    images_root = [
    input_dir+'/run4_base_hr/mono_image/frame000050_2018_09_04_18_14_44_143406.png',\
    input_dir+'/run4_base_hr/mono_image/frame000051_2018_09_04_18_14_44_342875.png',\
    input_dir+'/run4_base_hr/mono_image/frame000052_2018_09_04_18_14_44_543322.png',\
    input_dir+'/run4_base_hr/mono_image/frame000053_2018_09_04_18_14_44_745412.png',\
    input_dir+'/run4_base_hr/mono_image/frame000054_2018_09_04_18_14_44_947120.png',\
    input_dir+'/run4_base_hr/mono_image/frame000055_2018_09_04_18_14_45_145872.png',\
    input_dir+'/run4_base_hr/mono_image/frame000056_2018_09_04_18_14_45_347874.png',\
    input_dir+'/run4_base_hr/mono_image/frame000057_2018_09_04_18_14_45_548328.png',\
    input_dir+'/run4_base_hr/mono_image/frame000058_2018_09_04_18_14_45_745208.png',\
    input_dir+'/run4_base_hr/mono_image/frame000059_2018_09_04_18_14_45_942796.png',\
    input_dir+'/run4_base_hr/mono_image/frame000060_2018_09_04_18_14_46_143434.png',\
    input_dir+'/run4_base_hr/mono_image/frame000061_2018_09_04_18_14_46_343646.png',\
    input_dir+'/run4_base_hr/mono_image/frame000062_2018_09_04_18_14_46_543835.png',\
    input_dir+'/run4_base_hr/mono_image/frame000063_2018_09_04_18_14_46_747026.png',\
    input_dir+'/run4_base_hr/mono_image/frame000064_2018_09_04_18_14_46_949385.png',\
    input_dir+'/run4_base_hr/mono_image/frame000065_2018_09_04_18_14_47_146589.png',\
    input_dir+'/run4_base_hr/mono_image/frame000066_2018_09_04_18_14_47_345159.png',\
    input_dir+'/run4_base_hr/mono_image/frame000067_2018_09_04_18_14_47_544991.png',\
    input_dir+'/run4_base_hr/mono_image/frame000068_2018_09_04_18_14_47_742937.png',\
    input_dir+'/run4_base_hr/mono_image/frame000069_2018_09_04_18_14_47_942956.png',\
    input_dir+'/run4_base_hr/mono_image/frame000070_2018_09_04_18_14_48_143949.png',\
    input_dir+'/run4_base_hr/mono_image/frame000071_2018_09_04_18_14_48_343130.png',\
    input_dir+'/run4_base_hr/mono_image/frame000072_2018_09_04_18_14_48_547100.png',\
    input_dir+'/run4_base_hr/mono_image/frame000073_2018_09_04_18_14_48_748986.png',\
    input_dir+'/run4_base_hr/mono_image/frame000074_2018_09_04_18_14_48_944920.png',\
    input_dir+'/run4_base_hr/mono_image/frame000075_2018_09_04_18_14_49_145128.png',\
    input_dir+'/run4_base_hr/mono_image/frame000076_2018_09_04_18_14_49_343308.png',\
    input_dir+'/run4_base_hr/mono_image/frame000077_2018_09_04_18_14_49_543605.png',\
    input_dir+'/run4_base_hr/mono_image/frame000078_2018_09_04_18_14_49_742891.png',\
    input_dir+'/run4_base_hr/mono_image/frame000079_2018_09_04_18_14_49_945075.png',\
    input_dir+'/run4_base_hr/mono_image/frame000080_2018_09_04_18_14_50_146393.png',\
    input_dir+'/run4_base_hr/mono_image/frame000081_2018_09_04_18_14_50_346777.png',\
    input_dir+'/run4_base_hr/mono_image/frame000082_2018_09_04_18_14_50_546128.png',\
    input_dir+'/run4_base_hr/mono_image/frame000083_2018_09_04_18_14_50_743644.png',\
    input_dir+'/run4_base_hr/mono_image/frame000084_2018_09_04_18_14_50_943493.png',\
    input_dir+'/run4_base_hr/mono_image/frame000085_2018_09_04_18_14_51_144934.png',\
    input_dir+'/run4_base_hr/mono_image/frame000086_2018_09_04_18_14_51_343637.png',\
    input_dir+'/run4_base_hr/mono_image/frame000087_2018_09_04_18_14_51_544055.png',\
    input_dir+'/run4_base_hr/mono_image/frame000088_2018_09_04_18_14_51_743875.png',\
    input_dir+'/run4_base_hr/mono_image/frame000089_2018_09_04_18_14_51_946698.png',\
    input_dir+'/run4_base_hr/mono_image/frame000090_2018_09_04_18_14_52_145009.png',\
    input_dir+'/run4_base_hr/mono_image/frame000091_2018_09_04_18_14_52_345942.png',\
    input_dir+'/run4_base_hr/mono_image/frame000092_2018_09_04_18_14_52_543475.png',\
    input_dir+'/run4_base_hr/mono_image/frame000093_2018_09_04_18_14_52_743029.png',\
    input_dir+'/run4_base_hr/mono_image/frame000094_2018_09_04_18_14_52_942685.png',\
    input_dir+'/run4_base_hr/mono_image/frame000095_2018_09_04_18_14_53_144148.png',\
    input_dir+'/run4_base_hr/mono_image/frame000096_2018_09_04_18_14_53_343962.png',\
    input_dir+'/run4_base_hr/mono_image/frame000097_2018_09_04_18_14_53_547824.png',\
    input_dir+'/run4_base_hr/mono_image/frame000098_2018_09_04_18_14_53_744450.png',\
    input_dir+'/run4_base_hr/mono_image/frame000099_2018_09_04_18_14_53_945602.png',\
    input_dir+'/run4_base_hr/mono_image/frame000100_2018_09_04_18_14_54_145016.png',\
    input_dir+'/run4_base_hr/mono_image/frame000101_2018_09_04_18_14_54_343470.png',\
    input_dir+'/run4_base_hr/mono_image/frame000102_2018_09_04_18_14_54_543084.png',\
    input_dir+'/run4_base_hr/mono_image/frame000103_2018_09_04_18_14_54_743082.png',\
    input_dir+'/run4_base_hr/mono_image/frame000104_2018_09_04_18_14_54_943012.png',\
    input_dir+'/run4_base_hr/mono_image/frame000105_2018_09_04_18_14_55_145069.png',\
    input_dir+'/run4_base_hr/mono_image/frame000106_2018_09_04_18_14_55_343661.png',\
    input_dir+'/run4_base_hr/mono_image/frame000107_2018_09_04_18_14_55_546670.png',\
    input_dir+'/run4_base_hr/mono_image/frame000108_2018_09_04_18_14_55_745882.png',\
    input_dir+'/run4_base_hr/mono_image/frame000109_2018_09_04_18_14_55_945317.png',\
    input_dir+'/run4_base_hr/mono_image/frame000110_2018_09_04_18_14_56_142818.png'
    ]
    #Retrieve velocity profiles
    velocity_estimate = np.asarray(velocity(input_dir))
    # frames are captured ~0.2s apart; stride scales the inter-frame interval
    time_inc = 0.2*nth_frame
    #images to be processed
    images = images_root[0::nth_frame]
    #Camera Calibration Matrix (intrinsics; currently unused below)
    K =np.array([[904.04572636,0,645.74398382],[0,907.01811462,512.14951996],[0,0,1]])
    # NOTE(review): list multiplication repeats the SAME array object; this is
    # safe only because elements are always re-assigned, never mutated in place
    Data_assoc=[np.zeros((1,2))]*len(images)
    cam_transforms = [np.identity(4)]*len(images)
    for i in range(len(images)):
        # pont matching is done upto 6 frames forward
        for j in range(i+1,min(i+6,len(images))):
            # crop to the top 500 rows (presumably removes the vehicle hood -- TODO confirm)
            img1 = cv2.imread(images[i],0)[:500,:]
            img2 = cv2.imread(images[j],0)[:500,:]
            # travelled distance fixes the scale of the relative pose guess
            dist = distance(velocity_estimate,frame_0_time,time_inc,i,j)
            src,dst,pt_3d,R_t_mat=initial_guess(img1,img2,dist)
            # transform camera parameters w.r.t to frame 0
            # (chained only for consecutive frames, i.e. j == i+1)
            if j-i==1:
                cam_transforms[j]= cam_transforms[j-1]@R_t_mat
            # transform 3D points w.r.t frame 0 (homogeneous coordinates)
            pt_3d = (cam_transforms[i]@np.hstack((pt_3d,np.ones((pt_3d.shape[0],1)))).T).T
            #Logic for creating list of matching point correspondences across multiple frames
            # point_i_bool[r] is True when src[r] is already tracked in Data_assoc[i]
            point_i_bool=(src[:,None]==Data_assoc[i]).all(-1).any(-1)
            if (~point_i_bool).all() == True:
                # every matched feature is new; initialise the tables on first use
                # NOTE(review): when all features are new but Data_assoc[i] is
                # already populated (shape != 1), these matches are dropped -- verify intent
                if Data_assoc[i].shape[0]==1:
                    Data_assoc[i]=src
                    Data_assoc[j]=dst
                    pt_3d_list = pt_3d
                    for iter in range(len(images)):
                        if iter != i and iter != j:
                            Data_assoc[iter]=np.zeros((src.shape[0],2))
            else:
                for row_ind,row in enumerate(point_i_bool):
                    if row.all() == False:
                        # unseen feature: append a new row to every frame table
                        Data_assoc[i]=np.vstack((Data_assoc[i],src[row_ind]))
                        if Data_assoc[j].shape[0]<Data_assoc[i].shape[0]:
                            diff = Data_assoc[i].shape[0]-Data_assoc[j].shape[0]-1
                            Data_assoc[j]=np.vstack((Data_assoc[j],np.zeros((diff,2))))
                        Data_assoc[j]=np.vstack((Data_assoc[j],dst[row_ind]))
                        pt_3d_list = np.vstack((pt_3d_list,pt_3d[row_ind]))
                        for iter in range(len(images)):
                            if iter != i and iter != j:
                                Data_assoc[iter]=np.vstack((Data_assoc[iter],np.zeros((1,2))))
                    else:
                        # known feature: record its position in frame j at the same row index
                        j_ind = (Data_assoc[i]==src[row_ind]).all(axis=1).nonzero()
                        j_ind = j_ind[0][0]
                        if Data_assoc[j].shape[0]<=j_ind:
                            diff = j_ind-Data_assoc[j].shape[0]+1
                            Data_assoc[j]=np.vstack((Data_assoc[j],np.zeros((diff,2))))
                        Data_assoc[j][j_ind]=dst[row_ind]
    # Reorder data for returning to main function:
    # one (frame, feature, x, y) row per non-zero observation
    point_list=[]
    for i in range(Data_assoc[0].shape[0]):
        for j in range(len(Data_assoc)):
            if len(Data_assoc[j][i].nonzero()[0]) != 0:
                point_list.append([j,i,Data_assoc[j][i][0],Data_assoc[j][i][1]])
    point_list = np.asarray(point_list)
    camera_frames = len(Data_assoc)
    num_points = Data_assoc[0].shape[0]
    num_observation = len(point_list)
    model_params = [camera_frames,num_points,num_observation]
    # compact each 4x4 extrinsic into [rodrigues rotation (3), translation (3)]
    for index,matrix in enumerate(cam_transforms):
        cam_transforms[index] = (np.vstack((cv2.Rodrigues(matrix[:3,:3])[0],matrix[:3,3].reshape(-1,1)))).T
    cam_transforms = np.asarray(cam_transforms).reshape(-1,6)
    return point_list,model_params,cam_transforms,pt_3d_list
| [
"numpy.identity",
"numpy.ones",
"Feature_Matching.initial_guess",
"velocity.velocity",
"numpy.asarray",
"numpy.array",
"numpy.zeros",
"cv2.Rodrigues",
"numpy.vstack",
"cv2.imread"
] | [((7973, 8064), 'numpy.array', 'np.array', (['[[904.04572636, 0, 645.74398382], [0, 907.01811462, 512.14951996], [0, 0, 1]]'], {}), '([[904.04572636, 0, 645.74398382], [0, 907.01811462, 512.14951996],\n [0, 0, 1]])\n', (7981, 8064), True, 'import numpy as np\n'), ((10920, 10942), 'numpy.asarray', 'np.asarray', (['point_list'], {}), '(point_list)\n', (10930, 10942), True, 'import numpy as np\n'), ((7811, 7830), 'velocity.velocity', 'velocity', (['input_dir'], {}), '(input_dir)\n', (7819, 7830), False, 'from velocity import velocity\n'), ((8070, 8086), 'numpy.zeros', 'np.zeros', (['(1, 2)'], {}), '((1, 2))\n', (8078, 8086), True, 'import numpy as np\n'), ((8121, 8135), 'numpy.identity', 'np.identity', (['(4)'], {}), '(4)\n', (8132, 8135), True, 'import numpy as np\n'), ((8496, 8527), 'Feature_Matching.initial_guess', 'initial_guess', (['img1', 'img2', 'dist'], {}), '(img1, img2, dist)\n', (8509, 8527), False, 'from Feature_Matching import initial_guess\n'), ((11302, 11328), 'numpy.asarray', 'np.asarray', (['cam_transforms'], {}), '(cam_transforms)\n', (11312, 11328), True, 'import numpy as np\n'), ((8306, 8330), 'cv2.imread', 'cv2.imread', (['images[i]', '(0)'], {}), '(images[i], 0)\n', (8316, 8330), False, 'import cv2\n'), ((8357, 8381), 'cv2.imread', 'cv2.imread', (['images[j]', '(0)'], {}), '(images[j], 0)\n', (8367, 8381), False, 'import cv2\n'), ((9541, 9581), 'numpy.vstack', 'np.vstack', (['(Data_assoc[i], src[row_ind])'], {}), '((Data_assoc[i], src[row_ind]))\n', (9550, 9581), True, 'import numpy as np\n'), ((9864, 9904), 'numpy.vstack', 'np.vstack', (['(Data_assoc[j], dst[row_ind])'], {}), '((Data_assoc[j], dst[row_ind]))\n', (9873, 9904), True, 'import numpy as np\n'), ((9941, 9980), 'numpy.vstack', 'np.vstack', (['(pt_3d_list, pt_3d[row_ind])'], {}), '((pt_3d_list, pt_3d[row_ind]))\n', (9950, 9980), True, 'import numpy as np\n'), ((11216, 11245), 'cv2.Rodrigues', 'cv2.Rodrigues', (['matrix[:3, :3]'], {}), '(matrix[:3, :3])\n', (11229, 11245), False, 
'import cv2\n'), ((9354, 9381), 'numpy.zeros', 'np.zeros', (['(src.shape[0], 2)'], {}), '((src.shape[0], 2))\n', (9362, 9381), True, 'import numpy as np\n'), ((8782, 8810), 'numpy.ones', 'np.ones', (['(pt_3d.shape[0], 1)'], {}), '((pt_3d.shape[0], 1))\n', (8789, 8810), True, 'import numpy as np\n'), ((9805, 9824), 'numpy.zeros', 'np.zeros', (['(diff, 2)'], {}), '((diff, 2))\n', (9813, 9824), True, 'import numpy as np\n'), ((10532, 10551), 'numpy.zeros', 'np.zeros', (['(diff, 2)'], {}), '((diff, 2))\n', (10540, 10551), True, 'import numpy as np\n'), ((10169, 10185), 'numpy.zeros', 'np.zeros', (['(1, 2)'], {}), '((1, 2))\n', (10177, 10185), True, 'import numpy as np\n')] |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import List, Dict, Callable
import numpy as np
from allennlp.common.checks import ConfigurationError
from kb_utils.kb_context import KBContext
from utils import EntityType, Universe
class Action:
    """A grammar production of the form ``lhs -> rhs`` parsed from its string form."""

    def __init__(self, action_str: str):
        self.repr = action_str
        parts = action_str.split(" -> ")
        # guard: " -> " must not occur inside a relation/entity name
        assert len(parts) == 2
        # the right-hand side may itself contain several expanded nonterminals
        self.nonterminal, self.rhs = parts

    def __repr__(self):
        return self.repr
class KBWorld:
    """
    The world class is responsible for specifying all valid candidate actions for a specific case,
    and converting them into their corresponding index in model training.
    """

    def __init__(self, world_id: str, kb_context: KBContext, sparql_query: str, sexpression: str,
                 origin_utterance: str, answers: List[str], level: str, graph_query_info: Dict,
                 entity_meta_info: Dict, entity_offset_tokens: List[str],
                 language_class: Callable, sparql_converter: Callable, verify_sparql_converter: Callable):
        self.world_id = world_id
        self.kb_context = kb_context
        # instantiate the grammar/language for this knowledge-base context
        self.language = language_class.build(kb_context)
        # keep the raw sexpression for evaluation ...
        self.sexpression_eval = sexpression
        # ... and a pre-processed copy for parsing
        self.sexpression_parse = self._preprocess(sexpression)
        self.sparql_query = sparql_query
        self.origin_utterance = origin_utterance
        self.answers = answers
        self.level = level

        # map the last component of every schema entry (entity id / relation
        # name) to its position in the knowledge-graph entity list
        self.entities_indexer = {}
        for i, schema in enumerate(self.kb_context.knowledge_graph.entities):
            main_type = schema[:schema.find(":")]
            if main_type == Universe.entity_repr:
                # entity id is at the last, we should avoid it to be split
                parts = schema.split(":", maxsplit=3)
            elif main_type == Universe.relation_repr:
                parts = schema.split(":", maxsplit=2)
            else:
                raise ConfigurationError(f"Do not support for main type as {main_type}")
            self.entities_indexer[parts[-1]] = i

        # nonterminal -> productions; filled by get_action_sequence_and_all_actions
        self.valid_actions: Dict[str, List[str]] = {}
        self.valid_actions_flat: List[Action] = []
        self.graph_query_info = graph_query_info
        self.entity_meta_info = entity_meta_info
        self.entity_offset_tokens = entity_offset_tokens
        # a converter to convert sexpression to sparql
        self.sparql_converter = sparql_converter
        self.verify_sparql_converter = verify_sparql_converter

    def get_action_sequence_and_all_actions(self):
        """Return the gold action sequence and all candidate productions.

        Also populates ``valid_actions`` / ``valid_actions_flat`` as a side
        effect. The action sequence is empty when the gold sexpression cannot
        be parsed under the grammar.
        """
        try:
            action_sequence = self.language.logical_form_to_action_sequence(self.sexpression_parse)
        except Exception:
            # BUG FIX: was a bare `except:` which also swallowed SystemExit /
            # KeyboardInterrupt; only parsing failures should be silenced here
            action_sequence = []
        all_action = self.language.all_possible_productions()
        # parse str into structure inside Action
        self.valid_actions_flat = [Action(ins) for ins in all_action]
        # group the productions by their left-hand-side nonterminal
        for action in self.valid_actions_flat:
            self.valid_actions.setdefault(action.nonterminal, []).append(action.repr)
        return action_sequence, all_action

    def index_entity_type(self):
        """Return, for every knowledge-graph entity, the index of its coarse
        type in the fixed ``defined_types`` list (as an int numpy array)."""
        defined_types = ['@@PAD@@', EntityType.entity_num, EntityType.entity_set,
                         EntityType.entity_str, Universe.relation_repr]
        # now we have 5 types
        assert len(defined_types) == 5
        # record the entity index
        entity_type_indices = []
        for entity_index, schema in enumerate(self.kb_context.knowledge_graph.entities):
            parts = schema.split(':')
            entity_main_type = parts[0]
            if entity_main_type == Universe.relation_repr:
                entity_type = defined_types.index(entity_main_type)
            elif entity_main_type == Universe.entity_repr:
                entity_coarse_type = parts[1]
                entity_type = defined_types.index(entity_coarse_type)
            else:
                raise ConfigurationError("Get the unknown entity: {}".format(schema))
            entity_type_indices.append(entity_type)
        # BUG FIX: `np.int` was a deprecated alias of the builtin `int` and was
        # removed in NumPy 1.24; `int` yields the exact same dtype
        return np.array(entity_type_indices, dtype=int)

    def get_action_entity_mapping(self) -> Dict[str, int]:
        """
        Get the entity index of every local grammar(also named after linked action)
        :return: mapping from action string to entity index; -1 marks global
            (non-linked) actions
        """
        mapping = {}
        for action in self.valid_actions_flat:
            # default is padding
            mapping[str(action)] = -1
            production_right = action.rhs
            # only instance class should apply entity map
            if production_right not in self.entities_indexer:
                continue
            # record the entity id
            mapping[str(action)] = self.entities_indexer[production_right]
        return mapping

    """
    Private functions to override
    """

    def _preprocess(self, sexpression: str):
        """
        1. processing all functions
        2. distinguish JOIN_ENT and JOIN_REL
        """
        return sexpression

    def postprocess(self, sexpression: str) -> str:
        """
        The reverse function for `preprocess`.
        """
        return sexpression
| [
"numpy.array",
"allennlp.common.checks.ConfigurationError"
] | [((4506, 4549), 'numpy.array', 'np.array', (['entity_type_indices'], {'dtype': 'np.int'}), '(entity_type_indices, dtype=np.int)\n', (4514, 4549), True, 'import numpy as np\n'), ((2231, 2297), 'allennlp.common.checks.ConfigurationError', 'ConfigurationError', (['f"""Do not support for main type as {main_type}"""'], {}), "(f'Do not support for main type as {main_type}')\n", (2249, 2297), False, 'from allennlp.common.checks import ConfigurationError\n')] |
import numpy as np
import scipy.stats as stats

# Load the energy spectrum of each of the 16 detector channels.
E = []
for ch in range(1, 17):
    if ch < 10:
        file_name = "20210218-ch0" + str(ch) + ".e.txt"
    else:
        file_name = "20210218-ch" + str(ch) + ".e.txt"
    with open(file_name, "r") as fl:
        energy = [float(line) for line in fl]
    E.append(np.array(energy))

# Pairwise two-sample Kolmogorov-Smirnov tests between all channels.
# Note: the printed indices are zero-based positions in E (channel number - 1).
for ch1 in range(16):
    for ch2 in range(ch1 + 1, 16):
        answer = stats.ks_2samp(E[ch1], E[ch2])
        report = "Test ch" + str(ch1) + " vs. ch" + str(ch2)
        report += "\t Stat: " + str(answer.statistic)
        report += "\t p-val: " + str(answer.pvalue)
        print(report)
| [
"numpy.array",
"scipy.stats.ks_2samp"
] | [((344, 360), 'numpy.array', 'np.array', (['energy'], {}), '(energy)\n', (352, 360), True, 'import numpy as np\n'), ((499, 529), 'scipy.stats.ks_2samp', 'stats.ks_2samp', (['E[ch1]', 'E[ch2]'], {}), '(E[ch1], E[ch2])\n', (513, 529), True, 'import scipy.stats as stats\n')] |
"""Optimizer for weights of portfolio."""
from datetime import datetime
from typing import Optional
import numpy as np
from scipy.optimize import minimize
from mypo.common import safe_cast
from mypo.market import Market
from mypo.optimizer.base_optimizer import BaseOptimizer
from mypo.sampler import Sampler
class CVaROptimizer(BaseOptimizer):
    """Conditional Value-at-Risk (CVaR) optimizer.

    Minimizes the expected shortfall of the portfolio return below the
    ``beta`` quantile over sampled return scenarios.
    """

    _span: int
    _beta: float
    _samples: int
    _sampler: Optional[Sampler]

    def __init__(
        self,
        span: int = 260,
        beta: float = 0.05,
        samples: int = 10,
        sampler: Optional[Sampler] = None,
        do_re_optimize: bool = False,
    ):
        """Construct this object.

        Args:
            span: Span for evaluation.
            beta: Confidence level of the CVaR tail.
            samples: Count of scenarios.
            sampler: Sampler. When ``None`` a sampler is fitted to the
                historical market data at optimization time.
            do_re_optimize: do re-optimize.
        """
        self._span = span
        self._beta = beta
        self._samples = samples
        self._sampler = sampler
        super().__init__([1], do_re_optimize)

    def optimize(self, market: Market, at: datetime) -> np.float64:
        """Optimize weights.

        Args:
            market: Past market stock prices.
            at: Current date.

        Returns:
            Objective value reached by the optimizer; the optimized
            weights are stored on the instance.
        """
        historical_data = market.extract(market.get_index() < at).tail(self._span)
        sampler = Sampler(market=historical_data, samples=self._samples) if self._sampler is None else self._sampler
        sample = sampler.sample(self._span).to_numpy()
        n = len(historical_data.get_tickers())
        # start from the equal-weight portfolio
        x = np.ones(n) / n

        def fn(x: np.ndarray, sequence: np.ndarray) -> np.float64:
            # negative mean of the returns falling below the beta quantile
            r = np.dot(sequence, x.T)
            return -np.float64(np.mean(np.where(r < np.quantile(r, self._beta), r, 0)))

        # weights must sum to 1; long-only portfolio
        cons = {"type": "eq", "fun": lambda x: np.sum(x) - 1}
        bounds = [[0.0, 1.0] for i in range(n)]
        # BUG FIX: `args` must be a tuple -- `(sample)` is just `sample`, which
        # scipy star-unpacks row by row; also dropped a leftover debug print.
        minout = minimize(
            fn, x, args=(sample,), method="SLSQP", bounds=bounds, constraints=cons, tol=1e-9 * np.max(sample)
        )
        self._weights = safe_cast(minout.x)
        return np.float64(minout.fun)
| [
"numpy.ones",
"numpy.float64",
"mypo.common.safe_cast",
"numpy.max",
"numpy.sum",
"numpy.dot",
"numpy.quantile",
"mypo.sampler.Sampler"
] | [((2192, 2211), 'mypo.common.safe_cast', 'safe_cast', (['minout.x'], {}), '(minout.x)\n', (2201, 2211), False, 'from mypo.common import safe_cast\n'), ((2227, 2249), 'numpy.float64', 'np.float64', (['minout.fun'], {}), '(minout.fun)\n', (2237, 2249), True, 'import numpy as np\n'), ((1458, 1512), 'mypo.sampler.Sampler', 'Sampler', ([], {'market': 'historical_data', 'samples': 'self._samples'}), '(market=historical_data, samples=self._samples)\n', (1465, 1512), False, 'from mypo.sampler import Sampler\n'), ((1672, 1682), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (1679, 1682), True, 'import numpy as np\n'), ((1771, 1792), 'numpy.dot', 'np.dot', (['sequence', 'x.T'], {}), '(sequence, x.T)\n', (1777, 1792), True, 'import numpy as np\n'), ((2006, 2020), 'numpy.max', 'np.max', (['sample'], {}), '(sample)\n', (2012, 2020), True, 'import numpy as np\n'), ((1929, 1938), 'numpy.sum', 'np.sum', (['x'], {}), '(x)\n', (1935, 1938), True, 'import numpy as np\n'), ((2143, 2157), 'numpy.max', 'np.max', (['sample'], {}), '(sample)\n', (2149, 2157), True, 'import numpy as np\n'), ((1845, 1871), 'numpy.quantile', 'np.quantile', (['r', 'self._beta'], {}), '(r, self._beta)\n', (1856, 1871), True, 'import numpy as np\n')] |
import json
from os import path
from os import mkdir
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import numpy as np
from astropy.time import Time
import glob
import matplotlib.cm as cm
def convert_dict_to_nested_type(report):
    """Recursively walk *report* and print the types of its leaf values.

    Dict keys are printed as they are visited (with a blank line after each
    value), list items are visited in order, and any other node is a leaf
    whose type is printed. Used to spot numpy scalars that would break JSON
    serialization.
    """
    node_type = type(report)
    if node_type is dict:
        for key, value in report.items():
            print(key)
            convert_dict_to_nested_type(value)
            print()
    elif node_type is list:
        for item in report:
            convert_dict_to_nested_type(item)
    else:
        print(node_type)
def save_report(report, date):
    """Serialize a nightly *report* to ``report_db/<month>/<day>.json``.

    Args:
        report: JSON-serializable report dictionary. Warning: values of
            numpy types are not serializable; convert them to nested python
            types first.
        date: Julian date of the night (passed to ``astropy.time.Time``).
    """
    dir_path = "report_db/"
    today = Time(date, format="jd")
    # ISO form is "YYYY-MM-DD hh:mm:ss"; keep month and day components
    current_day = today.iso.split(" ")[0].split("-")
    dir_path = dir_path + str(current_day[1]) + "/"
    report_name = str(current_day[2]) + ".json"
    report_path = dir_path + report_name
    # create the month directory on first use, then write once
    # (the original duplicated the json.dump call in both branches)
    if not path.isdir(dir_path):
        mkdir(dir_path)
    with open(report_path, "w") as outfile:
        json.dump(report, outfile, indent=4)
def parse_intra_night_report(intra_night_report):
    """Flatten an intra-night report dict into a 9-element numpy array.

    Layout: [separation assoc, magnitude filtered, tracklets,
             precision, recall, TP, FP, FN, total real assoc].
    An empty report yields zero counts with 100% precision/recall.
    """
    if not len(intra_night_report):
        return np.array([0, 0, 0, 100, 100, 0, 0, 0, 0])
    metrics = intra_night_report["association metrics"]
    if len(metrics):
        metric_values = [
            metrics["precision"],
            metrics["recall"],
            metrics["True Positif"],
            metrics["False Positif"],
            metrics["False Negatif"],
            metrics["total real association"],
        ]
    else:
        # no metrics recorded: neutral precision/recall, zero counts
        metric_values = [100, 100, 0, 0, 0, 0]
    return np.array(
        [
            intra_night_report["number of separation association"],
            intra_night_report["number of association filtered by magnitude"],
            intra_night_report["number of intra night tracklets"],
        ]
        + metric_values
    )
def parse_association_report(association_report):
    """Flatten an inter-night association report into a 10-element array.

    Layout: [separation assoc, magnitude filtered, angle filtered,
             duplicates, precision, recall, TP, FP, FN, total real assoc].
    An empty report yields zero counts with 100% precision/recall.
    """
    if not len(association_report):
        return np.array([0, 0, 0, 0, 100, 100, 0, 0, 0, 0])
    metrics = association_report["metrics"]
    if len(metrics):
        pr = metrics["precision"]
        re = metrics["recall"]
        tp = metrics["True Positif"]
        fp = metrics["False Positif"]
        fn = metrics["False Negatif"]
        tot = metrics["total real association"]
        if fp == 0 and tot == 0:
            # no association at all counts as a perfect night
            pr, re = 100, 100
    else:
        pr, re, tp, fp, fn, tot = 100, 100, 0, 0, 0, 0
    return np.array(
        [
            association_report["number of inter night separation based association"],
            association_report["number of inter night magnitude filtered association"],
            association_report["number of inter night angle filtered association"],
            association_report["number of duplicated association"],
            pr,
            re,
            tp,
            fp,
            fn,
            tot,
        ]
    )
def parse_trajectories_report(inter_night_report):
    """Parse a trajectory-association night report.

    Returns the list of updated trajectory ids and, for every nid report,
    a (2, 10) array holding the parsed trajectories-to-tracklets and
    trajectories-to-new-observation association values.
    """
    updated_trajectories = inter_night_report["list of updated trajectories"]
    nightly_reports = []
    for nid_report in inter_night_report["all nid report"]:
        parsed_pair = np.array(
            [
                parse_association_report(nid_report["trajectories_to_tracklets_report"]),
                parse_association_report(nid_report["trajectories_to_new_observation_report"]),
            ]
        )
        nightly_reports.append(parsed_pair)
    return updated_trajectories, np.array(nightly_reports)
def parse_tracklets_obs_report(inter_night_report):
    """Parse a tracklets/old-observation association night report.

    Returns the list of updated trajectory ids and, for every nid report,
    a (2, 10) array holding the parsed old-observation-to-tracklets and
    old-observation-to-new-observation association values.
    """
    updated_trajectories = inter_night_report["list of updated trajectories"]
    nightly_reports = []
    for nid_report in inter_night_report["all nid report"]:
        parsed_pair = np.array(
            [
                parse_association_report(nid_report["old observation to tracklets report"]),
                parse_association_report(nid_report["old observation to new observation report"]),
            ]
        )
        nightly_reports.append(parsed_pair)
    return updated_trajectories, np.array(nightly_reports)
def parse_inter_night_report(report):
    """Parse a full inter-night report into its four components.

    Returns a tuple of (intra-night values, trajectory association values,
    tracklets/observation association values, night statistics array).
    """
    if "tracklets and observation association report" in report:
        parse_track_report = parse_tracklets_obs_report(
            report["tracklets and observation association report"]
        )
    else:
        parse_track_report = [], np.array([])
    night_stat = np.array(
        [
            report["nb trajectories"],
            report["computation time of the night"],
            report["nb most recent traj"],
            report["nb old observations"],
            report["nb new observations"],
        ]
    )
    return (
        parse_intra_night_report(report["intra night report"]),
        parse_trajectories_report(report["trajectory association report"]),
        parse_track_report,
        night_stat,
    )
def open_and_parse_report(path):
    """Load a JSON night report from *path* and return its parsed form."""
    with open(path, "r") as file:
        raw_report = json.load(file)
    return parse_inter_night_report(raw_report)
def make_autopct(values):
    """Build an ``autopct`` callback for pie wedges showing "pct% (count)"."""
    def my_autopct(pct):
        # recover the absolute count from the wedge percentage
        total = sum(values)
        val = int(round(pct * total / 100.0))
        return f"{pct:.2f}% ({val:d})"
    return my_autopct
def plot_report(parse_report):
    """Render one parsed night report as two pie charts and show them.

    Top chart: intra-night associations split into magnitude-filtered vs
    remaining ones. Bottom chart: a two-ring donut whose outer ring compares
    trajectory associations against tracklets/observation associations and
    whose inner ring breaks each group down by filtering stage.

    Parameters
    ----------
    parse_report : tuple as returned by parse_inter_night_report
    """
    fig, (ax, ax2) = plt.subplots(2, 1, figsize=(20, 20))
    intra_assoc_value = parse_report[0]
    traj_assoc_value = parse_report[1]
    track_assoc_value = parse_report[2]
    # [filtered by magnitude, remaining] counts for the intra-night pie
    intra_values = [intra_assoc_value[1], (intra_assoc_value[0] - intra_assoc_value[1])]
    labels = ("magnitude filtering", "remaining associations")
    explode = (0.1, 0.0)
    ax.pie(
        intra_values,
        explode=explode,
        shadow=True,
        labels=labels,
        autopct=make_autopct(intra_values),
    )
    ax.axis("equal")

    def transform_data(data):
        # turn [sep, mag, angle, dup, ...] cumulative counters into per-stage
        # amounts; the last slot is what survives all three filtering stages
        return np.array(
            [data[1], data[2], data[3], (data[0] - data[1] - data[2] - data[3])]
        )

    if len(traj_assoc_value[1]) > 0:
        # collapse the (nights, 2, 10) report stack to a single 10-vector
        traj_assoc_value = traj_assoc_value[1].sum(axis=1).sum(axis=0)
        traj_assoc_value = transform_data(traj_assoc_value)
    else:
        traj_assoc_value = np.array([0, 0, 0, 0])
    if len(track_assoc_value[1]) > 0:
        track_assoc_value = track_assoc_value[1].sum(axis=1).sum(axis=0)
        track_assoc_value = transform_data(track_assoc_value)
    else:
        track_assoc_value = np.array([0, 0, 0, 0])
    vals = np.concatenate([[traj_assoc_value], [track_assoc_value]], axis=0)
    # tiny epsilon keeps matplotlib from choking on an all-zero pie
    slop = 0.0001
    group_size = vals.sum(axis=1) + slop
    subgroup_size = vals.flatten()
    subgroup_names = subgroup_size
    # Create colors
    a, b = [plt.cm.Blues, plt.cm.Reds]
    ax2.axis("equal")
    # Outer ring: one wedge per association family (labels show true totals)
    mypie, _ = ax2.pie(
        group_size, radius=1.3, labels=group_size - slop, colors=[a(0.6), b(0.6)]
    )
    plt.setp(mypie, width=0.3, edgecolor="white")
    # Second Ring (Inside): per-stage breakdown, shaded by share of its group
    mypie2, _ = ax2.pie(
        subgroup_size,
        radius=1.3 - 0.3,
        labels=subgroup_names,
        labeldistance=0.7,
        colors=[
            a(subgroup_size[0] / group_size[0] - slop),
            a(subgroup_size[1] / group_size[0] - slop),
            a(subgroup_size[2] / group_size[0] - slop),
            a(subgroup_size[3] / group_size[0] - slop),
            b(subgroup_size[0] / group_size[1] - slop),
            b(subgroup_size[1] / group_size[1] - slop),
            b(subgroup_size[2] / group_size[1] - slop),
            b(subgroup_size[3] / group_size[1] - slop),
        ],
    )
    plt.setp(mypie2, width=0.4, edgecolor="white")
    ax2.margins(0, 0)
    subgroup_names_legs = [
        "Trajectory association",
        "Tracklets and observation association",
        "filtered by magnitude",
        "filtered by angle",
        "duplicated association",
        "remaining association",
        "filtered by magnitude",
        "filtered by angle",
        "duplicated association",
        "remaining association",
    ]
    ax2.legend(subgroup_names_legs, loc="best")
    ax.set_title("intra night association")
    ax2.set_title("inter night association")
    plt.show()
def get_intra_night_metrics(parse_report):
    """Return the metrics slice (precision, recall, TP, FP, FN, total)."""
    intra_values = np.array(parse_report[0])
    return intra_values[3:]
def get_intra_night_associations(parse_report):
    """Return the association counters (separation, magnitude, tracklets)."""
    intra_values = np.array(parse_report[0])
    return intra_values[:3]
def get_inter_night_metrics(parse_report):
    """Extract the metric columns (precision .. total) of the inter-night reports.

    Returns four arrays: trajectories-to-tracklets, trajectories-to-observations,
    old-observations-to-tracklets and old-observations-to-new-observations.
    Empty reports fall back to the neutral row [100, 100, 0, 0, 0, 0].
    """
    traj_report = parse_report[1][1]
    track_report = parse_report[2][1]
    if len(traj_report) == 0:
        traj_to_tracklets = np.array([100, 100, 0, 0, 0, 0])
        traj_to_obs = np.array([100, 100, 0, 0, 0, 0])
    else:
        traj_to_tracklets = traj_report[:, 0, 4:]
        traj_to_obs = traj_report[:, 1, 4:]
    if len(track_report) == 0:
        old_obs_to_track = np.array([100, 100, 0, 0, 0, 0])
        old_obs_to_new_obs = np.array([100, 100, 0, 0, 0, 0])
    else:
        old_obs_to_track = track_report[:, 0, 4:]
        old_obs_to_new_obs = track_report[:, 1, 4:]
    return traj_to_tracklets, traj_to_obs, old_obs_to_track, old_obs_to_new_obs
def get_inter_night_stat(parse_report):
    """Return the night statistics component of a parsed inter-night report."""
    night_statistics = parse_report[3]
    return night_statistics
def get_inter_night_associations(parse_report):
    """Extract the association counter columns of the inter-night reports.

    Returns four arrays: trajectories-to-tracklets, trajectories-to-observations,
    old-observations-to-tracklets and old-observations-to-new-observations.
    Empty reports fall back to a zero row.
    """
    traj_report = parse_report[1][1]
    track_report = parse_report[2][1]
    if len(traj_report) == 0:
        traj_to_tracklets = np.array([0, 0, 0, 0])
        traj_to_obs = np.array([0, 0, 0, 0])
    else:
        traj_to_tracklets = traj_report[:, 0, :4]
        traj_to_obs = traj_report[:, 1, :4]
    if len(track_report) == 0:
        old_obs_to_track = np.array([0, 0, 0, 0])
        old_obs_to_new_obs = np.array([0, 0, 0, 0])
    else:
        old_obs_to_track = track_report[:, 0, :4]
        old_obs_to_new_obs = track_report[:, 1, :4]
    return traj_to_tracklets, traj_to_obs, old_obs_to_track, old_obs_to_new_obs
def mean_metrics_over_nights(metrics):
    """Average the per-night metric rows over the whole observation period."""
    nightly_mean = np.mean(metrics, axis=0)
    return nightly_mean
def plot_metrics(fig, metrics, axes, title):
    """Plot cumulative precision/recall and confusion counts over the nights.

    Parameters
    ----------
    fig : matplotlib figure (used only to place the shared legend)
    metrics : 2D array with one row per night; columns assumed to be
        [precision, recall, TP, FP, FN, total real association] -- the
        layout produced by the parse_*_report helpers (TODO confirm)
    axes : pair of matplotlib axes
    title : title of the precision/recall subplot
    """
    values_idx = np.arange(1, np.shape(metrics[:, :2])[0] + 1)
    css_color = mcolors.CSS4_COLORS
    # running mean of the nightly precision / recall
    axes[0].plot(
        values_idx,
        np.cumsum(metrics[:, 0]) / values_idx,
        label="precision",
        color=css_color["crimson"],
    )
    axes[0].plot(
        values_idx,
        np.cumsum(metrics[:, 1]) / values_idx,
        label="recall",
        color=css_color["chocolate"],
    )
    axes[0].set_title(title)
    # cumulative TP / FP / FN counts over the nights
    axes[1].plot(
        values_idx,
        np.cumsum(metrics[:, 2:-1], axis=0),
        alpha=0.8,
        label=["True Positif", "False Positif", "False Negatif"],
    )
    axes[1].plot(
        values_idx, np.cumsum(metrics[:, -1]), label="total real association", alpha=0.7
    )
    axes[1].set_yscale("log")
    colors = [
        css_color["green"],
        css_color["red"],
        css_color["royalblue"],
        css_color["black"],
    ]
    # re-color the four count curves after the fact
    for i, j in enumerate(axes[1].lines):
        j.set_color(colors[i])
    # merge the legend entries of both subplots into a single figure legend
    lines_labels = [ax.get_legend_handles_labels() for ax in axes]
    lines, labels = [sum(lol, []) for lol in zip(*lines_labels)]
    fig.legend(lines, labels, loc=(0.5, 0.45), framealpha=0.2)
def plot_intra_assoc(assoc, axes, title):
    """Plot the nightly intra-night association counters on a single axis.

    Parameters
    ----------
    assoc : 2D array with one row per night and columns
        [separation assoc, magnitude filter, detected tracklets]
    axes : a single matplotlib axis
    title : plot title
    """
    values_idx = np.arange(1, np.shape(assoc[:, :2])[0] + 1)
    axes.plot(
        values_idx,
        assoc,
        label=["separation assoc", "magnitude filter", "detected tracklets"],
    )
    axes.set_title(title)
    axes.legend()
def plot_inter_assoc(assoc, ax, title):
    """Plot per-stage inter-night association counts on a log-scaled axis.

    The cumulative counters [sep, mag, angle, dup] are converted in place
    to per-stage survivor counts before plotting.

    NOTE(review): this mutates the caller's `assoc` array in place -- pass a
    copy if the data is reused afterwards.

    Parameters
    ----------
    assoc : 2D array, one row per night, columns [sep, mag, angle, dup]
    ax : matplotlib axis
    title : plot title
    """
    values_idx = np.arange(1, np.shape(assoc[:, :2])[0] + 1)
    # column k becomes "what remains after filtering stage k"
    assoc[:, 1] = assoc[:, 0] - assoc[:, 1]
    assoc[:, 2] = assoc[:, 1] - assoc[:, 2]
    assoc[:, 3] = np.cumsum(assoc[:, 2] - assoc[:, 3])
    ax.plot(
        values_idx,
        assoc,
        label=[
            "separation assoc",
            "magnitude filter",
            "angle filter",
            "remain after removing duplicates",
        ],
    )
    ax.set_yscale("log")
    ax.set_title(title)
def plot_inter_stat(stat, axes, title):
    """Plot inter-night timing and input-size statistics on three axes.

    `stat` columns: [nb trajectories, elapsed time, nb recent trajectories,
    nb old observations, nb new observations] (one row per night).
    `title` is accepted for interface consistency but is not used.
    """
    night_axis = np.arange(1, np.shape(stat[:, :2])[0] + 1)
    ax_time, ax_traj, ax_inputs = axes[0], axes[1], axes[2]
    ax_time.plot(night_axis, np.cumsum(stat[:, 1]))
    ax_time.set_title("cumulative elapsed time")
    ax_traj.plot(night_axis, stat[:, 0])
    ax_traj.set_title("cumulative number of trajectories")
    input_labels = [
        "number of most recent trajectories",
        "number of old observations",
        "number of new observations",
    ]
    ax_inputs.plot(night_axis, stat[:, 2:], label=input_labels)
    ax_inputs.set_title("inputs statistics")
    ax_inputs.legend()
def plot_trajectories(traj_df, mpc_plot):
    """Scatter-plot the real (MPC) trajectories next to the detected ones.

    Parameters
    ----------
    traj_df : DataFrame
        Detected observations; must contain 'trajectory_id', 'ra', 'dec',
        'dcmag', 'fid', 'nid' and 'candid' columns.
    mpc_plot : DataFrame
        Real trajectories; 'ra' and 'dec' are presumably list-valued per
        row -- TODO confirm against the caller.
    """
    # collapse every detected trajectory into one row of per-point lists
    gb_traj = (
        traj_df.groupby(["trajectory_id"])
        .agg(
            {
                "ra": list,
                "dec": list,
                "dcmag": list,
                "fid": list,
                "nid": list,
                "candid": lambda x: len(x),
            }
        )
        .reset_index()
    )
    _, (ax1, ax2) = plt.subplots(2, 1, figsize=(40, 40))
    # one distinct color per real trajectory
    colors = cm.jet(np.linspace(0, 1, len(mpc_plot)))
    for i, rows in mpc_plot.iterrows():
        ra = rows["ra"]
        dec = rows["dec"]
        ax1.scatter(ra, dec, color=colors[i])
    # one distinct color per detected trajectory
    colors = cm.Set1(np.linspace(0, 1, len(gb_traj)))
    for i, rows in gb_traj.iterrows():
        ra = rows["ra"]
        dec = rows["dec"]
        ax2.scatter(ra, dec, color=colors[i])
    ax1.set_title("real trajectories")
    ax2.set_title("detected trajectories")
def load_performance_stat(only_intra_night=False):
    """Load every nightly report under report_db/ and aggregate its numbers.

    The first report is always a pure intra-night report; the following
    ones are full (intra + inter) reports unless `only_intra_night` is set.

    Parameters
    ----------
    only_intra_night : bool, optional
        If True, only the intra-night part of each report is parsed.

    Returns
    -------
    tuple
        (all_intra_assoc, all_inter_assoc, all_intra_metrics,
         all_inter_metrics, all_inter_stat).  The inter-night entries are
        lists with one stacked array per association type.
    """
    all_path_report = sorted(glob.glob("report_db/*/*"))
    # one sub-list per inter-night association type (4 types)
    all_inter_metrics = [[], [], [], []]
    all_intra_metrics = []
    all_inter_assoc = [[], [], [], []]
    all_intra_assoc = []
    all_inter_stat = []
    # the very first report only contains intra-night data
    with open(all_path_report[0], "r") as file:
        intra_night_report = json.load(file)
    intra_night_values = parse_intra_night_report(intra_night_report)
    nb_traj = intra_night_report["nb trajectories"]
    nb_most_recent_traj = intra_night_report["nb most recent traj"]
    nb_old_obs = intra_night_report["nb old observations"]
    nb_new_obs = intra_night_report["nb new observations"]
    time = intra_night_report["computation time of the night"]
    # first 3 parsed entries are association counters, the rest are metrics
    all_intra_metrics.append(intra_night_values[3:])
    all_intra_assoc.append(intra_night_values[:3])
    all_inter_stat.append(
        np.array([nb_traj, time, nb_most_recent_traj, nb_old_obs, nb_new_obs])
    )
    for current_path in all_path_report[1:]:
        if only_intra_night:
            # only read the embedded intra-night section of the report
            with open(current_path, "r") as file:
                intra_night_report = json.load(file)
            intra_night_values = parse_intra_night_report(
                intra_night_report["intra night report"]
            )
            nb_traj = intra_night_report["nb trajectories"]
            nb_most_recent_traj = intra_night_report["nb most recent traj"]
            nb_old_obs = intra_night_report["nb old observations"]
            nb_new_obs = intra_night_report["nb new observations"]
            time = intra_night_report["computation time of the night"]
            all_intra_metrics.append(intra_night_values[3:])
            all_intra_assoc.append(intra_night_values[:3])
            all_inter_stat.append(
                np.array(
                    [nb_traj, time, nb_most_recent_traj, nb_old_obs, nb_new_obs]
                )
            )
            continue
        parse_report = open_and_parse_report(current_path)
        inter_night_assoc = get_inter_night_associations(parse_report)
        intra_night_assoc = get_intra_night_associations(parse_report)
        all_intra_assoc.append(intra_night_assoc)
        inter_night_metrics = get_inter_night_metrics(parse_report)
        intra_night_metrics = get_intra_night_metrics(parse_report)
        all_intra_metrics.append(intra_night_metrics)
        inter_stat = get_inter_night_stat(parse_report)
        all_inter_stat.append(inter_stat)
        for i in range(4):
            metrics_shape = np.shape(inter_night_metrics[i])
            assoc_shape = np.shape(inter_night_assoc[i])
            # several rows for this type: average them into one (1, 4) row;
            # otherwise just reshape the single row
            if assoc_shape[0] > 1 and len(assoc_shape) == 2:
                mean_assoc = np.nan_to_num(
                    mean_metrics_over_nights(inter_night_assoc[i])
                )
                all_inter_assoc[i].append(mean_assoc.reshape((1, 4)))
            else:
                all_inter_assoc[i].append(inter_night_assoc[i].reshape((1, 4)))
            if metrics_shape[0] > 1 and len(metrics_shape) == 2:
                mean_metrics = np.nan_to_num(
                    mean_metrics_over_nights(inter_night_metrics[i])
                )
                all_inter_metrics[i].append(mean_metrics.reshape((1, 6)))
            else:
                all_inter_metrics[i].append(inter_night_metrics[i].reshape((1, 6)))
    # stack the per-night rows into single arrays; empty types are dropped
    all_intra_assoc = np.stack(all_intra_assoc)
    all_inter_assoc = [np.concatenate(i, axis=0) for i in all_inter_assoc if len(i) > 0]
    all_intra_metrics = np.stack(all_intra_metrics)
    all_inter_metrics = [
        np.concatenate(i, axis=0) for i in all_inter_metrics if len(i) > 0
    ]
    all_inter_stat = np.stack(all_inter_stat)
    return (
        all_intra_assoc,
        all_inter_assoc,
        all_intra_metrics,
        all_inter_metrics,
        all_inter_stat,
    )
def plot_performance_test(
    all_intra_assoc,
    all_inter_assoc,
    all_intra_metrics,
    all_inter_metrics,
    all_inter_stat,
):
    """Build the two summary figures from load_performance_stat() output.

    Figure 1 is a 3x3 grid mixing inter-night statistics, the four
    inter-night association panels and the intra-night association panel.
    Figure 2 is a 5x2 grid of metric panels (intra-night first, then the
    four inter-night association types).
    """
    fig1 = plt.figure()
    # 3x3 grid; the time plot (ax1) spans two columns
    ax1 = plt.subplot2grid((3, 3), (0, 0), colspan=2)
    ax2 = plt.subplot2grid((3, 3), (1, 0))
    ax3 = plt.subplot2grid((3, 3), (2, 0))
    ax4 = plt.subplot2grid((3, 3), (1, 1))
    ax5 = plt.subplot2grid((3, 3), (2, 1))
    ax6 = plt.subplot2grid((3, 3), (0, 2))
    ax7 = plt.subplot2grid((3, 3), (1, 2))
    ax8 = plt.subplot2grid((3, 3), (2, 2))
    stat_axes = [ax1, ax6, ax8]
    assoc_axes = [ax2, ax3, ax4, ax5]
    plot_inter_stat(all_inter_stat, stat_axes, "inter night statistics")
    plot_intra_assoc(all_intra_assoc, ax7, "intra night association")
    fig2, axes = plt.subplots(5, 2)
    metrics_axes = np.array(axes)
    plot_metrics(fig2, all_intra_metrics, metrics_axes[0], "intra night metrics")
    metrics_title = [
        "trajectory to tracklets metrics",
        "trajectory to new observations metrics",
        "old observations to tracklets metrics",
        "old observations to new observations metrics",
    ]
    assoc_title = [
        "trajectory to tracklets associations",
        "trajectory to new observations associations",
        "old observations to tracklets associations",
        "old observations to new observations associations",
    ]
    fig2.suptitle("Metrics")
    # inter-night panels only exist when at least one full report was loaded
    if len(all_inter_assoc) > 0 and len(all_inter_metrics) > 0:
        for i, met_ax, met_title, assoc_ax, title in zip(
            range(4), metrics_axes[1:], metrics_title, assoc_axes, assoc_title
        ):
            plot_metrics(fig2, all_inter_metrics[i], met_ax, met_title)
            plot_inter_assoc(all_inter_assoc[i], assoc_ax, title)
        # merge the legends of the four association panels into one
        lines_labels = [ax.get_legend_handles_labels() for ax in assoc_axes]
        lines, labels = [sum(lol, []) for lol in zip(*lines_labels)]
        fig1.legend(lines[:4], labels[:4], loc=(0.55, 0.53), framealpha=0.2)
    plt.tight_layout()
    plt.show()
| [
"matplotlib.pyplot.setp",
"numpy.mean",
"matplotlib.pyplot.subplot2grid",
"json.dump",
"json.load",
"astropy.time.Time",
"numpy.stack",
"os.path.isdir",
"matplotlib.pyplot.figure",
"numpy.array",
"numpy.concatenate",
"matplotlib.pyplot.tight_layout",
"os.mkdir",
"numpy.cumsum",
"numpy.sh... | [((614, 637), 'astropy.time.Time', 'Time', (['date'], {'format': '"""jd"""'}), "(date, format='jd')\n", (618, 637), False, 'from astropy.time import Time\n'), ((885, 905), 'os.path.isdir', 'path.isdir', (['dir_path'], {}), '(dir_path)\n', (895, 905), False, 'from os import path\n'), ((6679, 6715), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'figsize': '(20, 20)'}), '(2, 1, figsize=(20, 20))\n', (6691, 6715), True, 'import matplotlib.pyplot as plt\n'), ((7814, 7879), 'numpy.concatenate', 'np.concatenate', (['[[traj_assoc_value], [track_assoc_value]]'], {'axis': '(0)'}), '([[traj_assoc_value], [track_assoc_value]], axis=0)\n', (7828, 7879), True, 'import numpy as np\n'), ((8209, 8254), 'matplotlib.pyplot.setp', 'plt.setp', (['mypie'], {'width': '(0.3)', 'edgecolor': '"""white"""'}), "(mypie, width=0.3, edgecolor='white')\n", (8217, 8254), True, 'import matplotlib.pyplot as plt\n'), ((8901, 8947), 'matplotlib.pyplot.setp', 'plt.setp', (['mypie2'], {'width': '(0.4)', 'edgecolor': '"""white"""'}), "(mypie2, width=0.4, edgecolor='white')\n", (8909, 8947), True, 'import matplotlib.pyplot as plt\n'), ((9489, 9499), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9497, 9499), True, 'import matplotlib.pyplot as plt\n'), ((11494, 11518), 'numpy.mean', 'np.mean', (['metrics'], {'axis': '(0)'}), '(metrics, axis=0)\n', (11501, 11518), True, 'import numpy as np\n'), ((13221, 13257), 'numpy.cumsum', 'np.cumsum', (['(assoc[:, 2] - assoc[:, 3])'], {}), '(assoc[:, 2] - assoc[:, 3])\n', (13230, 13257), True, 'import numpy as np\n'), ((14520, 14556), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'figsize': '(40, 40)'}), '(2, 1, figsize=(40, 40))\n', (14532, 14556), True, 'import matplotlib.pyplot as plt\n'), ((18496, 18521), 'numpy.stack', 'np.stack', (['all_intra_assoc'], {}), '(all_intra_assoc)\n', (18504, 18521), True, 'import numpy as np\n'), ((18636, 18663), 'numpy.stack', 'np.stack', (['all_intra_metrics'], {}), 
'(all_intra_metrics)\n', (18644, 18663), True, 'import numpy as np\n'), ((18793, 18817), 'numpy.stack', 'np.stack', (['all_inter_stat'], {}), '(all_inter_stat)\n', (18801, 18817), True, 'import numpy as np\n'), ((19117, 19129), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (19127, 19129), True, 'import matplotlib.pyplot as plt\n'), ((19140, 19183), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(3, 3)', '(0, 0)'], {'colspan': '(2)'}), '((3, 3), (0, 0), colspan=2)\n', (19156, 19183), True, 'import matplotlib.pyplot as plt\n'), ((19194, 19226), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(3, 3)', '(1, 0)'], {}), '((3, 3), (1, 0))\n', (19210, 19226), True, 'import matplotlib.pyplot as plt\n'), ((19237, 19269), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(3, 3)', '(2, 0)'], {}), '((3, 3), (2, 0))\n', (19253, 19269), True, 'import matplotlib.pyplot as plt\n'), ((19280, 19312), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(3, 3)', '(1, 1)'], {}), '((3, 3), (1, 1))\n', (19296, 19312), True, 'import matplotlib.pyplot as plt\n'), ((19323, 19355), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(3, 3)', '(2, 1)'], {}), '((3, 3), (2, 1))\n', (19339, 19355), True, 'import matplotlib.pyplot as plt\n'), ((19366, 19398), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(3, 3)', '(0, 2)'], {}), '((3, 3), (0, 2))\n', (19382, 19398), True, 'import matplotlib.pyplot as plt\n'), ((19409, 19441), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(3, 3)', '(1, 2)'], {}), '((3, 3), (1, 2))\n', (19425, 19441), True, 'import matplotlib.pyplot as plt\n'), ((19452, 19484), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(3, 3)', '(2, 2)'], {}), '((3, 3), (2, 2))\n', (19468, 19484), True, 'import matplotlib.pyplot as plt\n'), ((19718, 19736), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(5)', '(2)'], {}), '(5, 2)\n', (19730, 19736), True, 'import matplotlib.pyplot as plt\n'), ((19756, 19770), 
'numpy.array', 'np.array', (['axes'], {}), '(axes)\n', (19764, 19770), True, 'import numpy as np\n'), ((20923, 20941), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (20939, 20941), True, 'import matplotlib.pyplot as plt\n'), ((20946, 20956), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (20954, 20956), True, 'import matplotlib.pyplot as plt\n'), ((1148, 1163), 'os.mkdir', 'mkdir', (['dir_path'], {}), '(dir_path)\n', (1153, 1163), False, 'from os import mkdir\n'), ((2104, 2182), 'numpy.array', 'np.array', (['[nb_sep_assoc, nb_mag_filter, nb_tracklets, pr, re, tp, fp, fn, tot]'], {}), '([nb_sep_assoc, nb_mag_filter, nb_tracklets, pr, re, tp, fp, fn, tot])\n', (2112, 2182), True, 'import numpy as np\n'), ((2230, 2271), 'numpy.array', 'np.array', (['[0, 0, 0, 100, 100, 0, 0, 0, 0]'], {}), '([0, 0, 0, 100, 100, 0, 0, 0, 0])\n', (2238, 2271), True, 'import numpy as np\n'), ((3361, 3461), 'numpy.array', 'np.array', (['[nb_sep_assoc, nb_mag_filter, nb_angle_filter, nb_duplicates, pr, re, tp,\n fp, fn, tot]'], {}), '([nb_sep_assoc, nb_mag_filter, nb_angle_filter, nb_duplicates, pr,\n re, tp, fp, fn, tot])\n', (3369, 3461), True, 'import numpy as np\n'), ((3680, 3724), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 100, 100, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 100, 100, 0, 0, 0, 0])\n', (3688, 3724), True, 'import numpy as np\n'), ((4472, 4498), 'numpy.array', 'np.array', (['all_assoc_report'], {}), '(all_assoc_report)\n', (4480, 4498), True, 'import numpy as np\n'), ((5249, 5275), 'numpy.array', 'np.array', (['all_assoc_report'], {}), '(all_assoc_report)\n', (5257, 5275), True, 'import numpy as np\n'), ((6166, 6236), 'numpy.array', 'np.array', (['[nb_traj, time, nb_most_recent_traj, nb_old_obs, nb_new_obs]'], {}), '([nb_traj, time, nb_most_recent_traj, nb_old_obs, nb_new_obs])\n', (6174, 6236), True, 'import numpy as np\n'), ((6342, 6357), 'json.load', 'json.load', (['file'], {}), '(file)\n', (6351, 6357), False, 'import json\n'), ((7237, 
7313), 'numpy.array', 'np.array', (['[data[1], data[2], data[3], data[0] - data[1] - data[2] - data[3]]'], {}), '([data[1], data[2], data[3], data[0] - data[1] - data[2] - data[3]])\n', (7245, 7313), True, 'import numpy as np\n'), ((7544, 7566), 'numpy.array', 'np.array', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (7552, 7566), True, 'import numpy as np\n'), ((7779, 7801), 'numpy.array', 'np.array', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (7787, 7801), True, 'import numpy as np\n'), ((9590, 9611), 'numpy.array', 'np.array', (['intra_night'], {}), '(intra_night)\n', (9598, 9611), True, 'import numpy as np\n'), ((9711, 9732), 'numpy.array', 'np.array', (['intra_night'], {}), '(intra_night)\n', (9719, 9732), True, 'import numpy as np\n'), ((10051, 10083), 'numpy.array', 'np.array', (['[100, 100, 0, 0, 0, 0]'], {}), '([100, 100, 0, 0, 0, 0])\n', (10059, 10083), True, 'import numpy as np\n'), ((10106, 10138), 'numpy.array', 'np.array', (['[100, 100, 0, 0, 0, 0]'], {}), '([100, 100, 0, 0, 0, 0])\n', (10114, 10138), True, 'import numpy as np\n'), ((10328, 10360), 'numpy.array', 'np.array', (['[100, 100, 0, 0, 0, 0]'], {}), '([100, 100, 0, 0, 0, 0])\n', (10336, 10360), True, 'import numpy as np\n'), ((10390, 10422), 'numpy.array', 'np.array', (['[100, 100, 0, 0, 0, 0]'], {}), '([100, 100, 0, 0, 0, 0])\n', (10398, 10422), True, 'import numpy as np\n'), ((10892, 10914), 'numpy.array', 'np.array', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (10900, 10914), True, 'import numpy as np\n'), ((10938, 10960), 'numpy.array', 'np.array', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (10946, 10960), True, 'import numpy as np\n'), ((11150, 11172), 'numpy.array', 'np.array', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (11158, 11172), True, 'import numpy as np\n'), ((11202, 11224), 'numpy.array', 'np.array', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (11210, 11224), True, 'import numpy as np\n'), ((12048, 12083), 'numpy.cumsum', 'np.cumsum', (['metrics[:, 2:-1]'], {'axis': '(0)'}), 
'(metrics[:, 2:-1], axis=0)\n', (12057, 12083), True, 'import numpy as np\n'), ((12215, 12240), 'numpy.cumsum', 'np.cumsum', (['metrics[:, -1]'], {}), '(metrics[:, -1])\n', (12224, 12240), True, 'import numpy as np\n'), ((13661, 13682), 'numpy.cumsum', 'np.cumsum', (['stat[:, 1]'], {}), '(stat[:, 1])\n', (13670, 13682), True, 'import numpy as np\n'), ((15105, 15131), 'glob.glob', 'glob.glob', (['"""report_db/*/*"""'], {}), "('report_db/*/*')\n", (15114, 15131), False, 'import glob\n'), ((15370, 15385), 'json.load', 'json.load', (['file'], {}), '(file)\n', (15379, 15385), False, 'import json\n'), ((18545, 18570), 'numpy.concatenate', 'np.concatenate', (['i'], {'axis': '(0)'}), '(i, axis=0)\n', (18559, 18570), True, 'import numpy as np\n'), ((18698, 18723), 'numpy.concatenate', 'np.concatenate', (['i'], {'axis': '(0)'}), '(i, axis=0)\n', (18712, 18723), True, 'import numpy as np\n'), ((1093, 1129), 'json.dump', 'json.dump', (['report', 'outfile'], {'indent': '(4)'}), '(report, outfile, indent=4)\n', (1102, 1129), False, 'import json\n'), ((1224, 1260), 'json.dump', 'json.dump', (['report', 'outfile'], {'indent': '(4)'}), '(report, outfile, indent=4)\n', (1233, 1260), False, 'import json\n'), ((5678, 5690), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5686, 5690), True, 'import numpy as np\n'), ((11712, 11736), 'numpy.cumsum', 'np.cumsum', (['metrics[:, 0]'], {}), '(metrics[:, 0])\n', (11721, 11736), True, 'import numpy as np\n'), ((11866, 11890), 'numpy.cumsum', 'np.cumsum', (['metrics[:, 1]'], {}), '(metrics[:, 1])\n', (11875, 11890), True, 'import numpy as np\n'), ((15938, 16008), 'numpy.array', 'np.array', (['[nb_traj, time, nb_most_recent_traj, nb_old_obs, nb_new_obs]'], {}), '([nb_traj, time, nb_most_recent_traj, nb_old_obs, nb_new_obs])\n', (15946, 16008), True, 'import numpy as np\n'), ((17649, 17681), 'numpy.shape', 'np.shape', (['inter_night_metrics[i]'], {}), '(inter_night_metrics[i])\n', (17657, 17681), True, 'import numpy as np\n'), ((17708, 
17738), 'numpy.shape', 'np.shape', (['inter_night_assoc[i]'], {}), '(inter_night_assoc[i])\n', (17716, 17738), True, 'import numpy as np\n'), ((11596, 11620), 'numpy.shape', 'np.shape', (['metrics[:, :2]'], {}), '(metrics[:, :2])\n', (11604, 11620), True, 'import numpy as np\n'), ((12801, 12823), 'numpy.shape', 'np.shape', (['assoc[:, :2]'], {}), '(assoc[:, :2])\n', (12809, 12823), True, 'import numpy as np\n'), ((13083, 13105), 'numpy.shape', 'np.shape', (['assoc[:, :2]'], {}), '(assoc[:, :2])\n', (13091, 13105), True, 'import numpy as np\n'), ((13601, 13622), 'numpy.shape', 'np.shape', (['stat[:, :2]'], {}), '(stat[:, :2])\n', (13609, 13622), True, 'import numpy as np\n'), ((16182, 16197), 'json.load', 'json.load', (['file'], {}), '(file)\n', (16191, 16197), False, 'import json\n'), ((16890, 16960), 'numpy.array', 'np.array', (['[nb_traj, time, nb_most_recent_traj, nb_old_obs, nb_new_obs]'], {}), '([nb_traj, time, nb_most_recent_traj, nb_old_obs, nb_new_obs])\n', (16898, 16960), True, 'import numpy as np\n')] |
# Copyright (c) 2018 NVIDIA Corporation
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import math
import numpy as np
import python_speech_features as psf
import resampy as rs
import scipy.io.wavfile as wave
def get_speech_features_from_file(filename, num_features, pad_to=8,
                                  features_type='spectrogram',
                                  window_size=20e-3,
                                  window_stride=10e-3,
                                  augmentation=None):
  """Convert an audio file to a numpy array of speech features.

  Thin wrapper around :func:`get_speech_features` that first loads the raw
  signal (and its sampling rate) from the WAVE file.

  Args:
    filename (string): WAVE filename.
    num_features (int): number of speech features in frequency domain.
    pad_to (int): pad the feature length to a multiple of this value.
    features_type (string): 'mfcc' or 'spectrogram'.
    window_size (float): size of analysis window in milli-seconds.
    window_stride (float): stride of analysis window in milli-seconds.
    augmentation (dict, optional): None or dictionary with the
        'time_stretch_ratio', 'noise_level_min' and 'noise_level_max'
        fields; see :func:`get_speech_features`.

  Returns:
    np.array: audio features with shape=[num_time_steps, num_features].
  """
  sample_rate, raw_signal = wave.read(filename)
  return get_speech_features(
      raw_signal, sample_rate, num_features, pad_to, features_type,
      window_size, window_stride, augmentation,
  )
def normalize_signal(signal):
  """Normalize float32 signal to [-1, 1] range.

  Args:
    signal (np.array): float signal to normalize.

  Returns:
    np.array: signal scaled so that its largest absolute value is 1.0.
        An all-zero signal is returned unchanged instead of producing
        NaNs from a division by zero.
  """
  peak = np.max(np.abs(signal))
  if peak == 0:
    # avoid division by zero (and NaN propagation) on silent input
    return signal
  return signal / peak
def augment_audio_signal(signal, fs, augmentation):
  """Function that performs audio signal augmentation.

  Args:
    signal (np.array): np.array containing raw audio signal.
    fs (float): frames per second.
    augmentation (dict): dictionary of augmentation parameters. See
        :func:`get_speech_features_from_file` for specification and example.

  Returns:
    np.array: np.array with augmented audio signal (int16).
  """
  signal_float = normalize_signal(signal.astype(np.float32))

  if augmentation['time_stretch_ratio'] > 0:
    # time stretch (might be slow)
    # the stretch factor is drawn uniformly from
    # [1 - time_stretch_ratio, 1 + time_stretch_ratio]
    stretch_amount = 1.0 + (2.0 * np.random.rand() - 1.0) * \
                     augmentation['time_stretch_ratio']
    signal_float = rs.resample(
        signal_float,
        fs,
        int(fs * stretch_amount),
        filter='kaiser_fast',
    )

  # noise: add white noise at a level (in dB relative to full scale)
  # drawn from [noise_level_min, noise_level_max)
  noise_level_db = np.random.randint(low=augmentation['noise_level_min'],
                                    high=augmentation['noise_level_max'])
  signal_float += np.random.randn(signal_float.shape[0]) * \
                  10.0 ** (noise_level_db / 20.0)

  # re-normalize and convert back to the int16 range
  return (normalize_signal(signal_float) * 32767.0).astype(np.int16)
def get_speech_features(signal, fs, num_features, pad_to=8,
                        features_type='spectrogram',
                        window_size=20e-3,
                        window_stride=10e-3,
                        augmentation=None):
  """Function to convert raw audio signal to numpy array of features.

  Args:
    signal (np.array): np.array containing raw audio signal.
    fs (float): frames per second.
    num_features (int): number of speech features in frequency domain.
    pad_to (int): if specified, the length will be padded to become divisible
        by ``pad_to`` parameter.
    features_type (string): 'mfcc', 'spectrogram' or 'logfbank'.
    window_size (float): size of analysis window in milli-seconds.
    window_stride (float): stride of analysis window in milli-seconds.
    augmentation (dict, optional): dictionary of augmentation parameters. See
        :func:`get_speech_features_from_file` for specification and example.

  Returns:
    np.array: np.array of audio features with shape=[num_time_steps, num_features].
    audio_duration (float): duration of the signal in seconds
  """
  if augmentation is not None:
    # validate that all required augmentation keys are present before use
    if 'time_stretch_ratio' not in augmentation:
      raise ValueError('time_stretch_ratio has to be included in augmentation '
                       'when augmentation it is not None')
    if 'noise_level_min' not in augmentation:
      raise ValueError('noise_level_min has to be included in augmentation '
                       'when augmentation it is not None')
    if 'noise_level_max' not in augmentation:
      raise ValueError('noise_level_max has to be included in augmentation '
                       'when augmentation it is not None')
    signal = augment_audio_signal(signal, fs, augmentation)
  else:
    # normalize to [-1, 1] and convert back to the int16 range
    signal = (normalize_signal(signal.astype(np.float32)) * 32767.0).astype(np.int16)

  audio_duration = len(signal) * 1.0/fs

  n_window_size = int(fs * window_size)
  n_window_stride = int(fs * window_stride)

  # making sure length of the audio is divisible by 8 (fp16 optimization)
  length = 1 + int(math.ceil(
      (1.0 * signal.shape[0] - n_window_size) / n_window_stride
  ))
  if pad_to > 0:
    if length % pad_to != 0:
      pad_size = (pad_to - length % pad_to) * n_window_stride
      signal = np.pad(signal, (0, pad_size), mode='constant')

  if features_type == 'spectrogram':
    frames = psf.sigproc.framesig(sig=signal,
                                  frame_len=n_window_size,
                                  frame_step=n_window_stride,
                                  winfunc=np.hanning)
    # features = np.log1p(psf.sigproc.powspec(frames, NFFT=N_window_size))
    features = psf.sigproc.logpowspec(frames, NFFT=n_window_size)
    assert num_features <= n_window_size // 2 + 1, \
        "num_features for spectrogram should be <= (fs * window_size // 2 + 1)"
    # cut high frequency part
    features = features[:, :num_features]

  elif features_type == 'mfcc':
    features = psf.mfcc(signal=signal,
                        samplerate=fs,
                        winlen=window_size,
                        winstep=window_stride,
                        numcep=num_features,
                        nfilt=2*num_features,
                        nfft=512,
                        lowfreq=0, highfreq=None,
                        preemph=0.97,
                        ceplifter=2*num_features,
                        appendEnergy=False)

  elif features_type == 'logfbank':
    features = psf.logfbank(signal=signal,
                            samplerate=fs,
                            winlen=window_size,
                            winstep=window_stride,
                            nfilt=num_features,
                            nfft=512,
                            lowfreq=0, highfreq=fs/2,
                            preemph=0.97)

  else:
    raise ValueError('Unknown features type: {}'.format(features_type))

  if pad_to > 0:
    assert features.shape[0] % pad_to == 0
  # mean/variance normalization over the whole utterance
  m = np.mean(features)
  s = np.std(features)
  features = (features - m) / s
  return features, audio_duration
| [
"numpy.mean",
"numpy.abs",
"math.ceil",
"numpy.random.rand",
"python_speech_features.sigproc.framesig",
"python_speech_features.logfbank",
"python_speech_features.mfcc",
"numpy.random.randint",
"python_speech_features.sigproc.logpowspec",
"scipy.io.wavfile.read",
"numpy.std",
"numpy.pad",
"n... | [((1419, 1438), 'scipy.io.wavfile.read', 'wave.read', (['filename'], {}), '(filename)\n', (1428, 1438), True, 'import scipy.io.wavfile as wave\n'), ((2559, 2656), 'numpy.random.randint', 'np.random.randint', ([], {'low': "augmentation['noise_level_min']", 'high': "augmentation['noise_level_max']"}), "(low=augmentation['noise_level_min'], high=augmentation[\n 'noise_level_max'])\n", (2576, 2656), True, 'import numpy as np\n'), ((6859, 6876), 'numpy.mean', 'np.mean', (['features'], {}), '(features)\n', (6866, 6876), True, 'import numpy as np\n'), ((6883, 6899), 'numpy.std', 'np.std', (['features'], {}), '(features)\n', (6889, 6899), True, 'import numpy as np\n'), ((2707, 2745), 'numpy.random.randn', 'np.random.randn', (['signal_float.shape[0]'], {}), '(signal_float.shape[0])\n', (2722, 2745), True, 'import numpy as np\n'), ((5242, 5352), 'python_speech_features.sigproc.framesig', 'psf.sigproc.framesig', ([], {'sig': 'signal', 'frame_len': 'n_window_size', 'frame_step': 'n_window_stride', 'winfunc': 'np.hanning'}), '(sig=signal, frame_len=n_window_size, frame_step=\n n_window_stride, winfunc=np.hanning)\n', (5262, 5352), True, 'import python_speech_features as psf\n'), ((5541, 5591), 'python_speech_features.sigproc.logpowspec', 'psf.sigproc.logpowspec', (['frames'], {'NFFT': 'n_window_size'}), '(frames, NFFT=n_window_size)\n', (5563, 5591), True, 'import python_speech_features as psf\n'), ((1689, 1703), 'numpy.abs', 'np.abs', (['signal'], {}), '(signal)\n', (1695, 1703), True, 'import numpy as np\n'), ((4941, 5009), 'math.ceil', 'math.ceil', (['((1.0 * signal.shape[0] - n_window_size) / n_window_stride)'], {}), '((1.0 * signal.shape[0] - n_window_size) / n_window_stride)\n', (4950, 5009), False, 'import math\n'), ((5144, 5190), 'numpy.pad', 'np.pad', (['signal', '(0, pad_size)'], {'mode': '"""constant"""'}), "(signal, (0, pad_size), mode='constant')\n", (5150, 5190), True, 'import numpy as np\n'), ((5846, 6083), 'python_speech_features.mfcc', 'psf.mfcc', ([], 
{'signal': 'signal', 'samplerate': 'fs', 'winlen': 'window_size', 'winstep': 'window_stride', 'numcep': 'num_features', 'nfilt': '(2 * num_features)', 'nfft': '(512)', 'lowfreq': '(0)', 'highfreq': 'None', 'preemph': '(0.97)', 'ceplifter': '(2 * num_features)', 'appendEnergy': '(False)'}), '(signal=signal, samplerate=fs, winlen=window_size, winstep=\n window_stride, numcep=num_features, nfilt=2 * num_features, nfft=512,\n lowfreq=0, highfreq=None, preemph=0.97, ceplifter=2 * num_features,\n appendEnergy=False)\n', (5854, 6083), True, 'import python_speech_features as psf\n'), ((6359, 6525), 'python_speech_features.logfbank', 'psf.logfbank', ([], {'signal': 'signal', 'samplerate': 'fs', 'winlen': 'window_size', 'winstep': 'window_stride', 'nfilt': 'num_features', 'nfft': '(512)', 'lowfreq': '(0)', 'highfreq': '(fs / 2)', 'preemph': '(0.97)'}), '(signal=signal, samplerate=fs, winlen=window_size, winstep=\n window_stride, nfilt=num_features, nfft=512, lowfreq=0, highfreq=fs / 2,\n preemph=0.97)\n', (6371, 6525), True, 'import python_speech_features as psf\n'), ((2309, 2325), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2323, 2325), True, 'import numpy as np\n')] |
'''
Design a delivery algorithm where you want to carry as many packages as possible under a weight limit.
Assumptions:
- Weight is a positive integer
- Each item has a price and a weight
- Total weight limit is a positive amount
- Want to maximize quantity of packages to fit, not necessarily the heaviest items
- Cannot have fractions, only include or exclude items
'''
import numpy as np
class Package():
    """A deliverable item with a weight and a price."""

    def __init__(self, weight, price):
        self.weight = weight
        self.price = price

    def __repr__(self):
        # useful for debugging the demo output below
        return f"Package(weight={self.weight!r}, price={self.price!r})"
def maxPackages(prices, weights, target_weight):
    """Greedily select the maximum number of packages under a weight limit.

    Taking the lightest items first is optimal when the objective is the
    *count* of packages, so `prices` is accepted for interface
    compatibility but ignored.

    Parameters
    ----------
    prices : list
        Item prices (unused by this objective).
    weights : list
        Item weights (positive integers).
    target_weight : int
        Total weight limit.

    Returns
    -------
    list of (index, weight) tuples for the selected items, in increasing
    weight order. Empty when no item fits (or `weights` is empty).
    """
    total_weight = 0
    selected = []
    for idx, weight in sorted(enumerate(weights), key=lambda item: item[1]):
        if total_weight + weight > target_weight:
            break  # every remaining item is at least this heavy
        total_weight += weight
        selected.append((idx, weight))
    return selected
# Demo: build 10 packages with random integer weights/prices in [0, 10)
# (weight and price are set to the same value here).
packages = [Package(i,i) for i in np.random.choice(10, 10, replace=True)]
print([p.weight for p in packages])
print("use packages:")
for i,w in maxPackages([p.price for p in packages], [p.weight for p in packages], 10):
    print(f"  package {i} with weight {w}")
'''
Time complexity is O(n log n), dominated by the sort
Space complexity is O(n)
See also:
https://youtu.be/8LusJS5-AGo?t=928
https://www.youtube.com/watch?v=YRBON9sIZ2Y
https://leetcode.com/discuss/interview-question/535706/maximum-quantity-of-items-dp-question/471080
''' | [
"numpy.random.choice"
] | [((801, 839), 'numpy.random.choice', 'np.random.choice', (['(10)', '(10)'], {'replace': '(True)'}), '(10, 10, replace=True)\n', (817, 839), True, 'import numpy as np\n')] |
"""
Expansion and contraction of resource allocation in sensory bottlenecks.
<NAME>., <NAME>., <NAME>.
Written in 2021 by <NAME>.
To the extent possible under law, the author(s) have dedicated all copyright
and related and neighboring rights to this software to the public domain
worldwide. This software is distributed without any warranty.
You should have received a copy of the CC0 Public Domain Dedication
along with this software.
If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
"""
# Plotting functions for sensory bottlenecks
import matplotlib.pyplot as plt
import numpy as np
def allocation_plot(allo_x, allo_y, dens_ratio, act_ratio, plot_type='1D'):
    """
    Plots allocation over all bottleneck sizes. Includes limit and proportional
    density lines.

    Parameters
    ----------
    allo_x : ndarray
        X values for plotting allocation
    allo_y : ndarray
        Allocation for region 1
    dens_ratio : int
        ratio for the density
    act_ratio : int
        ratio for the activation
    plot_type : str, optional
        Whether data for 1D or 2D sim. The default is '1D'.

    Returns
    -------
    None.

    """
    plt.figure()

    # limit line for the narrow-bottleneck allocation.
    # Fix: matplotlib's keyword must be lowercase 'label'; the capitalised
    # 'Label' is rejected as an unknown Line2D property on recent versions.
    if plot_type == '1D':
        v_line = 1/(1+np.sqrt(dens_ratio*act_ratio))*100
        plt.plot([0,100],[v_line,v_line],linestyle='--',color='#d1d1d1',label = r'$\frac{1}{1 + \sqrt{ad}}$')
    elif plot_type == '2D':
        v_line = 1/(1+np.sqrt(dens_ratio)*act_ratio)*100
        plt.plot([0,100],[v_line,v_line],linestyle='--',color='#d1d1d1',label = r'$\frac{1}{1 + a \sqrt{d}}$')

    # calculate density line and add
    d_line = 100/(dens_ratio+1)
    plt.plot([0,100],[d_line,d_line],linestyle='--',color='#9c2c2c',label='Proportional density')

    # plot allocation line data
    plt.plot(allo_x, allo_y)

    # add title, axis and legend
    plt.xticks(np.linspace(0,100,11))
    plt.xlim([0,100])
    plt.ylim([0,100])
    plt.ylabel('Allocation [%]')
    plt.xlabel('Bottleneck width [%]')
    plt.legend()
    plt.title('Allocation for density:{}, activation:{}'.format(dens_ratio, act_ratio))
| [
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure",
"numpy.linspace",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.legend"
] | [((1221, 1233), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1231, 1233), True, 'import matplotlib.pyplot as plt\n'), ((1715, 1819), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 100]', '[d_line, d_line]'], {'linestyle': '"""--"""', 'color': '"""#9c2c2c"""', 'label': '"""Proportional density"""'}), "([0, 100], [d_line, d_line], linestyle='--', color='#9c2c2c', label\n ='Proportional density')\n", (1723, 1819), True, 'import matplotlib.pyplot as plt\n'), ((1854, 1878), 'matplotlib.pyplot.plot', 'plt.plot', (['allo_x', 'allo_y'], {}), '(allo_x, allo_y)\n', (1862, 1878), True, 'import matplotlib.pyplot as plt\n'), ((1963, 1981), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 100]'], {}), '([0, 100])\n', (1971, 1981), True, 'import matplotlib.pyplot as plt\n'), ((1985, 2003), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 100]'], {}), '([0, 100])\n', (1993, 2003), True, 'import matplotlib.pyplot as plt\n'), ((2007, 2035), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Allocation [%]"""'], {}), "('Allocation [%]')\n", (2017, 2035), True, 'import matplotlib.pyplot as plt\n'), ((2040, 2074), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Bottleneck width [%]"""'], {}), "('Bottleneck width [%]')\n", (2050, 2074), True, 'import matplotlib.pyplot as plt\n'), ((2079, 2091), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2089, 2091), True, 'import matplotlib.pyplot as plt\n'), ((1326, 1437), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 100]', '[v_line, v_line]'], {'linestyle': '"""--"""', 'color': '"""#d1d1d1"""', 'Label': '"""$\\\\frac{1}{1 + \\\\sqrt{ad}}$"""'}), "([0, 100], [v_line, v_line], linestyle='--', color='#d1d1d1', Label\n ='$\\\\frac{1}{1 + \\\\sqrt{ad}}$')\n", (1334, 1437), True, 'import matplotlib.pyplot as plt\n'), ((1936, 1959), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', '(11)'], {}), '(0, 100, 11)\n', (1947, 1959), True, 'import numpy as np\n'), ((1530, 1642), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 100]', '[v_line, 
v_line]'], {'linestyle': '"""--"""', 'color': '"""#d1d1d1"""', 'Label': '"""$\\\\frac{1}{1 + a \\\\sqrt{d}}$"""'}), "([0, 100], [v_line, v_line], linestyle='--', color='#d1d1d1', Label\n ='$\\\\frac{1}{1 + a \\\\sqrt{d}}$')\n", (1538, 1642), True, 'import matplotlib.pyplot as plt\n'), ((1283, 1314), 'numpy.sqrt', 'np.sqrt', (['(dens_ratio * act_ratio)'], {}), '(dens_ratio * act_ratio)\n', (1290, 1314), True, 'import numpy as np\n'), ((1487, 1506), 'numpy.sqrt', 'np.sqrt', (['dens_ratio'], {}), '(dens_ratio)\n', (1494, 1506), True, 'import numpy as np\n')] |
import numpy as np
import math
def grid_reading(key, table):
    """Read ``table`` column by column in the order dictated by ``key``.

    Columns are visited in the order their key letters appear when the
    key is sorted alphabetically; each selected column is read from top
    to bottom. Returns the concatenated characters as one string.
    """
    column_order = [key.index(ch) for ch in sorted(key)]
    print(list(key))
    print('Порядок использования столбцов:', column_order)
    n_rows = table.shape[0]
    pieces = []
    for col in column_order:
        for row in range(n_rows):
            pieces.append(table[row][col])
    return ''.join(pieces)
# Russian lowercase alphabet а..я built from the contiguous Unicode range
# (note: 'ё' lies outside this range and is therefore not included)
ru = [chr(i) for i in range(ord('а'), ord('я')+1)]
print('ru: ',ru)
print('####################################################################')
# 1. Route (columnar transposition) cipher
print('# 1.')
def task1_rout(text, key):
    """Route-cipher a message.

    The message (spaces stripped, lower-cased) is written row by row into
    an m x n table, where n is the key length, then read out column-wise
    in key order via ``grid_reading``. The result is only printed.
    """
    print('Текст: ', text)
    text = text.replace(' ', '').lower()
    print('Ключ:', key)
    key = key.lower()
    n = len(key)
    t_len = len(text)
    m = math.ceil(t_len / n)
    table = np.full((m, n), '')
    print('Длина текста:', t_len)
    print('n = ', n, '\nm = ', m)
    # pad the message with copies of its last letter so it fills the table
    text += text[-1] * (n * m - t_len)
    pos = 0
    for row in range(m):
        for col in range(n):
            table[row][col] = text[pos]
            pos += 1
    print(table)
    res = grid_reading(key, table)
    print('Криптограмма: ', res)
# demo: route cipher on a Russian and an English phrase
text = 'Нельзя недооценивать противника'
key = 'пароль'
task1_rout(text, key)
print('----------------------------------------------------')
text = 'Live long and prosper'
key = 'Spock'
task1_rout(text, key)
print('####################################################################')
# 2. Grille (rotating-grid) cipher
def rotate_90r(A):
    """Rotate a 2-D array 90 degrees clockwise.

    The result is allocated with ``np.empty``, so it always carries
    numpy's default float dtype regardless of the input dtype (behaviour
    the rest of this script relies on).
    """
    n_rows, n_cols = A.shape
    rotated = np.empty((n_cols, n_rows))
    for r in range(n_rows):
        for c in range(n_cols):
            rotated[c, n_rows - 1 - r] = A[r, c]
    return rotated
def generate_key(lenth):
    """Build a random key of ``lenth`` distinct letters drawn from the
    module-level alphabet ``ru``."""
    key = ''
    while len(key) != lenth:
        letter = ru[np.random.randint(len(ru))]
        if letter not in key:
            key += letter
    return key
print('# 2.')
def task2_grid(text):
    """Encrypt ``text`` with a rotating-grille cipher.

    A square of numbers is assembled from four rotated copies of a
    t_s x t_s quadrant numbered 1..size; for every number one of its
    four occurrences is chosen at random to become a hole. Letters are
    written through the holes while the grille is rotated 90 degrees
    four times; the filled square is finally read column-wise in the
    order of a randomly generated key (``grid_reading``).
    """
    print('Текст: ', text)
    text = text.replace(' ','').lower()
    t_len = len(text)
    print('Длина текста:', t_len)
    size = math.ceil(np.sqrt(t_len)) # number of cells in the small quadrant
    # grow size until it is a perfect square: for an integer size,
    # size % sqrt(size) == 0 holds only when sqrt(size) is an integer
    while size % np.sqrt(size) != 0:
        size += 1
    # pad the message with its last letter so it fills the grid exactly
    for k in range(size*size - t_len):
        text += text[-1]
    t_s = int(np.sqrt(size)) # edge length of the small quadrant
    t_s2 = t_s * 2 # edge length of the full square
    t_el = np.arange(1,size+1) # the numbers 1..size written into the quadrant
    key = generate_key(t_s2)
    #key = 'шифр'
    print('Ключ:', key)
    key = key.lower()
    A0 = np.empty((t_s,t_s))
    n = 0
    for i in range(t_s):
        for j in range(t_s):
            A0[i][j] = t_el[n]
            n += 1
    # assemble the full square from rotated copies of the quadrant
    A1 = np.concatenate((A0,rotate_90r(A0)),axis=1)
    A2 = np.concatenate((A1,rotate_90r(rotate_90r(A1))),axis=0)
    #print(A0)
    #print(A1)
    print('Квадрат цифр:\n', A2)
    R = np.zeros((t_s2,t_s2))
    tmp = t_el.copy()
    # for each number k+1 pick at random one of its four occurrences in A2
    # to become a hole in the grille R
    for k in range (size):
        r = np.random.randint(4)
        tmp_count = 0
        for i in range (t_s2):
            for j in range (t_s2):
                if (A2[i][j] == k + 1):
                    if(tmp_count == r):
                        R[i][j] = A2[i][j]
                        tmp_count = -100
                        tmp = tmp[tmp != k+1]
                    else:
                        tmp_count += 1
    print('Сгенерированная схема:\n', R)
    n = 0
    answer = np.full((t_s2, t_s2),'')
    # write letters through the holes, rotating the grille after each pass
    for k in range(4):
        for i in range(t_s2):
            for j in range(t_s2):
                if(R[i][j] != 0):
                    answer[i][j] = text[n]
                    n += 1
        #print(k)
        #print(R)
        #print(answer)
        R = rotate_90r(R)
    print('Схема из букв:\n', answer)
    res = grid_reading(key, answer)
    print('Криптограмма: ', res)
# demo: grille cipher on two phrases (grille and key are random each run)
text = 'договор подписали'
task2_grid(text)
print('----------------------------------------------------')
text = 'за что мне все эти страдания'
task2_grid(text)
print('####################################################################')
# letter -> alphabet index lookup
letters = {ru[i]:i for i in range(len(ru))}
print('Словарь букв ru: ', letters)
# 3. Vigenère table
print('# 3.')
# build the Vigenère square: row i is the alphabet cyclically shifted left by i
vigenere_table = np.array(ru)
for i in range(1, len(ru)):
    row = np.roll(ru, -i)
    vigenere_table = np.vstack((vigenere_table, row))
print('Таблица Виженера: \n',vigenere_table)
def task3_vigenere(text, key):
    """Vigenère-encrypt ``text`` with ``key`` using the module-level
    ``vigenere_table`` (shifted alphabets) and ``letters`` index map.
    The cryptogram is only printed."""
    print('Текст: ', text)
    text = text.replace(' ', '').lower()
    t_len = len(text)
    print('Длина текста:', t_len)
    print('Ключ:', key)
    key = key.lower()
    n = len(key)
    full_key = key
    # repeat the key cyclically until it covers the whole message
    while len(full_key) < t_len:
        full_key += full_key[-n]
    print('-----')
    print(text)
    print(full_key)
    print('-----')
    cipher_chars = []
    for key_ch, text_ch in zip(full_key, text):
        # row = key letter, column = plaintext letter
        cipher_chars.append(vigenere_table[letters[key_ch]][letters[text_ch]])
    res = ''.join(cipher_chars)
    print('Криптограмма: ', res)
# demo: Vigenère cipher on two phrases
text = 'криптография серьезная наука'
key = 'математика'
task3_vigenere(text, key)
print('----------------------------------------------------')
text = 'ты не пройдешь'
key = 'Гендальф'
task3_vigenere(text, key)
"math.ceil",
"numpy.roll",
"numpy.sqrt",
"numpy.array",
"numpy.zeros",
"numpy.random.randint",
"numpy.empty",
"numpy.vstack",
"numpy.full",
"numpy.arange"
] | [((4187, 4199), 'numpy.array', 'np.array', (['ru'], {}), '(ru)\n', (4195, 4199), True, 'import numpy as np\n'), ((732, 752), 'math.ceil', 'math.ceil', (['(t_len / n)'], {}), '(t_len / n)\n', (741, 752), False, 'import math\n'), ((759, 778), 'numpy.full', 'np.full', (['(m, n)', '""""""'], {}), "((m, n), '')\n", (766, 778), True, 'import numpy as np\n'), ((1521, 1537), 'numpy.empty', 'np.empty', (['(y, x)'], {}), '((y, x))\n', (1529, 1537), True, 'import numpy as np\n'), ((2338, 2360), 'numpy.arange', 'np.arange', (['(1)', '(size + 1)'], {}), '(1, size + 1)\n', (2347, 2360), True, 'import numpy as np\n'), ((2506, 2526), 'numpy.empty', 'np.empty', (['(t_s, t_s)'], {}), '((t_s, t_s))\n', (2514, 2526), True, 'import numpy as np\n'), ((2845, 2867), 'numpy.zeros', 'np.zeros', (['(t_s2, t_s2)'], {}), '((t_s2, t_s2))\n', (2853, 2867), True, 'import numpy as np\n'), ((3381, 3406), 'numpy.full', 'np.full', (['(t_s2, t_s2)', '""""""'], {}), "((t_s2, t_s2), '')\n", (3388, 3406), True, 'import numpy as np\n'), ((4238, 4253), 'numpy.roll', 'np.roll', (['ru', '(-i)'], {}), '(ru, -i)\n', (4245, 4253), True, 'import numpy as np\n'), ((4275, 4307), 'numpy.vstack', 'np.vstack', (['(vigenere_table, row)'], {}), '((vigenere_table, row))\n', (4284, 4307), True, 'import numpy as np\n'), ((2034, 2048), 'numpy.sqrt', 'np.sqrt', (['t_len'], {}), '(t_len)\n', (2041, 2048), True, 'import numpy as np\n'), ((2235, 2248), 'numpy.sqrt', 'np.sqrt', (['size'], {}), '(size)\n', (2242, 2248), True, 'import numpy as np\n'), ((2928, 2948), 'numpy.random.randint', 'np.random.randint', (['(4)'], {}), '(4)\n', (2945, 2948), True, 'import numpy as np\n'), ((2109, 2122), 'numpy.sqrt', 'np.sqrt', (['size'], {}), '(size)\n', (2116, 2122), True, 'import numpy as np\n')] |
import argparse
import numpy as np
import os
import sys
from time import sleep
import pandas as pd
# running on Mac for testing
if 'darwin' in sys.platform:
from fake_rpi.RPi import GPIO as GPIO
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
from matplotlib import animation
import matplotlib
matplotlib.use("TkAgg")
# running on raspberry pi
elif 'linux' in sys.platform:
import RPi.GPIO as GPIO
def my_parser():
    """Build the command-line parser for the LED-cube visualizations.

    The default visualization backend depends on the platform: the
    matplotlib renderer on macOS (development), the physical cube on the
    Raspberry Pi (Linux).

    Returns
    -------
    argparse.ArgumentParser
    """
    parser = argparse.ArgumentParser(description='Optional description')
    if 'darwin' in sys.platform:
        vis_type = 'plot'
    elif 'linux' in sys.platform:
        vis_type = 'cube'
    else:
        # fall back to the software renderer on any other platform
        # (the original left vis_type unbound here -> NameError)
        vis_type = 'plot'
    parser.add_argument('--fname', type=str, help='path to visualization file', default='sequence')
    parser.add_argument('--matrix', type=np.array, help='numpy array with matrix', default=np.zeros(0))
    parser.add_argument('--pin_delay', type=np.float64, help='delay between pin outputs', default=0.0002)
    parser.add_argument('--time_step', type=np.float64, help='time between volumes in s', default=0.5)
    # np.int was removed in NumPy 1.24; the builtin int is the equivalent type
    parser.add_argument('--cube_size', type=int, help='size of cube', default=3)
    parser.add_argument('--vis_type', type=str, help='cube, plot, plot_binary', default=vis_type)
    parser.add_argument('--n_steps_pin', type=np.int32, help='length of binary vetor', default=6)
    return parser
# preset animations selectable by button number:
# (button number, sequence file name, seconds per volume)
defaults = pd.DataFrame(columns=['bt_no', 'vis_name', 'time_step'])
defaults.loc[len(defaults)] = ['1', 'sequence', 0.1]
defaults.loc[len(defaults)] = ['2', 'rain', 0.5]
defaults.loc[len(defaults)] = ['3', 'sphere', 0.5]
defaults.loc[len(defaults)] = ['4', 'fillup', 0.5]
defaults.loc[len(defaults)] = ['5', 'intensity', 0.5]
defaults.loc[len(defaults)] = ['6', 'wave', 0.5]
def args_to_cmd(args):
    """Serialize an argparse.Namespace back into a ' --key value' string."""
    return ''.join(f' --{name} {value}' for name, value in vars(args).items())
# load 4d matrix from file
def load_matrix(fname):
    """Load a saved animation matrix from the 'sequences' directory that
    lives next to this module. ``fname`` is given without extension."""
    seq_dir = os.path.join(os.path.dirname(__file__), 'sequences')
    return np.load(os.path.join(seq_dir, fname) + '.npy')
def save_matrix(matrix, fname):
    """Save ``matrix`` under the module-local 'sequences' directory,
    creating it if needed. Purely binary (0/1) matrices are stored as
    int32 so the players can tell them apart from intensity data."""
    seq_dir = os.path.join(os.path.dirname(__file__), 'sequences')
    if not os.path.exists(seq_dir):
        os.makedirs(seq_dir)
    target = os.path.join(seq_dir, fname)
    if np.all((matrix == 0) | (matrix == 1)):
        matrix = matrix.astype(np.int32)
    np.save(target, matrix)
def grid_array(i_grid):
    """Return the GPIO wiring for a cube of edge length ``i_grid``.

    Returns
    -------
    (col_pins, ly_pins)
        ``col_pins`` is an (i_grid, i_grid) array of pin numbers, one per
        LED column; ``ly_pins`` is the list of layer pins.
        Only edge lengths 3 and 7 are wired.
    """
    if i_grid == 3:
        col_pins = np.array([[13, 6, 4],
                             [22, 16, 27],
                             [5, 17, 26]])
        # top - middle - top
        ly_pins = [24, 23, 25]
    elif i_grid == 7:
        # TODO: 7x7 pin mapping still needs checking with Liam
        col_pins = np.array([[47, 39, 25, 26, 20, 22, 9],
                             [46, 38, 27, 28, 21, 23, 10],
                             [30, 29, 17, 18, 7, 11, 12],
                             [34, 36, 37, 42, 24, 15, 14],
                             [43, 44, 35, 0, 41, 16, 13],
                             [40, 32, 45, 19, 5, 8, 3],
                             [48, 33, 31, 6, 4, 2, 1]])
        ly_pins = [0, 2, 5, 3, 1, 4, 6]
    return col_pins, ly_pins
def intensity_to_binary(matrix, n_steps_pin):
    """Expand an intensity matrix into a binary (PWM-style) matrix.

    Intensities are log-compressed, normalized to [0, 1], and each value
    becomes a length-``n_steps_pin`` 0/1 vector whose duty cycle
    approximates the intensity, so the cube fakes brightness by flickering.

    Parameters
    ----------
    matrix : ndarray
        3-D single volume (x, y, z) or 4-D sequence (x, y, z, t).
    n_steps_pin : int
        Number of binary sub-steps per time step.

    Returns
    -------
    ndarray of shape (x, y, z, n_steps_pin * t)
    """
    i_grid = matrix.shape[0]
    if matrix.ndim == 3:
        # promote a single volume to a 1-frame sequence by appending a
        # time axis (the original used axis=4, which is out of range for
        # a 3-D array and raises AxisError on modern NumPy)
        matrix = np.expand_dims(matrix, axis=-1)
    t_steps = matrix.shape[3]
    expanded = np.zeros((i_grid, i_grid, i_grid, n_steps_pin*t_steps))
    # log-compress and normalize intensities to [0, 1]
    matrix = np.log10(matrix + 1)
    matrix = matrix / np.max(matrix)
    for i_t in np.arange(0, t_steps):
        vol = matrix[:, :, :, i_t]
        ind_start = i_t * n_steps_pin
        ind_stop = ind_start + n_steps_pin
        # loop over z, x, y
        for iz in np.arange(0, i_grid):
            for ix in np.arange(0, i_grid):
                for iy in np.arange(0, i_grid):
                    intensity = vol[ix, iy, iz]
                    # convert intensity to binary vector: light every
                    # 'step'-th sub-step -> duty cycle ~ intensity
                    vec = np.zeros(n_steps_pin)
                    if intensity != 0:
                        step = np.round(1 / intensity).astype(np.int32)
                        vec[0::step] = 1
                    expanded[ix, iy, iz, ind_start:ind_stop] = vec
    return expanded
class Visualization:
    """Base class for cube visualizations.

    Obtains the 4-D (x, y, z, t) animation matrix either from the file
    named by ``args.fname`` or directly from ``args.matrix``, and exposes
    the start/stop and close hooks that subclasses override.
    """

    def __init__(self, args=None, **kwargs):
        # Parse defaults lazily: the original evaluated
        # my_parser().parse_known_args() in the signature, which parsed
        # sys.argv once at class-definition (import) time.
        if args is None:
            args = my_parser().parse_known_args()[0]
        self.args = args
        self.ani_running = False
        if self.args.matrix.size < 2:
            # no usable in-memory matrix supplied -> load from file
            self.matrix = load_matrix(self.args.fname)
        else:
            # use matrix directly
            self.matrix = self.args.matrix
        self.i_grid = self.matrix.shape[0]

    def start_stop(self):
        """Toggle hook; concrete subclasses override this."""
        print('no start-stop possible')

    def close_animation(self):
        """Cleanup hook; concrete subclasses override this."""
        print('nothing to close')
class CubeRun(Visualization):
    """Drive the physical LED cube over Raspberry Pi GPIO.

    A volume is displayed by multiplexing: one horizontal layer is lit at
    a time for ``args.pin_delay`` seconds, fast enough that the eye sees
    the whole volume at once.
    """

    def __init__(self, args, root, **kwargs):
        super(CubeRun, self).__init__(args)
        # number of layer-multiplexing sweeps needed to hold one volume
        # visible for args.time_step seconds
        # (np.int was removed in NumPy 1.24; use the builtin int instead)
        self.n_reps = np.ceil(self.args.time_step / (self.i_grid * self.args.pin_delay)).astype(int)
        self.root = root
        self.col_pins, self.ly_pins = grid_array(self.i_grid)
        self.i_v = 0  # index of the volume currently shown
        try:
            self.setup_columns()
        except Exception:
            # GPIO setup failed (e.g. pins already in use): release
            # everything instead of leaving pins half-configured
            # (the original used a bare except:)
            self.close_animation()

    def close_animation(self):
        """Stop the animation and release all GPIO pins."""
        self.ani_running = False
        GPIO.cleanup()
        print('closed')

    def start_stop(self):
        """Toggle the running flag polled by the run loops."""
        self.ani_running = not self.ani_running

    # initialisation of the pins
    def setup_columns(self):
        """Configure all column and layer pins as outputs, initially low."""
        # GPIO pin addressing will use the virtual (BCM) number
        GPIO.setmode(GPIO.BCM)
        # Initialise the pins so we can output values to them
        GPIO.setup(self.col_pins.reshape(-1).tolist(), GPIO.OUT)
        GPIO.setup(self.ly_pins, GPIO.OUT)
        GPIO.output(self.col_pins.reshape(-1).tolist(), False)
        GPIO.output(self.ly_pins, False)

    def update_time_step(self, time_step):
        """Change the per-volume display time and recompute the sweep count."""
        setattr(self.args, 'time_step', time_step)
        self.n_reps = np.ceil(self.args.time_step / (self.args.n_steps_pin * self.i_grid * self.args.pin_delay)).astype(int)

    # function to turn on a single pin
    def single_pin(self, col=13, layer=23, t_light=5):
        """Light a single LED (column/layer pair) for ``t_light`` seconds."""
        GPIO.output(col, True)
        GPIO.output(layer, True)
        sleep(t_light)
        GPIO.output(col, False)
        GPIO.output(layer, False)

    def run_animation(self):
        """Play ``self.matrix``; intensity data is PWM-expanded on the fly."""
        self.ani_running = True
        if self.matrix.dtype != np.int32:
            print('convert intensities')
            # repetition of cycles through layers
            self.n_reps = np.ceil(self.args.time_step / (self.args.n_steps_pin * self.i_grid * self.args.pin_delay)).astype(int)
            self.run_expanded_vols()
        else:
            self.run_normal_vols()

    def run_expanded_vols(self):
        """Show volumes whose intensities must be expanded to binary first."""
        # loop over real volumes
        while self.i_v < self.matrix.shape[3]:
            if self.ani_running:
                vol = self.matrix[:, :, :, self.i_v]
                # convert intensities to binary matrix
                vol_exp = intensity_to_binary(vol, self.args.n_steps_pin)
                # loop over repetitions
                for _ in np.arange(0, self.n_reps):
                    # loop over expanded volumes
                    for i_s in np.arange(0, self.args.n_steps_pin):
                        self.show_vol(vol_exp[:, :, :, i_s])
                self.i_v = self.i_v + 1
                self.root.update()
            else:
                # paused: keep the UI responsive
                self.root.update()

    def run_normal_vols(self):
        """Show already-binary volumes."""
        while self.i_v < self.matrix.shape[3]:
            if self.ani_running:
                vol = self.matrix[:, :, :, self.i_v]
                # loop over repetitions
                for _ in np.arange(0, self.n_reps):
                    self.show_vol(vol)
                self.i_v = self.i_v + 1
                self.root.update()
            else:
                self.root.update()

    def show_vol(self, vol):
        """Multiplex one volume once: light each layer for pin_delay seconds."""
        # loop over layers
        for i_z in np.arange(0, self.i_grid):
            # select active columns
            pins = self.col_pins[np.where(vol[:, :, i_z] != 0)].tolist()
            GPIO.output(pins, True)
            GPIO.output(self.ly_pins[i_z], True)
            sleep(self.args.pin_delay)
            GPIO.output(pins, False)
            GPIO.output(self.ly_pins[i_z], False)
class PlotRun(Visualization):
    """Software renderer: shows the animation as a matplotlib 3-D scatter."""

    def __init__(self, args, **kwargs):
        super(PlotRun, self).__init__(args)
        if self.args.vis_type == 'plot_binary':
            # preview exactly the flicker pattern the hardware cube would show
            self.matrix = intensity_to_binary(self.matrix, self.args.n_steps_pin)
            self.update_rate = self.args.pin_delay * 1000
        else:
            # milliseconds per frame for FuncAnimation
            self.update_rate = self.args.time_step * 1000
        # initialize figure
        self.x, self.y, self.z = np.meshgrid(np.arange(self.i_grid), np.arange(self.i_grid), np.arange(self.i_grid))
        self.fig = plt.figure()
        self.ax = p3.Axes3D(self.fig)
        self.ani = []
        # click toggles pause/resume; closing the window stops the timer
        self.fig.canvas.mpl_connect('button_press_event', self.start_stop)
        self.fig.canvas.mpl_connect('close_event', self.close_animation)
        # marker size tuned per cube edge length
        if self.i_grid == 3:
            self.marker_size = 500
        elif self.i_grid == 7:
            self.marker_size = 50
        else:
            self.marker_size = 40

    def show_vol(self, vol):
        """Redraw the scatter for one volume; nonzero cells appear dark."""
        # TODO: how to change colours rather than redrawing plot?
        plt.cla()
        scat = self.ax.scatter(self.x.flatten(), self.y.flatten(), self.z.flatten(), s=self.marker_size, c=vol.reshape(-1), cmap='binary', depthshade=False, vmin=0, vmax=1, edgecolors="white")
        self.ax.set_xticklabels("")
        self.ax.set_yticklabels("")
        self.ax.set_zticklabels("")
        self.ax.set_xlabel('X')
        self.ax.set_ylabel('Y')
        self.ax.set_zlabel('Z')
        return scat

    def update_plot(self, i):
        """FuncAnimation callback: draw frame ``i``."""
        # read in each volume of the 4D matrix
        data_vol = self.matrix[:, :, :, i]
        self.show_vol(data_vol)

    def update_time_step(self, time_step):
        """Change the frame interval (``time_step`` in seconds) of a running animation."""
        self.ani.event_source.interval = time_step * 1000
        self.update_rate = time_step * 1000

    def start_stop(self, *event):
        """Mouse-click handler: pause or resume the animation timer."""
        if self.ani_running:
            self.ani.event_source.stop()
            self.ani_running = False
        else:
            self.ani.event_source.start()
            self.ani_running = True

    def close_animation(self, *event):
        """Figure-close handler: stop the timer and close all figures."""
        if self.ani.event_source:
            self.ani.event_source.stop()
            self.ani_running = False
        plt.close('all')

    def run_animation(self):
        """Animate a 4-D matrix over time, or draw a 3-D matrix once."""
        self.ani_running = True
        # animation for 4D matrix
        if len(self.matrix.shape) == 4:
            self.ani = animation.FuncAnimation(self.fig, self.update_plot, interval=self.update_rate, frames=self.matrix.shape[3])
            plt.show()
        # static plot if only 3D matrix
        elif len(self.matrix.shape) == 3:
            plt.cla()
            self.show_vol(self.matrix)
| [
"numpy.log10",
"RPi.GPIO.output",
"time.sleep",
"numpy.array",
"numpy.arange",
"RPi.GPIO.setmode",
"numpy.save",
"os.path.exists",
"RPi.GPIO.cleanup",
"argparse.ArgumentParser",
"numpy.where",
"numpy.max",
"matplotlib.pyplot.close",
"pandas.DataFrame",
"matplotlib.pyplot.cla",
"numpy.r... | [((1391, 1447), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['bt_no', 'vis_name', 'time_step']"}), "(columns=['bt_no', 'vis_name', 'time_step'])\n", (1403, 1447), True, 'import pandas as pd\n'), ((344, 367), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (358, 367), False, 'import matplotlib\n'), ((488, 547), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Optional description"""'}), "(description='Optional description')\n", (511, 547), False, 'import argparse\n'), ((2129, 2143), 'numpy.load', 'np.load', (['fpath'], {}), '(fpath)\n', (2136, 2143), True, 'import numpy as np\n'), ((2326, 2349), 'os.path.join', 'os.path.join', (['dd', 'fname'], {}), '(dd, fname)\n', (2338, 2349), False, 'import os\n'), ((2357, 2394), 'numpy.all', 'np.all', (['((matrix == 1) | (matrix == 0))'], {}), '((matrix == 1) | (matrix == 0))\n', (2363, 2394), True, 'import numpy as np\n'), ((2441, 2463), 'numpy.save', 'np.save', (['fname', 'matrix'], {}), '(fname, matrix)\n', (2448, 2463), True, 'import numpy as np\n'), ((3399, 3456), 'numpy.zeros', 'np.zeros', (['(i_grid, i_grid, i_grid, n_steps_pin * t_steps)'], {}), '((i_grid, i_grid, i_grid, n_steps_pin * t_steps))\n', (3407, 3456), True, 'import numpy as np\n'), ((3468, 3488), 'numpy.log10', 'np.log10', (['(matrix + 1)'], {}), '(matrix + 1)\n', (3476, 3488), True, 'import numpy as np\n'), ((3541, 3562), 'numpy.arange', 'np.arange', (['(0)', 't_steps'], {}), '(0, t_steps)\n', (3550, 3562), True, 'import numpy as np\n'), ((1979, 2004), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1994, 2004), False, 'import os\n'), ((2082, 2105), 'os.path.join', 'os.path.join', (['dd', 'fname'], {}), '(dd, fname)\n', (2094, 2105), False, 'import os\n'), ((2219, 2244), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2234, 2244), False, 'import os\n'), ((2270, 2288), 'os.path.exists', 'os.path.exists', (['dd'], {}), 
'(dd)\n', (2284, 2288), False, 'import os\n'), ((2298, 2313), 'os.makedirs', 'os.makedirs', (['dd'], {}), '(dd)\n', (2309, 2313), False, 'import os\n'), ((2529, 2578), 'numpy.array', 'np.array', (['[[13, 6, 4], [22, 16, 27], [5, 17, 26]]'], {}), '([[13, 6, 4], [22, 16, 27], [5, 17, 26]])\n', (2537, 2578), True, 'import numpy as np\n'), ((3319, 3349), 'numpy.expand_dims', 'np.expand_dims', (['matrix'], {'axis': '(4)'}), '(matrix, axis=4)\n', (3333, 3349), True, 'import numpy as np\n'), ((3511, 3525), 'numpy.max', 'np.max', (['matrix'], {}), '(matrix)\n', (3517, 3525), True, 'import numpy as np\n'), ((3734, 3754), 'numpy.arange', 'np.arange', (['(0)', 'i_grid'], {}), '(0, i_grid)\n', (3743, 3754), True, 'import numpy as np\n'), ((5305, 5319), 'RPi.GPIO.cleanup', 'GPIO.cleanup', ([], {}), '()\n', (5317, 5319), True, 'import RPi.GPIO as GPIO\n'), ((5624, 5646), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BCM'], {}), '(GPIO.BCM)\n', (5636, 5646), True, 'import RPi.GPIO as GPIO\n'), ((5782, 5816), 'RPi.GPIO.setup', 'GPIO.setup', (['self.ly_pins', 'GPIO.OUT'], {}), '(self.ly_pins, GPIO.OUT)\n', (5792, 5816), True, 'import RPi.GPIO as GPIO\n'), ((5888, 5920), 'RPi.GPIO.output', 'GPIO.output', (['self.ly_pins', '(False)'], {}), '(self.ly_pins, False)\n', (5899, 5920), True, 'import RPi.GPIO as GPIO\n'), ((6247, 6269), 'RPi.GPIO.output', 'GPIO.output', (['col', '(True)'], {}), '(col, True)\n', (6258, 6269), True, 'import RPi.GPIO as GPIO\n'), ((6278, 6302), 'RPi.GPIO.output', 'GPIO.output', (['layer', '(True)'], {}), '(layer, True)\n', (6289, 6302), True, 'import RPi.GPIO as GPIO\n'), ((6311, 6325), 'time.sleep', 'sleep', (['t_light'], {}), '(t_light)\n', (6316, 6325), False, 'from time import sleep\n'), ((6334, 6357), 'RPi.GPIO.output', 'GPIO.output', (['col', '(False)'], {}), '(col, False)\n', (6345, 6357), True, 'import RPi.GPIO as GPIO\n'), ((6366, 6391), 'RPi.GPIO.output', 'GPIO.output', (['layer', '(False)'], {}), '(layer, False)\n', (6377, 6391), True, 'import 
RPi.GPIO as GPIO\n'), ((8086, 8111), 'numpy.arange', 'np.arange', (['(0)', 'self.i_grid'], {}), '(0, self.i_grid)\n', (8095, 8111), True, 'import numpy as np\n'), ((8974, 8986), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8984, 8986), True, 'import matplotlib.pyplot as plt\n'), ((9005, 9024), 'mpl_toolkits.mplot3d.axes3d.Axes3D', 'p3.Axes3D', (['self.fig'], {}), '(self.fig)\n', (9014, 9024), True, 'import mpl_toolkits.mplot3d.axes3d as p3\n'), ((9476, 9485), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (9483, 9485), True, 'import matplotlib.pyplot as plt\n'), ((10592, 10608), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (10601, 10608), True, 'import matplotlib.pyplot as plt\n'), ((858, 869), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (866, 869), True, 'import numpy as np\n'), ((2758, 2978), 'numpy.array', 'np.array', (['[[47, 39, 25, 26, 20, 22, 9], [46, 38, 27, 28, 21, 23, 10], [30, 29, 17, 18,\n 7, 11, 12], [34, 36, 37, 42, 24, 15, 14], [43, 44, 35, 0, 41, 16, 13],\n [40, 32, 45, 19, 5, 8, 3], [48, 33, 31, 6, 4, 2, 1]]'], {}), '([[47, 39, 25, 26, 20, 22, 9], [46, 38, 27, 28, 21, 23, 10], [30, \n 29, 17, 18, 7, 11, 12], [34, 36, 37, 42, 24, 15, 14], [43, 44, 35, 0, \n 41, 16, 13], [40, 32, 45, 19, 5, 8, 3], [48, 33, 31, 6, 4, 2, 1]])\n', (2766, 2978), True, 'import numpy as np\n'), ((3778, 3798), 'numpy.arange', 'np.arange', (['(0)', 'i_grid'], {}), '(0, i_grid)\n', (3787, 3798), True, 'import numpy as np\n'), ((8234, 8257), 'RPi.GPIO.output', 'GPIO.output', (['pins', '(True)'], {}), '(pins, True)\n', (8245, 8257), True, 'import RPi.GPIO as GPIO\n'), ((8270, 8306), 'RPi.GPIO.output', 'GPIO.output', (['self.ly_pins[i_z]', '(True)'], {}), '(self.ly_pins[i_z], True)\n', (8281, 8306), True, 'import RPi.GPIO as GPIO\n'), ((8319, 8345), 'time.sleep', 'sleep', (['self.args.pin_delay'], {}), '(self.args.pin_delay)\n', (8324, 8345), False, 'from time import sleep\n'), ((8358, 8382), 'RPi.GPIO.output', 
'GPIO.output', (['pins', '(False)'], {}), '(pins, False)\n', (8369, 8382), True, 'import RPi.GPIO as GPIO\n'), ((8395, 8432), 'RPi.GPIO.output', 'GPIO.output', (['self.ly_pins[i_z]', '(False)'], {}), '(self.ly_pins[i_z], False)\n', (8406, 8432), True, 'import RPi.GPIO as GPIO\n'), ((8883, 8905), 'numpy.arange', 'np.arange', (['self.i_grid'], {}), '(self.i_grid)\n', (8892, 8905), True, 'import numpy as np\n'), ((8907, 8929), 'numpy.arange', 'np.arange', (['self.i_grid'], {}), '(self.i_grid)\n', (8916, 8929), True, 'import numpy as np\n'), ((8931, 8953), 'numpy.arange', 'np.arange', (['self.i_grid'], {}), '(self.i_grid)\n', (8940, 8953), True, 'import numpy as np\n'), ((10768, 10880), 'matplotlib.animation.FuncAnimation', 'animation.FuncAnimation', (['self.fig', 'self.update_plot'], {'interval': 'self.update_rate', 'frames': 'self.matrix.shape[3]'}), '(self.fig, self.update_plot, interval=self.\n update_rate, frames=self.matrix.shape[3])\n', (10791, 10880), False, 'from matplotlib import animation\n'), ((10888, 10898), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10896, 10898), True, 'import matplotlib.pyplot as plt\n'), ((3826, 3846), 'numpy.arange', 'np.arange', (['(0)', 'i_grid'], {}), '(0, i_grid)\n', (3835, 3846), True, 'import numpy as np\n'), ((4895, 4961), 'numpy.ceil', 'np.ceil', (['(self.args.time_step / (self.i_grid * self.args.pin_delay))'], {}), '(self.args.time_step / (self.i_grid * self.args.pin_delay))\n', (4902, 4961), True, 'import numpy as np\n'), ((6038, 6133), 'numpy.ceil', 'np.ceil', (['(self.args.time_step / (self.args.n_steps_pin * self.i_grid * self.args.\n pin_delay))'], {}), '(self.args.time_step / (self.args.n_steps_pin * self.i_grid * self.\n args.pin_delay))\n', (6045, 6133), True, 'import numpy as np\n'), ((7199, 7224), 'numpy.arange', 'np.arange', (['(0)', 'self.n_reps'], {}), '(0, self.n_reps)\n', (7208, 7224), True, 'import numpy as np\n'), ((7816, 7841), 'numpy.arange', 'np.arange', (['(0)', 'self.n_reps'], {}), '(0, 
self.n_reps)\n', (7825, 7841), True, 'import numpy as np\n'), ((10993, 11002), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (11000, 11002), True, 'import matplotlib.pyplot as plt\n'), ((3979, 4000), 'numpy.zeros', 'np.zeros', (['n_steps_pin'], {}), '(n_steps_pin)\n', (3987, 4000), True, 'import numpy as np\n'), ((6613, 6708), 'numpy.ceil', 'np.ceil', (['(self.args.time_step / (self.args.n_steps_pin * self.i_grid * self.args.\n pin_delay))'], {}), '(self.args.time_step / (self.args.n_steps_pin * self.i_grid * self.\n args.pin_delay))\n', (6620, 6708), True, 'import numpy as np\n'), ((7306, 7341), 'numpy.arange', 'np.arange', (['(0)', 'self.args.n_steps_pin'], {}), '(0, self.args.n_steps_pin)\n', (7315, 7341), True, 'import numpy as np\n'), ((8182, 8211), 'numpy.where', 'np.where', (['(vol[:, :, i_z] != 0)'], {}), '(vol[:, :, i_z] != 0)\n', (8190, 8211), True, 'import numpy as np\n'), ((4071, 4094), 'numpy.round', 'np.round', (['(1 / intensity)'], {}), '(1 / intensity)\n', (4079, 4094), True, 'import numpy as np\n')] |
from typing import Optional, Tuple, Sequence, Type, Union, Dict
import numpy as np
from anndata import AnnData
import scipy.stats
from scipy import sparse
from scanpy import logging as logg
import graph_tool.all as gt
import pandas as pd
from .._utils import get_cell_loglikelihood, get_cell_back_p, state_from_blocks
from scanpy._utils import get_igraph_from_adjacency, _choose_graph
def calculate_affinity(
    adata: AnnData,
    level: int = 1,
    block_key: Optional[str] = 'nsbm',
    group_by: Optional[str] = None,
    state: Optional = None,
    neighbors_key: Optional[str] = 'neighbors',
    adjacency: Optional[sparse.spmatrix] = None,
    directed: bool = False,
    use_weights: bool = False,
    obsp: Optional[str] = None,
    back_prob: bool = False,
    copy: bool = False
) -> Optional[AnnData]:
    """\
    Calculate cell affinity given a partition scheme. It can be used for
    partitions calculated using schist or for any partition scheme, given
    for example by cell annotations.

    Parameters
    ----------
    adata:
        The AnnData object. Should have been already processed with schist
    level:
        The level to calculate affinity. This parameter is effective
        only for Nested partitions
    block_key:
        The prefix for partitions. This parameter is ignored if the state
        is not gt.NestedBlockState
    group_by:
        The key for group names used for calculations. Setting this will override
        level and block_key. This is effective only for NestedBlockState partitions
    state:
        Optionally calculate affinities on this state.
    neighbors_key
        Use neighbors connectivities as adjacency.
        If not specified, leiden looks .obsp['connectivities'] for connectivities
        (default storage place for pp.neighbors).
        If specified, leiden looks
        .obsp[.uns[neighbors_key]['connectivities_key']] for connectivities.
    adjacency
        Sparse adjacency matrix of the graph, defaults to neighbors connectivities.
    directed
        Whether to treat the graph as directed or undirected.
    use_weights
        If `True`, edge weights from the graph are used in the computation
        (placing more emphasis on stronger edges).
    back_prob
        If `True`, use the backward probabilities instead of the
        log-likelihood based affinities.
    copy:
        Return a new object or do everything in place

    Returns
    -------
    Depending on `copy`, returns or updates `adata` with affinity values
    in adata.obsm[f'CA_{block_key}_level_{level}']
    """
    matrix_key = f'CA_{block_key}_level_{level}' # the default name of the matrix
    if group_by:
        logg.info(f'Calculating cell affinity to {group_by}')
    else:
        logg.info(f'Calculating cell affinity to level {level}')
    if not state:
        # if no state is provided, rebuild it from the parameters stored
        # by a previous schist run, when available
        # (guard on block_key too: the original indexed
        # adata.uns['schist'][block_key] before checking it exists)
        if ('schist' in adata.uns and block_key in adata.uns['schist']
                and 'blocks' in adata.uns['schist'][block_key]):
            params = adata.uns['schist'][block_key]['params']
            if 'neighbors_key' in params:
                neighbors_key = params['neighbors_key']
            if 'use_weights' in params:
                use_weights = params['use_weights']
            # fall back to the degree-corrected model (graph-tool's usual
            # default) when 'deg_corr' is absent from the saved params;
            # the original raised NameError in that case
            deg_corr = params.get('deg_corr', True)
            state = state_from_blocks(adata,
                                      state_key=block_key,
                                      neighbors_key=neighbors_key,
                                      adjacency=adjacency,
                                      directed=directed,
                                      use_weights=use_weights,
                                      deg_corr=deg_corr
                                      )
            g = state.g
        elif not neighbors_key:
            # no state and no adjacency provided, raise an error
            raise ValueError("A state or an adjacency matrix should be given"
                             "Otherwise a graph cannot be computed")
        else:
            # get the graph from the adjacency
            adjacency = _choose_graph(adata, obsp, neighbors_key)
            g = get_igraph_from_adjacency(adjacency, directed=directed)
            g = g.to_graph_tool()
            gt.remove_parallel_edges(g)
            state = gt.BlockState(g)
    else:
        g = state.g

    if group_by:
        matrix_key = f'CA_{group_by}'
        # if groups are given, we generate a new BlockState and work on that
        if group_by in adata.obs.columns and adata.obs[group_by].dtype.name == 'category':
            partitions = adata.obs[group_by].cat.codes.values
            state = gt.BlockState(g, b=partitions)
            if back_prob:
                ca_matrix = get_cell_back_p(state)
            else:
                ca_matrix = get_cell_loglikelihood(state, as_prob=True)
        else:
            raise ValueError(f"{group_by} should be a categorical entry in adata.obs")
    else:
        # use precomputed blocks and states
        if type(state) == gt.NestedBlockState:
            if back_prob:
                p0 = get_cell_back_p(state, level=0)
            else:
                p0 = get_cell_loglikelihood(state, level=0, as_prob=True)
            group_col = None
            if group_by and group_by in adata.obs.columns:
                group_col = group_by
            else:
                g_name = f'{block_key}_level_{level}'
                if g_name in adata.obs.columns:
                    group_col = g_name
            if not group_col:
                raise ValueError("The provided groups or level/blocks do not exist")
            # aggregate level-0 affinities up to the requested grouping
            g0 = pd.Categorical(state.project_partition(0, 0).a)
            cross_tab = pd.crosstab(g0, adata.obs[group_col], normalize='index')
            ca_matrix = (p0 @ cross_tab).values
        elif type(state) == gt.PPBlockState:
            if back_prob:
                ca_matrix = get_cell_back_p(state)
            else:
                ca_matrix = get_cell_loglikelihood(state, as_prob=True)
            matrix_key = 'CA_ppbm'
        else:
            # plain (non-nested) BlockState, e.g. one built above from the
            # adjacency; the original left ca_matrix unbound here
            if back_prob:
                ca_matrix = get_cell_back_p(state)
            else:
                ca_matrix = get_cell_loglikelihood(state, as_prob=True)

    adata.obsm[matrix_key] = ca_matrix
    return adata if copy else None
def cluster_consistency(
    adata: AnnData,
    groups: str = None,
    key_added: Optional[str] = 'cluster_consistency',
    use_marginals: Optional[bool] = False,
    copy: bool = False
) -> Optional[AnnData]:
    """\
    Calculate cluster consistency at a given level

    Parameters
    ----------
    adata
        Annotated data matrix.
    groups
        The key for clusters in adata.obs
    key_added
        The name of the obs/uns entries that will be added to adata
    use_marginals
        By default it uses cell affinities for the analysis, but if group marginals
        are available from the inference, those can be used here.
    copy
        Return a copy instead of writing to adata.

    Returns
    -------
    Depending on `copy`, returns or updates `adata` with consistency values
    in adata.uns[key_added] and adata.obs[key_added]
    """
    matrix_prefix = 'CA'
    if use_marginals:
        matrix_prefix = 'CM'

    if not groups or groups not in adata.obs.columns:
        raise ValueError("Valid groups should be specified")
    ca_key = f'{matrix_prefix}_{groups}'
    if ca_key not in adata.obsm:
        msg = "Affinities for the provided group were not calculated"
        if use_marginals:
            msg = "Marginals for the provided group were not calculated"
        raise ValueError(msg)

    affinity = adata.obsm[ca_key]
    # per-group entropy across cells, normalized by the maximum possible
    # entropy log(n_cells) so values lie in [0, 1]
    entropy = scipy.stats.entropy(affinity, axis=0) / np.log(adata.shape[0]) #normalized entropy

    # store under key_added (the original ignored the key_added parameter
    # and hard-coded 'cluster_consistency'); the default is unchanged
    adata.uns[key_added] = entropy

    # now assign consistency to each cell, according to their group
    e_dict = dict(zip(adata.obs[groups].cat.categories, entropy))
    g = adata.obs[groups].values
    adata.obs[key_added] = [e_dict[g[x]] for x in range(adata.shape[0])]

    return adata if copy else None
def cell_stability(
    adata: AnnData,
    block_key: Optional[str] = 'nsbm', # dummy default
    key_added: Optional[str] = 'cell_stability',
    use_marginals: Optional[bool] = False,
    neighbors_key: Optional[str] = 'neighbors',
    adjacency: Optional[sparse.spmatrix] = None,
    directed: bool = False,
    use_weights: bool = False,
    obsp: Optional[str] = None,
    state: Optional = None,
    back_prob: bool = False,
    copy: bool = False
) -> Optional[AnnData]:
    """\
    Calculate cell stability given cell affinity.

    Stability is the fraction of hierarchy levels in which a cell's best
    affinity exceeds the uniform-chance threshold for that level.

    Parameters
    ----------
    adata
        Annotated data matrix.
    block_key
        The prefix of CA matrices in adata.obsm to evaluate.
    key_added
        The name of the obs entry that will be added to adata.
    use_marginals
        Whether to use marginals in place of affinities
    copy
        Return a copy instead of writing to adata.

    Returns
    -------
    Depending on `copy`, returns or updates `adata` with stability values
    in adata.obs['cell_stability']
    """
    matrix_prefix = 'CA'
    if use_marginals:
        matrix_prefix = 'CM'

    if not state:
        if not adata.uns['schist'][f'{block_key}']['blocks']:
            raise ValueError("No state detected")
        else:
            params = adata.uns['schist'][f'{block_key}']['params']
            if 'neighbors_key' in params:
                neighbors_key = params['neighbors_key']
            if 'use_weights' in params:
                use_weights = params['use_weights']
            # default to the degree-corrected model when 'deg_corr' is
            # absent from the saved params (the original raised NameError)
            deg_corr = params.get('deg_corr', True)
            state = state_from_blocks(adata,
                                      state_key=block_key,
                                      neighbors_key=neighbors_key,
                                      adjacency=adjacency,
                                      directed=directed,
                                      use_weights=use_weights,
                                      deg_corr=deg_corr
                                      )

    # check if we have levels we want to prune
    n_effective_levels = sum([x.get_nonempty_B() > 1 for x in state.get_levels()])
    n_effective_levels = min(n_effective_levels, len(state.get_levels()))
    obsm_names = [x for x in adata.obsm if x.startswith(f"{matrix_prefix}_{block_key}_level")]
    if len(obsm_names) < n_effective_levels:
        logg.warning("Your dataset doesn't contain all the required matrices\n")
        if use_marginals:
            logg.warning("Marginals cannot be recomputed from current data, switching to affinities")
            matrix_prefix='CA'
        logg.warning("They will be recalculated from scratch")
        if back_prob:
            adata.obsm[f'{matrix_prefix}_{block_key}_level_0'] = get_cell_back_p(state, level=0)
        else:
            adata.obsm[f'{matrix_prefix}_{block_key}_level_0'] = get_cell_loglikelihood(state, level=0,
                                                                                        as_prob=True)
        obsm_names = [f'{matrix_prefix}_{block_key}_level_0']
        # recompute the missing levels. calculate_affinity writes level
        # n + 1, so append that name; the original appended level n
        # (duplicating level 0 and dropping the top level) and ran this
        # loop unconditionally, duplicating matrices that already existed
        for n in range(n_effective_levels):
            calculate_affinity(adata, level=n + 1, block_key=block_key, state=state)
            obsm_names.append(f'{matrix_prefix}_{block_key}_level_{n + 1}')

    # take only matrices with at least 2 groups
    obsm_names = [x for x in obsm_names if adata.obsm[x].shape[1] > 1]
    # take the max value for each matrix
    _M = np.array([np.max(adata.obsm[x], axis=1) for x in obsm_names]).T
    # set a threshold given by a uniform distribution
    # this is questionable, may be improved
    thr = np.array([1 - 1/adata.obsm[x].shape[1] for x in obsm_names])
    # use the fraction of levels that are over the level specific threshold
    _S = np.sum(_M > thr, axis=1) / _M.shape[1]
    adata.obs[f'{key_added}'] = _S
    return adata if copy else None
def cell_similarity(
    adata: AnnData,
    key_added: Optional[str] = 'cell_similarity',
    sim_type: Optional[str] = 'hub-promoted',
    use_weights: Optional[bool] = True,
    copy: bool = False,
    **neighbors_kwds
) -> Optional[AnnData]:
    """\
    Calculate cell similarity score based on the kNN graph. Higher scores
    are associated to cells mostly close to similar cells.
    Parameters
    ----------
    adata
        Annotated data matrix.
    key_added
        The name of the entry in adata.obs with calculated values.
    sim_type
        Similarity function. Can be one in 'dice', 'salton', 'hub-promoted','hub-suppressed', 'jaccard', 'inv-log-weight', 'resource-allocation','leight-holme-newman'. For more information check here https://graph-tool.skewed.de/static/doc/topology.html?highlight=distance#graph_tool.topology.vertex_similarity
    use_weights
        Whether edge weights are used when building the graph-tool graph.
    copy
        Return a copy instead of writing to adata.
    Returns
    -------
    Depending on `copy`, returns or updates `adata` with similarity values
    in adata.obs[key_added]
    """
    from .._utils import get_graph_tool_from_adata
    # Bug fix: honour `copy`. Previously the caller's object was mutated in
    # place even when a copy was requested, contradicting the docstring and
    # the pattern used by `label_transfer` below.
    adata = adata.copy() if copy else adata
    logg.info("Adding cell similarity scores")
    g = get_graph_tool_from_adata(adata, use_weights=use_weights, **neighbors_kwds)
    n_cells = g.num_vertices()
    # Pairwise vertex similarity, materialized as an (n_cells x n_cells) array.
    S = gt.vertex_similarity(g, sim_type=sim_type).get_2d_array(range(n_cells))
    D = np.dot(S, S)
    D = np.diag(D / np.max(D))  # take the scaled diagonal
    adata.obs[f'{key_added}'] = D
    return adata if copy else None
def label_transfer(
    adata: AnnData,
    adata_ref: Optional[AnnData] = None,
    obs: Optional[str] = None,
    label_unk: Optional[str] = 'unknown',
    use_best: Optional[bool] = False,
    neighbors_key: Optional[str] = 'neighbors',
    adjacency: Optional[sparse.spmatrix] = None,
    directed: bool = False,
    use_weights: bool = False,
    pca_args: Optional[dict] = None,
    harmony_args: Optional[dict] = None,
    copy: bool = False
) -> Optional[AnnData]:
    """\
    Transfer annotation from one dataset to another using cell affinities.
    If two datasets are given, it uses harmony to perform
    integration and then the kNN graph. If only no reference is given, it is assumed
    that the only adata already contains the proper kNN graph and that
    labels to be reassigned have a specified value.
    Parameters
    ----------
    adata:
        The AnnData object.
    adata_ref
        The optional reference dataset. If None, then all the needed information
        should be included in `adata` (i.e. the kNN graph and the labels)
    obs
        The label that needs to be transfered. Should be in `adata_ref.obs` or in
        `adata.obs` if no `adata_ref` is given
    label_unk
        The label for unassigned cells. If no `adata_ref` is given, this label
        identifies cells to be assigned in `adata`. If `adata_ref` is given, this
        label will be given to all cells that cannot be assigned.
    use_best
        When assigning labels, some cells may have not enough evidence and, therefore,
        left `unknown`. If this parameter is set to `True`, all cells will be assigned
        to the best possible, even if it may not be optimal
    neighbors_key
        Use neighbors connectivities as adjacency.
        If not specified, leiden looks .obsp['connectivities'] for connectivities
        (default storage place for pp.neighbors).
        If specified, leiden looks
        .obsp[.uns[neighbors_key]['connectivities_key']] for connectivities.
    adjacency
        Sparse adjacency matrix of the graph, defaults to neighbors connectivities.
        NOTE(review): currently accepted but not used by this implementation.
    directed
        Whether to treat the graph as directed or undirected.
        NOTE(review): currently accepted but not used by this implementation.
    use_weights
        If `True`, edge weights from the graph are used in the computation
        (placing more emphasis on stronger edges).
        NOTE(review): currently accepted but not used by this implementation.
    pca_args
        Parameters to be passed to `sc.tl.pca` before harmony is issued.
        Defaults to an empty dict.
    harmony_args
        Parameters to be passed to `sc.external.pp.harmony_integrate`.
        Defaults to an empty dict.
    copy:
        Return a new object or do everything in place
    Returns
    -------
    Depending on `copy`, returns or updates `adata` with added labels
    in adata.obs[f'{label_ref}']
    """
    # Avoid the shared-mutable-default pitfall: fresh dicts on every call.
    pca_args = {} if pca_args is None else pca_args
    harmony_args = {} if harmony_args is None else harmony_args
    adata = adata.copy() if copy else adata
    if adata_ref:
        from scanpy.tools import pca
        from scanpy.preprocessing import neighbors
        from scanpy.external.pp import harmony_integrate
        # we have to create a merged dataset and integrate
        # before that check that the labels are not in the recipient, in case drop
        if obs in adata.obs_keys():
            logg.warning(f'{obs} was found in dataset 1, it will be wiped')
            adata.obs.drop(obs, inplace=True, axis='columns')
        if obs not in adata_ref.obs_keys():
            raise ValueError(
                f'Annotation {obs} is not present in reference dataset.'
            )
        # now do the merge, so that the empty category is now created
        adata_merge = adata.concatenate(adata_ref, batch_categories=['_unk', '_ref'],
                                         batch_key='_label_transfer')
        # cells coming from `adata` have NaN labels; mark them as unknown
        adata_merge.obs[obs] = adata_merge.obs[obs].cat.add_categories(label_unk).fillna(label_unk)
        # perform integration using harmony
        pca(adata_merge, **pca_args)
        harmony_integrate(adata_merge,
                          key='_label_transfer',
                          **harmony_args)
        # now calculate the kNN graph; sqrt(n)/2 is a heuristic neighbor count
        n_neighbors = int(np.sqrt(adata_merge.shape[0])/2)
        key_added = neighbors_key
        if key_added == 'neighbors':
            key_added = None
        neighbors(adata_merge, use_rep='X_pca_harmony',
                  n_neighbors=n_neighbors, key_added=key_added)
    else:
        adata_merge = adata#.copy()
        if obs not in adata_merge.obs_keys():
            raise ValueError(
                f'Annotation {obs} is not present in dataset.'
            )
        if label_unk not in adata_merge.obs[obs].cat.categories:
            raise ValueError(
                f'Label {label_unk} is not present in {obs}.'
            )
    # calculate affinity
    calculate_affinity(adata_merge, group_by=obs, neighbors_key=neighbors_key)
    # now work on affinity, rank it to get the new labels
    categories = adata_merge.obs[obs].cat.categories
    affinity = pd.DataFrame(adata_merge.obsm[f'CA_{obs}'],
                            index=adata_merge.obs_names, columns=categories)
    # if use_best we need to remove label unknown from the matrix so it
    # does not get scored
    if use_best:
        affinity.drop(label_unk, axis='columns', inplace=True)
    rank_affinity = affinity.rank(axis=1, ascending=False)
    adata_merge.obs[f'_{obs}_tmp'] = adata_merge.obs[obs].values
    for c in rank_affinity.columns:
        # assign label c to every cell whose top-ranked affinity column is c
        cells = rank_affinity[rank_affinity[c] == 1].index
        adata_merge.obs.loc[cells, f'_{obs}_tmp'] = c
    # do actual transfer to dataset 1
    # here we assume that concatenation does not change the order of cells
    # only cell names
    labels = adata_merge.obs[f'_{obs}_tmp'].cat.categories
    if adata_ref:
        # transfer has been done between two files
        adata.obs[obs] = adata_merge.obs.query('_label_transfer == "_unk"')[f'_{obs}_tmp'].values
    else:
        # transfer is within dataset
        adata_merge.obs[obs] = adata_merge.obs[f'_{obs}_tmp'].values
        adata_merge.obs.drop(f'_{obs}_tmp', axis='columns', inplace=True)
        adata = adata_merge
    # ensure that it is categorical with proper order
    adata.obs[obs] = pd.Categorical(adata.obs[obs], categories=labels)
    # transfer colors if any
    if adata_ref and f'{obs}_colors' in adata_ref.uns:
        colors = list(adata_ref.uns[f'{obs}_colors'])
        if not use_best:
            # add gray for unknown
            colors.append('#aabbcc')
        adata.uns[f'{obs}_colors'] = colors
    return adata if copy else None
| [
"graph_tool.all.remove_parallel_edges",
"graph_tool.all.vertex_similarity",
"numpy.sqrt",
"numpy.log",
"scanpy.preprocessing.neighbors",
"scanpy._utils._choose_graph",
"numpy.array",
"scanpy.tools.pca",
"graph_tool.all.BlockState",
"pandas.Categorical",
"numpy.max",
"numpy.dot",
"scanpy.exte... | [((11570, 11634), 'numpy.array', 'np.array', (['[(1 - 1 / adata.obsm[x].shape[1]) for x in obsm_names]'], {}), '([(1 - 1 / adata.obsm[x].shape[1]) for x in obsm_names])\n', (11578, 11634), True, 'import numpy as np\n'), ((13210, 13252), 'scanpy.logging.info', 'logg.info', (['"""Adding cell similarity scores"""'], {}), "('Adding cell similarity scores')\n", (13219, 13252), True, 'from scanpy import logging as logg\n'), ((13456, 13468), 'numpy.dot', 'np.dot', (['S', 'S'], {}), '(S, S)\n', (13462, 13468), True, 'import numpy as np\n'), ((18560, 18656), 'pandas.DataFrame', 'pd.DataFrame', (["adata_merge.obsm[f'CA_{obs}']"], {'index': 'adata_merge.obs_names', 'columns': 'categories'}), "(adata_merge.obsm[f'CA_{obs}'], index=adata_merge.obs_names,\n columns=categories)\n", (18572, 18656), True, 'import pandas as pd\n'), ((19916, 19965), 'pandas.Categorical', 'pd.Categorical', (['adata.obs[obs]'], {'categories': 'labels'}), '(adata.obs[obs], categories=labels)\n', (19930, 19965), True, 'import pandas as pd\n'), ((2606, 2659), 'scanpy.logging.info', 'logg.info', (['f"""Calculating cell affinity to {group_by}"""'], {}), "(f'Calculating cell affinity to {group_by}')\n", (2615, 2659), True, 'from scanpy import logging as logg\n'), ((2678, 2734), 'scanpy.logging.info', 'logg.info', (['f"""Calculating cell affinity to level {level}"""'], {}), "(f'Calculating cell affinity to level {level}')\n", (2687, 2734), True, 'from scanpy import logging as logg\n'), ((7653, 7675), 'numpy.log', 'np.log', (['adata.shape[0]'], {}), '(adata.shape[0])\n', (7659, 7675), True, 'import numpy as np\n'), ((10335, 10407), 'scanpy.logging.warning', 'logg.warning', (['"""Your dataset doesn\'t contain all the required matrices\n"""'], {}), '("Your dataset doesn\'t contain all the required matrices\\n")\n', (10347, 10407), True, 'from scanpy import logging as logg\n'), ((10575, 10629), 'scanpy.logging.warning', 'logg.warning', (['"""They will be recalculated from scratch"""'], {}), 
"('They will be recalculated from scratch')\n", (10587, 10629), True, 'from scanpy import logging as logg\n'), ((11721, 11745), 'numpy.sum', 'np.sum', (['(_M > thr)'], {'axis': '(1)'}), '(_M > thr, axis=1)\n', (11727, 11745), True, 'import numpy as np\n'), ((17418, 17446), 'scanpy.tools.pca', 'pca', (['adata_merge'], {}), '(adata_merge, **pca_args)\n', (17421, 17446), False, 'from scanpy.tools import pca\n'), ((17455, 17524), 'scanpy.external.pp.harmony_integrate', 'harmony_integrate', (['adata_merge'], {'key': '"""_label_transfer"""'}), "(adata_merge, key='_label_transfer', **harmony_args)\n", (17472, 17524), False, 'from scanpy.external.pp import harmony_integrate\n'), ((17819, 17916), 'scanpy.preprocessing.neighbors', 'neighbors', (['adata_merge'], {'use_rep': '"""X_pca_harmony"""', 'n_neighbors': 'n_neighbors', 'key_added': 'key_added'}), "(adata_merge, use_rep='X_pca_harmony', n_neighbors=n_neighbors,\n key_added=key_added)\n", (17828, 17916), False, 'from scanpy.preprocessing import neighbors\n'), ((4634, 4664), 'graph_tool.all.BlockState', 'gt.BlockState', (['g'], {'b': 'partitions'}), '(g, b=partitions)\n', (4647, 4664), True, 'import graph_tool.all as gt\n'), ((5718, 5774), 'pandas.crosstab', 'pd.crosstab', (['g0', 'adata.obs[group_col]'], {'normalize': '"""index"""'}), "(g0, adata.obs[group_col], normalize='index')\n", (5729, 5774), True, 'import pandas as pd\n'), ((10446, 10545), 'scanpy.logging.warning', 'logg.warning', (['"""Marginals cannot be recomputed from current data, switching to affinities"""'], {}), "(\n 'Marginals cannot be recomputed from current data, switching to affinities'\n )\n", (10458, 10545), True, 'from scanpy import logging as logg\n'), ((13376, 13418), 'graph_tool.all.vertex_similarity', 'gt.vertex_similarity', (['g'], {'sim_type': 'sim_type'}), '(g, sim_type=sim_type)\n', (13396, 13418), True, 'import graph_tool.all as gt\n'), ((13489, 13498), 'numpy.max', 'np.max', (['D'], {}), '(D)\n', (13495, 13498), True, 'import numpy as 
np\n'), ((16723, 16786), 'scanpy.logging.warning', 'logg.warning', (['f"""{obs} was found in dataset 1, it will be wiped"""'], {}), "(f'{obs} was found in dataset 1, it will be wiped')\n", (16735, 16786), True, 'from scanpy import logging as logg\n'), ((4061, 4102), 'scanpy._utils._choose_graph', '_choose_graph', (['adata', 'obsp', 'neighbors_key'], {}), '(adata, obsp, neighbors_key)\n', (4074, 4102), False, 'from scanpy._utils import get_igraph_from_adjacency, _choose_graph\n'), ((4119, 4174), 'scanpy._utils.get_igraph_from_adjacency', 'get_igraph_from_adjacency', (['adjacency'], {'directed': 'directed'}), '(adjacency, directed=directed)\n', (4144, 4174), False, 'from scanpy._utils import get_igraph_from_adjacency, _choose_graph\n'), ((4221, 4248), 'graph_tool.all.remove_parallel_edges', 'gt.remove_parallel_edges', (['g'], {}), '(g)\n', (4245, 4248), True, 'import graph_tool.all as gt\n'), ((4269, 4285), 'graph_tool.all.BlockState', 'gt.BlockState', (['g'], {}), '(g)\n', (4282, 4285), True, 'import graph_tool.all as gt\n'), ((11401, 11430), 'numpy.max', 'np.max', (['adata.obsm[x]'], {'axis': '(1)'}), '(adata.obsm[x], axis=1)\n', (11407, 11430), True, 'import numpy as np\n'), ((17678, 17707), 'numpy.sqrt', 'np.sqrt', (['adata_merge.shape[0]'], {}), '(adata_merge.shape[0])\n', (17685, 17707), True, 'import numpy as np\n')] |
import importlib
import datetime
import argparse
import random
import uuid
import time
import os
import numpy as np
import torch
from torch.autograd import Variable
from metrics.metrics import confusion_matrix
import matplotlib.pyplot as plt
from main import load_datasets
# Import saliency methods
#from fullgrad_saliency_master.saliency.fullgrad import FullGrad
#from fullgrad_saliency_master.saliency.simple_fullgrad import SimpleFullGrad
#from fullgrad_saliency_master.saliency.smooth_fullgrad import SmoothFullGrad
from fullgrad_saliency_master.saliency.gradcam import GradCAM
from fullgrad_saliency_master.saliency.grad import InputGradient
from fullgrad_saliency_master.saliency.smoothgrad import SmoothGrad
"""
Stolen from CS231N
"""
def compute_saliency_maps(x, y, model):
    """
    Compute a class saliency map using the model for images X and labels y.
    Input:
    - x: Input image: Tensor of shape(H*W)
    - y: Labels for x; tensor of class indices
    - model: A pretrained CNN that will be used to compute the saliency map.
      Called as ``model(x, 0)``; the second argument is the task id expected
      by the continual-learning models in this project.
    Returns:
    - saliency: A Tensor of shape (H*W) giving the saliency map for the input
      image: the absolute gradient of the correct-class score w.r.t. x.
    """
    # Make sure the model is in "test" mode (freezes dropout / batch-norm).
    model.eval()
    # Make input tensor require gradient so backward() populates x.grad
    x.requires_grad_()
    # Bug fix: backward() *accumulates* into .grad, so a leftover gradient
    # from a previous call would corrupt this saliency map. Clear it first.
    if x.grad is not None:
        x.grad = None
    scores = model(x, 0)  # 0 = task id (first task)
    # Keep only the scores of the correct classes and sum them; the gradient
    # of this sum w.r.t. x equals the gradient of each correct-class score.
    correct_scores = scores.gather(0, y).squeeze()
    torch.sum(correct_scores).backward()
    # Saliency is the magnitude of the input gradient.
    return torch.abs(x.grad)
def main():
    """Load a trained continual-learning checkpoint and visualize saliency maps.

    Parses the experiment's command-line flags, seeds all RNGs, restores the
    model from a hard-coded checkpoint path, runs the selected saliency method
    on one sample from the first task, and plots image/saliency pairs.
    """
    parser = argparse.ArgumentParser(description='Continuum learning')
    # Woody: extra args for caml
    parser.add_argument('--caml_priority', type=str, default='loss',
                        help='how to prioritize sampling in caml')
    parser.add_argument('--softmax_temperature', type=float, default=1.0,
                        help='temperature for softmax in replay buffer sampling')
    # model details
    parser.add_argument('--model', type=str, default='caml1',
                        help='model to train')
    parser.add_argument('--n_hiddens', type=int, default=100,
                        help='number of hidden neurons at each layer')
    parser.add_argument('--n_layers', type=int, default=2,
                        help='number of hidden layers')
    parser.add_argument('--finetune', default='yes', type=str,help='whether to initialize nets in indep. nets')
    # optimizer parameters influencing all models
    parser.add_argument('--n_epochs', type=int, default=1,
                        help='Number of epochs per task')
    parser.add_argument('--batch_size', type=int, default=1,
                        help='the amount of items received by the algorithm at one time (set to 1 across all experiments). Variable name is from GEM project.')
    parser.add_argument('--lr', type=float, default=1e-3,
                        help='learning rate')
    # memory parameters for GEM baselines
    parser.add_argument('--n_memories', type=int, default=0,
                        help='number of memories per task')
    parser.add_argument('--memory_strength', default=0, type=float,
                        help='memory strength (meaning depends on memory)')
    # parameters specific to models in https://openreview.net/pdf?id=B1gTShAct7
    parser.add_argument('--memories', type=int, default=5120, help='number of total memories stored in a reservoir sampling based buffer')
    parser.add_argument('--gamma', type=float, default=1.0,
                        help='gamma learning rate parameter') #gating net lr in roe
    parser.add_argument('--batches_per_example', type=float, default=1,
                        help='the number of batch per incoming example')
    parser.add_argument('--s', type=float, default=1,
                        help='current example learning rate multiplier (s)')
    parser.add_argument('--replay_batch_size', type=float, default=20,
                        help='The batch size for experience replay. Denoted as k-1 in the paper.')
    parser.add_argument('--beta', type=float, default=1.0,
                        help='beta learning rate parameter') # exploration factor in roe
    # experiment parameters
    parser.add_argument('--cuda', type=str, default='no',
                        help='Use GPU?')
    parser.add_argument('--seed', type=int, default=0,
                        help='random seed of model')
    parser.add_argument('--log_every', type=int, default=100,
                        help='frequency of logs, in minibatches')
    parser.add_argument('--save_path', type=str, default='results/',
                        help='save models at the end of training')
    # data parameters
    parser.add_argument('--data_path', default='data/',
                        help='path where data is located')
    parser.add_argument('--data_file', default='mnist_rotations.pt',
                        help='data file')
    parser.add_argument('--samples_per_task', type=int, default=-1,
                        help='training samples per task (all if negative)')
    parser.add_argument('--shuffle_tasks', type=str, default='no',
                        help='present tasks in order')
    # Saliency method
    parser.add_argument('--saliency', type=str, default='smoothgrad',
                        help="Defines the saliency method used")
    args = parser.parse_args()
    # Flags arrive as 'yes'/'no' strings; normalize them to booleans.
    args.cuda = True if args.cuda == 'yes' else False
    args.finetune = True if args.finetune == 'yes' else False
    # taskinput model has one extra layer
    if args.model == 'taskinput':
        args.n_layers -= 1
    # unique identifier
    uid = uuid.uuid4().hex
    # initialize seeds for torch, numpy and the stdlib RNG so runs reproduce
    torch.backends.cudnn.enabled = False
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)
    if args.cuda:
        print("Found GPU:", torch.cuda.get_device_name(0))
        torch.cuda.manual_seed_all(args.seed)
    # load data
    x_tr, x_te, n_inputs, n_outputs, n_tasks = load_datasets(args)
    n_outputs = n_outputs.item() # outputs should not be a tensor, otherwise "TypeError: expected Float (got Long)"
    # load model: the module name is derived from the --model flag
    Model = importlib.import_module('model.' + args.model)
    model = Model.Net(n_inputs, n_outputs, n_tasks, args)
    # NOTE(review): hard-coded checkpoint path below -- parameterize before reuse.
    #result_t, result_a, model_state_dict, stats, one_liner, _ = torch.load('woody_results/online_mnist_rotations.pt_2020_11_01_11_19_37_f37e2305e6e04d61ab498c9bf252fe97.pt')
    result_t, result_a, model_state_dict, stats, one_liner, _ = torch.load('woody_results/caml1_mnist_rotations.pt_2020_11_01_13_58_46_0c7287daad494c818e6d5ce206b16b0b.pt')
    model.load_state_dict(model_state_dict)
    model.eval()
    if args.cuda:
        try:
            model.cuda()
        except:
            pass
    # Initialize saliency methods
    saliency_methods = {
        # FullGrad-based methods
        #'fullgrad': FullGrad(model),
        #'simple_fullgrad': SimpleFullGrad(model),
        #'smooth_fullgrad': SmoothFullGrad(model),
        # Other saliency methods from literature
        'gradcam': GradCAM(model),
        'inputgrad': InputGradient(model),
        'smoothgrad': SmoothGrad(model)
    }
    # Run the selected saliency method on one sample
    # From the final task train set
    task_num = 0
    saliency_idxes = [7, 1, 105]
    #x = x_tr[task_num][1][saliency_idxes]
    #y = x_tr[task_num][2][saliency_idxes]
    x = x_tr[task_num][1][1]
    y = x_tr[task_num][2][1]
    #saliency = compute_saliency_maps(x, y, model)
    saliency = saliency_methods[args.saliency].saliency(x, y)
    # Convert the saliency map from Torch Tensor to numpy array and show images
    # and saliency maps together.
    saliency = saliency.detach().numpy()
    # Try the technique of multiplying the image and saliency!
    #saliency = saliency * x.detach().numpy()
    # Reshape flat vectors into 28x28 MNIST images for display.
    saliency = saliency.reshape(-1, 28, 28)
    x = x.reshape(-1, 28, 28).detach().numpy()
    N = x.shape[0]
    # Top row: input images; bottom row: their saliency maps.
    for i in range(N):
        plt.subplot(2, N, i+1)
        plt.imshow(x[i])
        plt.axis('off')
        plt.subplot(2, N, N + i + 1)
        plt.imshow(saliency[i], cmap=plt.cm.Greens)
        plt.axis('off')
    plt.gcf().set_size_inches(12, 5)
    plt.show()
if __name__ == '__main__':
main()
| [
"fullgrad_saliency_master.saliency.gradcam.GradCAM",
"fullgrad_saliency_master.saliency.smoothgrad.SmoothGrad",
"torch.sum",
"matplotlib.pyplot.imshow",
"argparse.ArgumentParser",
"numpy.random.seed",
"matplotlib.pyplot.axis",
"torch.abs",
"importlib.import_module",
"matplotlib.pyplot.gcf",
"uui... | [((2135, 2152), 'torch.sum', 'torch.sum', (['scores'], {}), '(scores)\n', (2144, 2152), False, 'import torch\n'), ((2241, 2258), 'torch.abs', 'torch.abs', (['x_grad'], {}), '(x_grad)\n', (2250, 2258), False, 'import torch\n'), ((2305, 2362), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Continuum learning"""'}), "(description='Continuum learning')\n", (2328, 2362), False, 'import argparse\n'), ((6485, 6513), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (6502, 6513), False, 'import torch\n'), ((6518, 6543), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (6532, 6543), True, 'import numpy as np\n'), ((6548, 6570), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (6559, 6570), False, 'import random\n'), ((6758, 6777), 'main.load_datasets', 'load_datasets', (['args'], {}), '(args)\n', (6771, 6777), False, 'from main import load_datasets\n'), ((6925, 6971), 'importlib.import_module', 'importlib.import_module', (["('model.' + args.model)"], {}), "('model.' 
+ args.model)\n", (6948, 6971), False, 'import importlib\n'), ((7270, 7388), 'torch.load', 'torch.load', (['"""woody_results/caml1_mnist_rotations.pt_2020_11_01_13_58_46_0c7287daad494c818e6d5ce206b16b0b.pt"""'], {}), "(\n 'woody_results/caml1_mnist_rotations.pt_2020_11_01_13_58_46_0c7287daad494c818e6d5ce206b16b0b.pt'\n )\n", (7280, 7388), False, 'import torch\n'), ((8973, 8983), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8981, 8983), True, 'import matplotlib.pyplot as plt\n'), ((6396, 6408), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (6406, 6408), False, 'import uuid\n'), ((6656, 6693), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['args.seed'], {}), '(args.seed)\n', (6682, 6693), False, 'import torch\n'), ((7837, 7851), 'fullgrad_saliency_master.saliency.gradcam.GradCAM', 'GradCAM', (['model'], {}), '(model)\n', (7844, 7851), False, 'from fullgrad_saliency_master.saliency.gradcam import GradCAM\n'), ((7874, 7894), 'fullgrad_saliency_master.saliency.grad.InputGradient', 'InputGradient', (['model'], {}), '(model)\n', (7887, 7894), False, 'from fullgrad_saliency_master.saliency.grad import InputGradient\n'), ((7918, 7935), 'fullgrad_saliency_master.saliency.smoothgrad.SmoothGrad', 'SmoothGrad', (['model'], {}), '(model)\n', (7928, 7935), False, 'from fullgrad_saliency_master.saliency.smoothgrad import SmoothGrad\n'), ((8743, 8767), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', 'N', '(i + 1)'], {}), '(2, N, i + 1)\n', (8754, 8767), True, 'import matplotlib.pyplot as plt\n'), ((8774, 8790), 'matplotlib.pyplot.imshow', 'plt.imshow', (['x[i]'], {}), '(x[i])\n', (8784, 8790), True, 'import matplotlib.pyplot as plt\n'), ((8799, 8814), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (8807, 8814), True, 'import matplotlib.pyplot as plt\n'), ((8823, 8851), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', 'N', '(N + i + 1)'], {}), '(2, N, N + i + 1)\n', (8834, 8851), True, 'import matplotlib.pyplot as plt\n'), 
((8860, 8903), 'matplotlib.pyplot.imshow', 'plt.imshow', (['saliency[i]'], {'cmap': 'plt.cm.Greens'}), '(saliency[i], cmap=plt.cm.Greens)\n', (8870, 8903), True, 'import matplotlib.pyplot as plt\n'), ((8912, 8927), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (8920, 8927), True, 'import matplotlib.pyplot as plt\n'), ((6617, 6646), 'torch.cuda.get_device_name', 'torch.cuda.get_device_name', (['(0)'], {}), '(0)\n', (6643, 6646), False, 'import torch\n'), ((8936, 8945), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (8943, 8945), True, 'import matplotlib.pyplot as plt\n')] |
import os
from asrlib.utils import base, reader, audio
from asrlib.utils.wer import compute_wer
import time
from collections import OrderedDict
import glob
import numpy as np
from absl import logging, app, flags
# Command-line flags configuring the recognition/evaluation run.
flags.DEFINE_string('dataset', 'testdata/dataset', 'the dataset dir')
flags.DEFINE_string('outdir', '/tmp/out_of_open_asr', 'the output dir')
flags.DEFINE_string('api', 'tencent', 'baidu / google / xunfei / tencent')
flags.DEFINE_string('path_prefix', 'testdata/dataset/', 'add to the head of audio path')
# Sharding: this process handles slice `job_idx` out of `job_num` equal slices.
flags.DEFINE_integer('job_num', 1, '')
flags.DEFINE_integer('job_idx', 0, '')
# Retry policy applied to cached recognition results (see should_retry).
flags.DEFINE_bool('rerun_empty', False, 'retry if the recognized text is empty')
flags.DEFINE_bool('rerun_error', True, 'retry if the recognition cause an error')
flags.DEFINE_bool('rerun_short', False, 'retry if the recognized text is too short')
flags.DEFINE_bool('rerun_all', False, 'rerun all the sentences')
FLAGS = flags.FLAGS
# sed script used to normalize text before WER scoring
sed_filter_file = base.default_sed_filter_file
def process_wav_line(wav_line):
    """Prepend FLAGS.path_prefix to every audio-path token of a wav.scp line.

    Tokens ending in '.wav' or '.mp3' receive the prefix; all other tokens
    pass through unchanged. Returns the rebuilt, space-joined line.
    """
    prefix = FLAGS.path_prefix
    tokens = [
        prefix + tok if tok.endswith(('.wav', '.mp3')) else tok
        for tok in wav_line.split()
    ]
    return ' '.join(tokens)
def audio_to_wav(wav_line, tmp_name):
    """Parse a wav.scp line (with path prefix applied) into audio data.

    Note: `tmp_name` is accepted for API compatibility but not used here.
    """
    return audio.parse_wav_line(process_wav_line(wav_line))
def load_all_results():
    """Merge every cached result*.txt under <outdir>/<api> into one OrderedDict."""
    merged = OrderedDict()
    pattern = os.path.join(FLAGS.outdir, FLAGS.api, 'result*.txt')
    for path in glob.glob(pattern):
        print('load result from %s' % path)
        merged.update(reader.read_txt_to_dict(path))
    return merged
def should_retry(recog_text, ref_text):
    """Decide whether a cached recognition result should be recomputed.

    Each check is gated by the matching FLAGS.rerun_* switch; the reason
    is printed before returning True. Returns False when no check fires.
    """
    checks = (
        (FLAGS.rerun_empty, lambda: recog_text.strip() == '', 'retry as empty'),
        (FLAGS.rerun_error, lambda: recog_text == '[ERROR]', 'retry as error'),
        (FLAGS.rerun_short,
         lambda: len(recog_text.split()) <= len(ref_text.split()) // 2,
         'retry as too short !'),
    )
    for enabled, triggered, reason in checks:
        if enabled and triggered():
            print(reason)
            return True
    return False
def main(_):
    """Run cloud-ASR recognition over one shard of the dataset and score WER.

    Selects the recognizer backend from FLAGS.api, recognizes every utterance
    in this job's shard (reusing and incrementally persisting cached results),
    and streams (hypothesis, reference, key) triples into compute_wer.
    """
    assert FLAGS.dataset is not None
    assert FLAGS.outdir is not None
    assert FLAGS.api is not None
    # Pick the backend-specific recognize_wav implementation.
    if FLAGS.api == 'xunfei':
        from openasr.xunfei.iat_ws_python3 import recognize_wav
    elif FLAGS.api == 'google':
        from openasr.google.speech_to_text import recognize_wav
    elif FLAGS.api == 'baidu':
        from openasr.baidu.speech_to_text import recognize_wav
    elif FLAGS.api == 'tencent':
        from openasr.tencent.speech_to_text import recognize_wav
    else:
        raise TypeError('undefined api name = {}'.format(FLAGS.api))
    base.mkdir(FLAGS.outdir)
    # Per-shard result file: result_<job_idx>_of_<job_num>.txt
    result_file = os.path.join(FLAGS.outdir, FLAGS.api, 'result_{}_of_{}.txt'.format(FLAGS.job_idx, FLAGS.job_num))
    wav_dict = reader.read_txt_to_dict(FLAGS.dataset + '/wav.scp')
    # References are normalized through the sed filter before scoring.
    txt_dict = reader.read_txt_to_dict(
        base.filter_text(FLAGS.dataset + '/text', base.BytesIO(), sed_filter_file)
    )
    # get subset: split the utterance list into job_num contiguous shards
    total_num = len(wav_dict)
    cur_num = int(np.ceil(1.0 * total_num / FLAGS.job_num))
    print(cur_num)
    key_list = list(wav_dict.keys())[FLAGS.job_idx * cur_num: FLAGS.job_idx * cur_num + cur_num]
    print('process {} of {} utterances'.format(len(key_list), len(wav_dict)))
    res_dict = load_all_results()
    print('load {} cached results.'.format(len(res_dict)))
    def hypos_refer_iter():
        # Generator yielding (hypothesis, reference, key) for compute_wer;
        # recognizes on demand, retrying up to 10 times per utterance and
        # persisting the result cache after every utterance.
        for key in key_list:
            refer_text = txt_dict[key]
            recog_text = res_dict.get(key, None)
            if FLAGS.rerun_all:
                recog_text = None
            for retry_times in range(10):
                if recog_text is None or should_retry(recog_text, refer_text):
                    if retry_times > 0:
                        print('waiting 10 seconds before retrying ...')
                        time.sleep(10.0)  # sleep 10 seconds
                    tmp_wav = audio_to_wav(wav_dict[key], os.path.join(FLAGS.outdir, 'wav', key + '.wav'))
                    recog_text = recognize_wav(tmp_wav)
                else:
                    break
            res_dict[key] = recog_text
            # Persist after each utterance so partial progress survives crashes.
            reader.write_dict_to_txt(res_dict, result_file)
            hypos = recog_text.lower()
            # Apply the same sed normalization to hypotheses as to references.
            hypos_filter = base.filter_text(base.StringIO(hypos), base.BytesIO(), sed_filter_file)
            yield reader.stream2str(hypos_filter), refer_text, key
    compute_wer(
        hypos_refer_iter(),
        special_word='<?>',
        output_stream=base.Logger(os.path.join(FLAGS.outdir, FLAGS.api,
                                                'recog_{}_of_{}.log'.format(FLAGS.job_idx, FLAGS.job_num)),
                                   'wt'))
if __name__ == '__main__':
app.run(main)
| [
"asrlib.utils.base.StringIO",
"collections.OrderedDict",
"asrlib.utils.reader.read_txt_to_dict",
"numpy.ceil",
"absl.flags.DEFINE_bool",
"absl.flags.DEFINE_integer",
"asrlib.utils.reader.write_dict_to_txt",
"os.path.join",
"absl.app.run",
"time.sleep",
"asrlib.utils.audio.parse_wav_line",
"asr... | [((214, 283), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""dataset"""', '"""testdata/dataset"""', '"""the dataset dir"""'], {}), "('dataset', 'testdata/dataset', 'the dataset dir')\n", (233, 283), False, 'from absl import logging, app, flags\n'), ((284, 355), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""outdir"""', '"""/tmp/out_of_open_asr"""', '"""the output dir"""'], {}), "('outdir', '/tmp/out_of_open_asr', 'the output dir')\n", (303, 355), False, 'from absl import logging, app, flags\n'), ((356, 430), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""api"""', '"""tencent"""', '"""baidu / google / xunfei / tencent"""'], {}), "('api', 'tencent', 'baidu / google / xunfei / tencent')\n", (375, 430), False, 'from absl import logging, app, flags\n'), ((431, 523), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""path_prefix"""', '"""testdata/dataset/"""', '"""add to the head of audio path"""'], {}), "('path_prefix', 'testdata/dataset/',\n 'add to the head of audio path')\n", (450, 523), False, 'from absl import logging, app, flags\n'), ((520, 558), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""job_num"""', '(1)', '""""""'], {}), "('job_num', 1, '')\n", (540, 558), False, 'from absl import logging, app, flags\n'), ((559, 597), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""job_idx"""', '(0)', '""""""'], {}), "('job_idx', 0, '')\n", (579, 597), False, 'from absl import logging, app, flags\n'), ((598, 683), 'absl.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""rerun_empty"""', '(False)', '"""retry if the recognized text is empty"""'], {}), "('rerun_empty', False, 'retry if the recognized text is empty'\n )\n", (615, 683), False, 'from absl import logging, app, flags\n'), ((679, 764), 'absl.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""rerun_error"""', '(True)', '"""retry if the recognition cause an error"""'], {}), "('rerun_error', True,\n 'retry if the recognition cause an error')\n", (696, 764), 
False, 'from absl import logging, app, flags\n'), ((761, 849), 'absl.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""rerun_short"""', '(False)', '"""retry if the recognized text is too short"""'], {}), "('rerun_short', False,\n 'retry if the recognized text is too short')\n", (778, 849), False, 'from absl import logging, app, flags\n'), ((846, 910), 'absl.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""rerun_all"""', '(False)', '"""rerun all the sentences"""'], {}), "('rerun_all', False, 'rerun all the sentences')\n", (863, 910), False, 'from absl import logging, app, flags\n'), ((1298, 1328), 'asrlib.utils.audio.parse_wav_line', 'audio.parse_wav_line', (['wav_line'], {}), '(wav_line)\n', (1318, 1328), False, 'from asrlib.utils import base, reader, audio\n'), ((1370, 1383), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1381, 1383), False, 'from collections import OrderedDict\n'), ((2577, 2601), 'asrlib.utils.base.mkdir', 'base.mkdir', (['FLAGS.outdir'], {}), '(FLAGS.outdir)\n', (2587, 2601), False, 'from asrlib.utils import base, reader, audio\n'), ((2734, 2785), 'asrlib.utils.reader.read_txt_to_dict', 'reader.read_txt_to_dict', (["(FLAGS.dataset + '/wav.scp')"], {}), "(FLAGS.dataset + '/wav.scp')\n", (2757, 2785), False, 'from asrlib.utils import base, reader, audio\n'), ((4662, 4675), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (4669, 4675), False, 'from absl import logging, app, flags\n'), ((1407, 1459), 'os.path.join', 'os.path.join', (['FLAGS.outdir', 'FLAGS.api', '"""result*.txt"""'], {}), "(FLAGS.outdir, FLAGS.api, 'result*.txt')\n", (1419, 1459), False, 'import os\n'), ((2981, 3021), 'numpy.ceil', 'np.ceil', (['(1.0 * total_num / FLAGS.job_num)'], {}), '(1.0 * total_num / FLAGS.job_num)\n', (2988, 3021), True, 'import numpy as np\n'), ((1527, 1553), 'asrlib.utils.reader.read_txt_to_dict', 'reader.read_txt_to_dict', (['f'], {}), '(f)\n', (1550, 1553), False, 'from asrlib.utils import base, reader, audio\n'), ((2876, 2890), 
'asrlib.utils.base.BytesIO', 'base.BytesIO', ([], {}), '()\n', (2888, 2890), False, 'from asrlib.utils import base, reader, audio\n'), ((4081, 4128), 'asrlib.utils.reader.write_dict_to_txt', 'reader.write_dict_to_txt', (['res_dict', 'result_file'], {}), '(res_dict, result_file)\n', (4105, 4128), False, 'from asrlib.utils import base, reader, audio\n'), ((4213, 4233), 'asrlib.utils.base.StringIO', 'base.StringIO', (['hypos'], {}), '(hypos)\n', (4226, 4233), False, 'from asrlib.utils import base, reader, audio\n'), ((4235, 4249), 'asrlib.utils.base.BytesIO', 'base.BytesIO', ([], {}), '()\n', (4247, 4249), False, 'from asrlib.utils import base, reader, audio\n'), ((3958, 3980), 'openasr.tencent.speech_to_text.recognize_wav', 'recognize_wav', (['tmp_wav'], {}), '(tmp_wav)\n', (3971, 3980), False, 'from openasr.tencent.speech_to_text import recognize_wav\n'), ((4286, 4317), 'asrlib.utils.reader.stream2str', 'reader.stream2str', (['hypos_filter'], {}), '(hypos_filter)\n', (4303, 4317), False, 'from asrlib.utils import base, reader, audio\n'), ((3781, 3797), 'time.sleep', 'time.sleep', (['(10.0)'], {}), '(10.0)\n', (3791, 3797), False, 'import time\n'), ((3876, 3923), 'os.path.join', 'os.path.join', (['FLAGS.outdir', '"""wav"""', "(key + '.wav')"], {}), "(FLAGS.outdir, 'wav', key + '.wav')\n", (3888, 3923), False, 'import os\n')] |
# Standard Library
import pandas as pd
import statistics as st
import numpy as np
import imdb
from datetime import datetime
from datetime import timedelta
import multiprocessing
import json
import time
import re
import random
import matplotlib.pyplot as plt
# Email Library
from email.mime.text import MIMEText as text
import smtplib
# Scraper Library
from bs4 import BeautifulSoup as soup # HTML data structure
from urllib.request import urlopen as uReq # Web client
from lxml import etree
# Twitter API Library
import tweepy
from tweepy import OAuthHandler
from tweepy.streaming import StreamListener
from tweepy import Stream
# Kafka Library
from kafka import KafkaProducer
from kafka import KafkaConsumer
# Pymongo Library
from pymongo import MongoClient
from pprint import pprint
# Pre-processing Library
from difflib import SequenceMatcher
import string
import unicodedata
import nltk
import contractions
import inflect
# Sentiment Analysis Library
from textblob import TextBlob
from nltk import word_tokenize, sent_tokenize
from nltk.corpus import stopwords
from nltk.stem import *
from google.cloud import language
from google.cloud.language import enums
from google.cloud.language import types
from google.oauth2 import service_account
# No Warning
pd.options.mode.chained_assignment = None
#! YOU ALSO NEED HTML5LIB INSTALLED; NO NEED TO IMPORT IT
#!! YOU NEED TO HAVE COMPLETED THE NLTK INSTALLATION, INCLUDING "nltk.download()"
def movie_title():
    """Interactively select one upcoming movie and return its metadata.

    Scrapes the firstshowing.net release schedule for 2019/2020, lets the
    user pick a start/end date and then a single movie, and enriches the
    choice through IMDb (US release date, genres, top-5 cast).

    :return: one-row DataFrame ['title', 'release', 'genres', 'top_5_cast']

    NOTE(review): relies on ``display(...)``, which exists only inside an
    IPython/Jupyter session -- confirm this module runs in a notebook.
    """
    #selects yearly calendar
    exit = 0
    while exit != 1:
        # Ask for a year until a valid one is given and confirmed with 'y'.
        while True:
            year_selected = str(input('Select which release calendar you want [2019/2020]: '))
            if year_selected not in ("2019", "2020"):
                print("Sorry, you selected a year out of range.")
                continue
            else:
                break
        while True:
            print("You selected: "+year_selected+". Is that correct?")
            yes_no = input('[y/n]: ')
            if (yes_no == "y") or (yes_no == "n"):
                break
            else:
                print("Sorry, you did not enter y or n.")
                continue
        while True:
            if yes_no == "n":
                is_looping = False
                break
            else:
                exit = 1
                break
    print("Please wait...")
    # URl to web scrap from.
    page_url = "https://www.firstshowing.net/schedule"+year_selected
    # opens the connection and downloads html page from url
    uClient = uReq(page_url)
    # parses html into a soup data structure to traverse html
    # as if it were a json data type.
    page_soup = soup(uClient.read(), "html.parser")
    uClient.close()
    # finds the container from the page
    #containers = page_soup.findAll("p", {"class": "sched"})
    containers = page_soup.findAll("div", {"class": "schedcontent"})
    #How many movies are releasing from 20 dec to 17 jan? (variable)
    #Create a dataframe which contains all movies release dates
    movie_dates_list = []
    # Every <h4><strong> heading on the page is a release date.
    datecontainer = page_soup.findAll("h4")
    y=0
    for container in datecontainer:
        date = container.strong.text
        y += 1
        movie_dates_list.append([date])
    movie_dates = pd.DataFrame(movie_dates_list, columns=["dates"])
    # The trailing heading is not a release date; drop it.
    movie_dates = movie_dates.drop(movie_dates.tail(1).index)
    pd.set_option('display.max_rows', movie_dates.shape[0]+1)
    display(movie_dates)
    # Pick (and confirm) the index of the start date.
    exit = 0
    while exit != 1:
        while True:
            while True:
                try:
                    movie_index_start = int(input('Enter index of start date: '))
                    break
                except ValueError:
                    print("Cannot enter null or string value.")
            if movie_index_start > len(movie_dates)-1:
                print("Sorry, you selected an index out of range.")
                continue
            else:
                break
        while True:
            print("You selected: "+movie_dates.iloc[movie_index_start]["dates"]+". Is that correct?")
            yes_no = input('[y/n]: ')
            if (yes_no == "y") or (yes_no == "n"):
                break
            else:
                print("Sorry, you did not enter y or n.")
                continue
        while True:
            if yes_no == "n":
                is_looping = False
                break
            else:
                start_date_input = movie_dates.iloc[movie_index_start]["dates"]
                exit = 1
                break
    # Pick (and confirm) the index of the end date (must not precede start).
    exit = 0
    while exit != 1:
        while True:
            while True:
                try:
                    movie_index_end = int(input('Enter index of end date: '))
                    break
                except ValueError:
                    print("Cannot enter null or string value.")
            if movie_index_end > len(movie_dates)-1:
                print("Sorry, you selected an index out of range.")
                continue
            else:
                if movie_index_end >= movie_index_start:
                    break
                else:
                    print("You must select an end date that is after the start date selected previously.")
                    continue
        while True:
            print("You selected: "+movie_dates.iloc[movie_index_end]["dates"]+". Is that correct?")
            yes_no = input('[y/n]: ')
            if (yes_no == "y") or (yes_no == "n"):
                break
            else:
                print("Sorry, you did not enter y or n.")
                continue
        while True:
            if yes_no == "n":
                is_looping = False
                break
            else:
                try:
                    # The next date heading is the exclusive upper bound...
                    end_date_input = movie_dates.iloc[movie_index_end+1]["dates"]
                except IndexError:
                    # ...or, for the last date, the closing <h4> marker.
                    end_date_input = '<h4 align="center">'
                exit = 1
                break
    print("Please wait...")
    #create list in which to store movie names
    movie_titles_list = []
    #Counts how many movies are releasing between the two specified dates.
    start_date = start_date_input+"<"
    end_date = end_date_input+"<"
    # Cut the raw HTML between the two date headings and re-parse it.
    html_str = str(containers)
    text = html_str[html_str.index(start_date)+len(start_date):html_str.index(end_date)]
    textsoup = soup(text, "html.parser")
    containers_new = textsoup.findAll("a", {"class": "showTip"})
    n_movies= len(textsoup.findAll("a", {"class": "showTip"}))
    #Get movie names from start_date to end_date
    for container in containers_new:
        title = container.text
        movie_titles_list.append([title])
    movie_titles = pd.DataFrame(movie_titles_list, columns=["title"])
    #Select one movie
    display(movie_titles)
    exit = 0
    while exit != 1:
        while True:
            while True:
                try:
                    movie_index = int(input('Select the movie of your interest. Enter index of desired movie: '))
                    break
                except ValueError:
                    print("Cannot enter null or string value.")
            if movie_index > len(movie_titles)-1:
                print("Sorry, you selected an index out of range.")
                continue
            else:
                break
        while True:
            print("You selected: "+movie_titles.iloc[movie_index]["title"]+". Is that correct?")
            yes_no = input('[y/n]: ')
            if (yes_no == "y") or (yes_no == "n"):
                break
            else:
                print("Sorry, you did not enter y or n.")
                continue
        while True:
            if yes_no == "n":
                is_looping = False
                break
            else:
                selected_movie = movie_titles.iloc[movie_index]["title"]
                exit = 1
                break
    #Now we use imdbpy to get data about the movie
    ia = imdb.IMDb()
    i = imdb.IMDb('http')
    #Create lists for each column of the new dataframe
    movie_info = pd.DataFrame( columns = ["title", "release", "genres", "top_5_cast"])
    print("Please wait...")
    #Get info like: movie title, release date in us, genres, top 5 actors
    title = selected_movie
    movies = ia.search_movie(title)
    movies_id = movies[0].movieID
    movie = i.get_movie(movies_id)
    i.update(movie, 'release dates')
    # Keep only USA entries.  The comprehension's `i` does not leak in
    # Python 3, so the IMDb object `i` is unaffected.
    release_list = [i for i in movie["release dates"] if i.startswith('USA')]
    release = []
    for z in release_list:
        # Keep only plain full dates, e.g. 'USA::17 January 2020'.
        if re.match("USA::\d+\s\w+\s\d\d\d\d$", z):
            release.append(z)
    genres = movie['genres']
    top_5_list = []
    try:
        cast = movie.get('cast')
        topActors = 5
        for actor in cast[:topActors]:
            top_5 = ("{}".format(actor['name']))
            top_5_list.append(top_5)
    except KeyError:
        cast = np.nan
    #Populate the dataframe
    movie_info.at[0,"title"] = title
    # NOTE(review): raises IndexError when no USA date matched -- confirm
    # every movie of interest carries a USA release entry.
    movie_info.at[0,"release"] = release[0].lstrip("'[USA::").rstrip("]'")
    movie_info["release"] = pd.to_datetime(movie_info["release"])
    movie_info.at[0,"genres"] = [', '.join(genres)][0]
    movie_info.at[0, "top_5_cast"] = [', '.join(top_5_list)][0]
    #Clean the data
    print("Done!")
    return movie_info
def hashtags(movie):
    """Build candidate Twitter hashtags/keywords for a movie.

    Derives lowercase, whitespace-free tags from the movie title (and its
    subtitle, if any) and from the top-5 cast names.

    :param movie: one-row DataFrame with 'title' and 'top_5_cast' columns
    :return: (movie row merged with the hashtag columns,
              list of non-null hashtag strings to track on Twitter,
              main topic string used for Kafka/NiFi)
    """
    #Takes title and actors and makes them lowercase, no whitespace, etc and creates a column for each hashtag
    # Up to 5 actor tags; title split on ': ' yields the subtitle, if any.
    hashtag_df_cast = movie["top_5_cast"].str.split(", ", expand=True).reindex(columns=np.arange(5)).add_prefix('actor_hashtag_')
    hashtag_df_title = movie["title"].str.split(": ", expand=True).reindex(columns=np.arange(2)).add_prefix('title_hashtag_')
    # Extra variants: '<title>movie' and the full original title.
    hashtag_df_title["title_hashtag_2"] = hashtag_df_title["title_hashtag_0"] +"movie"
    hashtag_df_title["title_hashtag_3"] = movie["title"]
    # Normalize: lowercase, strip spaces/quotes/dots/colons; NaN cells pass through.
    hashtag_df_cast = hashtag_df_cast.apply(np.vectorize(lambda x: x.lower().replace(" ", "").strip("'").replace(".", "") if(np.all(pd.notnull(x))) else x))
    hashtag_df_title = hashtag_df_title.apply(np.vectorize(lambda x: x.lower().replace(" ", "").replace(":", "") if(np.all(pd.notnull(x))) else x))
    # No subtitle -> the 'full title' variant duplicates hashtag_0; drop it.
    hashtag_df_title.at[(hashtag_df_title["title_hashtag_1"].isnull() == True), "title_hashtag_3"] = np.nan
    movie_info_hashtags = pd.concat([movie, hashtag_df_title, hashtag_df_cast], axis=1).replace(to_replace=["None"], value=np.nan)
    hashtags_only_df = pd.concat([hashtag_df_title, hashtag_df_cast], axis=1).replace(to_replace=["None"], value=np.nan)
    # Flatten the first row's non-null hashtags into the tracking list.
    query = hashtags_only_df.T.apply(lambda x: x.dropna().tolist())[0].tolist()
    mytopic = str(movie_info_hashtags.iloc[0]["title_hashtag_0"])
    return movie_info_hashtags, query, mytopic
def nifi_template_changer(template, mytopic):
    """Rewrite a NiFi XML template so its placeholder names point at the
    movie's topic, then save the result as 'new_template.xml'.

    :param template: path of the template XML file to read
    :param str mytopic: topic/collection name to substitute in
    """
    placeholders = ("mycollectionname", "mytopicname")
    parsed = etree.parse(template)
    # Walk every element; any placeholder text becomes the real topic name.
    for node in parsed.iterfind("//*"):
        if node.text in placeholders:
            node.text = mytopic
    parsed.write('new_template.xml', pretty_print=True)
def key():
    """Prompt for the four Twitter API credentials, in order, and return
    them as (consumer_key, consumer_secret, access_key, access_secret)."""
    prompts = (
        "Please type your consumer key: ",
        "Please type your consumer secret key: ",
        "Please type your access key: ",
        "Please type your access secret key: ",
    )
    # Unpacking consumes the generator left-to-right, so the prompts are
    # shown in the same order as the original four input() calls.
    consumer_key, consumer_secret, access_key, access_secret = (
        str(input(p)) for p in prompts)
    return consumer_key, consumer_secret, access_key, access_secret
def stream(stop, keys, mytopic, query):
    """Stream tweets matching `query` from Twitter into the Kafka topic
    `mytopic` (serialized as JSON).

    :param stop: zero-arg callable polled as a stop flag.  NOTE(review):
        stream.filter() blocks, so this check is rarely reached in
        practice; the parent process kills this one instead (see starter).
    :param keys: (consumer_key, consumer_secret, access_key, access_secret)
    :param str mytopic: Kafka topic name to publish tweets to
    :param query: list of track keywords/hashtags
    """
    while True:
        consumer_key = keys[0]
        consumer_secret = keys[1]
        access_key = keys[2]
        access_secret = keys[3]
        # Listener that forwards every incoming status to Kafka.
        class MyListener(StreamListener):
            def __init__(self, producer, producer_topic):
                super().__init__()
                self.producer = producer
                self.producer_topic = producer_topic
            def on_status(self, status):
                is_retweet = hasattr(status, "retweeted_status")
                is_ext = hasattr(status, "extended_tweet")
                # Prefer the full (extended) text when Twitter provides it.
                if hasattr(status,"extended_tweet"):
                    text = status.extended_tweet["full_text"]
                else:
                    text = status.text
                tweet = {
                    'user_id': status.user.id,
                    'username': status.user.name,
                    'screen_name': status.user.screen_name,
                    'text': text,
                    'which_lang' : status.lang,
                    'is_RT' : is_retweet,
                    'is_EXT' : is_ext,
                }
                self.producer.send(topic=self.producer_topic, value=tweet)
        auth = OAuthHandler(consumer_key, consumer_secret)
        auth.set_access_token(access_key, access_secret)
        api = tweepy.API(auth)
        # 'kafka:9092' is the docker-compose service address.
        producer = KafkaProducer(
            bootstrap_servers=["kafka:9092"],
            value_serializer=lambda v: json.dumps(v).encode("utf-8"))
        listener = MyListener(producer = producer, producer_topic = mytopic)
        stream = Stream(auth=api.auth,tweet_mode='extended',listener=listener)
        # Blocking call: only English tweets matching the query are delivered.
        stream.filter(track=query, languages=["en"])
        if stop():
            break
def ask_time():
    """Ask the user for a collection duration in minutes and return it in
    seconds.  Re-prompts until an integer is entered."""
    minutes = None
    while minutes is None:
        try:
            minutes = int(input("For how long do you want to collect tweets? (in minutes - integer): "))
        except ValueError:
            print("Sorry, you have not inputed an integer number.")
    return minutes * 60
def get_email():
    """Ask whether the user wants a completion e-mail.

    :return: the confirmed e-mail address, or 0 if the user declined
    """
    # First question: does the user want a notification at all?
    while True:
        print("Do you wanna receive an email when the collection completes?")
        yes_no = str(input('[y/n]: '))
        if yes_no in ("y", "n"):
            break
        print("Sorry, you did not enter y or n.")
    if yes_no == "n":
        return 0
    # Ask for the address until the user confirms it with 'y'.
    while True:
        email_entered = str(input("Please type in your email: "))
        print("You entered: "+email_entered+". Is that correct?")
        yes_no_mail = input('[y/n]: ')
        if yes_no_mail == "y":
            return email_entered
        if yes_no_mail == "n":
            print("c")  # debug print kept verbatim from the original
        else:
            print("Sorry, you did not enter y or n.")
def send_email_finish(email):
    """Send a 'collection finished' notification to `email` through
    Gmail's SSL SMTP endpoint."""
    # Build the message first, then open the connection.
    body = text("The tweet collector have finished,\nNow you can check on Mongodb typing: \nlocalhost:8081 on your local browser.")
    body['Subject'] = '***TWEET COLLECTION FINISHED***'
    body['From'] = "<EMAIL>"
    body['To'] = email
    smtp = smtplib.SMTP_SSL('smtp.gmail.com', 465)
    smtp.login("<EMAIL>", "DataMan2019!")
    smtp.sendmail("<EMAIL>", email, body.as_string())
    smtp.quit()
def starter(timeout, keys, mytopic, query, email):
    """Run the tweet stream in a daemon child process for `timeout`
    seconds, then kill it and optionally e-mail the user.

    :param int timeout: collection duration in seconds
    :param keys: the four Twitter API credentials (see key())
    :param str mytopic: Kafka topic to publish tweets to
    :param query: list of track keywords
    :param email: address to notify, or 0 to skip the notification
    """
    stop_process = False
    # Bug fix: the original passed `lambda: stop_threads`, but no name
    # `stop_threads` is defined anywhere, so evaluating the stop callback
    # raised NameError.  Use the flag actually defined above.
    # NOTE: the flag does not propagate into the child process either way;
    # the stream is effectively stopped by terminate() below.
    streamf = multiprocessing.Process(target = stream, args =(lambda : stop_process, keys, mytopic, query))
    streamf.daemon = True
    streamf.start()
    time.sleep(timeout)
    stop_process = True
    streamf.terminate()
    if email!=0:
        send_email_finish(email)
    streamf.join()
    print('Process killed')
    print("Is stream alive?")
    print(streamf.is_alive())
def get_database_coll():
    """Interactively pick a MongoDB database and collection and return all
    of the collection's documents as a pandas DataFrame.

    Connects to host 'mongo' on port 27017 (the docker-compose service
    name), so this only works inside the project's compose network.

    NOTE(review): uses ``display(...)``, which exists only inside an
    IPython/Jupyter session -- confirm this runs in a notebook.
    """
    #Variable with client info
    client = MongoClient('mongo', 27017, username='admin', password='<PASSWORD>!')
    #Choose database
    print('List of all databases in mongodb')
    db_names= pd.DataFrame(client.list_database_names(), columns = ["db_name"])
    display(db_names)
    exit = 0
    while exit != 1:
        # Ask for a database index until it is valid and confirmed with 'y'.
        while True:
            while True:
                try:
                    db_index = int(input('Select the database of your interest. Enter index of desired db: '))
                    break
                except ValueError:
                    print("Cannot enter null or string value.")
            if db_index > len(db_names)-1:
                print("Sorry, you selected an index out of range.")
                continue
            else:
                break
        while True:
            print("You selected: "+db_names.iloc[db_index]["db_name"]+". Is that correct?")
            yes_no = input('[y/n]: ')
            if (yes_no == "y") or (yes_no == "n"):
                break
            else:
                print("Sorry, you did not enter y or n.")
                continue
        while True:
            if yes_no == "n":
                is_looping = False
                break
            else:
                selected_db = db_names.iloc[db_index]["db_name"]
                exit = 1
                break
    #Choose collection
    print('List of all databases in the selected database')
    coll_names= pd.DataFrame(client[selected_db].list_collection_names(), columns = ["collection_name"])
    display(coll_names)
    exit = 0
    while exit != 1:
        # Same index-pick-and-confirm flow for the collection.
        while True:
            while True:
                try:
                    coll_index = int(input('Select the collection of your interest. Enter index of desired collection: '))
                    break
                except ValueError:
                    print("Cannot enter null or string value.")
            if coll_index > len(coll_names)-1:
                print("Sorry, you selected an index out of range.")
                continue
            else:
                break
        while True:
            print("You selected: "+coll_names.iloc[coll_index]["collection_name"]+". Is that correct?")
            yes_no = input('[y/n]: ')
            if (yes_no == "y") or (yes_no == "n"):
                break
            else:
                print("Sorry, you did not enter y or n.")
                continue
        while True:
            if yes_no == "n":
                is_looping = False
                break
            else:
                selected_coll = coll_names.iloc[coll_index]["collection_name"]
                exit = 1
                break
    db = client[selected_db]
    collection = db[selected_coll]
    data_python = collection.find()
    #Transform collection from json to pandas dataframe
    # NOTE(review): max_colwidth=-1 is deprecated in newer pandas (use None).
    pd.set_option('display.max_colwidth', -1)
    df = pd.DataFrame(list(data_python))
    return df
def clean_tweet_auto(dataframe):
    """Clean the raw tweet DataFrame and return an array of cleaned texts.

    Keeps only original (non-retweet) tweets whose text contains no link,
    then expands contractions and strips URLs/e-mails, tabs, non-ASCII
    characters and punctuation.

    :param dataframe: DataFrame with at least 'text' and 'is_RT' columns
    :return: list/array of cleaned tweet strings
    """
    #Delete metada
    def remove_metadata(rows,start):
        # Skip everything up to (and including) the first '\n' row, if any.
        for i in range(len(rows)):
            if(rows[i] == '\n'):
                start = i+1
                break
        new_doc_start = rows[start:]
        return new_doc_start
    #Delete contractions
    def replace_contractions(rows):
        new_doc = []
        for row in rows:
            new_row = contractions.fix(row)
            new_doc.append(new_row)
        return new_doc
    #Delete URL and e-mail
    def remove_url_mail(rows):
        new_doc = []
        for row in rows:
            new_row = re.sub(r'\w+:\/{2}[\d\w-]+(\.[\d\w-]+)*(?:(?:\/[^\s/]*))*', '', row)
            new_row = re.sub(r'[\w\.-]+@[\w\.-]+', '', new_row)
            new_doc.append(new_row)
        return new_doc
    #Delete empty rows and tabs
    def remove_tabs(tokens):
        table= str.maketrans('','','\t\n\r')
        new = [token.translate(table) for token in tokens]
        return new
    #Delete non unicode characters
    def remove_non_unicode(tokens):
        new_tokens = []
        for token in tokens:
            # Decompose accents, then drop anything that is not ASCII.
            new_token = unicodedata.normalize('NFKD', token).encode('ascii', 'ignore').decode('utf-8', 'ignore')
            new_tokens.append(new_token)
        return new_tokens
    #Delete punctuation
    def remove_punctuation(tokens):
        table= str.maketrans('','', string.punctuation)
        new = [token.translate(table) for token in tokens]
        new = [str for str in new if str]
        return new
    #Text stemming e lemmatization
    # NOTE(review): defined but never called in the pipeline below.
    def stem_and_lem(tokens):
        stemmer = PorterStemmer()
        lemmatizer = WordNetLemmatizer()
        stemmed = [stemmer.stem(token) for token in tokens]
        lemmatized = [lemmatizer.lemmatize(token, pos='v') for token in tokens]
        return stemmed,lemmatized
    # pre processing,take only non-retweet
    # `~` binds tighter than `==`, so this reads (~is_RT) == True,
    # i.e. keep rows where is_RT is False.
    df = dataframe.loc[~dataframe.is_RT == True]
    #Delete tweets that contains an http (usually ads)
    df['indexes'] = df['text'].str.find('http')
    # `~` binds tighter than `>`: (~n) > -1 holds only when n == -1,
    # i.e. keep rows whose text contains no 'http' at all.
    df = df.loc[~df.indexes > -1]
    array_text = df.text.values
    array_text = remove_metadata(array_text,0)
    array_text = replace_contractions(array_text)
    array_text = remove_url_mail(array_text)
    array_text = remove_tabs(array_text)
    array_text = remove_non_unicode(array_text)
    array_text = remove_punctuation(array_text)
    print('\nAll tweets are clean')
    return array_text
def which_sentiment():
    """Let the user choose the sentiment backend.

    :return: 'textblob' or 'google'
    """
    while True:
        print("You can use Textblob sentiment analyzer or Google NLU service. Which one do you prefer?")
        choice = str(input('[textblob/google]: '))
        if choice not in ("textblob", "google"):
            print("Sorry, you must select one of the two services.")
            continue
        # Confirm the choice; 'n' restarts the whole selection.
        while True:
            print("You selected: "+choice+". Is that correct?")
            yes_no = input('[y/n]: ')
            if yes_no in ("y", "n"):
                break
            print("Sorry, you did not enter y or n.")
        if yes_no == "y":
            return choice
def sentiment_textblob(array):
    """Score tweets with TextBlob.

    Interactively asks how many tweets to analyse and whether to sample
    them at random, then scores each analysed tweet.

    :param array: sequence of cleaned tweet strings
    :return: DataFrame with per-tweet 'score' (polarity, [-1, 1]) and
        'magnitude' (subjectivity, [0, 1])
    """
    print('\nThere are',len(array),'tweets in your database\n')
    df_result=pd.DataFrame()
    exit = 0
    while exit != 1:
        while True:
            while True:
                try:
                    num_tweet = int(input('\nHow many tweet you wanna analyze?\n'))
                    break
                except ValueError:
                    print("Cannot enter null or string value.")
            if num_tweet < 0:
                print("Sorry, you selected a negative number of tweets.")
                continue
            elif num_tweet > len(array):
                # Bug fix: the original concatenated str + int here, which
                # raised TypeError instead of printing the message.
                print("Sorry, you only have "+str(len(array))+" in your collection.")
            else:
                break
        while True:
            print('\nDo you want do a random sampling?')
            yes_no = input('[y/n]: ')
            if yes_no in ("y", "n"):
                if (yes_no == "y"):
                    array=random.choices(array, k=num_tweet)
                    break
                else:
                    print("Ok. No random sampling.")
                    break
            else:
                print("Sorry, you did not enter y or n.")
                continue
        exit = 1
    # Bug fix / consistency with google_analyze_tweet: only analyse the
    # requested number of tweets (after sampling, len(array) == num_tweet,
    # but without sampling the original scored ALL tweets).
    for i in range(num_tweet):
        text = TextBlob(array[i])
        df_result = df_result.append({'score': text.sentiment.polarity, 'magnitude' : text.sentiment.subjectivity}, ignore_index=True)
    return df_result
def google_analyze_tweet(array):
    """Score tweets with the Google Cloud Natural Language API.

    Asks for the Google credentials file (expected in /data/my-data/),
    how many tweets to analyse and whether to sample them at random.

    :param array: sequence of cleaned tweet strings
    :return: DataFrame with per-tweet 'score' and 'magnitude'
    """
    print('\nThere are',len(array),'tweets in your database\n')
    df_result = pd.DataFrame()
    print('***IMPORTANT***\n')
    print('You need copy and paste your google credentials.json in my-data folder before continuing!\n')
    while True:
        print("Type [y] when you have done it: ")
        yes = input('[y]: ')
        if (yes == "y"):
            break
        else:
            print("Sorry, you did not enter y.")
            continue
    while True:
        name_cred = str(input("\nPlease insert the name of your Google credential file: \n"))
        dire_cred = '/data/my-data/' + name_cred + '.json'
        #Does the file exists in my:data?
        try:
            with open(dire_cred) as f:
                # google credentials
                creds = service_account.Credentials.from_service_account_file(dire_cred)
                client = language.LanguageServiceClient(credentials=creds)
                exit = 0
                while exit != 1:
                    while True:
                        while True:
                            try:
                                num_tweet = int(input('\nHow many tweet you wanna analyze?\n'))
                                break
                            except ValueError:
                                print("Cannot enter null or string value.")
                        if num_tweet < 0:
                            print("Sorry, you selected a negative number of tweets.")
                            continue
                        elif num_tweet > len(array):
                            # Bug fix: the original concatenated str + int
                            # here, which raised TypeError.
                            print("Sorry, you only have "+str(len(array))+" in your collection.")
                        else:
                            break
                    while True:
                        print('\nDo you want do a random sampling?')
                        yes_no = input('[y/n]: ')
                        if yes_no in ("y", "n"):
                            if (yes_no == "y"):
                                array=random.choices(array, k=num_tweet)
                                break
                            else:
                                print("Ok. No random sampling.")
                                break
                        else:
                            print("Sorry, you did not enter y or n.")
                            continue
                    exit = 1
                #Analyzing tweets
                for i in range(num_tweet):
                    text = array[i]
                    document = types.Document(
                        content=text,
                        type=enums.Document.Type.PLAIN_TEXT)
                    sentiment = client.analyze_sentiment(document=document).document_sentiment
                    df_result = df_result.append({'score': sentiment.score, 'magnitude' : sentiment.magnitude}, ignore_index=True)
                break
        except IOError:
            print("File not accessible or not existing in that directory.")
            print('You need copy and paste your google credentials.json in my-data folder.\n')
            print("Please check if the file is accessible and if the filename is correct.")
            continue
    return df_result
#Matcher to find the correct movie title
def matcher (df, title):
    """Similarity ratio (0..1) between `title` and the row's 'Release' field,
    used to pick the best-matching box-office row for a movie."""
    release_name = df["Release"]
    return SequenceMatcher(None, title, release_name).ratio()
def box_office(selected_movie_title, selected_movie_date):
    """Aggregate the movie's US daily gross for days 2-8 after release.

    Scrapes boxofficemojo.com daily charts and, for each day, picks the
    row whose 'Release' best matches `selected_movie_title` (via matcher).

    :param str selected_movie_title: movie title to look up
    :param selected_movie_date: release date (pandas Timestamp/datetime)
    :return: one-row DataFrame ['Release', 'Gross'], or None when fewer
        than 8 days have passed or no data was found (callers handle the
        None with try/except TypeError).
    """
    print("Please wait...")
    #Creates empty list
    boxoff_week = pd.DataFrame(columns = ['Release', 'Gross'])
    #Checks if 8 days has passed from the release
    current_day = pd.to_datetime(datetime.now().date())
    if (current_day - selected_movie_date).days < 8:
        print("Sorry. Not enough days has passed to aggregate 7 days data.")
    else:
        #Scrape data from boxofficemojo for the 7 days after the tweets collection
        try:
            delta_day = 2
            selected_boxoff = 0
            while delta_day < 9:
                boxoff_daily = pd.read_html("https://www.boxofficemojo.com/date/"+(selected_movie_date+timedelta(days=delta_day)).strftime('%Y-%m-%d'))[0]
                boxoff_daily = boxoff_daily[["Release", "Daily"]]
                boxoff_daily["titlematch"] = boxoff_daily.apply(matcher, args=[selected_movie_title], axis=1)
                # Bug fix: '$' is a regex metacharacter; the original relied
                # on pandas' version-dependent handling of single-char
                # patterns.  regex=False makes the literal strip explicit
                # and portable across pandas versions.
                boxoff_daily['Daily'] = boxoff_daily['Daily'].str.replace(',', '', regex=False)
                boxoff_daily['Daily'] = boxoff_daily['Daily'].str.replace('$', '', regex=False)
                boxoff_daily['Daily'] = boxoff_daily['Daily'].astype(int)
                # Add the gross of the best-matching title for this day.
                selected_boxoff += boxoff_daily.loc[boxoff_daily["titlematch"] == boxoff_daily["titlematch"].max(),"Daily"].values[0]
                delta_day+=1
            boxoff_week.at[0,"Release"] = selected_movie_title
            boxoff_week.at[0,"Gross"] = selected_boxoff
            return boxoff_week
        except ValueError:
            print("No data found on boxofficemojo.com.")
#____________________________________________________________________________________________________________________
#FULL FUNCTIONS
def tweet_collector():
    """End-to-end interactive pipeline: pick a movie, build its hashtag
    topic for Kafka/NiFi, gather credentials, and stream tweets for the
    chosen duration."""
    def wait_for_ready():
        # Block until the user literally types 'y'.
        while True:
            print("Type [y] when you're ready: ")
            if input('[y]: ') == "y":
                return
            print("Sorry, you did not enter y.")

    print("Please start the following services via shell:")
    print("zookeeper, kafka, mongo, nifi")
    wait_for_ready()
    movies = movie_title()
    movies_info_hashtags, query, mytopic = hashtags(movies)
    print("The topic for kafka and nifi is: "+str(mytopic))
    print("Do you want to create a nifi template with correct topic names?")
    while True:
        answer = input('[y/n]: ')
        if answer == "y":
            nifi_template_changer("template.xml", mytopic)
            print("Please upload the template to nifi and start all services.")
            break
        if answer == "n":
            print("Ok. Remember to check you nifi template's settings and to start all services.")
            break
        print("Sorry, you did not enter y or n.")
    wait_for_ready()
    timeout = ask_time()
    keys = key()
    email = get_email()
    print("Are you ready to start tweets' collection?")
    wait_for_ready()
    starter(timeout, keys, mytopic, query, email)
def sentiment():
    """Score the tweets of a user-selected Mongo collection.

    Asks which backend to use (TextBlob or Google NLU), pulls and cleans
    the collection's tweets, then summarizes the per-tweet scores.

    :return: (mean score, mean magnitude, fraction of non-negative scores,
        score stdev, magnitude stdev), all floats
    """
    #Asks the user which sentiment service wants to use
    sentiment_type = which_sentiment()
    #Returns the weighted geometric mean of the score*magnitude for the selected collection
    tweet_df = get_database_coll()
    tweets_array = clean_tweet_auto(tweet_df)
    if sentiment_type == "textblob":
        sentiment_df = sentiment_textblob(tweets_array)
    else:
        sentiment_df = google_analyze_tweet(tweets_array)
    # The summary statistics are identical for both backends, so they are
    # computed once here instead of being duplicated per branch.
    mean_magnitude = sentiment_df.magnitude.mean()
    mean_sentiment = sentiment_df.score.mean()
    mean_sentiment_perc_pos = len(sentiment_df[sentiment_df.score >= 0])/len(sentiment_df)
    # Bug fix: the statistics module is imported as `st`; the textblob
    # branch referenced the undefined name `statistics` (NameError).
    std_magnitude = round(st.stdev(sentiment_df.magnitude),4)
    std_score = round(st.stdev(sentiment_df.score),4)
    return mean_sentiment, mean_magnitude, mean_sentiment_perc_pos, std_score, std_magnitude
def sentiment_boxoffice_all():
    """Interactively build a table with box-office gross plus sentiment
    statistics for one or more user-selected movies.

    :return: DataFrame with one row per movie (Release, Gross, Genres and
        the sentiment summary columns)
    """
    boxoffice_sentiment_all = pd.DataFrame()
    keep_going = True
    while keep_going:
        selected_movie = movie_title()
        movie_name = selected_movie.iloc[0]["title"]
        movie_release = selected_movie.iloc[0]["release"]
        print("Please wait...")
        try:
            # box_office returns None when data is missing; indexing it then
            # raises TypeError, which is handled below.
            row = box_office(movie_name, movie_release)
            row = row[["Release", "Gross"]]
            row["Genres"] = selected_movie.iloc[0]["genres"]
            (row["sentiment_Avg"], row["magnitude_Avg"],
             row["sentiment_pos_percentage"], row["sentiment_std_score"],
             row["sentiment_std_magnitude"]) = sentiment()
            row["sentiment_neg_percentage"] = 1 - row["sentiment_pos_percentage"]
            boxoffice_sentiment_all = boxoffice_sentiment_all.append(row, ignore_index=True)
        except TypeError:
            print("No data found.")
        print("Do you want to add more movies?")
        while True:
            yes_no = input('[y/n]: ')
            if yes_no in ("y", "n"):
                break
            print("Sorry, you did not enter y or n.")
        if yes_no == "y":
            print("Ok. Please select another movie to add.")
        else:
            print("Ok.")
            keep_going = False
    return boxoffice_sentiment_all
def spearman_corr(df):
    """Return the Spearman rank-correlation matrix of `df`'s columns."""
    return df.corr(method="spearman")
| [
"statistics.stdev",
"google.cloud.language.LanguageServiceClient",
"smtplib.SMTP_SSL",
"multiprocessing.Process",
"time.sleep",
"random.choices",
"pymongo.MongoClient",
"datetime.timedelta",
"pandas.notnull",
"pandas.to_datetime",
"numpy.arange",
"textblob.TextBlob",
"google.oauth2.service_a... | [((2499, 2513), 'urllib.request.urlopen', 'uReq', (['page_url'], {}), '(page_url)\n', (2503, 2513), True, 'from urllib.request import urlopen as uReq\n'), ((3213, 3262), 'pandas.DataFrame', 'pd.DataFrame', (['movie_dates_list'], {'columns': "['dates']"}), "(movie_dates_list, columns=['dates'])\n", (3225, 3262), True, 'import pandas as pd\n'), ((3329, 3388), 'pandas.set_option', 'pd.set_option', (['"""display.max_rows"""', '(movie_dates.shape[0] + 1)'], {}), "('display.max_rows', movie_dates.shape[0] + 1)\n", (3342, 3388), True, 'import pandas as pd\n'), ((6289, 6314), 'bs4.BeautifulSoup', 'soup', (['text', '"""html.parser"""'], {}), "(text, 'html.parser')\n", (6293, 6314), True, 'from bs4 import BeautifulSoup as soup\n'), ((6621, 6671), 'pandas.DataFrame', 'pd.DataFrame', (['movie_titles_list'], {'columns': "['title']"}), "(movie_titles_list, columns=['title'])\n", (6633, 6671), True, 'import pandas as pd\n'), ((7877, 7888), 'imdb.IMDb', 'imdb.IMDb', ([], {}), '()\n', (7886, 7888), False, 'import imdb\n'), ((7897, 7914), 'imdb.IMDb', 'imdb.IMDb', (['"""http"""'], {}), "('http')\n", (7906, 7914), False, 'import imdb\n'), ((7987, 8053), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['title', 'release', 'genres', 'top_5_cast']"}), "(columns=['title', 'release', 'genres', 'top_5_cast'])\n", (7999, 8053), True, 'import pandas as pd\n'), ((8981, 9018), 'pandas.to_datetime', 'pd.to_datetime', (["movie_info['release']"], {}), "(movie_info['release'])\n", (8995, 9018), True, 'import pandas as pd\n'), ((10649, 10670), 'lxml.etree.parse', 'etree.parse', (['template'], {}), '(template)\n', (10660, 10670), False, 'from lxml import etree\n'), ((14430, 14469), 'smtplib.SMTP_SSL', 'smtplib.SMTP_SSL', (['"""smtp.gmail.com"""', '(465)'], {}), "('smtp.gmail.com', 465)\n", (14446, 14469), False, 'import smtplib\n'), ((14522, 14654), 'email.mime.text.MIMEText', 'text', (['"""The tweet collector have finished,\nNow you can check on Mongodb typing: 
\nlocalhost:8081 on your local browser."""'], {}), '(\n """The tweet collector have finished,\nNow you can check on Mongodb typing: \nlocalhost:8081 on your local browser."""\n )\n', (14526, 14654), True, 'from email.mime.text import MIMEText as text\n'), ((14929, 15023), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'stream', 'args': '(lambda : stop_threads, keys, mytopic, query)'}), '(target=stream, args=(lambda : stop_threads, keys,\n mytopic, query))\n', (14952, 15023), False, 'import multiprocessing\n'), ((15073, 15092), 'time.sleep', 'time.sleep', (['timeout'], {}), '(timeout)\n', (15083, 15092), False, 'import time\n'), ((15368, 15437), 'pymongo.MongoClient', 'MongoClient', (['"""mongo"""', '(27017)'], {'username': '"""admin"""', 'password': '"""<PASSWORD>!"""'}), "('mongo', 27017, username='admin', password='<PASSWORD>!')\n", (15379, 15437), False, 'from pymongo import MongoClient\n'), ((21862, 21876), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (21874, 21876), True, 'import pandas as pd\n'), ((23307, 23321), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (23319, 23321), True, 'import pandas as pd\n'), ((26690, 26732), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Release', 'Gross']"}), "(columns=['Release', 'Gross'])\n", (26702, 26732), True, 'import pandas as pd\n'), ((31219, 31233), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (31231, 31233), True, 'import pandas as pd\n'), ((8461, 8508), 're.match', 're.match', (['"""USA::\\\\d+\\\\s\\\\w+\\\\s\\\\d\\\\d\\\\d\\\\d$"""', 'z'], {}), "('USA::\\\\d+\\\\s\\\\w+\\\\s\\\\d\\\\d\\\\d\\\\d$', z)\n", (8469, 8508), False, 'import re\n'), ((12428, 12471), 'tweepy.OAuthHandler', 'OAuthHandler', (['consumer_key', 'consumer_secret'], {}), '(consumer_key, consumer_secret)\n', (12440, 12471), False, 'from tweepy import OAuthHandler\n'), ((12543, 12559), 'tweepy.API', 'tweepy.API', (['auth'], {}), '(auth)\n', (12553, 12559), False, 'import tweepy\n'), ((12807, 
12870), 'tweepy.Stream', 'Stream', ([], {'auth': 'api.auth', 'tweet_mode': '"""extended"""', 'listener': 'listener'}), "(auth=api.auth, tweet_mode='extended', listener=listener)\n", (12813, 12870), False, 'from tweepy import Stream\n'), ((18189, 18230), 'pandas.set_option', 'pd.set_option', (['"""display.max_colwidth"""', '(-1)'], {}), "('display.max_colwidth', -1)\n", (18202, 18230), True, 'import pandas as pd\n'), ((23018, 23036), 'textblob.TextBlob', 'TextBlob', (['array[i]'], {}), '(array[i])\n', (23026, 23036), False, 'from textblob import TextBlob\n'), ((30973, 31005), 'statistics.stdev', 'st.stdev', (['sentiment_df.magnitude'], {}), '(sentiment_df.magnitude)\n', (30981, 31005), True, 'import statistics as st\n'), ((31031, 31059), 'statistics.stdev', 'st.stdev', (['sentiment_df.score'], {}), '(sentiment_df.score)\n', (31039, 31059), True, 'import statistics as st\n'), ((10172, 10233), 'pandas.concat', 'pd.concat', (['[movie, hashtag_df_title, hashtag_df_cast]'], {'axis': '(1)'}), '([movie, hashtag_df_title, hashtag_df_cast], axis=1)\n', (10181, 10233), True, 'import pandas as pd\n'), ((10300, 10354), 'pandas.concat', 'pd.concat', (['[hashtag_df_title, hashtag_df_cast]'], {'axis': '(1)'}), '([hashtag_df_title, hashtag_df_cast], axis=1)\n', (10309, 10354), True, 'import pandas as pd\n'), ((18699, 18720), 'contractions.fix', 'contractions.fix', (['row'], {}), '(row)\n', (18715, 18720), False, 'import contractions\n'), ((18906, 18982), 're.sub', 're.sub', (['"""\\\\w+:\\\\/{2}[\\\\d\\\\w-]+(\\\\.[\\\\d\\\\w-]+)*(?:(?:\\\\/[^\\\\s/]*))*"""', '""""""', 'row'], {}), "('\\\\w+:\\\\/{2}[\\\\d\\\\w-]+(\\\\.[\\\\d\\\\w-]+)*(?:(?:\\\\/[^\\\\s/]*))*', '', row)\n", (18912, 18982), False, 'import re\n'), ((18997, 19041), 're.sub', 're.sub', (['"""[\\\\w\\\\.-]+@[\\\\w\\\\.-]+"""', '""""""', 'new_row'], {}), "('[\\\\w\\\\.-]+@[\\\\w\\\\.-]+', '', new_row)\n", (19003, 19041), False, 'import re\n'), ((26508, 26551), 'difflib.SequenceMatcher', 'SequenceMatcher', (['None', 
'title', "df['Release']"], {}), "(None, title, df['Release'])\n", (26523, 26551), False, 'from difflib import SequenceMatcher\n'), ((24005, 24069), 'google.oauth2.service_account.Credentials.from_service_account_file', 'service_account.Credentials.from_service_account_file', (['dire_cred'], {}), '(dire_cred)\n', (24058, 24069), False, 'from google.oauth2 import service_account\n'), ((24095, 24144), 'google.cloud.language.LanguageServiceClient', 'language.LanguageServiceClient', ([], {'credentials': 'creds'}), '(credentials=creds)\n', (24125, 24144), False, 'from google.cloud import language\n'), ((26818, 26832), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (26830, 26832), False, 'from datetime import datetime\n'), ((9420, 9432), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (9429, 9432), True, 'import numpy as np\n'), ((9546, 9558), 'numpy.arange', 'np.arange', (['(2)'], {}), '(2)\n', (9555, 9558), True, 'import numpy as np\n'), ((22691, 22725), 'random.choices', 'random.choices', (['array'], {'k': 'num_tweet'}), '(array, k=num_tweet)\n', (22705, 22725), False, 'import random\n'), ((25742, 25807), 'google.cloud.language.types.Document', 'types.Document', ([], {'content': 'text', 'type': 'enums.Document.Type.PLAIN_TEXT'}), '(content=text, type=enums.Document.Type.PLAIN_TEXT)\n', (25756, 25807), False, 'from google.cloud.language import types\n'), ((9865, 9878), 'pandas.notnull', 'pd.notnull', (['x'], {}), '(x)\n', (9875, 9878), True, 'import pandas as pd\n'), ((10013, 10026), 'pandas.notnull', 'pd.notnull', (['x'], {}), '(x)\n', (10023, 10026), True, 'import pandas as pd\n'), ((12680, 12693), 'json.dumps', 'json.dumps', (['v'], {}), '(v)\n', (12690, 12693), False, 'import json\n'), ((19430, 19466), 'unicodedata.normalize', 'unicodedata.normalize', (['"""NFKD"""', 'token'], {}), "('NFKD', token)\n", (19451, 19466), False, 'import unicodedata\n'), ((25222, 25256), 'random.choices', 'random.choices', (['array'], {'k': 'num_tweet'}), '(array, 
k=num_tweet)\n', (25236, 25256), False, 'import random\n'), ((27271, 27296), 'datetime.timedelta', 'timedelta', ([], {'days': 'delta_day'}), '(days=delta_day)\n', (27280, 27296), False, 'from datetime import timedelta\n')] |
from simulator.utils.basic_utils import *
import numpy as np
import pandas as pd
from operator import itemgetter
from itertools import groupby
def rmse(x, y):
x, y = new_array(x), new_array(y)
return np.sqrt(np.mean((x-y)**2))
def row_norm(mat):
""" Compute the norm of a set of vectors, each of which is the row of a
numpy matrix.
:param matrix mat: The matrix that contains the vectors
:return array: The norm of each row
"""
mat = mat.values if is_pandas(mat) else np.array(mat)
return np.sum(np.abs(mat)**2,axis=-1)**(1./2)
def row_dot(mat1, mat2):
""" Compute dot product for a set of vectors, each of which is the row
of a numpy matrix
:param matrix mat1: The matrix that contains one set of vectors
:param matrix mat2: The matrix that contains the other set of vectors
:return array: The dot of the rows
"""
mat1 = mat1.values if is_pandas(mat1) else np.array(mat1)
mat2 = mat2.values if is_pandas(mat2) else np.array(mat2)
return np.einsum('ij,ij->i', mat1, mat2)
def row_angle(mat1, mat2, units='deg'):
angle = np.arccos(row_dot(mat1, mat2)/(row_norm(mat1)*row_norm(mat2)))
if units == 'rad':
return angle
elif units == 'deg':
return np.rad2deg(angle)
else:
raise ValueError('Units {} for an angle are not valid. Choices are deg or rad')
def group_consecutives(vals, just_ends=False):
"""Return list of consecutive lists of numbers from vals (number list).
From: https://stackoverflow.com/questions/2154249/identify-groups-of-continuous-numbers-in-a-list
:param list/tuple/np.array vals:
:param bool just_ends: Default is False, see examples
Example 1: vals = [2, 3, 4, 5, 12, 13, 14, 15, 16, 17]
res = group_consecutives(vals)
res = [(2, 3, 4, 5), (12, 13, 14, 15, 16, 17)]
Example 2: vals = [2, 3, 4, 5, 12, 13, 14, 15, 16, 17]
res = group_consecutives(vals, just_ends=True)
res = [(2, 5), (12, 17)]
"""
if just_ends:
def f(g):
group = list(map(itemgetter(1), g))
return (group[0], group[-1])
else: f = lambda g: tuple(map(itemgetter(1), g))
return [f(g) for k, g in groupby(enumerate(vals), lambda x: x[0]-x[1])]
def find_consecutive(vals, val=None):
""" Find indices of consecutive occurences of ``val`` within the array
vals. Indices are always inclusive. If ``val`` is not provided, then
all values present in array are returned
Example 1: vals = [1,1,2,2,2,1,1]
val = 1
find_consecutive(vals, val) = [[0,1],[5,6]]
Example 2: vals = [1,1,2,2,2,1,1]
find_consecutive(vals) = {1: [[0,1],[5,6]], 2: [[2, 4]]}
:param np.array/pd.DataFrame/pd.Series vals: Array of values
:param val: Value to search for
:return np.array or dict of np.arrays
"""
# Check inputs
vals = vals.values if is_pandas(vals) else vals
vals = vals.transpose()[0] if is_column(vals) else vals
# If val is provided, just use it
if val is not None: return _find_consecutive(vals, val)
# Find all possible vals
return {val: _find_consecutive(vals, val) for val in np.unique(vals)}
def _find_consecutive(vals, val):
""" See ``find_consecutive`` """
# Work magic
isval = np.concatenate(([False], np.equal(vals, val).view(np.int8), [False]))
absdiff = np.abs(np.diff(isval))
ranges = np.where(absdiff == 1)[0].reshape(-1, 2)
ranges[:,1] = ranges[:,1]-1
return ranges.tolist()
def combvec(*args):
""" Fast implementation of Matlab's combvec function in Python. In a nutshell:
vals = combvec([1,2,3], [4,5,6])
vals = [[1,4], [1,5], [1,6], ..., [3,4], [3,5], [3,6]]
To use combvec, simply
def eval_options(var1, var2, var3):
for v1, v2, v3 in combvec(var1, var2, var3):
....
.. Tip:: For Matlab, see `<https://www.mathworks.com/help/nnet/ref/combvec.html?searchHighlight=combvec&s_tid=doc_srchtitle>`_
.. Tip:: Implementation is based on `<https://stackoverflow.com/questions/1208118/using-numpy-to-build-an-array-of-all-combinations-of-two-arrays>`_
"""
N = len(args)
args = [np.atleast_1d(arg) for arg in args]
idxs = [range(len(arg)) for arg in args]
opts = np.array(np.meshgrid(*idxs), dtype=int).T.reshape(-1,N)
vals = [[args[i][o] for i, o in enumerate(opt)] for opt in opts]
return vals
def prctile(vals, q=[50, 75, 95, 99, 100], axis=0):
data = vals.values if is_pandas(vals) else vals
q = new_iterable(q)
stats = np.nanpercentile(data, q=q, axis=axis)
if is_pandas(vals):
df = pd.DataFrame(stats, index=q, columns=vals.columns)
else:
df = pd.DataFrame(stats, index=q)
return df
def isinteger(x, rtol=1e-05, atol=1e-08, equal_nan=False):
""" Check if value x is an integer. Rounding error can be circumvented
See https://docs.scipy.org/doc/numpy-1.10.4/reference/generated/numpy.isclose.html
See https://stackoverflow.com/questions/21583758/how-to-check-if-a-float-value-is-a-whole-number
"""
return np.isclose(x, np.round(x).astype('int'))
def union_intervals(x, y, stacked=True):
""" Union of numeric intervals.
:param array x: Start time of the intervals
:param array y: End time of the intervals
:param bool stacked: Return as matrix
.. code:: python
>> union_intervals([1, 2, 15], [10, 11, 20])
>> array([[ 1, 11],
[15, 20]])
>> union_intervals([1, 2, 15], [10, 11, 20], stacked=False)
>> (array([ 1, 15]), array([11, 20]))
.. Danger:: The sorting algorithm used is "mergesort" because it is
the only one in numpy that is stable (see
https://docs.scipy.org/doc/numpy-1.14.0/reference/generated/numpy.sort.html)
Note that Matlab uses a stable version of "quicksort"
"""
# Initialize variables
N = len(x)
# Sort values
x = np.append(x, y)
p = np.argsort(x, kind='mergesort')
t = x[p]
# Work magic based on Matlab's `union_sets_intervals`
z = np.cumsum(np.bincount(np.arange(2*N), weights=2*((p+1)<=N)-1))
z1 = np.append(0, z[0:-1])
x, y = t[(z1==0)&(z>0)], t[(z1>0)&(z==0)]
# If no need to stack, return immediately
if not stacked: return x, y
return np.stack((x, y), axis=1)
def xor_intervals(lb, ub, int_s, int_e, do_union=True, sort=False):
""" xor of numeric intervals
:param num lb: Lower bound for the overall interval
:param num ub: Upper bound for the overall interval
:param list ints: List of lists/tuples with the intervals
:param bool do_union: If False, then the union of ``ints`` is
not performed. Set it to False **only**
if you can guarantee that ``ints`` do
not overlap
:param bool sort: Sort the intervals prior. If ``do_union`` is
True, then this has no effect as the union
process will do the sorting for you
>> lb, ub = 0, 10
>> ints = [[2, 3], [5, 8]]
>> int_s, int_e = zip(*ints)
>> xor_intervals(lb, ub, ints_s, int_e)
[(0, 2), (3, 5), (8, 10)]
"""
# Preprocess the intervals if needed
if do_union:
# Compute the union of the intervals
int_s, int_e = union_intervals(int_s, int_e, stacked=False)
elif sort:
# If sorting is needed, do it
idx = np.argsort(int_s)
int_s, int_e = int_s[idx], int_e[idx]
# Compute xor
sint = np.append(lb, int_e)
eint = np.append(int_s, ub)
# Filter any intervals with 0 duration and return
idx = (eint-sint > 0)
return list(zip(sint[idx], eint[idx])) | [
"numpy.nanpercentile",
"numpy.equal",
"numpy.argsort",
"numpy.array",
"numpy.einsum",
"operator.itemgetter",
"numpy.arange",
"numpy.mean",
"numpy.where",
"numpy.diff",
"numpy.stack",
"pandas.DataFrame",
"numpy.meshgrid",
"numpy.rad2deg",
"numpy.round",
"numpy.abs",
"numpy.atleast_1d"... | [((1044, 1077), 'numpy.einsum', 'np.einsum', (['"""ij,ij->i"""', 'mat1', 'mat2'], {}), "('ij,ij->i', mat1, mat2)\n", (1053, 1077), True, 'import numpy as np\n'), ((4817, 4855), 'numpy.nanpercentile', 'np.nanpercentile', (['data'], {'q': 'q', 'axis': 'axis'}), '(data, q=q, axis=axis)\n', (4833, 4855), True, 'import numpy as np\n'), ((6292, 6307), 'numpy.append', 'np.append', (['x', 'y'], {}), '(x, y)\n', (6301, 6307), True, 'import numpy as np\n'), ((6316, 6347), 'numpy.argsort', 'np.argsort', (['x'], {'kind': '"""mergesort"""'}), "(x, kind='mergesort')\n", (6326, 6347), True, 'import numpy as np\n'), ((6505, 6526), 'numpy.append', 'np.append', (['(0)', 'z[0:-1]'], {}), '(0, z[0:-1])\n', (6514, 6526), True, 'import numpy as np\n'), ((6668, 6692), 'numpy.stack', 'np.stack', (['(x, y)'], {'axis': '(1)'}), '((x, y), axis=1)\n', (6676, 6692), True, 'import numpy as np\n'), ((7960, 7980), 'numpy.append', 'np.append', (['lb', 'int_e'], {}), '(lb, int_e)\n', (7969, 7980), True, 'import numpy as np\n'), ((7992, 8012), 'numpy.append', 'np.append', (['int_s', 'ub'], {}), '(int_s, ub)\n', (8001, 8012), True, 'import numpy as np\n'), ((217, 238), 'numpy.mean', 'np.mean', (['((x - y) ** 2)'], {}), '((x - y) ** 2)\n', (224, 238), True, 'import numpy as np\n'), ((516, 529), 'numpy.array', 'np.array', (['mat'], {}), '(mat)\n', (524, 529), True, 'import numpy as np\n'), ((956, 970), 'numpy.array', 'np.array', (['mat1'], {}), '(mat1)\n', (964, 970), True, 'import numpy as np\n'), ((1018, 1032), 'numpy.array', 'np.array', (['mat2'], {}), '(mat2)\n', (1026, 1032), True, 'import numpy as np\n'), ((3600, 3614), 'numpy.diff', 'np.diff', (['isval'], {}), '(isval)\n', (3607, 3614), True, 'import numpy as np\n'), ((4438, 4456), 'numpy.atleast_1d', 'np.atleast_1d', (['arg'], {}), '(arg)\n', (4451, 4456), True, 'import numpy as np\n'), ((4894, 4944), 'pandas.DataFrame', 'pd.DataFrame', (['stats'], {'index': 'q', 'columns': 'vals.columns'}), '(stats, index=q, 
columns=vals.columns)\n', (4906, 4944), True, 'import pandas as pd\n'), ((4968, 4996), 'pandas.DataFrame', 'pd.DataFrame', (['stats'], {'index': 'q'}), '(stats, index=q)\n', (4980, 4996), True, 'import pandas as pd\n'), ((1278, 1295), 'numpy.rad2deg', 'np.rad2deg', (['angle'], {}), '(angle)\n', (1288, 1295), True, 'import numpy as np\n'), ((3382, 3397), 'numpy.unique', 'np.unique', (['vals'], {}), '(vals)\n', (3391, 3397), True, 'import numpy as np\n'), ((6453, 6469), 'numpy.arange', 'np.arange', (['(2 * N)'], {}), '(2 * N)\n', (6462, 6469), True, 'import numpy as np\n'), ((7866, 7883), 'numpy.argsort', 'np.argsort', (['int_s'], {}), '(int_s)\n', (7876, 7883), True, 'import numpy as np\n'), ((548, 559), 'numpy.abs', 'np.abs', (['mat'], {}), '(mat)\n', (554, 559), True, 'import numpy as np\n'), ((3630, 3652), 'numpy.where', 'np.where', (['(absdiff == 1)'], {}), '(absdiff == 1)\n', (3638, 3652), True, 'import numpy as np\n'), ((5376, 5387), 'numpy.round', 'np.round', (['x'], {}), '(x)\n', (5384, 5387), True, 'import numpy as np\n'), ((2188, 2201), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (2198, 2201), False, 'from operator import itemgetter\n'), ((2282, 2295), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (2292, 2295), False, 'from operator import itemgetter\n'), ((3534, 3553), 'numpy.equal', 'np.equal', (['vals', 'val'], {}), '(vals, val)\n', (3542, 3553), True, 'import numpy as np\n'), ((4539, 4557), 'numpy.meshgrid', 'np.meshgrid', (['*idxs'], {}), '(*idxs)\n', (4550, 4557), True, 'import numpy as np\n')] |
"""
### author: <NAME>
### <EMAIL>
### date: 9/10/2018
"""
import os
import numpy as np
sep = os.sep
def get_class_weights(y):
"""
:param y: labels
:return: correct weights of each classes for balanced training
"""
cls, count = np.unique(y, return_counts=True)
counter = dict(zip(cls, count))
majority = max(counter.values())
return {cls: round(majority / count) for cls, count in counter.items()}
def get_4_flips(img_obj=None):
flipped = [img_obj]
copy0 = img_obj.__copy__()
copy0.working_arr = np.flip(copy0.working_arr, 0)
if copy0.ground_truth is not None:
copy0.ground_truth = np.flip(copy0.ground_truth, 0)
if copy0.mask is not None:
copy0.mask = np.flip(copy0.mask, 0)
flipped.append(copy0)
copy1 = copy0.__copy__()
copy1.working_arr = np.flip(copy1.working_arr, 1)
if copy1.ground_truth is not None:
copy1.ground_truth = np.flip(copy1.ground_truth, 1)
if copy1.mask is not None:
copy1.mask = np.flip(copy1.mask, 1)
flipped.append(copy1)
copy2 = copy1.__copy__()
copy2.working_arr = np.flip(copy2.working_arr, 0)
if copy2.ground_truth is not None:
copy2.ground_truth = np.flip(copy2.ground_truth, 0)
if copy2.mask is not None:
copy2.mask = np.flip(copy2.mask, 0)
flipped.append(copy2)
return flipped
| [
"numpy.flip",
"numpy.unique"
] | [((253, 285), 'numpy.unique', 'np.unique', (['y'], {'return_counts': '(True)'}), '(y, return_counts=True)\n', (262, 285), True, 'import numpy as np\n'), ((547, 576), 'numpy.flip', 'np.flip', (['copy0.working_arr', '(0)'], {}), '(copy0.working_arr, 0)\n', (554, 576), True, 'import numpy as np\n'), ((832, 861), 'numpy.flip', 'np.flip', (['copy1.working_arr', '(1)'], {}), '(copy1.working_arr, 1)\n', (839, 861), True, 'import numpy as np\n'), ((1118, 1147), 'numpy.flip', 'np.flip', (['copy2.working_arr', '(0)'], {}), '(copy2.working_arr, 0)\n', (1125, 1147), True, 'import numpy as np\n'), ((646, 676), 'numpy.flip', 'np.flip', (['copy0.ground_truth', '(0)'], {}), '(copy0.ground_truth, 0)\n', (653, 676), True, 'import numpy as np\n'), ((729, 751), 'numpy.flip', 'np.flip', (['copy0.mask', '(0)'], {}), '(copy0.mask, 0)\n', (736, 751), True, 'import numpy as np\n'), ((931, 961), 'numpy.flip', 'np.flip', (['copy1.ground_truth', '(1)'], {}), '(copy1.ground_truth, 1)\n', (938, 961), True, 'import numpy as np\n'), ((1015, 1037), 'numpy.flip', 'np.flip', (['copy1.mask', '(1)'], {}), '(copy1.mask, 1)\n', (1022, 1037), True, 'import numpy as np\n'), ((1217, 1247), 'numpy.flip', 'np.flip', (['copy2.ground_truth', '(0)'], {}), '(copy2.ground_truth, 0)\n', (1224, 1247), True, 'import numpy as np\n'), ((1301, 1323), 'numpy.flip', 'np.flip', (['copy2.mask', '(0)'], {}), '(copy2.mask, 0)\n', (1308, 1323), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.