content stringlengths 0 1.05M | origin stringclasses 2
values | type stringclasses 2
values |
|---|---|---|
from __future__ import unicode_literals
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils.translation import ugettext_lazy as _
from datetime import datetime as dt
import pytz
class Profile(models.Model):
    """Per-user profile data, attached 1:1 to Django's built-in User."""
    user = models.OneToOneField(User, blank=True, on_delete=models.CASCADE)
    # NOTE(review): default=1 assumes a Branch/Track row with pk=1 exists -- confirm fixtures.
    main_branch = models.ForeignKey('Branch', blank=True, default=1) # this is a many-to-one relation, which means that many users can be in the same branch
    track = models.ForeignKey('Track', blank=True, default=1)
    personal_background = models.TextField('Personal Background', max_length=2000, blank=True)
    # NOTE(review): verbose name duplicates the English field's label -- looks like a copy/paste slip; confirm intent.
    personal_background_hebrew = models.TextField('Personal Background', max_length=2000, blank=True)
    avatar = models.ImageField('Profile Picture', upload_to = 'images/avatars/', default="images/avatars/default_member.png")

    def __str__(self):
        """
        the str is also important for the admin GUI where it allows to distinguish between items.
        """
        return self.user.username
@receiver(post_save, sender=User)
def create_or_update_user_profile(sender, instance, created, **kwargs):
    """Keep a Profile row in sync with every User.

    Connected to User's post_save signal: a Profile is created on the first
    save, and the related profile is re-saved on every subsequent save.

    NOTE(review): indentation was lost in this copy of the file -- placing
    `instance.profile.save()` outside the `if created:` branch follows the
    conventional Django signal pattern; confirm against the original source.
    """
    if created:
        Profile.objects.create(user=instance)
    instance.profile.save()
class Track(models.Model):
    """A study track (curriculum) that branches can offer, with Hebrew mirrors."""
    trackName = models.CharField(max_length=80)
    trackName_hebrew = models.CharField(max_length=80, null=True, blank=True, default="NA")
    trackDescription = models.TextField(default="NA")
    trackDescription_hebrew = models.TextField(default="NA", null=True, blank=True)
    trackLogo = models.ImageField(upload_to='static/images/tracks', default="/static/favicon.ico")

    def __str__(self):
        return self.trackName

    class Meta:
        # Alphabetical by the English track name.
        ordering = ('trackName',)
class Branch(models.Model):
    """A physical branch (meeting location), with bilingual display fields."""
    branchName = models.CharField(max_length=80)
    branchName_hebrew = models.CharField(max_length=80, default="NA")
    # (value, label) choices; the None entry renders as a placeholder label.
    areas = ((None, _("Area")), ('south', 'South'), ('tel-aviv', 'Tel Aviv'), ('center', 'Center'), ('jerusalem', 'Jerusalem'), ('north', 'North'),)
    areas_hebrew = ((None, _("אזור")), ('south', 'דרום'), ('tel-aviv', 'תל אביב'), ('center', 'מרכז'), ('jerusalem', 'ירושלים'), ('north', 'צפון'),)
    area = models.CharField(max_length=15, choices=areas, null=True)
    # NOTE(review): stores the same value keys as `area`, only the labels differ;
    # presumably kept in sync with `area` by whoever edits the record -- confirm.
    area_hebrew = models.CharField(max_length=15, choices=areas_hebrew, null=True)
    address = models.CharField(max_length=200, default="NA")
    address_hebrew = models.CharField(max_length=200, default="NA")
    activityTimes = models.CharField(default=_('NA'), max_length=80, help_text="e.g. Mondays at 18:30")
    activityTimes_hebrew = models.CharField(default='NA', max_length=80, help_text="למשל ימי שני ב-18:30")
    availableTracks = models.ManyToManyField(Track)
    nextGeneration = models.BooleanField(default=False)
    facebookGroup = models.URLField(default="#")
    dogs_friendly = models.BooleanField(default=False)
    children_friendly = models.BooleanField(default=False)
    Parking = models.CharField(max_length=200, default="NA")
    Parking_hebrew = models.CharField(max_length=200, default="NA")
    staff_members = models.CharField(max_length=200, default="NA")
    staff_members_hebrew = models.CharField(max_length=200, default="NA")
    track_openning_time = models.DateField(default=timezone.now)
    # how to insert items into such a model: https://docs.djangoproject.com/en/1.11/topics/db/examples/many_to_many/

    def __str__(self):
        return self.branchName

    class Meta:
        # Reverse-alphabetical by area, then by English branch name.
        ordering = ('-area', 'branchName', )
class Team(models.Model):
    """A staff/team member entry displayed on the team page."""
    # External identifier; uniqueness enforced at the DB level.
    team_id = models.IntegerField('team_id', unique=True)
    # Display column/position on the team page (also the default sort key).
    column = models.IntegerField('column')
    name = models.CharField('Name', max_length=80)
    name_hebrew = models.CharField('שם', max_length=80, default="NA")
    title = models.CharField('Title', max_length=200)
    title_hebrew = models.CharField('תפקיד', max_length=200, default="NA")

    def __str__(self):
        return self.name

    class Meta:
        ordering = ['column']
class Faq(models.Model):
    """A single FAQ entry (question/answer pair) with Hebrew mirrors."""
    question = models.CharField('Question', max_length=150)
    question_hebrew = models.CharField('שאלה', max_length=150, default="NA")
    answer = models.TextField('Answer', max_length=5000)
    answer_hebrew = models.TextField('תשובה', max_length=5000, default="NA")

    def __str__(self):
        return self.question
class News(models.Model):
    """A news article posted on the site, newest first."""
    itemTitle = models.CharField(max_length=50, default="NA")
    itemContent = models.TextField(default="NA")
    itemPhoto = models.ImageField(upload_to = 'images/news/', default='images/news/pic1.png')
    postDate = models.DateField(default=timezone.now)
    # (value, label) choices; the None entry renders as a placeholder label.
    languages = ((None, _("Choose Article Language")), ('english', _('English')), ('hebrew', _('עברית')), ('arabic', _('عربى')), ('russian', _('русский')),)
    itemLanguage = models.CharField(max_length=150, choices=languages, null=True)

    class Meta:
        # Newest articles first.
        ordering = ['-postDate']

    def __str__(self):
        return self.itemTitle
class Job(models.Model):
    """A job offer posted on the jobs board."""
    jobTitle = models.CharField(max_length=50, default=_("NA"))
    jobDescription = models.TextField(default=_("NA"))
    jobPhoto = models.ImageField(upload_to = 'images/jobs/', default='images/jobs/pic1.png')
    jobLink = models.TextField(default="#")
    # (value, label) choices for required seniority; None renders as a placeholder.
    is_senior = ((None, _("Required skill level")), ('junior', _('Junior')), ('senior', _('Senior')), ('teamLeader', _('Team Leader')),)
    jobSkills = models.CharField(max_length=150, default="NA", help_text="front end / data science / DevOps etc.") #In the future, for the sake of searching, it better be m2m item
    seniority = models.CharField(max_length=150, choices=is_senior, null=True)
    languages = ((None, _("Choose Offer Language")), ('english', _('English')), ('hebrew', _('עברית')), ('arabic', _('عربى')), ('russian', _('русский')),)
    jobLanguage = models.CharField(max_length=150, choices=languages, null=True)
    postDate = models.DateField(default=timezone.now)
    company = models.CharField(max_length=50, default=_("NA"))

    def __str__(self):
        return self.jobTitle
class Event(models.Model):
    """A community event shown on the events page."""
    eventTitle = models.CharField(max_length=50, default="NA")
    eventDescription = models.TextField(default="NA")
    eventPhoto = models.ImageField(upload_to = 'images/events/')
    eventLink = models.TextField(default="#")
    event_date = models.DateTimeField()
    eventLocation = models.CharField(max_length=100, default="NA")
    # (value, label) choices; the None entry renders as a placeholder label.
    languages = ((None, _("Choose item Language")), ('english', _('English')), ('hebrew', _('עברית')), ('arabic', _('عربى')), ('russian', _('русский')),)
    eventLanguage = models.CharField(max_length=150, choices=languages, null=True)

    def __str__(self):
        return self.eventTitle

    def is_upcoming(self):
        """Return True if the event has not happened yet.

        BUG FIX: the original compared `event_date` (timezone-aware when
        USE_TZ=True) against naive `datetime.now()`, which raises
        `TypeError: can't compare offset-naive and offset-aware datetimes`.
        `timezone.now()` matches the project's USE_TZ setting either way.
        """
        return self.event_date >= timezone.now()
| nilq/baby-python | python |
#Much code directly from Google's TensorFlow
"""Library for creating sequence-to-sequence models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
import inspect
from rnn_enhancement import linear_enhanced as linear
# from tensorflow.models.rnn import rnn
# from tensorflow.models.rnn import rnn_cell
from rnn_enhancement import rnn_enhanced as rnn
from rnn_enhancement import rnn_cell_enhanced as rnn_cell
#Warning commenting the two lines below out allows it to work!
from rnn_enhancement import linear_functions_enhanced as lfe
from rnn_enhancement import decoding_enhanced
def average_hidden_states(decoder_states, average_hidden_state_influence = 0.5, name = None):
    """Blend the last decoder state with the mean of all decoder states.

    Args:
      decoder_states: list of state Tensors, one per decoder time-step.
      average_hidden_state_influence: weight in [0, 1] given to the mean
        state; 0.0 reproduces the plain last state.
      name: optional op-scope name.

    Returns:
      A Tensor equal to
      (1 - w) * decoder_states[-1] + w * mean(decoder_states).
    """
    print('WARNING YOU ARE USING HIDDEN STATES')
    # BUG FIX: the original passed `decoder_states + average_hidden_state_influence`
    # to tf.op_scope, which raises TypeError (list + float). op_scope only needs
    # the list of input tensors.
    with tf.op_scope(decoder_states, name, "average_hidden_states"):
        # Axis 0 is the time dimension of the (stacked) list of states.
        mean_decoder_states = tf.reduce_mean(decoder_states, 0)
        final_decoder_state = tf.add(
            (1 - average_hidden_state_influence) * decoder_states[-1],
            average_hidden_state_influence * mean_decoder_states)
        return final_decoder_state
def attention_decoder(decoder_inputs, initial_state, attention_states, cell,
                      output_size=None, num_heads=1, loop_function=None,
                      dtype=tf.float32, scope=None, average_states = False, average_hidden_state_influence = 0.5,
                      temperature_decode = False, temperature = 1.0):
    """RNN decoder with attention for the sequence-to-sequence model.

    Args:
      decoder_inputs: a list of 2D Tensors [batch_size x cell.input_size].
      initial_state: 2D Tensor [batch_size x cell.state_size].
      attention_states: 3D Tensor [batch_size x attn_length x attn_size].
      cell: rnn_cell.RNNCell defining the cell function and size.
      output_size: size of the output vectors; if None, we use cell.output_size.
      num_heads: number of attention heads that read from attention_states.
      loop_function: if not None, this function will be applied to i-th output
        in order to generate i+1-th input, and decoder_inputs will be ignored,
        except for the first element ("GO" symbol). This can be used for decoding,
        but also for training to emulate http://arxiv.org/pdf/1506.03099v2.pdf.
        Signature -- loop_function(prev, i) = next
          * prev is a 2D Tensor of shape [batch_size x cell.output_size],
          * i is an integer, the step number (when advanced control is needed),
          * next is a 2D Tensor of shape [batch_size x cell.input_size].
      dtype: The dtype to use for the RNN initial state (default: tf.float32).
      scope: VariableScope for the created subgraph; default: "attention_decoder".
      average_states: if True, feed the cell a blend of all previous decoder
        states (see average_hidden_states) instead of only the last state.
      average_hidden_state_influence: weight of the mean state in that blend.
      temperature_decode: forwarded to loop_function (extension of the stock
        TF signature); enables temperature sampling instead of argmax.
      temperature: sampling temperature forwarded to loop_function.

    Returns:
      outputs: A list of the same length as decoder_inputs of 2D Tensors of shape
        [batch_size x output_size]. These represent the generated outputs.
        Output i is computed from input i (which is either i-th decoder_inputs or
        loop_function(output {i-1}, i)) as follows. First, we run the cell
        on a combination of the input and previous attention masks:
          cell_output, new_state = cell(linear(input, prev_attn), prev_state)
        Then, we calculate new attention masks:
          new_attn = softmax(V^T * tanh(W * attention_states + U * new_state))
        and then we calculate the output:
          output = linear(cell_output, new_attn).
      states: The state of each decoder cell in each time-step. This is a list
        with length len(decoder_inputs) -- one item for each time-step.
        Each item is a 2D Tensor of shape [batch_size x cell.state_size].

    Raises:
      ValueError: when num_heads is not positive, there are no inputs, or shapes
        of attention_states are not set.
    """
    if not decoder_inputs:
        raise ValueError("Must provide at least 1 input to attention decoder.")
    if num_heads < 1:
        raise ValueError("With less than 1 heads, use a non-attention decoder.")
    # attn_length and attn_size must be statically known to build the 1x1 conv.
    if not attention_states.get_shape()[1:2].is_fully_defined():
        raise ValueError("Shape[1] and [2] of attention_states must be known: %s"
                         % attention_states.get_shape())
    if output_size is None:
        output_size = cell.output_size
    with tf.variable_scope(scope or "attention_decoder"):
        batch_size = tf.shape(decoder_inputs[0])[0]  # Needed for reshaping.
        attn_length = attention_states.get_shape()[1].value
        attn_size = attention_states.get_shape()[2].value
        # To calculate W1 * h_t we use a 1-by-1 convolution, need to reshape before.
        hidden = tf.reshape(attention_states, [-1, attn_length, 1, attn_size])
        hidden_features = []
        v = []
        attention_vec_size = attn_size  # Size of query vectors for attention.
        # Pre-compute W * attention_states once per head (query-independent part).
        for a in xrange(num_heads):
            k = tf.get_variable("AttnW_%d" % a, [1, 1, attn_size, attention_vec_size])
            hidden_features.append(tf.nn.conv2d(hidden, k, [1, 1, 1, 1], "SAME"))
            v.append(tf.get_variable("AttnV_%d" % a, [attention_vec_size]))
        states = [initial_state]

        def attention(query):  # this is part of the attention_decoder. It is placed outside the loop to avoid re-compile time.
            """Put attention masks on hidden using hidden_features and query."""
            ds = []  # Results of attention reads will be stored here.
            for a in xrange(num_heads):
                with tf.variable_scope("Attention_%d" % a):
                    y = linear.linear(query, attention_vec_size, True)
                    y = tf.reshape(y, [-1, 1, 1, attention_vec_size])
                    # Attention mask is a softmax of v^T * tanh(...).
                    s = tf.reduce_sum(v[a] * tf.tanh(hidden_features[a] + y), [2, 3])
                    # Note: `a` (the head index) is rebound here; it is not used as
                    # an index again within this iteration, so this is safe.
                    a = tf.nn.softmax(s)
                    # Now calculate the attention-weighted vector d.
                    d = tf.reduce_sum(tf.reshape(a, [-1, attn_length, 1, 1]) * hidden,
                                      [1, 2])
                    ds.append(tf.reshape(d, [-1, attn_size]))
            return ds

        outputs = []
        prev = None
        batch_attn_size = tf.pack([batch_size, attn_size])
        # Initial attention read is all zeros.
        attns = [tf.zeros(batch_attn_size, dtype=dtype)
                 for _ in xrange(num_heads)]
        for a in attns:  # Ensure the second shape of attention vectors is set.
            a.set_shape([None, attn_size])
        for i in xrange(len(decoder_inputs)):  # one iteration per decoding time-step
            if i > 0:
                # Share weights across time-steps.
                tf.get_variable_scope().reuse_variables()
            inp = decoder_inputs[i]
            '''nick, you can implement sampling here by changing the input here! also curriculum learning too!'''
            # If loop_function is set, we use it instead of decoder_inputs.
            if loop_function is not None and prev is not None:
                with tf.variable_scope("loop_function", reuse=True):
                    # stop_gradient: no gradient flows through the fed-back input.
                    inp = tf.stop_gradient(loop_function(prev, i, temperature_decode = temperature_decode,
                                                         temperature = temperature))
            # Merge input and previous attentions into one vector of the right size.
            x = linear.linear([inp] + attns, cell.input_size, True)
            hidden_state_input = states[-1]
            if average_states:
                '''implement averaging of states'''
                print('WARNING YOU HAVE OPTED TO USE THE AVERAGING OF STATES!')
                hidden_state_input = average_hidden_states(states, average_hidden_state_influence)
            # Run the RNN on the merged input and the chosen previous state.
            cell_output, new_state = cell(x, hidden_state_input)
            states.append(new_state)
            # Run the attention mechanism on the new state.
            attns = attention(new_state)
            with tf.variable_scope("AttnOutputProjection"):
                output = linear.linear([cell_output] + attns, output_size, True)
            if loop_function is not None:
                # We do not propagate gradients over the loop function.
                prev = tf.stop_gradient(output)
            outputs.append(output)
        return outputs, states
def embedding_attention_decoder(decoder_inputs, initial_state, attention_states,
                                cell, num_symbols, num_heads=1,
                                output_size=None, output_projection=None,
                                feed_previous=False, dtype=tf.float32,
                                scope=None, average_states = False, average_hidden_state_influence = 0.5,
                                temperature_decode = False, temperature = 1.0):
    """RNN decoder with embedding and attention and a pure-decoding option.

    Args:
      decoder_inputs: a list of 1D batch-sized int32-Tensors (decoder inputs).
      initial_state: 2D Tensor [batch_size x cell.state_size].
      attention_states: 3D Tensor [batch_size x attn_length x attn_size].
      cell: rnn_cell.RNNCell defining the cell function.
      num_symbols: integer, how many symbols come into the embedding.
      num_heads: number of attention heads that read from attention_states.
      output_size: size of the output vectors; if None, use cell.output_size.
      output_projection: None or a pair (W, B) of output projection weights and
        biases; W has shape [output_size x num_symbols] and B has shape
        [num_symbols]; if provided and feed_previous=True, each fed previous
        output will first be multiplied by W and added B.
      feed_previous: Boolean; if True, only the first of decoder_inputs will be
        used (the "GO" symbol), and all other decoder inputs will be generated by:
          next = embedding_lookup(embedding, argmax(previous_output)).
        In effect, this implements a greedy decoder. It can also be used
        during training to emulate http://arxiv.org/pdf/1506.03099v2.pdf.
        If False, decoder_inputs are used as given (the standard decoder case).
      dtype: The dtype to use for the RNN initial states (default: tf.float32).
      scope: VariableScope for the created subgraph; defaults to
        "embedding_attention_decoder".
      average_states, average_hidden_state_influence, temperature_decode,
        temperature: forwarded to attention_decoder (see its docstring).

    Returns:
      outputs: A list of the same length as decoder_inputs of 2D Tensors with
        shape [batch_size x output_size] containing the generated outputs.
      states: The state of each decoder cell in each time-step. This is a list
        with length len(decoder_inputs) -- one item for each time-step.
        Each item is a 2D Tensor of shape [batch_size x cell.state_size].

    Raises:
      ValueError: when output_projection has the wrong shape.
    """
    if output_size is None:
        output_size = cell.output_size
    if output_projection is not None:
        # Validate the projection shapes eagerly for a clearer error.
        proj_weights = tf.convert_to_tensor(output_projection[0], dtype=dtype)
        proj_weights.get_shape().assert_is_compatible_with([cell.output_size,
                                                            num_symbols])
        proj_biases = tf.convert_to_tensor(output_projection[1], dtype=dtype)
        proj_biases.get_shape().assert_is_compatible_with([num_symbols])
    with tf.variable_scope(scope or "embedding_attention_decoder"):
        # Embedding lookups are pinned to CPU (large parameter table).
        with tf.device("/cpu:0"):
            embedding = tf.get_variable("embedding", [num_symbols, cell.input_size])
        loop_function = None
        if feed_previous:
            def extract_argmax_and_embed(prev, _, temperature_decode = False, temperature = 1.0):  # placing this function here avoids re-compile time during training!
                """Loop_function that extracts the symbol from prev and embeds it."""
                if output_projection is not None:
                    prev = tf.nn.xw_plus_b(prev, output_projection[0], output_projection[1])
                '''output prev of xw_plus_b is [batch_size x out_units]'''
                # Choose the next symbol: temperature sampling or greedy argmax.
                if temperature_decode:
                    print('YOU ARE USING TEMPERATURE DECODING WARNING ---')
                    prev_symbol = tf.stop_gradient(decoding_enhanced.batch_sample_with_temperature(prev, temperature))
                else:
                    prev_symbol = tf.stop_gradient(tf.argmax(prev, 1))
                # Map the chosen symbol id back into embedding space.
                emb_prev = tf.nn.embedding_lookup(embedding, prev_symbol)
                return emb_prev
            loop_function = extract_argmax_and_embed  # the function itself is passed to the decoder
        # Embed every decoder input symbol.
        emb_inp = [tf.nn.embedding_lookup(embedding, i) for i in decoder_inputs]
        return attention_decoder(
            emb_inp, initial_state, attention_states, cell, output_size=output_size,
            num_heads=num_heads, loop_function=loop_function, average_states = average_states,
            average_hidden_state_influence = average_hidden_state_influence, temperature_decode = temperature_decode,
            temperature = temperature)
def embedding_attention_seq2seq(encoder_inputs, decoder_inputs, cell,
                                num_encoder_symbols, num_decoder_symbols,
                                num_heads=1, output_projection=None,
                                feed_previous=False, dtype=tf.float32,
                                scope=None, average_states = False,
                                average_hidden_state_influence = 0.5, temperature_decode = False,
                                temperature = 1.0):
    """Embedding sequence-to-sequence model with attention.

    This model first embeds encoder_inputs by a newly created embedding (of shape
    [num_encoder_symbols x cell.input_size]). Then it runs an RNN to encode
    embedded encoder_inputs into a state vector. It keeps the outputs of this
    RNN at every step to use for attention later. Next, it embeds decoder_inputs
    by another newly created embedding (of shape [num_decoder_symbols x
    cell.input_size]). Then it runs attention decoder, initialized with the last
    encoder state, on embedded decoder_inputs and attending to encoder outputs.

    Args:
      encoder_inputs: a list of 1D int32 Tensors of shape [batch_size].
      decoder_inputs: a list of 1D int32 Tensors of shape [batch_size].
      cell: rnn_cell.RNNCell defining the cell function and size.
      num_encoder_symbols: integer; number of symbols on the encoder side.
      num_decoder_symbols: integer; number of symbols on the decoder side.
      num_heads: number of attention heads that read from attention_states.
      output_projection: None or a pair (W, B) of output projection weights and
        biases; W has shape [cell.output_size x num_decoder_symbols] and B has
        shape [num_decoder_symbols]; if provided and feed_previous=True, each
        fed previous output will first be multiplied by W and added B.
      feed_previous: Boolean or scalar Boolean Tensor; if True, only the first
        of decoder_inputs will be used (the "GO" symbol), and all other decoder
        inputs will be taken from previous outputs (as in embedding_rnn_decoder).
        If False, decoder_inputs are used as given (the standard decoder case).
      dtype: The dtype of the initial RNN state (default: tf.float32).
      scope: VariableScope for the created subgraph; defaults to
        "embedding_attention_seq2seq".
      average_states, average_hidden_state_influence, temperature_decode,
        temperature: forwarded to embedding_attention_decoder.

    Returns:
      outputs: A list of the same length as decoder_inputs of 2D Tensors with
        shape [batch_size x num_decoder_symbols] containing the generated
        outputs (the list dimension is the sequence length).
      states: The state of each decoder cell in each time-step. This is a list
        with length len(decoder_inputs) -- one item for each time-step.
        Each item is a 2D Tensor of shape [batch_size x cell.state_size].
    """
    with tf.variable_scope(scope or "embedding_attention_seq2seq"):
        # Encoder.
        encoder_cell = rnn_cell.EmbeddingWrapper(cell, num_encoder_symbols)
        encoder_outputs, encoder_states = rnn.rnn(
            encoder_cell, encoder_inputs, dtype=dtype)
        # First calculate a concatenation of encoder outputs to put attention on.
        top_states = [tf.reshape(e, [-1, 1, cell.output_size])
                      for e in encoder_outputs]
        attention_states = tf.concat(1, top_states)
        # Decoder.
        output_size = None
        if output_projection is None:
            # Without an explicit projection, wrap the cell so its output is
            # projected to the vocabulary size.
            cell = rnn_cell.OutputProjectionWrapper(cell, num_decoder_symbols)
            output_size = num_decoder_symbols
        if isinstance(feed_previous, bool):  # static flag: build a single graph
            '''nick, right here, you will find a broad if statement'''
            return embedding_attention_decoder(
                decoder_inputs, encoder_states[-1], attention_states, cell,
                num_decoder_symbols, num_heads, output_size, output_projection,
                feed_previous, average_states = average_states, average_hidden_state_influence = average_hidden_state_influence,
                temperature_decode = temperature_decode, temperature = temperature)
        else:  # If feed_previous is a Tensor, we construct 2 graphs and use cond.
            '''nick, right here, you modify by doing a broad if statement'''
            outputs1, states1 = embedding_attention_decoder(
                decoder_inputs, encoder_states[-1], attention_states, cell,
                num_decoder_symbols, num_heads, output_size, output_projection, True,
                average_states = average_states,
                average_hidden_state_influence = average_hidden_state_influence,
                temperature_decode = temperature_decode, temperature = temperature)
            # Second graph shares the first graph's variables.
            tf.get_variable_scope().reuse_variables()
            outputs2, states2 = embedding_attention_decoder(
                decoder_inputs, encoder_states[-1], attention_states, cell,
                num_decoder_symbols, num_heads, output_size, output_projection, False,
                average_states = average_states,
                average_hidden_state_influence = average_hidden_state_influence,
                temperature_decode = temperature_decode, temperature = temperature)
            # Select between the two graphs at run time.
            outputs = tf.control_flow_ops.cond(feed_previous,
                                               lambda: outputs1, lambda: outputs2)
            states = tf.control_flow_ops.cond(feed_previous,
                                              lambda: states1, lambda: states2)
            return outputs, states
def sequence_loss_by_example(logits, targets, weights, num_decoder_symbols,
                             average_across_timesteps=True,
                             softmax_loss_function=None, name=None):
    """Weighted cross-entropy loss for a sequence of logits (per example).

    Args:
      logits: list of 2D Tensors of shape [batch_size x num_decoder_symbols].
      targets: list of 1D batch-sized int32-Tensors of the same length as logits.
      weights: list of 1D batch-sized float-Tensors of the same length as logits.
      num_decoder_symbols: integer, number of decoder symbols (output classes).
      average_across_timesteps: If set, divide the returned cost by the total
        label weight.
      softmax_loss_function: function (inputs-batch, labels-batch) -> loss-batch
        to be used instead of the standard softmax (the default if this is None).
      name: optional name for this operation, default: "sequence_loss_by_example".

    Returns:
      1D batch-sized float Tensor: the log-perplexity for each sequence
      (i.e. ln(perplexity), which is why this is usable directly as a loss).

    Raises:
      ValueError: if len(logits) is different from len(targets) or len(weights).
    """
    if len(targets) != len(logits) or len(weights) != len(logits):
        raise ValueError("Lengths of logits, weights, and targets must be the same "
                         "%d, %d, %d." % (len(logits), len(weights), len(targets)))
    with tf.op_scope(logits + targets + weights, name,
                     "sequence_loss_by_example"):
        batch_size = tf.shape(targets[0])[0]
        log_perp_list = []
        length = batch_size * num_decoder_symbols  # flattened size: batch x vocab
        for i in xrange(len(logits)):
            if softmax_loss_function is None:
                # TODO(lukaszkaiser): There is no SparseCrossEntropy in TensorFlow, so
                # we need to first cast targets into a dense representation, and as
                # SparseToDense does not accept batched inputs, we need to do this by
                # re-indexing and re-sizing. When TensorFlow adds SparseCrossEntropy,
                # rewrite this method.
                # Offset each target id by its row so a single flat scatter works.
                indices = targets[i] + num_decoder_symbols * tf.range(batch_size)
                with tf.device("/cpu:0"):  # Sparse-to-dense must happen on CPU for now.
                    dense = tf.sparse_to_dense(indices, tf.expand_dims(length, 0), 1.0,
                                               0.0)
                target = tf.reshape(dense, [-1, num_decoder_symbols])
                crossent = tf.nn.softmax_cross_entropy_with_logits(
                    logits[i], target, name="SequenceLoss/CrossEntropy{0}".format(i))
            else:
                crossent = softmax_loss_function(logits[i], targets[i])
            # Weight each time-step's cross-entropy by its label weight.
            log_perp_list.append(crossent * weights[i])
        # Sum the per-time-step losses element-wise across time.
        log_perps = tf.add_n(log_perp_list)
        if average_across_timesteps:
            # Element-wise sum of the weights, one total per example.
            total_size = tf.add_n(weights)
            total_size += 1e-12  # Just to avoid division by 0 for all-0 weights.
            log_perps /= total_size
        return log_perps  # natural log of the perplexity, per example
def sequence_loss(logits, targets, weights, num_decoder_symbols,
                  average_across_timesteps=True, average_across_batch=True,
                  softmax_loss_function=None, name=None):
    """Weighted cross-entropy loss for a sequence of logits, batch-collapsed.

    Args:
      logits: list of 2D Tensors of shape [batch_size x num_decoder_symbols].
      targets: list of 1D batch-sized int32-Tensors of the same length as logits.
      weights: list of 1D batch-sized float-Tensors of the same length as logits.
      num_decoder_symbols: integer, number of decoder symbols (output classes).
      average_across_timesteps: If set, divide the returned cost by the total
        label weight.
      average_across_batch: If set, divide the returned cost by the batch size.
      softmax_loss_function: function (inputs-batch, labels-batch) -> loss-batch
        to be used instead of the standard softmax (the default if this is None).
      name: optional name for this operation, defaults to "sequence_loss".

    Returns:
      A scalar float Tensor: the average log-perplexity per symbol (weighted).

    Raises:
      ValueError: if len(logits) is different from len(targets) or len(weights).
    """
    # Group everything under one op scope; the list concatenation simply hands
    # every input tensor to op_scope at once.
    with tf.op_scope(logits + targets + weights, name, "sequence_loss"):
        per_example_loss = sequence_loss_by_example(
            logits, targets, weights, num_decoder_symbols,
            average_across_timesteps=average_across_timesteps,
            softmax_loss_function=softmax_loss_function)
        # Collapse the per-example losses to a single scalar.
        total_cost = tf.reduce_sum(per_example_loss)
        if not average_across_batch:
            return total_cost
        # Normalize by the dynamic batch size (cast keeps the division float).
        num_examples = tf.cast(tf.shape(targets[0])[0], tf.float32)
        return total_cost / num_examples
def norm_stabilizer_loss(logits_to_normalize, norm_regularizer_factor = 50, name = None):
    print('WARNING ------YOU HAVE OPTED TO USE NORM STABILIZER LOSS -------------------------------')
    '''Will add a Norm Stabilizer Loss

    Args:
      logits_to_normalize: This can be output logits or hidden states. The state of each decoder cell in each time-step. This is a list
        with length len(decoder_inputs) -- one item for each time-step.
        Each item is a 2D Tensor of shape [batch_size x cell.state_size] (or it can be [batch_size x output_logits])
      norm_regularizer_factor: The factor required to apply norm stabilization. Keep
        in mind that a larger factor will allow you to achieve a lower loss, but it will take
        many more epochs to do so!

    Returns:
      final_reg_loss: One Scalar Value representing the loss averaged across the batch'''
    with tf.op_scope(logits_to_normalize, name, "norm_stabilizer_loss"):  # group all ops under one scope
        batch_size = tf.shape(logits_to_normalize[0])[0]  # dynamic batch size (scalar int tensor)
        # Scalar 0.0 accumulator (zeros_like of a scalar tensor, cast to float).
        squared_sum = tf.zeros_like(batch_size, dtype = tf.float32)
        # Sum over consecutive time-step pairs: sum_t (||h_{t+1}|| - ||h_t||)^2.
        for q in xrange(len(logits_to_normalize)-1):
            '''Frobenius norm per example; reduction_indices=1 reduces across the
            feature axis, leaving a [batch_size] vector of norms.'''
            difference = tf.sub(lfe.frobenius_norm(logits_to_normalize[q+1], reduction_indices = 1),
                                lfe.frobenius_norm(logits_to_normalize[q], reduction_indices = 1))
            '''the difference has the dimensions of [batch_size]'''
            squared_sum = tf.add(squared_sum, tf.square(difference))
        # Average across the batch and divide by the sequence length T.
        batch_size_times_len_logits = len(logits_to_normalize)*tf.to_float(batch_size)
        final_reg_loss = norm_regularizer_factor*(tf.reduce_sum(squared_sum))/batch_size_times_len_logits
        return final_reg_loss
def l1_orthogonal_regularizer(logits_to_normalize, l1_alpha_loss_factor = 10, name = None):
    """Add an L1 orthogonality penalty on the shared "linear" weight matrix.

    Motivation for this loss function comes from: https://redd.it/3wx4sr
    (thanks to spurious_recollectio and harponen on reddit for the suggestion).
    Penalizes |W^T W - (1.05)^2 I| so W is pushed toward (scaled) orthogonality.
    This is different than unitary because it is an orthogonal-matrix
    approximation -- it will suffer from timesteps longer than 500 and costs
    O(n^3) compute.

    Args:
      logits_to_normalize: list of 2D Tensors [batch_size x units]; only used
        here to recover the dynamic batch size for averaging.
      l1_alpha_loss_factor: scalar multiplier added linearly to the softmax cost.
      name: optional op-scope name.

    Returns:
      final_l1_loss: one scalar Tensor, the loss averaged across the batch.
    """
    # BUG FIX: scope was mislabelled "rnn_l2_loss" in the original.
    with tf.op_scope(logits_to_normalize, name, "rnn_l1_loss"):
        # BUG FIX: `batch_size` was used but never defined (NameError at graph
        # construction). Recover it from the first tensor, as
        # norm_stabilizer_loss does.
        batch_size = tf.to_float(tf.shape(logits_to_normalize[0])[0])
        # NOTE(review): assumes a variable named "linear" already exists in the
        # current (reusing) variable scope -- confirm the enclosing scope.
        Weights_for_l1_loss = tf.get_variable("linear")
        matrix_dot_product = tf.matmul(Weights_for_l1_loss, Weights_for_l1_loss, transpose_a = True)
        identity_matrix = lfe.identity_like(Weights_for_l1_loss)
        # BUG FIX: the quoted equation is (1.05)**2 * I; the original computed
        # 2*1.05*I (the source's exponent asterisks were mangled to `*2`).
        matrix_minus_identity = matrix_dot_product - (1.05 ** 2) * identity_matrix
        absolute_cost = tf.abs(matrix_minus_identity)
        # BUG FIX: reduce to a scalar (the docstring promises one value); the
        # original returned a whole matrix.
        final_l1_loss = l1_alpha_loss_factor * (tf.reduce_sum(absolute_cost) / batch_size)
        return final_l1_loss
def l2_orthogonal_regularizer(logits_to_normalize, l2_alpha_loss_factor = 10, name = None):
    """Add an L2 orthogonality penalty on the shared "linear" weight matrix.

    Motivation for this loss function comes from:
    https://www.reddit.com/r/MachineLearning/comments/3uk2q5/151106464_unitary_evolution_recurrent_neural/
    (thanks to spurious_recollectio on reddit for the suggestion).
    Penalizes sum((W^T W - (1.05)^2 I)^2) so W is pushed toward (scaled)
    orthogonality. This is different than unitary because it is an
    orthogonal-matrix approximation -- it will suffer from timesteps longer
    than 500 and costs O(n^3) compute.

    Args:
      logits_to_normalize: list of 2D Tensors [batch_size x units]; only used
        here to recover the dynamic batch size for averaging.
      l2_alpha_loss_factor: scalar multiplier added linearly to the softmax cost.
      name: optional op-scope name.

    Returns:
      final_l2_loss: one scalar Tensor, the loss averaged across the batch.
    """
    with tf.op_scope(logits_to_normalize, name, "rnn_l2_loss"):
        # BUG FIX: `batch_size` was used but never defined (NameError at graph
        # construction). Recover it from the first tensor, as
        # norm_stabilizer_loss does.
        batch_size = tf.to_float(tf.shape(logits_to_normalize[0])[0])
        # NOTE(review): assumes a variable named "linear" already exists in the
        # current (reusing) variable scope -- confirm the enclosing scope.
        Weights_for_l2_loss = tf.get_variable("linear")
        matrix_dot_product = tf.matmul(Weights_for_l2_loss, Weights_for_l2_loss, transpose_a = True)
        identity_matrix = lfe.identity_like(Weights_for_l2_loss)
        # BUG FIX: the quoted equation is (1.05)**2 * I; the original computed
        # 2*1.05*I (the source's exponent asterisks were mangled to `*2`).
        matrix_minus_identity = matrix_dot_product - (1.05 ** 2) * identity_matrix
        square_the_loss = tf.square(matrix_minus_identity)
        final_l2_loss = l2_alpha_loss_factor * (tf.reduce_sum(square_the_loss) / batch_size)
        return final_l2_loss
def model_with_buckets(encoder_inputs, decoder_inputs, targets, weights,
                       buckets, num_decoder_symbols, seq2seq,
                       softmax_loss_function=None, name=None, norm_regularize_hidden_states = False,
                       norm_regularize_logits = False, norm_regularizer_factor = 50,
                       apply_l2_loss = False, l2_loss_factor = 5):
  """Create a sequence-to-sequence model with support for bucketing.

  The seq2seq argument is a function that defines a sequence-to-sequence model,
  e.g., seq2seq = lambda x, y: basic_rnn_seq2seq(x, y, rnn_cell.GRUCell(24))

  Args:
    encoder_inputs: a list of Tensors to feed the encoder; first seq2seq input.
    decoder_inputs: a list of Tensors to feed the decoder; second seq2seq input.
    targets: a list of 1D batch-sized int32-Tensors (desired output sequence).
    weights: list of 1D batch-sized float-Tensors to weight the targets.
    buckets: a list of pairs of (input size, output size) for each bucket.
    num_decoder_symbols: integer, number of decoder symbols (output classes).
    seq2seq: a sequence-to-sequence model function; it takes 2 input that
      agree with encoder_inputs and decoder_inputs, and returns a pair
      consisting of outputs and states (as, e.g., basic_rnn_seq2seq).
    softmax_loss_function: function (inputs-batch, labels-batch) -> loss-batch
      to be used instead of the standard softmax (the default if this is None).
    name: optional name for this operation, defaults to "model_with_buckets".
    norm_regularize_hidden_states: if True, add a norm-stabilizer loss over the
      bucket's hidden states.
    norm_regularize_logits: if True, add a norm-stabilizer loss over the logits.
    norm_regularizer_factor: scaling factor for the norm-stabilizer losses.
    apply_l2_loss: if True, add the RNN L2 orthogonality loss.
    l2_loss_factor: scaling factor for the L2 orthogonality loss.

  Returns:
    outputs: The outputs for each bucket. Its j'th element consists of a list
      of 2D Tensors of shape [batch_size x num_decoder_symbols] (j'th outputs).
    losses: List of scalar Tensors, representing losses for each bucket
      (regularization terms included).

  Raises:
    ValueError: if length of encoder_inputs, targets, or weights is smaller
      than the largest (last) bucket.
  """
  if len(encoder_inputs) < buckets[-1][0]:
    raise ValueError("Length of encoder_inputs (%d) must be at least that of la"
                     "st bucket (%d)." % (len(encoder_inputs), buckets[-1][0]))
  if len(targets) < buckets[-1][1]:
    raise ValueError("Length of targets (%d) must be at least that of last"
                     "bucket (%d)." % (len(targets), buckets[-1][1]))
  if len(weights) < buckets[-1][1]:
    raise ValueError("Length of weights (%d) must be at least that of last"
                     "bucket (%d)." % (len(weights), buckets[-1][1]))
  all_inputs = encoder_inputs + decoder_inputs + targets + weights
  losses = []
  outputs = []
  out_hidden_states = [] #nick added this
  with tf.op_scope(all_inputs, name, "model_with_buckets"):
    for j in xrange(len(buckets)):
      if j > 0:
        # Buckets after the first one reuse the same model parameters.
        tf.get_variable_scope().reuse_variables()
      # Slice the inputs down to this bucket's (input size, output size).
      bucket_encoder_inputs = [encoder_inputs[i]
                               for i in xrange(buckets[j][0])]
      bucket_decoder_inputs = [decoder_inputs[i]
                               for i in xrange(buckets[j][1])]
      bucket_outputs, bucket_states= seq2seq(bucket_encoder_inputs,
                                             bucket_decoder_inputs) #nick pay attention here -- you added bucket_states
      outputs.append(bucket_outputs)
      bucket_targets = [targets[i] for i in xrange(buckets[j][1])]
      bucket_weights = [weights[i] for i in xrange(buckets[j][1])]
      '''CALCULATE NORM REGULARIZE LOSS HERE'''
      final_reg_loss = 0
      if norm_regularize_hidden_states:
        print('Warning -- You have opted to Use Norm Regularize Hidden States. Your Regularizer factor is:', norm_regularizer_factor)
        final_reg_loss = norm_stabilizer_loss(bucket_states, norm_regularizer_factor = norm_regularizer_factor)
      if norm_regularize_logits:
        final_reg_loss += norm_stabilizer_loss(bucket_outputs, norm_regularizer_factor = norm_regularizer_factor)
        print('Warning -- You have opted to Use Norm Regularize Input Logits. Your Regularizer factor is:', norm_regularizer_factor)
      if apply_l2_loss:
        final_reg_loss += rnn_l2_loss(l2_loss_factor = l2_loss_factor)
        print('Warning -- You have opted to Use RNN L2 Orthongonal Loss, Your Scaling factor is:', l2_loss_factor)
      # Regularization terms are added on top of the per-bucket sequence loss.
      losses.append(final_reg_loss + sequence_loss(
          outputs[-1], bucket_targets, bucket_weights, num_decoder_symbols,
          softmax_loss_function=softmax_loss_function))
  return outputs, losses
#THE LOSSES is just for bucket listing! so you can add the losses together
'''outputs are considered logits, and the -1 gives a list of logits for that one bucket!''' | nilq/baby-python | python |
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 28 13:06:28 2020
@author: tomvi
"""
import pandas as pd
import math
import statistics as stat
import statsmodels.api as sm
from statsmodels.stats.diagnostic import het_white as white, \
het_breuschpagan as bpt
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import xml.etree.ElementTree as ET
from urllib.request import urlopen
from stargazer.stargazer import Stargazer
# general functions
def upper(self):
    """Return *self* upper-cased when it is a string; otherwise return it unchanged."""
    # isinstance (not type()==str) also accepts str subclasses.
    if isinstance(self, str):
        return self.upper()
    return self
def log_0(x):
    """Natural logarithm of *x*, with non-positive inputs mapped to 0."""
    return math.log(x) if x > 0 else 0
def log(x, a=math.e, zero=True):
    """Logarithm of *x* in base *a*.

    When *zero* is true (the default), non-positive *x* yields 0 instead of
    raising.  (The old explicit ``zero == True`` / ``zero == False`` checks
    returned None for any non-bool value; *zero* is now treated as a plain
    truth value, and the zero-handling no longer depends on a helper.)
    """
    if zero:
        return (math.log(x) if x > 0 else 0) / math.log(a)
    return math.log(x) / math.log(a)
def select(df, column, value):
    """Return the rows of *df* whose *column* equals *value*."""
    mask = df[column] == value
    return df[mask]
def identity(x):
    """Return *x* unchanged (identity transform; used as a default elsewhere)."""
    return x
def unique_sort(list_):
    """Return a sorted list of the unique elements of *list_*."""
    # sorted(set(...)) replaces the manual set -> list -> in-place sort dance.
    return sorted(set(list_))
def tonot(x):
    """Return the logical negation of the truthiness of *x*."""
    return not x
def OLS(endo, exo, c="c", summary=1):
    """Fit an OLS regression of *endo* on *exo*.

    A constant term is added when ``c == "c"``; the fit summary is printed
    when ``summary == 1``.  Returns the fitted statsmodels results object.
    """
    regressors = sm.add_constant(exo) if c == "c" else exo
    model = sm.OLS(endo, regressors).fit()
    if summary == 1:
        print(model.summary())
    return model
def white_test(model, printed=False):
    """Run White's heteroskedasticity test on *model*'s residuals.

    Returns the statsmodels result tuple
    (LM statistic, LM p-value, F statistic, F p-value).
    """
    coef = white(model.resid, model.model.exog)
    # Idiom fix: truthiness check instead of `== True` (flake8 E712).
    if printed:
        print(coef)
    return coef
def bp_test(model, printed=False):
    """Run the Breusch-Pagan heteroskedasticity test on *model*'s residuals.

    Returns the statsmodels result tuple
    (LM statistic, LM p-value, F statistic, F p-value).
    """
    coef = bpt(model.resid, model.model.exog)
    # Idiom fix: truthiness check instead of `== True` (flake8 E712).
    if printed:
        print(coef)
    return coef
def aggregate_data(data, by):
    """Group *data* by *by* and return the per-group sums with a fresh index."""
    summed = data.groupby(by=by, as_index=False).sum()
    return summed.reset_index(drop=True)
def string_plus(self, x=1):
    """Add *x* to the integer value of *self* and return the result as a string."""
    total = int(self) + x
    return str(total)
def fill_ico(self):
    """Zero-pad *self* on the left to 8 characters (keeps only the last 8)."""
    padded = "00000000" + str(self)
    return padded[-8:]
def compare(sth, data, years, sth2=identity, restrict=False, bil=False,
            what="RS321"):
    """Print and collect a per-year aggregate of column *what* of *data*.

    For every distinct year in *years*, applies *sth* (an aggregator such as
    ``sum`` or ``len``) to ``sth2(...)`` of the selected rows, optionally
    restricted by the boolean mask *restrict*.  When *bil* is truthy and the
    aggregator is ``sum``, values are scaled to billions.

    The results are stored in the module-level ``compare_out`` (kept for
    backward compatibility) and also returned as a list.
    """
    global compare_out
    print(str(sth))
    compare_table = []
    if bil == False:
        bil_str = " = "
        bil = 1
    else:
        bil_str = " in bilions = "
        bil = 10 ** 9
    for yr in unique_sort(years):
        year = data["year"] == yr
        if isinstance(restrict, bool):
            # A bare bool means "no extra restriction".
            when = year
        else:
            when = year & restrict
        if sth is sum:
            result = sth(sth2(data[when][what] / bil))
        else:
            result = sth(sth2(data[when][what]))
        # Bug fix: the old "+ yr +" concatenation raised TypeError whenever
        # the year values were not strings; str.format handles any type.
        print("Result for {}{}{}".format(yr, bil_str, result))
        compare_table.append(result)
    compare_out = compare_table
    return compare_table
import torch
from torch.nn.utils import clip_grad_value_
from torch.distributions import Categorical
from radbm.utils.torch import torch_soft_hamming
from radbm.search.elba import EfficientLearnableBinaryAccess
def categorical_entropy(cat):
    """
    Entropy of a Categorical distribution, computed without the annoying
    0*log(0) terms (torch handles them internally).

    Parameters
    ----------
    cat : torch.Tensor (ndim==1)
        The parameter (probability vector) of a Categorical distribution.

    Returns
    -------
    ent : torch.Tensor (a single float)
        The entropy of the Categorical distribution.
    """
    dist = Categorical(probs=cat)
    return dist.entropy()
def mi_categorical_bernoulli(pos_cat, neg_cat, p):
    """
    Mutual information between a categorical and a Bernoulli variable.

    Uses the identity I(C, B) = H(C) - p*H(C | B=1) - (1-p)*H(C | B=0)
    with C = Cat(pi) and B = Ber(p).

    Parameters
    ----------
    pos_cat : torch.tensor (ndim=1, pos_cat.sum()=1)
        The parameters of C | B=1
    neg_cat : torch.tensor (ndim=1, neg_cat.sum()=1)
        The parameters of C | B=0
    p : float
        The parameters of B

    Returns
    -------
    I : torch.tensor (a single float)
        The Mutual Information I(C, B)
    """
    q = 1 - p
    marginal = p * pos_cat + q * neg_cat
    h_marginal = categorical_entropy(marginal)
    h_pos = categorical_entropy(pos_cat)
    h_neg = categorical_entropy(neg_cat)
    return h_marginal - p * h_pos - q * h_neg
class TriangularKernel(torch.nn.Module):
    """
    Helper Module: soft binning with a triangular (hat) kernel.

    For each input value x and each centroid c, produces
    relu(1 - |c - x| / width), i.e. weight 1 at the centroid falling
    linearly to 0 one width away.
    """
    def __init__(self, centroids, widths=None):
        super().__init__()
        if widths is None:
            widths = torch.tensor(1, dtype=centroids.dtype)
        self.register_buffer('centroids', centroids)
        self.register_buffer('widths', widths)
        self.relu = torch.nn.ReLU()

    def forward(self, x):
        # Append a trailing axis to x and broadcast it against the centroids,
        # giving an output of shape x.shape + (num_centroids,).
        values = x.unsqueeze(-1)
        centroids = self.centroids.reshape((1,) * x.dim() + (-1,))
        return self.relu(1 - (centroids - values).abs() / self.widths)
class MIHash(EfficientLearnableBinaryAccess):
    """
    MIHash as in "MIHash: Online Hashing with Mutual Information"
    by Fatih Cakir, Kun He, Sarah Adel Bargal and Stan Sclaroff.

    Parameters
    ----------
    fq : torch.nn.Module
        The query Multi-Bernoulli encoder.
    fd : torch.nn.Module
        The document Multi-Bernoulli encoder.
    struct : BaseSDS subclass
        The structure used in ELBA.
    nbits : int
        Number of code bits; sets up one soft Hamming-distance bin per
        possible distance (0..nbits).
    match_prob : float (in [0,1])
        The probability that there is a match given a random query
        and a random document.
    """
    def __init__(self, fq, fd, struct, nbits, match_prob, *args, **kwargs):
        super().__init__(fq, fd, struct, *args, **kwargs)
        self.match_prob = match_prob
        # One triangular bin centred on each possible Hamming distance.
        self.kernel = TriangularKernel(torch.arange(0,nbits+1))

    def step(self, q, d, match, l2_ratio=0):
        """
        Do a training step.

        Parameters
        ----------
        q : torch.Tensor
            A batch of queries.
        d : torch.Tensor
            A batch of documents.
        match : torch.Tensor (dtype=torch.bool)
            A matrix (2D tensor) with match[i,j] indicating if q[i] match with d[j]
        l2_ratio : float
            NOTE(review): accepted but never read in this body -- confirm
            whether it should feed an L2 term.

        Returns
        -------
        loss : torch.Tensor (size 1)
            The loss (negative mutual information) of the current batch.
        """
        self.zero_grad()
        # tanh relaxes the binary codes so the soft Hamming distance is
        # differentiable.
        qsign = torch.tanh(self.fq(q))
        dsign = torch.tanh(self.fd(d))
        sh = torch_soft_hamming(qsign[:,None], dsign[None,:]) #shape = (#queries, #documents)
        # Soft-bin the distances, then average bins over matching and
        # non-matching pairs to estimate the two conditional distributions.
        bins = self.kernel(sh)
        pos_cat = bins[match].mean(dim=0)
        neg_cat = bins[~match].mean(dim=0)
        loss = -mi_categorical_bernoulli(pos_cat, neg_cat, self.match_prob)
        loss.backward()
        # Clip gradients to stabilise the online updates.
        clip_grad_value_(self.parameters(), 5)
        self.optim.step()
        return loss
import Print
import os
import shutil
import sq_tools
import io
import sys
import listmanager
import csv
import requests
try:
import ujson as json
except:
import json
from tqdm import tqdm
# SET ENVIRONMENT
# Locate the NSCB root directory: either the current directory already
# contains ztools/ (we are at the root), the parent does, or we fall back to
# building the expected paths under the parent anyway.
squirrel_dir=os.path.abspath(os.curdir)
NSCB_dir=os.path.abspath('../'+(os.curdir))
if os.path.exists(os.path.join(squirrel_dir,'ztools')):
    NSCB_dir=squirrel_dir
    zconfig_dir=os.path.join(NSCB_dir, 'zconfig')
    ztools_dir=os.path.join(NSCB_dir,'ztools')
    squirrel_dir=ztools_dir
elif os.path.exists(os.path.join(NSCB_dir,'ztools')):
    squirrel_dir=squirrel_dir
    ztools_dir=os.path.join(NSCB_dir, 'ztools')
    zconfig_dir=os.path.join(NSCB_dir, 'zconfig')
else:
    ztools_dir=os.path.join(NSCB_dir, 'ztools')
    zconfig_dir=os.path.join(NSCB_dir, 'zconfig')
# Path of the stored 1fichier API token used by download().
_1fichier_token=os.path.join((os.path.join(zconfig_dir, 'credentials')),'_1fichier_token.tk')
def download(url, ofolder):
    """Download *url* through the 1fichier API into folder *ofolder*.

    Reads the API token from `_1fichier_token`, queries the file's name and
    size, requests a download token, then streams the payload to disk with a
    progress bar.  Exits the process on a missing token or a failed API call.
    """
    if not os.path.exists(_1fichier_token):
        sys.exit("No 1fichier token setup")
    with open(_1fichier_token, 'rt', encoding='utf8') as tfile:
        token = tfile.readline().strip()
    # Bug fix: readline() never returns None, so the old `token == None`
    # check could not fire; an empty/blank token file slipped through.
    if not token:
        sys.exit("Missing 1fichier token")
    auth = {'Authorization': f'Bearer {token}', 'Content-Type': 'application/json'}
    session = requests.session()
    download_params = {
        'url': url,
        'inline': 0,
        'cdn': 0,
        'restrict_ip': 0,
        'no_ssl': 0,
    }
    info_params = {
        'url': url
    }
    r = session.post('https://api.1fichier.com/v1/file/info.cgi', json=info_params, headers=auth)
    info_dict = r.json()
    sz = info_dict['size']
    name = info_dict['filename']
    r = session.post('https://api.1fichier.com/v1/download/get_token.cgi', json=download_params, headers=auth)
    dict_ = r.json()
    if not dict_['status'] == "OK":
        sys.exit(f"API call returned {dict_['status']}")
    URL = dict_['url']
    # Reuse the same session for the payload (a second session was redundant).
    response = session.get(URL, stream=True)
    buf = 64 * 1024
    output = os.path.join(ofolder, name)
    print("- Downloading file to {}".format(output))
    t = tqdm(total=int(sz), unit='B', unit_scale=True, leave=False)
    with open(output, "wb") as o:
        for data in response.iter_content(chunk_size=buf):
            o.write(data)
            t.update(len(data))
            if not data:
                break
    t.close()
    print(" *Finished*")
"""Third-party commands enabled through drytoml."""
import importlib
import os
import shlex
import subprocess as sp # noqa: S404
import sys
import tempfile
from contextlib import contextmanager
from pathlib import Path
from typing import IO
from typing import Callable
from typing import List
from typing import Union
from drytoml.parser import Parser
def import_callable(string: str) -> Callable:
    """Import an object from a string using colon syntax.

    Args:
        string: String of the form `package.module:object`

    Returns:
        The imported object
    """
    module_path, attribute = string.split(":")
    module = importlib.import_module(module_path)
    return getattr(module, attribute)
class Wrapper:
    """Common skeleton for third-party wrapper commands."""

    # Path of the (dry) toml configuration to render.
    cfg: str
    # Open handle to the rendered temporary toml file, set during __call__.
    virtual: IO[str]

    def __call__(self, importstr):
        """Execute the wrapped callback.

        Args:
            importstr: String of the form `package.module:object`

        .. seealso:: `import_callable`
        """
        with self.tmp_dump() as virtual:
            self.virtual = virtual
            self.pre_import()
            self.pre_call()
            tool_main = import_callable(importstr)
            # Propagate the wrapped tool's exit status to the shell.
            sys.exit(tool_main())

    def pre_import(self):
        """Execute custom processing done before callback import."""

    def pre_call(self):
        """Execute custom processing done before callback execution."""

    @contextmanager
    def tmp_dump(self):
        """Yield a temporary file with the configuration toml contents.

        Yields:
            Temporary file with the configuration toml contents
        """
        parser = Parser.from_file(self.cfg)
        document = parser.parse()

        # ensure locally referenced files work: the temp file is created next
        # to the original configuration so relative paths keep resolving.
        path = Path(self.cfg)
        if path.is_absolute():
            parent = path.parent
        else:
            parent = (Path.cwd() / self.cfg).parent

        with tempfile.NamedTemporaryFile(
            mode="w+",
            suffix=".toml",
            prefix="drytoml.",
            dir=str(parent),
        ) as fp:
            fp.write(document.as_string())
            fp.seek(0)
            yield fp
class Env(Wrapper):
    """Call another script, configuring it with an environment variable."""

    def __init__(self, env: Union[str, List[str]]):
        """Instantiate a cli wrapper.

        Args:
            env: Name(s) of the env var(s) to use which selects a
                configuration file.
        """
        self.envs = [env] if isinstance(env, str) else env
        # The first env var (if set) selects the config; default otherwise.
        self.cfg = os.environ.get(self.envs[0], "pyproject.toml")

    def pre_import(self):
        """Point every configured env var at the rendered temp file."""
        for env in self.envs:
            os.environ[env] = self.virtual.name
class Cli(Wrapper):
    """Call another script, configuring it with specific cli flag."""

    def __init__(self, configs: List[str]):
        """Instantiate a cli wrapper.

        Args:
            configs: Possible names for the configuration flag of the
                wrapped script.

        Raises:
            ValueError: Empty configs.
        """
        if not configs:
            raise ValueError("No configuration strings received")

        # Scan argv for the first known config flag; when found, remember its
        # value and remove the flag and value from the surrounding argv.
        for option in configs:
            try:
                idx = sys.argv.index(option)
                pre = sys.argv[:idx]
                post = sys.argv[idx + 2 :]
                cfg = sys.argv[idx + 1]
                break
            except ValueError:
                # This alias is not present in argv; try the next one.
                pass
        else:
            # No config flag given: default config file and first alias.
            pre = sys.argv
            post = []
            cfg = "pyproject.toml"
            option = configs[0]

        self.cfg = cfg
        self.pre = pre
        self.post = post
        self.option = option

    def pre_call(self) -> None:
        """Prepare sys.argv to contain the configuration flag and file."""
        # Re-insert the flag, now pointing at the rendered temp file.
        sys.argv = [*self.pre, self.option, f"{self.virtual.name}", *self.post]
def black():
    """Execute black, configured with custom setting cli flag."""
    # Injects the rendered configuration through black's --config flag.
    Cli(["--config"])("black:patched_main")
def isort():
    """Execute isort, configured with custom setting cli flag."""
    # isort accepts several aliases for its settings flag; any one found in
    # argv is replaced with the rendered configuration.
    Cli(["--sp", "--settings-path", "--settings-file", "--settings"])(
        "isort.main:main"
    )
def pylint():
    """Execute pylint, configured with custom setting cli flag."""
    # Injects the rendered configuration through pylint's --rcfile flag.
    Cli(["--rcfile"])("pylint:run_pylint")
def flakehell():
    """Execute flakehell, configured with custom env var."""
    # flakehell reads its config path from these environment variables.
    Env(["FLAKEHELL_TOML", "PYLINTRC"])("flakehell:entrypoint")
def flake8helled():
    """Execute flake8helled, configured with custom env var."""
    # Same env-var mechanism as flakehell, but the flake8 entry point.
    Env(["FLAKEHELL_TOML", "PYLINTRC"])("flakehell:flake8_entrypoint")
def check():
    """Execute all formatters and linters, sequentially."""
    for command in (
        "dry -q isort .",
        "dry -q black .",
        "dry -q flakehell lint .",
    ):
        # NOTE(review): return codes are ignored (no check=True), so a failing
        # linter does not stop the sequence -- confirm this is intended.
        sp.run(shlex.split(command))  # noqa: S603, W1510
| nilq/baby-python | python |
# 313. Super Ugly Number
# ttungl@gmail.com
class Solution(object):
    def nthSuperUglyNumber(self, n, primes):
        """
        :type n: int
        :type primes: List[int]
        :rtype: int
        """
        # Dynamic programming with one pointer per prime: candidates[i] is
        # the next value reachable by multiplying an earlier result by
        # primes[i].  Each step takes the smallest candidate and advances
        # every pointer that produced it, which also removes duplicates.
        results = [1] * n
        pointers = [-1] * len(primes)
        candidates = [1] * len(primes)
        for pos in range(n):
            smallest = min(candidates)
            results[pos] = smallest
            for idx in range(len(primes)):
                if candidates[idx] == smallest:
                    pointers[idx] += 1
                    candidates[idx] = results[pointers[idx]] * primes[idx]
        return results[-1]
| nilq/baby-python | python |
from django.core.management import call_command
from unittest import mock
from django_webpack_dev_server.management.generator import Generator
class TestCommand:
    """
    Test Class for testing the Command Class defined in the generate.py
    """

    @mock.patch.object(Generator, "generate")
    def test_command(self, mocked_Generator_generate):
        """
        Function to test the methods of the Command Class
        """
        # call the management command to test
        call_command("generate", "react")
        # assert that the generate method of the Generator class is called.
        # Idiom fix: plain truthiness assert instead of `== True` (E712).
        assert mocked_Generator_generate.called
| nilq/baby-python | python |
"""
"""
import numpy as np
from ..diffstar_ew_kernels import _calc_ew_from_diffstar_params_const_lgu_lgmet
from ..diffstar_ew_kernels import _calc_ew_from_diffstar_params_const_lgmet
from ..sfh_model import DEFAULT_MAH_PARAMS, DEFAULT_MS_PARAMS, DEFAULT_Q_PARAMS
from ..mzr import DEFAULT_MZR_PARAMS
from .retrieve_fake_fsps_data import load_fake_sps_data
OIIa, OIIb = 4996.0, 5000.0
def test_calc_ew_from_diffstar_params_const_lgu_lgmet():
    # Smoke test: assembles a full argument tuple from the package defaults
    # and checks the constant-lgU/lgmet kernel runs end to end.
    # NOTE(review): no assertions are made on ew/total_line_flux -- consider
    # adding value or shape checks.
    res = load_fake_sps_data()
    filter_waves, filter_trans, wave_ssp, _spec_ssp, lgZsun_bin_mids, log_age_gyr = res
    t_obs = 11.0

    lgU_bin_mids = np.array((-3.5, -2.5, -1.5))
    # Replicate the single SSP spectrum once per ionisation-parameter bin.
    spec_ssp = np.array([_spec_ssp for __ in range(lgU_bin_mids.size)])

    mah_params = np.array(list(DEFAULT_MAH_PARAMS.values()))
    ms_params = np.array(list(DEFAULT_MS_PARAMS.values()))
    q_params = np.array(list(DEFAULT_Q_PARAMS.values()))
    met_params = np.array(list(DEFAULT_MZR_PARAMS.values()))
    lgmet = -1.0
    lgmet_scatter = met_params[-1]  # scatter is stored as the last MZR parameter
    lgu = -2.0
    lgu_scatter = 0.2

    # 30-Angstrom line window with 50-Angstrom continuum side-bands.
    line_mid = OIIb
    line_lo = line_mid - 15
    line_hi = line_mid + 15
    cont_lo_lo = line_mid - 100
    cont_lo_hi = line_mid - 50
    cont_hi_lo = line_mid + 50
    cont_hi_hi = line_mid + 100

    args = (
        t_obs,
        lgZsun_bin_mids,
        log_age_gyr,
        lgU_bin_mids,
        wave_ssp,
        spec_ssp,
        *mah_params,
        *ms_params,
        *q_params,
        lgmet,
        lgmet_scatter,
        lgu,
        lgu_scatter,
        line_lo,
        line_mid,
        line_hi,
        cont_lo_lo,
        cont_lo_hi,
        cont_hi_lo,
        cont_hi_hi,
    )
    ew, total_line_flux = _calc_ew_from_diffstar_params_const_lgu_lgmet(*args)
def test_calc_ew_from_diffstar_params_const_lgmet():
    # Smoke test for the constant-metallicity kernel: assembles the default
    # diffstar/MZR parameters and checks the call completes.
    # NOTE(review): no assertions are made on ew/total_line_flux -- consider
    # adding value or shape checks.
    res = load_fake_sps_data()
    filter_waves, filter_trans, wave_ssp, spec_ssp, lgZsun_bin_mids, log_age_gyr = res
    t_obs = 11.0

    mah_params = np.array(list(DEFAULT_MAH_PARAMS.values()))
    ms_params = np.array(list(DEFAULT_MS_PARAMS.values()))
    q_params = np.array(list(DEFAULT_Q_PARAMS.values()))
    met_params = np.array(list(DEFAULT_MZR_PARAMS.values()))
    lgmet = -1.0
    lgmet_scatter = met_params[-1]  # scatter is stored as the last MZR parameter

    # 30-Angstrom line window with 50-Angstrom continuum side-bands.
    line_mid = OIIb
    line_lo = line_mid - 15
    line_hi = line_mid + 15
    cont_lo_lo = line_mid - 100
    cont_lo_hi = line_mid - 50
    cont_hi_lo = line_mid + 50
    cont_hi_hi = line_mid + 100

    args = (
        t_obs,
        lgZsun_bin_mids,
        log_age_gyr,
        wave_ssp,
        spec_ssp,
        *mah_params,
        *ms_params,
        *q_params,
        lgmet,
        lgmet_scatter,
        line_lo,
        line_mid,
        line_hi,
        cont_lo_lo,
        cont_lo_hi,
        cont_hi_lo,
        cont_hi_hi,
    )
    ew, total_line_flux = _calc_ew_from_diffstar_params_const_lgmet(*args)
| nilq/baby-python | python |
# ABC165A - We Love Golf
def main():
    """ABC165 A: read K, then A and B, and print "OK" when some multiple of K
    lies in the inclusive range [A, B], otherwise "NG"."""
    # input
    K = int(input())
    A, B = map(int, input().split())
    # compute -- idiomatic any() replaces the manual flag loop
    km = any(i % K == 0 for i in range(A, B + 1))
    # output
    print("OK" if km else "NG")
if __name__ == '__main__':
main()
| nilq/baby-python | python |
# Adapted from https://github.com/opencv/opencv_contrib/blob/master/modules/aruco/samples/detect_markers.cpp
import argparse
import cv2
import utils
def main(args):
    """Continuously detect ArUco markers on an undistorted webcam stream and
    display the detections until Esc is pressed."""
    # Read camera parameters
    camera_params_file_path = utils.get_camera_params_file_path(args.camera_name)
    image_width, image_height, camera_matrix, dist_coeffs = utils.get_camera_params(camera_params_file_path)

    # Set up webcam
    cap = utils.get_video_cap(image_width, image_height, args.camera_id)

    # Set up aruco dict
    params = utils.get_marker_parameters()
    aruco_dict = cv2.aruco.Dictionary_get(params['dict_id'])

    # Enable corner refinement
    #detector_params = cv2.aruco.DetectorParameters_create()
    #detector_params.cornerRefinementMethod = cv2.aruco.CORNER_REFINE_SUBPIX

    while True:
        if cv2.waitKey(1) == 27:  # Esc key
            break

        _, image = cap.read()
        if image is None:
            # Camera frame not ready yet; try again.
            continue

        # Undistort image and detect markers
        image = cv2.undistort(image, camera_matrix, dist_coeffs)
        #corners, ids, _ = cv2.aruco.detectMarkers(image, aruco_dict, parameters=detector_params)
        corners, ids, _ = cv2.aruco.detectMarkers(image, aruco_dict)

        # Show detections on a copy so the raw frame stays untouched
        image_copy = image.copy()
        if ids is not None:
            cv2.aruco.drawDetectedMarkers(image_copy, corners, ids)
        cv2.imshow('out', image_copy)

    cap.release()
    cv2.destroyAllWindows()
parser = argparse.ArgumentParser()
parser.add_argument('--camera-id', type=int, default=0)
parser.add_argument('--camera-name', default='logitech-c930e')
# NOTE(review): this runs on import as well -- consider guarding with
# `if __name__ == '__main__':`.
main(parser.parse_args())
| nilq/baby-python | python |
from unittest import TestCase
from vision.flashless_utility import copy_backup_flashless_files, rollback_flashless_files
from vision.constant import VisionException
from mock import patch
class TestFlashlessUtility(TestCase):
    """Tests for the flashless backup/rollback file-copy helpers."""

    @patch('os.path.isfile', return_value=True)
    @patch('shutil.copyfile')
    def test_copy_backup_flashless_files_pass(self, mock_copy, mock_is_file):
        copy_backup_flashless_files()
        # three flashless files are expected to be backed up
        assert mock_copy.call_count == 3

    @patch('shutil.copyfile', side_effect=OSError)
    def test_copy_backup_flashless_files_fail(self, mock_copy):
        # an OS error during copy must surface as a VisionException
        self.assertRaises(VisionException, copy_backup_flashless_files)

    @patch('os.path.isfile', return_value=True)
    @patch('shutil.copyfile')
    def test_rollback_flashless_files_pass(self, mock_copy, mock_is_file):
        rollback_flashless_files()
        # the same three files are restored on rollback
        assert mock_copy.call_count == 3

    @patch('shutil.copyfile', side_effect=FileNotFoundError)
    def test_rollback_flashless_files_fail(self, mock_copy):
        # a missing backup must also surface as a VisionException
        self.assertRaises(VisionException, rollback_flashless_files)
| nilq/baby-python | python |
from mandaw import *
# Create an 800x600 black window and enter the main loop (blocks until the
# window is closed).
mandaw = Mandaw("Window!", width = 800, height = 600, bg_color = (0, 0, 0, 255))
mandaw.loop()
import uuid
import unittest
from TwitterSentimentAnalysis import ai, core, downloaders, datasets
import numpy as np
import os
from sklearn.metrics import mean_squared_error
class NeuralNetworksTweetsTestCase(unittest.TestCase):
    """Integration tests for the ai.* models against freshly downloaded tweets.

    NOTE(review): every test downloads 100 live tweets ("erasmus" query) into
    a uniquely named collection, so the suite needs network access and Twitter
    credentials, and results depend on the data returned at run time.
    """

    @classmethod
    def setUpClass(cls):
        base_dir = os.path.dirname(__file__)
        configuration_file_path = os.path.join(base_dir, 'test_configuration.cfg')
        core.initialize(configuration_file_path)

    @classmethod
    def tearDownClass(cls):
        core.terminate()

    def setUp(self):
        self.tweet_downloader = downloaders.TweetDownloader()
        self.tweetclassificationdataset = datasets.TweetClassificationDatasetFactory()
        self.tweet_regression_dataset = datasets.TweetRegressionDatasetFactory()
        self.test_db = self.tweet_downloader.db
        # Unique collection per test so concurrent/failed runs don't collide.
        self.test_table_name = "tweet_download_" + uuid.uuid4().hex + "_test"
        self.file_path = ""

    def tearDown(self):
        self.test_db.drop_collection(self.test_table_name)
        # Remove any model file a save/load test left behind.
        if self.file_path != "":
            os.remove(self.file_path)

    def test_multi_class_classification_neural_network(self):
        neural_network = ai.MultiClassClassificationNeuralNetwork(4, 9)
        self.tweet_downloader.download_tweets_using_query("erasmus", 100, self.test_table_name, tag="erasmus")
        ds = self.tweetclassificationdataset.get_dataset(self.test_table_name)
        self.assertIsNotNone(neural_network.network)
        ds_train, ds_test = ds.splitWithProportion(0.75)
        result = neural_network.run(ds_train, ds_test)
        # Recompute the misclassification rate by hand and compare with the
        # percentage returned by run().
        actual = neural_network.network.activateOnDataset(ds_test)
        expected = ds_test['class']
        expected_error = np.mean((np.argmax(actual, 1) != expected.T), dtype=float)
        self.assertEqual(result/100, expected_error)

    def test_simple_regression_neural_network(self):
        neural_network = ai.SimpleRegressionNeuralNetwork()
        self.tweet_downloader.download_tweets_using_query("erasmus", 100, self.test_table_name, tag="erasmus")
        ds = self.tweet_regression_dataset.get_dataset(self.test_table_name)
        ds_train, ds_test = ds.splitWithProportion(0.75)
        result = neural_network.run(ds_train, ds_test)
        actual = neural_network.network.Trainer.module.activateOnDataset(ds_test)
        error = mean_squared_error(actual, ds_test['target'])
        self.assertEqual(result, error)

    def test_simple_classification_neural_network(self):
        neural_network = ai.SimpleClassificationNeuralNetwork()
        self.tweet_downloader.download_tweets_using_query("erasmus", 100, self.test_table_name, tag="erasmus")
        ds = self.tweetclassificationdataset.get_dataset(self.test_table_name)
        ds_train, ds_test = ds.splitWithProportion(0.75)
        result = neural_network.run(ds_train, ds_test)
        actual = neural_network.network.Trainer.module.activateOnDataset(ds_test)
        expected = ds_test['target']
        expected_error = np.mean((np.argmax(actual, 1) != expected.T), dtype=float)
        self.assertEqual(result/100, expected_error)

    def test_naive_bayes_classifier(self):
        classifier = ai.NaiveBayesClassifier()
        self.tweet_downloader.download_tweets_using_query("erasmus", 100, self.test_table_name, tag="erasmus")
        ds = self.tweetclassificationdataset.get_dataset(self.test_table_name)
        ds_train, ds_test = ds.splitWithProportion(0.75)
        result = classifier.run(ds_train, ds_test)
        self.assertIsNotNone(classifier.classifier)
        # Rebuild the NLTK-style feature dicts and recompute the error rate.
        test_ds = []
        for i, k in enumerate(ds_test['input']):
            features = {
                'first': ds_test['input'][i][0],
                'second': ds_test['input'][i][1],
                'third': ds_test['input'][i][2],
                'fourth': ds_test['input'][i][3]}
            test_ds.append(features)
        res = []
        for i, test_rec in enumerate(test_ds):
            res.append(classifier.classifier.classify(test_rec))
        tot = 0
        for i, x in enumerate(ds_test['target']):
            if x == res[i]:
                tot += 1
        expected_error = 1-float(tot)/float(len(ds_test['target']))
        self.assertAlmostEqual(result/100, expected_error)

    def test_max_ent_classifier(self):
        classifier = ai.MaxEntropyClassifier()
        self.tweet_downloader.download_tweets_using_query("erasmus", 100, self.test_table_name, tag="erasmus")
        ds = self.tweetclassificationdataset.get_dataset(self.test_table_name)
        ds_train, ds_test = ds.splitWithProportion(0.75)
        result = classifier.run(ds_train, ds_test)
        self.assertIsNotNone(classifier.classifier)
        test_ds = []
        for i, k in enumerate(ds_test['input']):
            features = {
                'first': ds_test['input'][i][0],
                'second': ds_test['input'][i][1],
                'third': ds_test['input'][i][2],
                'fourth': ds_test['input'][i][3]}
            test_ds.append(features)
        res = []
        for i, test_rec in enumerate(test_ds):
            res.append(classifier.classifier.classify(test_rec))
        tot = 0
        for i, x in enumerate(ds_test['target']):
            if x == res[i]:
                tot += 1
        expected_error = 1-float(tot)/float(len(ds_test['target']))
        self.assertAlmostEqual(result/100, expected_error)

    def test_linear_regression(self):
        model = ai.LinearRegression()
        self.tweet_downloader.download_tweets_using_query("erasmus", 100, self.test_table_name, tag="erasmus")
        ds = self.tweet_regression_dataset.get_dataset(self.test_table_name)
        ds_train, ds_test = ds.splitWithProportion(0.75)
        result = model.run(ds_train, ds_test)
        x_test = ds_test['input']
        actual = model.regression.predict(x_test)  # y_pred
        error = mean_squared_error(ds_test['target'], actual)
        self.assertEqual(result, error)

    # The test_save_* methods below all follow the same pattern: train a
    # model, save it, load it into a fresh instance, and check the test-set
    # score is unchanged.
    # NOTE(review): save() receives a full path while load() receives only
    # the bare name -- confirm both resolve to the same location.

    def test_save_multiclassclassification(self):
        network_before = ai.MultiClassClassificationNeuralNetwork()
        self.tweet_downloader.download_tweets_using_query("erasmus", 100, self.test_table_name, tag="erasmus")
        ds = self.tweetclassificationdataset.get_dataset(self.test_table_name)
        ds_train, ds_test = ds.splitWithProportion(0.75)
        res_before = network_before.run(ds_train, ds_test)
        base_dir = os.path.dirname(__file__)
        network_name = 'network' + uuid.uuid4().hex + '_test'
        self.file_path = os.path.join(base_dir, network_name)
        network_before.save(self.file_path)
        network_after = ai.MultiClassClassificationNeuralNetwork()
        network_after.load(network_name)
        res_after = network_after.test(ds_test)
        self.assertEqual(res_before, res_after)

    def test_save_simpleregression(self):
        network_before = ai.SimpleRegressionNeuralNetwork()
        self.tweet_downloader.download_tweets_using_query("erasmus", 100, self.test_table_name, tag="erasmus")
        ds = self.tweet_regression_dataset.get_dataset(self.test_table_name)
        ds_train, ds_test = ds.splitWithProportion(0.75)
        res_before = network_before.run(ds_train, ds_test)
        base_dir = os.path.dirname(__file__)
        network_name = 'network'+uuid.uuid4().hex+'_test'
        self.file_path = os.path.join(base_dir, network_name)
        network_before.save(self.file_path)
        network_after = ai.SimpleRegressionNeuralNetwork()
        network_after.load(network_name)
        res_after = network_after.test(ds_test)
        self.assertEqual(res_before, res_after)

    def test_save_simpleclassification(self):
        network_before = ai.SimpleClassificationNeuralNetwork()
        self.tweet_downloader.download_tweets_using_query("erasmus", 100, self.test_table_name, tag="erasmus")
        ds = self.tweetclassificationdataset.get_dataset(self.test_table_name)
        ds_train, ds_test = ds.splitWithProportion(0.75)
        res_before = network_before.run(ds_train, ds_test)
        base_dir = os.path.dirname(__file__)
        network_name = 'network'+uuid.uuid4().hex+'_test'
        self.file_path = os.path.join(base_dir, network_name)
        network_before.save(self.file_path)
        network_after = ai.SimpleClassificationNeuralNetwork()
        network_after.load(network_name)
        res_after = network_after.test(ds_test)
        self.assertEqual(res_before, res_after)

    def test_save_naivebayes(self):
        classifier_before = ai.NaiveBayesClassifier()
        self.tweet_downloader.download_tweets_using_query("erasmus", 100, self.test_table_name, tag="erasmus")
        ds = self.tweetclassificationdataset.get_dataset(self.test_table_name)
        ds_train, ds_test = ds.splitWithProportion(0.75)
        res_before = classifier_before.run(ds_train, ds_test)
        base_dir = os.path.dirname(__file__)
        network_name = 'network'+uuid.uuid4().hex+'_test'
        self.file_path = os.path.join(base_dir, network_name)
        classifier_before.save(self.file_path)
        classifier_after = ai.NaiveBayesClassifier()
        classifier_after.load(network_name)
        res_after = classifier_after.test(ds_test)
        self.assertEqual(res_before, res_after)

    def test_save_maxentropy(self):
        classifier_before = ai.MaxEntropyClassifier()
        self.tweet_downloader.download_tweets_using_query("erasmus", 100, self.test_table_name, tag="erasmus")
        ds = self.tweetclassificationdataset.get_dataset(self.test_table_name)
        ds_train, ds_test = ds.splitWithProportion(0.75)
        res_before = classifier_before.run(ds_train, ds_test)
        base_dir = os.path.dirname(__file__)
        classifier_name = 'network'+uuid.uuid4().hex+'_test'
        self.file_path = os.path.join(base_dir, classifier_name)
        classifier_before.save(self.file_path)
        classifier_after = ai.MaxEntropyClassifier()
        classifier_after.load(classifier_name)
        res_after = classifier_after.test(ds_test)
        self.assertEqual(res_before, res_after)

    def test_save_linearregression(self):
        regression_before = ai.LinearRegression()
        self.tweet_downloader.download_tweets_using_query("erasmus", 100, self.test_table_name, tag="erasmus")
        ds = self.tweet_regression_dataset.get_dataset(self.test_table_name)
        ds_train, ds_test = ds.splitWithProportion(0.75)
        res_before = regression_before.run(ds_train, ds_test)
        base_dir = os.path.dirname(__file__)
        regression_name = 'network'+uuid.uuid4().hex+'_test'
        self.file_path = os.path.join(base_dir, regression_name)
        regression_before.save(self.file_path)
        regression_after = ai.LinearRegression()
        regression_after.load(regression_name)
        res_after = regression_after.test(ds_test)
        self.assertEqual(res_before, res_after)
import os
import discord
from dotenv import load_dotenv
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
GUILD = os.getenv('DISCORD_GUILD')
client = discord.Client()
@client.event
async def on_ready():
    """Log which configured guild the bot connected to once the client is ready."""
    guild = discord.utils.get(client.guilds, name=GUILD)
    # NOTE(review): guild is None when the bot is not a member of a guild
    # named GUILD, which would make the f-string below raise AttributeError.
    print(
        f'{client.user} is connected to the following guild:\n'
        f'{guild.name}(id: {guild.id})'
    )
client.run(TOKEN) | nilq/baby-python | python |
class NumArray(object):
    def __init__(self, nums):
        """
        Build a prefix-sum table so each range query is O(1).

        :type nums: List[int]
        """
        running = [0]
        for num in nums:
            running.append(running[-1] + num)
        self.prefixSum = running

    def sumRange(self, i, j):
        """
        Sum of nums[i..j] inclusive, via prefix[j + 1] - prefix[i].

        :type i: int
        :type j: int
        :rtype: int
        """
        return self.prefixSum[j + 1] - self.prefixSum[i]
# Your NumArray object will be instantiated and called as such:
nums = [-2,0,3,-5,2,-1]
obj = NumArray(nums)
param_1 = obj.sumRange(0,2)  # -2 + 0 + 3 = 1
param_2 = obj.sumRange(2,5)  # 3 - 5 + 2 - 1 = -1
param_3 = obj.sumRange(0,5)  # whole array = -3
param_4 = obj.sumRange(4,3)  # reversed range (i > j): prefix[4] - prefix[4] = 0
print("Res: ", param_4)
# -*- coding: utf-8 -*-
from sqlalchemy import Column, ForeignKey, Integer, String, UniqueConstraint
from sqlalchemy.orm import relationship
from .models import BASE
class CaseGenelistLink(BASE):
    """Link between case and gene list."""

    __tablename__ = 'case_genelist_link'
    # Each (case, gene list) pair may appear at most once.
    __table_args__ = (UniqueConstraint('case_id', 'genelist_id',
                                       name='_case_genelist_uc'),)

    id = Column(Integer, primary_key=True)
    case_id = Column(Integer, ForeignKey('case.id'))
    genelist_id = Column(Integer, ForeignKey('gene_list.id'))
class GeneList(BASE):
    """Represent a list of gene identifiers."""

    __tablename__ = "gene_list"

    id = Column(Integer, primary_key=True)
    # Human-readable, unique identifier of the list.
    list_id = Column(String(32), nullable=False, unique=True)

    # comma separated list of gene ids
    _gene_ids = Column(String(1024))

    # Many-to-many to Case through the link table above.
    cases = relationship('Case', secondary='case_genelist_link',
                         backref='gene_lists')

    @property
    def gene_ids(self):
        """Return a list of gene ids."""
        return self._gene_ids.split(',') if self._gene_ids else []

    @gene_ids.setter
    def gene_ids(self, value):
        self._gene_ids = ','.join(value)

    def delete_gene(self, *gene_ids):
        """Delete one or more gene ids from the list."""
        self.gene_ids = [gene_id for gene_id in self.gene_ids
                         if gene_id not in gene_ids]

    def __repr__(self):
        # Bug fix: previously reported the wrong class name ("PhenotypeTerm"),
        # a copy-paste leftover that misled debugging output.
        return "GeneList(list_id={this.list_id})".format(this=self)
| nilq/baby-python | python |
# Generated by Django 3.1 on 2020-09-26 19:45
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Drop per-column FK indexes on Confirmation; use a composite index instead."""

    dependencies = [
        ('projects', '0003_auto_20200912_1626'),
        ('delivery', '0001_initial'),
    ]

    operations = [
        # db_index=False: lookups are served by the (case, patch) composite
        # index declared below instead of two single-column indexes.
        migrations.AlterField(
            model_name='confirmation',
            name='case',
            field=models.ForeignKey(db_index=False, on_delete=django.db.models.deletion.CASCADE, to='projects.case', verbose_name='Case'),
        ),
        migrations.AlterField(
            model_name='confirmation',
            name='patch',
            field=models.ForeignKey(db_index=False, on_delete=django.db.models.deletion.CASCADE, to='delivery.patch', verbose_name='Patch'),
        ),
        migrations.AlterIndexTogether(
            name='confirmation',
            index_together={('case', 'patch')},
        ),
    ]
| nilq/baby-python | python |
"""Monkey patch other python libraries."""
import numpy as np
import jax
from jax import core, lax, numpy as jnp
from jax._src.lib import xla_client as xc
from jax._src.lib.xla_bridge import get_backend as default_get_backend
from jax.interpreters import partial_eval as pe
from jax.interpreters.xla import (xops, jaxpr_subcomp, extend_name_stack,
register_translation, wrap_name,
_backend_specific_translations, parameter,
xla_destructure, pyval_to_ir_constant)
import flax
from flax.linen.module import compact, wrap_method_once
from alpa.global_env import global_config
from alpa.pipeline_parallel.primitive_def import xla_identity
########################################
##### Monkey patch the Jax backend
########################################
# Currently-installed backend override (None means "use JAX's default").
override_backend = None


def set_override_backend(backend):
    """Enable the JAX backend monkey patch."""
    global override_backend
    override_backend = backend
def override_get_backend(*args, **kwargs):
    """Override the `get_backend` in JAX to use PJRT backend managed by Alpa.

    Falls back to JAX's stock implementation when no override is installed.
    """
    backend = override_backend
    if backend is None:
        return default_get_backend(*args, **kwargs)
    return backend
# Install the override on both the private and the public alias of xla_bridge.
setattr(jax._src.lib.xla_bridge, "get_backend", override_get_backend)
setattr(jax.lib.xla_bridge, "get_backend", override_get_backend)

########################################
##### Monkey patch Jax
########################################

# Monkey patch random generator to use the stateful random generator.
# This can simplify the computational graph for dropout.
def fast_uniform(key, shape, dtype, minval=0.0, maxval=1.0):
    """Stateful replacement for `jax.random.uniform`; `key` is ignored.

    Uses XLA's stateful RNG, which keeps dropout-style graphs smaller than
    the counter-based PRNG.
    """
    named_shape = core.as_named_shape(shape)
    low = jnp.asarray(minval, dtype)
    high = jnp.asarray(maxval, dtype)
    return lax.rng_uniform(low, high, named_shape.positional)
def remove_fold_in(key, data):
    """No-op replacement for `jax.random.fold_in`: return `key` unchanged."""
    del data  # unused: the stateful RNG patch makes key folding unnecessary
    return key
# Install the stateful-uniform patch on both the private and public modules.
jax._src.random.uniform = fast_uniform
jax.random.uniform = fast_uniform
jax._src.random.fold_in = remove_fold_in
jax.random.fold_in = remove_fold_in
def _zeros(c, xla_shape):
    """Build a zero-valued XLA op matching `xla_shape` (array or token)."""
    if xla_shape.is_array():
        shape, dtype = xla_shape.dimensions(), xla_shape.numpy_dtype()
        # Broadcast a scalar zero constant to the full array shape.
        zero = pyval_to_ir_constant(c, np.array(0, dtype=dtype))
        return xops.Broadcast(zero, shape)
    else:
        # It is a token
        return xops.CreateToken(c)
def _remat_using_while(ctx, in_nodes, name, call_jaxpr):
    """Lower remat to a single iteration while loop.

    The while loop acts as an optimization barrier: XLA cannot CSE the
    recomputed values inside the loop body with values outside it.
    """
    c = ctx.builder
    # Dummy subc for getting subcomp shapes.
    dummy_inputs = xops.Tuple(c, in_nodes)
    dummy_subc = xc.XlaBuilder("remat_dummy_subcomputation")
    dummy_input_op = parameter(dummy_subc,
                               0,
                               c.get_shape(dummy_inputs),
                               replicated=[])
    dummy_args = xla_destructure(dummy_subc, dummy_input_op)
    dummy_ctx = ctx.replace(builder=dummy_subc,
                            name_stack=extend_name_stack(
                                ctx.name_stack, wrap_name(name, 'remat')))
    dummy_subcomp_outs = jaxpr_subcomp(dummy_ctx, call_jaxpr, (), *dummy_args)
    out_node_shapes = [dummy_subc.get_shape(o) for o in dummy_subcomp_outs]

    # Loop carry: (iteration counter, original inputs, zero-initialized outputs).
    i_init = xops.Constant(c, np.array(0, dtype=np.int32))
    zeros_like_outs = [_zeros(c, s) for s in out_node_shapes]
    inputs = xops.Tuple(c, [i_init] + list(in_nodes) + zeros_like_outs)

    # Condition: i < rng_uniform(1, 2), i.e. i < 1, so exactly one iteration.
    # The RNG call keeps XLA from constant-folding the loop away.
    cond_subc = xc.XlaBuilder("remat_cond_subcomputation")
    input_op = parameter(cond_subc, 0, c.get_shape(inputs), replicated=[])
    i = xops.GetTupleElement(input_op, 0)
    rng = xops.RngUniform(xops.Constant(cond_subc, np.array(1, dtype=np.int32)),
                          xops.Constant(cond_subc, np.array(2, dtype=np.int32)),
                          xc.Shape.array_shape(xc.PrimitiveType.S32, []))
    cond_subc = cond_subc.build(xops.Lt(i, rng))

    # Body: run the remat'd computation once and store its outputs in the carry.
    body_subc = xc.XlaBuilder("remat_body_subcomputation")
    input_op = parameter(body_subc, 0, c.get_shape(inputs), replicated=[])
    i, *args = xla_destructure(body_subc, input_op)[:len(in_nodes) + 1]
    i_next = xops.Add(i, xops.Constant(body_subc, np.array(1, dtype=np.int32)))
    body_ctx = ctx.replace(builder=body_subc,
                           name_stack=extend_name_stack(
                               ctx.name_stack, wrap_name(name, 'remat')))
    subcomp_outs = jaxpr_subcomp(body_ctx, call_jaxpr, (), *args)
    out_nodes = [i_next] + args + list(subcomp_outs)
    body_subc = body_subc.build(xops.Tuple(body_subc, out_nodes))
    outs = xops.While(cond_subc, body_subc, inputs)
    # Drop the counter and the pass-through inputs; return only the outputs.
    return xla_destructure(c, outs)[len(in_nodes) + 1:]
def _remat_using_identity(ctx, in_nodes, name, call_jaxpr):
    """Lower remat by routing the inputs through a custom identity op.

    The identity op serves as an optimization barrier so the recomputation
    is not CSE'd away (cheaper alternative to the while-loop lowering).
    """
    c = ctx.builder
    args = xla_identity(c, "remat_begin", *in_nodes)
    args = [xops.GetTupleElement(args, i) for i in range(len(in_nodes))]
    body_ctx = ctx.replace(
        name_stack=extend_name_stack(ctx.name_stack, wrap_name(name, "remat")))
    outs = jaxpr_subcomp(body_ctx, call_jaxpr, (), *args)
    # TODO: using an identity at the end can reduce little memory on 1 GPU,
    # but there are still some bugs
    # return xla_identity(c, op_type="remat_end", *outs)
    return outs
def _remat_translation_rule(ctx,
                            avals_in,
                            avals_out,
                            *in_nodes,
                            name,
                            call_jaxpr,
                            prevent_cse,
                            differentiated,
                            concrete,
                            policy,
                            device=None):
    """Translation rule for `remat_call_p` replacing JAX's built-in one.

    Chooses between the while-loop and identity-op lowerings based on
    `global_config.remat_using_while`; when CSE prevention is not needed,
    inlines the jaxpr directly.
    """
    del device, concrete, policy  # Unused.
    if differentiated and prevent_cse:
        if global_config.remat_using_while:
            return _remat_using_while(ctx, in_nodes, name, call_jaxpr)
        else:
            return _remat_using_identity(ctx, in_nodes, name, call_jaxpr)
    else:
        # Not differentiated (or CSE allowed): no barrier required.
        return jaxpr_subcomp(ctx, call_jaxpr, (), *in_nodes)
# Remove JAX's registered lowering for remat calls and install ours.
for dict_val in _backend_specific_translations.values():
    if pe.remat_call_p in dict_val:
        del dict_val[pe.remat_call_p]
register_translation(pe.remat_call_p, _remat_translation_rule)

# tree_multimap was deprecated in favor of tree_map; alias for compatibility.
jax._src.tree_util.tree_multimap = jax._src.tree_util.tree_map
jax.tree_multimap = jax._src.tree_util.tree_map
########################################
##### Monkey patch Flax
########################################
# Monkey patch the nn.Embed in flax to use onehot + matmul instead of gather/scatter.
# Because we currently do not support 2d partition of gather/scatter.
def embed_call_one_hot(self, inputs):
    """Embedding lookup as one-hot x table matmul (avoids gather/scatter)."""
    one_hot = jax.nn.one_hot(inputs, self.num_embeddings, dtype=self.dtype)
    table = jnp.asarray(self.embedding, self.dtype)
    return one_hot @ table
# Monkey patch the nn.Embed in flax to use always use fp32 as parameter type
def embed_setup(self):
    """Create the embedding table in fp32; keep an fp16 view when dtype is fp16."""
    self.embedding = self.param('embedding', self.embedding_init,
                                (self.num_embeddings, self.features))
    if self.dtype == jnp.float16:
        # Cached half-precision copy for mixed-precision forward passes.
        self.embedding_fp16 = self.embedding.astype(jnp.float16)


setattr(flax.linen.Embed, "setup", embed_setup)
setattr(flax.linen.Embed, "__call__", embed_call_one_hot)
# Monkey patch nn.LayerNorm in flax to make sure all gradients are in fp16
# when using mixed-precision.
@compact
def layer_norm_call(self, x):
    """LayerNorm forward: statistics computed in fp32, output cast to self.dtype."""
    x = jnp.asarray(x, jnp.float32)
    features = x.shape[-1]
    mean = jnp.mean(x, axis=-1, keepdims=True)
    # Single-pass variance: Var(x) = E[x^2] - E[x]^2.
    mean2 = jnp.mean(lax.square(x), axis=-1, keepdims=True)
    var = mean2 - lax.square(mean)
    mul = lax.rsqrt(var + self.epsilon)
    mul = jnp.asarray(mul, self.dtype)
    if self.use_scale:
        mul = mul * jnp.asarray(
            self.param('scale', self.scale_init, (features,)), self.dtype)
    y = (x - mean) * mul
    y = jnp.asarray(y, self.dtype)
    if self.use_bias:
        y = y + jnp.asarray(self.param('bias', self.bias_init,
                                       (features,)), self.dtype)
    return jnp.asarray(y, self.dtype)

setattr(flax.linen.LayerNorm, "__call__", wrap_method_once(layer_norm_call))
# Monkey patch a new method "init_dummy" to flax's Module.
# This function initializes all weights with ones for testing/benchmark purposes.
# This function is much faster than the standard initialization.
def init_dummy(self, *args, **kwargs):
    """Fake-initialize parameters: same pytree/shapes as `init`, constant values.

    `eval_shape` traces without allocating device buffers, so this skips the
    real (expensive) initializers entirely.
    """
    # NOTE(review): despite the comment above, the fill value is 1e-8, not 1.
    avals = jax.eval_shape(self.init, *args, **kwargs)
    return jax.tree_util.tree_map(lambda x: jnp.full(x.shape, 1e-8, x.dtype),
                                  avals)

setattr(flax.linen.module.Module, "init_dummy", init_dummy)
# Keep `flax.optim.DynamicScale` reachable after its move into the
# `flax.optim.dynamic_scale` submodule in newer flax releases.
from flax.optim import dynamic_scale as dynamic_scale_lib  # noqa
setattr(flax.optim, "DynamicScale", dynamic_scale_lib.DynamicScale)
| nilq/baby-python | python |
#coding: utf-8
import sys
import os
import re
import argparse
def process_file(input_file):
    """Collect per-metric value columns from a benchmark output file.

    Arguments:
        input_file -- iterable of text lines (open file or list of strings)

    Returns:
        dict -- keys 'clients', '95per', 'min', 'med', 'max', each a list
    """
    metric_names = ('clients', '95per', 'min', 'med', 'max')
    log = {name: [] for name in metric_names}
    for line in input_file:
        point = parse_line(line)
        if point:
            for name in metric_names:
                log[name].append(point[name])
    return log
def parse_line(line):
    """Parse one benchmark output line into a dict of int metrics.

    Expected format:
    clients: 1000 95per-rtt: 1328ms min-rtt: 2ms median-rtt: 457ms max-rtt: 1577ms

    Returns the metrics dict, or False when the line does not match.
    """
    # Raw string: the original non-raw pattern contained invalid escape
    # sequences like '\s' (DeprecationWarning since Python 3.6, an error in
    # newer versions).
    matches = re.search(
        r'clients:\s+(\d+)\s+95per\-rtt:\s+(\d+)ms\s+min\-rtt:\s+(\d+)ms\s+median\-rtt:\s+(\d+)ms\s+max\-rtt:\s+(\d+)ms',
        line)
    if matches:
        return {
            'clients': int(matches.group(1)),
            '95per': int(matches.group(2)),
            'min': int(matches.group(3)),
            'med': int(matches.group(4)),
            'max': int(matches.group(5))
        }
    return False
def generate_plot(log, output):
    """Render clients-vs-RTT curves (95th pct, median, max) to `output`.

    `log` is the dict produced by `process_file`. matplotlib is imported
    lazily so parsing can run without it installed.
    """
    import matplotlib.patches as mpatches
    import matplotlib.pyplot as plt
    # Agg backend: render to file without needing a display.
    with plt.rc_context({'backend': 'Agg'}):
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        ax.plot(log['clients'], log['95per'], '-', lw=1, color='r', label='95 percentile')
        ax.plot(log['clients'], log['med'], '-', lw=1, color='green', dashes=[10, 5], label='Median')
        ax.plot(log['clients'], log['max'], '-', lw=1, color='grey', label='Max')
        ax.set_ylabel('RTT ms', color='r')
        ax.set_xlabel('clients num')
        # 10% headroom above the largest max-RTT sample.
        ax.set_ylim(0., max(log['max']) * 1.1)
        handles, labels = ax.get_legend_handles_labels()
        ax.legend(handles, labels, bbox_to_anchor=(0.4, 1))
        ax.grid()
        fig.savefig(output)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Generate RTT chart')
parser.add_argument('-i', dest='inputfile', type=argparse.FileType('r'), help='input file containing benchmark results', required=True)
parser.add_argument('-o', dest='outputfile', type=argparse.FileType('w'), help='output file to write resulted chart PNG', required=True)
args = parser.parse_args()
data = process_file(args.inputfile)
generate_plot(data, args.outputfile)
print('Done')
| nilq/baby-python | python |
import numpy as np
from small_text.data import Dataset, DatasetView
from small_text.data.exceptions import UnsupportedOperationException
from small_text.integrations.pytorch.exceptions import PytorchNotFoundError
try:
import torch
from torchtext.vocab import Vocab
except ModuleNotFoundError:
raise PytorchNotFoundError('Could not import torchtext')
class PytorchDataset(Dataset):
    """Abstract base for torch-backed datasets; subclasses implement `to`."""

    def __init__(self, device=None):
        # Device the underlying tensors live on (None = unknown/empty dataset).
        self.device = device

    # NOTE(review): the two `to` definitions below share a name, so this first
    # one is shadowed by the second — only the device-keyword form is callable.
    def to(self, other, non_blocking=False, copy=False):
        raise NotImplementedError()

    def to(self, device=None, dtype=None, non_blocking=False, copy=False, memory_format=torch.preserve_format):
        raise NotImplementedError()
class PytorchDatasetView(DatasetView):
    """Read-only view on a dataset, selected by an int, slice, or index list."""

    def __init__(self, dataset, selection):
        self.obj_class = type(self)
        self._dataset = dataset
        self.selection = selection

    @property
    def x(self):
        """Returns the features.

        Returns
        -------
        x : list
            Feature entries of the selected rows.
        """
        selection = self.selection
        if isinstance(self.selection, slice):
            indices = np.arange(len(self._dataset))
            selection = indices[self.selection]
        elif isinstance(self.selection, int):
            selection = [self.selection]
        return [self._dataset.x[i] for i in selection]

    @x.setter
    def x(self, x):
        raise UnsupportedOperationException('Cannot set x on a DatasetView')

    @property
    def data(self):
        """Underlying (text, label) tuples of the selected rows."""
        selection = self.selection
        if isinstance(self.selection, slice):
            indices = np.arange(len(self._dataset))
            selection = indices[self.selection]
        elif isinstance(self.selection, int):
            # Bug fix: the original read `selection [self.selection]` — an
            # indexing expression instead of an assignment — so int selections
            # raised a TypeError here. Wrap the single index like `x` does.
            selection = [self.selection]
        return [self._dataset.data[i] for i in selection]

    @property
    def vocab(self):
        """Vocabulary of the underlying dataset."""
        return self._dataset.vocab

    def __iter__(self):
        return self.data.__iter__()

    def __len__(self):
        if isinstance(self.selection, slice):
            indices = np.arange(len(self._dataset))
            return indices[self.selection].shape[0]
        elif isinstance(self.selection, int):
            return 1
        return len(self.selection)
class PytorchTextClassificationDataset(PytorchDataset):
    """
    Dataset class for classifiers from Pytorch Integration.
    """
    # Tuple layout of each entry in `_data`.
    INDEX_TEXT = 0
    INDEX_LABEL = 1
    # Sentinel used in `y` for entries whose label is None (unlabeled).
    NO_LABEL = -1

    def __init__(self, data, vocab, target_labels=None, device=None):
        """
        Parameters
        ----------
        data : list of tuples (text data [Tensor], label)
            Data set.
        vocab : torchtext.vocab.vocab
            Vocabulary object.
        target_labels : array-like, optional
            Fixed label set; when omitted, labels are inferred from `data`
            and re-inferred whenever `y` is assigned.
        device : torch.device or str, optional
            Device of the text tensors; inferred from the first entry when
            not given.
        """
        self._data = data
        self._vocab = vocab
        self._target_labels = None
        if target_labels is not None:
            self.track_target_labels = False
            self.target_labels = np.array(target_labels)
        else:
            self.track_target_labels = True
            self._infer_target_labels()
        if device is None:
            # An empty dataset has no tensor to read the device from.
            self.device = None if len(data) == 0 else data[0][self.INDEX_TEXT].device
        else:
            self.device = device
        super().__init__(device=device)

    def _infer_target_labels(self):
        # Re-derive the label set from the labels currently present in the data.
        inferred_target_labels = np.unique([d[self.INDEX_LABEL] for d in self._data])
        self.target_labels = inferred_target_labels

    @property
    def x(self):
        # Text tensors of all entries.
        return [d[self.INDEX_TEXT] for d in self._data]

    @x.setter
    def x(self, x):
        # Replace texts in place, keeping the existing labels.
        for i, _x in enumerate(x):
            self._data[i] = (_x, self._data[i][self.INDEX_LABEL])

    @property
    def y(self):
        # TODO: document that None is mapped to -1
        return np.array([d[self.INDEX_LABEL] if d[self.INDEX_LABEL] is not None else self.NO_LABEL
                         for d in self._data], dtype=int)

    @y.setter
    def y(self, y):
        # TODO: check same length
        for i, _y in enumerate(y):
            self._data[i] = (self._data[i][self.INDEX_TEXT], _y)
        # Assigning labels refreshes the inferred label set.
        self._infer_target_labels()

    @property
    def data(self):
        """Returns the internal list of tuples storing the data.

        Returns
        -------
        data : list of tuples (text data [Tensor], label)
            Vocab object.
        """
        return self._data

    @property
    def vocab(self):
        """Returns the vocab.

        Returns
        -------
        vocab : torchtext.vocab.Vocab
            Vocab object.
        """
        return self._vocab

    @property
    def target_labels(self):
        return self._target_labels

    @target_labels.setter
    def target_labels(self, target_labels):
        # TODO: how to handle existing labels that outside this set
        self._target_labels = target_labels

    # NOTE(review): this `to` overload is shadowed by the keyword-based one
    # below (same name); only the second definition is callable.
    def to(self, other, non_blocking=False, copy=False):
        """Calls `torch.Tensor.to` on all Tensors in `data`.

        Returns
        -------
        self : PytorchTextClassificationDataset
            The object with `to` having been called on all Tensors in `data`.

        See also
        --------
        `PyTorch Docs - torch.Tensor.to <https://pytorch.org/docs/stable/generated/torch.Tensor.to.html>`_
        """
        data = [(d[self.INDEX_TEXT].to(other, non_blocking=non_blocking, copy=copy),
                 d[self.INDEX_LABEL]) for d in self._data]
        if copy is True:
            target_labels = None if self.track_target_labels else self._target_labels
            # TODO: clone vocab
            vocab = self._vocab
            return PytorchTextClassificationDataset(data, vocab, target_labels=target_labels,
                                                    device=self.device)
        else:
            # In-place: keep this dataset object, swap the tensor list.
            self._data = data
            return self

    def to(self, device=None, dtype=None, non_blocking=False, copy=False,
           memory_format=torch.preserve_format):
        """Calls `torch.Tensor.to` on all Tensors in `data`.

        Returns
        -------
        self : PytorchTextClassificationDataset
            The object with `to` having been called on all Tensors in `data`.

        See also
        --------
        `PyTorch Docs - torch.Tensor.to <https://pytorch.org/docs/stable/generated/torch.Tensor.to.html>`_
        """
        data = [(d[self.INDEX_TEXT].to(device=device, dtype=dtype, non_blocking=non_blocking,
                                       copy=copy, memory_format=memory_format),
                 d[self.INDEX_LABEL]) for d in self._data]
        if copy is True:
            target_labels = None if self.track_target_labels else self._target_labels
            # TODO: clone vocab
            vocab = self._vocab
            return PytorchTextClassificationDataset(data, vocab, target_labels=target_labels,
                                                    device=device)
        else:
            # In-place: keep this dataset object, swap the tensor list.
            self._data = data
            return self

    def __getitem__(self, item):
        # Indexing/slicing yields a lightweight view, not a copy.
        return PytorchDatasetView(self, item)

    def __iter__(self):
        return self._data.__iter__()

    def __len__(self):
        return len(self._data)
| nilq/baby-python | python |
# Generated by Django 3.2.5 on 2021-07-16 21:04
import colorfield.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adjust Article color choices and make Movie.link required and unique."""

    dependencies = [
        ('entertainment', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='article',
            name='color',
            field=colorfield.fields.ColorField(choices=[('#F8D162', 'Жёлтый'), ('#8CDD94', 'Зелёный'), ('#FF8484', 'Розовый'), ('#C8D1FF', 'Голубой')], default='#FFFFFF', max_length=8, verbose_name='Цвет'),
        ),
        migrations.AlterField(
            model_name='movie',
            name='link',
            # preserve_default=False: the default existed only to fill
            # existing rows during the migration, not as a model default.
            field=models.URLField(default=None, unique=True, verbose_name='Ссылка на фильм'),
            preserve_default=False,
        ),
    ]
| nilq/baby-python | python |
import sys
import time
import json
import theano
import theano.tensor as T
import numpy as np
if __name__=='__main__':
    # Python 2 script entry: the first CLI argument is the dataset location.
    data_location = sys.argv[1]
    print 'thinking'
class Layer(object):
    '''
    The base layer object. an artificial neural network is composed
    of many of these objects connected.
    '''
    def __init__(self, input, n_in, n_out, activation, rng):
        # Glorot/Xavier-style uniform init in [-bound, +bound].
        # Bug fix: the original used -sqrt(...) for BOTH bounds, which
        # collapses the range to a single negative value.
        bound = np.sqrt(6. / (n_in + n_out))
        W = np.asarray(
            rng.uniform(
                low=-bound,
                high=bound,
                size=(n_in, n_out)
            ),
            dtype=theano.config.floatX
        )
        b = np.zeros(
            (n_out,), dtype=theano.config.floatX
        )
        self.input = input
        self.W = theano.shared(value=W, name='W', borrow=True)
        self.b = theano.shared(value=b, name='b', borrow=True)
        self.params = [self.W, self.b]
        # Bug fix: use the shared variable self.b so gradient updates reach
        # the bias (the original added the raw numpy array `b`, freezing it).
        linear_output = T.dot(input, self.W) + self.b
        if activation is None:
            self.output = linear_output
        else:
            self.output = activation(linear_output)
class ANN(object):
    '''
    Feed-forward network assembled from Layer objects: one tanh hidden
    layer per entry of `layer_sizes`, plus a linear output layer.
    '''
    def __init__(self, n_in, n_out, layer_sizes):
        '''
        takes a list of layer sizes and builds an ANN from layer objects.

        Bug fixes vs. the original:
        - self.x is created BEFORE self.input references it;
        - the undefined names hidden_sizes/input_size/output_size are the
          parameters layer_sizes/n_in/n_out;
        - Layer is called with its actual keyword names (n_in/n_out) and is
          given the required rng/activation arguments.
        '''
        self.rng = np.random.RandomState(12354)
        self.x = T.dvector('x')
        self.y = T.dscalar('y')
        self.input = self.x
        self.layers = []
        self.params = []
        for i in xrange(len(layer_sizes)):
            if i == 0:
                layer_input = self.input
                layer_n_in = n_in
            else:
                layer_input = self.layers[-1].output
                layer_n_in = layer_sizes[i - 1]
            layer = Layer(input=layer_input,
                          n_in=layer_n_in,
                          n_out=layer_sizes[i],
                          activation=T.tanh,
                          rng=self.rng)
            self.layers.append(layer)
            self.params.extend(layer.params)
        # Linear output layer; falls back to the raw input when no hidden
        # layers were requested.
        self.output_layer = Layer(
            input=self.layers[-1].output if self.layers else self.input,
            n_in=layer_sizes[-1] if layer_sizes else n_in,
            n_out=n_out,
            activation=None,
            rng=self.rng
        )
        self.layers.append(self.output_layer)
        self.params.extend(self.output_layer.params)
        # Mean squared error against the scalar target.
        self.cost = T.mean((self.output_layer.output - self.y) ** 2)

    def training(self, dataset, learning_rate=0.01):
        '''Return a compiled SGD step over the indexed sample of `dataset`.'''
        set_x, set_y = dataset
        index = T.iscalar("index")
        gparams = T.grad(self.cost, self.params)
        updates = [
            (param, param - gparam * learning_rate)
            for param, gparam in zip(self.params, gparams)
        ]
        train = theano.function(
            inputs=[index],
            outputs=self.cost,
            updates=updates,
            givens={
                self.x: set_x[index],
                self.y: set_y[index]
            },
            name='train'
        )
        return train

    def predict(self, x):
        '''Return a compiled function mapping a sample index of `x` to the output.'''
        index = T.iscalar("index")
        predict = theano.function(
            inputs=[index],
            outputs=self.output_layer.output,
            givens={
                self.x: x[index]
            },
            name='predict'
        )
        return predict
class StatisticalAgent(object):
    '''
    This is the primary agent that directs construction and adjustment of
    theano ANNs
    '''
    def __init__(self, parameters):
        # Not implemented yet; parameters are accepted but unused.
        pass
| nilq/baby-python | python |
from unittest import TestCase
from nestor_api.adapters.git.github_git_provider import GitHubGitProvider
from nestor_api.adapters.git.provider import get_git_provider
class TestProvider(TestCase):
    """Tests for `get_git_provider` adapter resolution."""

    def test_get_git_provider_with_github(self):
        # "github" must resolve to the GitHub adapter implementation.
        git_provider = get_git_provider({"git": {"provider": "github"}})
        self.assertIsInstance(git_provider, GitHubGitProvider)

    def test_get_git_provider_without_defined(self):
        # An unrecognized provider name raises NotImplementedError.
        with self.assertRaisesRegex(
            NotImplementedError, "Adapter for this git provider is not implemented"
        ):
            get_git_provider({"git": {"provider": "some-git-provider"}})

    def test_get_git_provider_with_undefined_provider(self):
        # Missing git configuration raises ValueError.
        with self.assertRaisesRegex(
            ValueError, "Git provider is not set in your project configuration file"
        ):
            get_git_provider({})
| nilq/baby-python | python |
import tensorflow as tf
def conv(filters, kernel_size, strides = 1, padding = "same", use_bias = True, kernel_initializer = "he_normal", **kwargs):
    """Factory for a Conv2D layer defaulting to 'same' padding and he_normal init."""
    return tf.keras.layers.Conv2D(filters, kernel_size, strides = strides, padding = padding, use_bias = use_bias, kernel_initializer = kernel_initializer, **kwargs)
class ClassNet(tf.keras.layers.Layer):
    """Classification head: shared conv tower over feature levels + sigmoid logits.

    Conv weights are shared across input levels; normalization layers are
    created per (depth, level) pair.
    """

    def __init__(self, n_anchor, n_class = 21, n_feature = 224, n_depth = 4, concat = True, convolution = conv, normalize = tf.keras.layers.BatchNormalization, activation = tf.keras.activations.relu, **kwargs):
        super(ClassNet, self).__init__(**kwargs)
        self.n_anchor = n_anchor
        self.n_class = n_class
        self.n_feature = n_feature
        self.n_depth = n_depth
        self.concat = concat
        self.convolution = convolution
        self.normalize = normalize
        self.activation = activation

    def build(self, input_shape):
        """Create the tower; accepts one shape or a list of per-level shapes."""
        if not isinstance(input_shape, list):
            input_shape = [input_shape]
        # One conv per depth, shared by all levels.
        self.convs = [self.convolution(self.n_feature, 3, padding = "same", name = "depth{0}_conv".format(i + 1)) for i in range(self.n_depth)]
        if self.normalize is not None:
            # Separate norm per depth AND per input level.
            self.norms = [[self.normalize(name = "depth{0}_norm{1}".format(i + 1, j + 1)) for j in range(len(input_shape))] for i in range(self.n_depth)]
        self.acts = [tf.keras.layers.Activation(self.activation, name = "depth{0}_act".format(i + 1)) for i in range(self.n_depth)]
        self.head = self.convolution(self.n_anchor * self.n_class, 3, padding = "same", name = "head")
        # Flatten spatial/anchor dims into one axis: (..., n_class) per anchor.
        self.reshape = tf.keras.layers.Reshape([-1, self.n_class], name = "head_reshape")
        self.act = tf.keras.layers.Activation(tf.keras.activations.sigmoid, name = "logits")
        if self.concat and 1 < len(input_shape):
            self.post = tf.keras.layers.Concatenate(axis = -2, name = "logits_concat")

    def call(self, inputs, feature = False):
        """Return per-anchor class scores; optionally also the tower features."""
        if not isinstance(inputs, list):
            inputs = [inputs]
        out = []
        features = []
        for j, x in enumerate(inputs):
            for i in range(self.n_depth):
                x = self.convs[i](x)
                if self.normalize is not None:
                    x = self.norms[i][j](x)
                x = self.acts[i](x)
            features.append(x)
            x = self.act(self.reshape(self.head(x)))
            out.append(x)
        if len(out) == 1:
            out = out[0]
        elif self.concat:
            out = self.post(out)
        if feature:
            out = [out, features]
        return out

    def get_config(self):
        """Serialize constructor arguments for Keras (de)serialization."""
        config = super(ClassNet, self).get_config()
        config["n_anchor"] = self.n_anchor
        config["n_class"] = self.n_class
        config["n_feature"] = self.n_feature
        config["n_depth"] = self.n_depth
        config["concat"] = self.concat
        config["convolution"] = self.convolution
        config["normalize"] = self.normalize
        config["activation"] = self.activation
        return config
class BoxNet(tf.keras.layers.Layer):
    """Box-regression head: shared conv tower + 4 regression values per anchor.

    Mirrors ClassNet but emits raw (unactivated) box deltas.
    """

    def __init__(self, n_anchor, n_feature = 224, n_depth = 4, concat = True, convolution = conv, normalize = tf.keras.layers.BatchNormalization, activation = tf.keras.activations.relu, **kwargs):
        super(BoxNet, self).__init__(**kwargs)
        self.n_anchor = n_anchor
        self.n_feature = n_feature
        self.n_depth = n_depth
        self.concat = concat
        self.convolution = convolution
        self.normalize = normalize
        self.activation = activation

    def build(self, input_shape):
        """Create the tower; accepts one shape or a list of per-level shapes."""
        if not isinstance(input_shape, list):
            input_shape = [input_shape]
        # One conv per depth, shared by all levels; norms per (depth, level).
        self.convs = [self.convolution(self.n_feature, 3, padding = "same", name = "depth{0}_conv".format(i + 1)) for i in range(self.n_depth)]
        if self.normalize is not None:
            self.norms = [[self.normalize(name = "depth{0}_norm{1}".format(i + 1, j + 1)) for j in range(len(input_shape))] for i in range(self.n_depth)]
        self.acts = [tf.keras.layers.Activation(self.activation, name = "depth{0}_act".format(i + 1)) for i in range(self.n_depth)]
        # 4 regression values (box deltas) per anchor.
        self.head = self.convolution(self.n_anchor * 4, 3, padding = "same", name = "head")
        self.reshape = tf.keras.layers.Reshape([-1, 4], name = "regress")
        if self.concat and 1 < len(input_shape):
            self.post = tf.keras.layers.Concatenate(axis = -2, name = "regress_concat")

    def call(self, inputs, feature = False):
        """Return per-anchor box regressions; optionally also tower features."""
        if not isinstance(inputs, list):
            inputs = [inputs]
        out = []
        features = []
        for j, x in enumerate(inputs):
            for i in range(self.n_depth):
                x = self.convs[i](x)
                if self.normalize is not None:
                    x = self.norms[i][j](x)
                x = self.acts[i](x)
            features.append(x)
            x = self.reshape(self.head(x))
            out.append(x)
        if len(out) == 1:
            out = out[0]
        elif self.concat:
            out = self.post(out)
        if feature:
            out = [out, features]
        return out

    def get_config(self):
        """Serialize constructor arguments for Keras (de)serialization."""
        config = super(BoxNet, self).get_config()
        config["n_anchor"] = self.n_anchor
        config["n_feature"] = self.n_feature
        config["n_depth"] = self.n_depth
        config["concat"] = self.concat
        config["convolution"] = self.convolution
        config["normalize"] = self.normalize
        config["activation"] = self.activation
        return config
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""integration pytests for :mod:`graypy`
.. note::
These tests require an local instance of Graylog to send messages to.
"""
import requests
def validate_local_graylog_up():
    """Return True if a local Graylog instance answers on its API endpoint."""
    try:
        # Robustness fix: without a timeout this could hang the whole test
        # session if the port is filtered instead of refused.
        requests.get("http://127.0.0.1:9000/api", timeout=2)
        return True
    except Exception:
        # Deliberate best-effort probe: any failure means "not up".
        return False
# Evaluated once at import time; integration tests key off this flag.
LOCAL_GRAYLOG_UP = validate_local_graylog_up()
| nilq/baby-python | python |
import petsc4py
import sys
petsc4py.init(sys.argv)
from petsc4py import PETSc
def PCDKSPsetup(F, Q, A, tol=None):
    """Create hypre-preconditioned, apply-only KSP solvers for F, A and Q.

    Arguments:
        F, Q, A -- PETSc matrices (each solver is set up with mat == pmat).
        tol -- optional relative tolerance for the Q solver. Bug fix: the
               original called kspQ.setTolerances(tol) with an undefined
               name `tol` (NameError); it is now an explicit optional
               parameter, skipped when None, so existing callers still work.

    Returns:
        (kspF, kspA, kspQ) -- configured KSP solvers.
    """
    def _make_ksp(mat):
        # One 'preonly' solver per operator: a single preconditioner apply.
        ksp = PETSc.KSP()
        ksp.create(comm=PETSc.COMM_WORLD)
        pc = ksp.getPC()
        ksp.setType('preonly')
        pc.setType('hypre')
        ksp.setFromOptions()
        ksp.setOperators(mat, mat)
        return ksp

    kspF = _make_ksp(F)
    kspA = _make_ksp(A)
    kspQ = _make_ksp(Q)
    if tol is not None:
        kspQ.setTolerances(tol)
    return kspF, kspA, kspQ
def LSCKSPsetup(F, QB, B):
    """Create hypre-preconditioned KSP solvers for F and for B*QB.

    Arguments:
        F -- PETSc matrix solved by the first KSP.
        QB, B -- PETSc matrices; their product B*QB is the operator of the
                 second KSP (LSC-style preconditioner setup — presumably the
                 scaled Schur-complement approximation; confirm with callers).

    Returns:
        (kspF, kspBQB) -- two 'preonly' (single preconditioner apply) solvers.
    """
    # OptDB = PETSc.Options()
    # OptDB['pc_hypre_type'] = 'boomeramg'
    # OptDB['pc_hypre_boomeramg_strong_threshold'] = 0.5
    # OptDB['pc_hypre_boomeramg_grid_sweeps_all'] = 1
    BQB = B*QB
    kspF = PETSc.KSP()
    kspF.create(comm=PETSc.COMM_WORLD)
    pcF = kspF.getPC()
    kspF.setType('preonly')
    pcF.setType('hypre')
    kspF.setFromOptions()
    kspBQB = PETSc.KSP()
    kspBQB.create(comm=PETSc.COMM_WORLD)
    pcBQB = kspBQB.getPC()
    kspBQB.setType('preonly')
    pcBQB.setType('hypre')
    kspBQB.setFromOptions()
    kspF.setOperators(F,F)
    kspBQB.setOperators(BQB,BQB)
    return kspF, kspBQB
| nilq/baby-python | python |
"""
Reasoner based on the 'model theory' by Guerth:
https://github.com/CognitiveComputationLab/cogmods/blob/master/modal/student_projects/2019_guerth/models/mmodalsentential/reasoner.py
Modified by Kaltenbl for propositional reasoning
"""
import numpy as np
from .assertion_parser import parse_all, facts
from .model_builder import premises_model, remove_duplicates, not_model
from .logger import logging
def model(premises):
    """Build a single mental model from one or more premise strings.

    Arguments:
        premises {list or str} -- premise string(s)

    Returns:
        MentalModel -- the combined model of all premises
    """
    if isinstance(premises, list):
        premise_list = premises
    else:
        premise_list = [premises]
    return premises_model(parse_all(premise_list))
def all_val(arr, val):
    """Check if all values in array have specific value (True for empty input).

    Arguments:
        arr {np.array} -- 1D numpy array of ints
        val {int} -- value to check

    Returns:
        bool -- yes/no
    """
    # Idiomatic short-circuiting equivalent of the original manual loop.
    return all(el == val for el in arr)
def some_val(arr, val):
    """Check if at least one value in array has a specific value.

    Arguments:
        arr {np.array} -- 1D numpy array of ints
        val {int} -- value to check

    Returns:
        bool -- yes/no
    """
    # Idiomatic short-circuiting equivalent of the original manual loop.
    return any(el == val for el in arr)
def what_follows(premises, system=1):
    """Return what follows from the premises, excluding already-stated facts.

    Arguments:
        premises {list} -- list of premise strings

    Keyword Arguments:
        system {int} -- system 1 or 2 (default: {1})

    Returns:
        tuple -- (necessary, possible) clauses; facts are filtered from both,
                 and necessary clauses are additionally filtered from possible
    """
    stated = facts(premises)
    necessary, possible = nec_and_pos(premises, system)
    novel_necessary = [clause for clause in necessary if clause not in stated]
    novel_possible = [clause for clause in possible
                      if clause not in stated and clause not in necessary]
    return novel_necessary, novel_possible
def nec_and_pos(premises, system=1):
    """Return clauses that are necessary and possible.

    A clause is necessary (positively/negatively) when its whole column is
    1/-1, and possible when at least one entry is 1/-1.

    Arguments:
        premises {list} -- list of premise strings

    Keyword Arguments:
        system {int} -- system 1 or 2 (default: {1})

    Raises:
        ValueError: if system is neither 1 nor 2

    Returns:
        tuple -- necessary and possible clauses that follow: (nec, pos)
    """
    m = model(premises)
    # The two systems differ only in which clause list / column accessor is
    # used; the original duplicated the whole classification loop per system.
    if system == 1:
        clauses, get_column = m.clauses, m.get_column
    elif system == 2:
        clauses, get_column = m.full_clauses, m.full_get_column
    else:
        raise ValueError("system must be 1 or 2, got {!r}".format(system))
    nec = []
    pos = []
    for c in clauses:
        column = get_column(c)
        if all_val(column, 1):
            nec.append([c])
        elif all_val(column, -1):
            nec.append(['not'] + [c])
        if some_val(column, 1):
            pos.append([c])
        if some_val(column, -1):
            pos.append(['not'] + [c])
    return nec, pos
def how_possible(premises, conclusion):
    """Return how possible the conclusion is given the premisses.

    Maps the system-2 probability onto seven verbal categories.

    Arguments:
        premises {list} -- list of assertion strings
        conclusion {str} -- assertion string

    Returns:
        str -- the description of how possible
    """
    p = probability(premises, conclusion, 2)
    # Guard-clause form of the original elif chain; boundary semantics kept.
    if p == 0:
        return "impossible"
    if p < 0.1:
        return "almost impossible"
    if p < 0.3:
        return "less possible"
    if p <= 0.7:
        return "possible"
    if p <= 0.9:
        return "very possible"
    if p < 1:
        return "almost certain"
    return "certain"
def probability(premises, conclusion, system=1):
    """Return probability of an assertion given the premises.

    Based on an "assumption of equal possibilities": the number of models of
    the conclusion that are also models of the premises divided by the number
    of models of the premises.

    Arguments:
        premises {list} -- list of premise strings
        conclusion {str} -- conclusion string

    Keyword Arguments:
        system {int} -- system 1 or 2 (default: {1}); system 1 yields None

    Returns:
        float or None -- probability rounded to 2 decimals, or None when the
        system is 1 or the models share no clauses
    """
    if system == 1:
        return None
    m1 = model(premises)
    m2 = model(conclusion)
    common = in_common(m1, m2, system)
    if not common:
        return None
    poss_1, poss_2 = poss_in_common(m1, m2, system, common)
    # Consistency fix: reuse matching_poss instead of duplicating its
    # row-matching double loop inline.
    matches = matching_poss(poss_1, poss_2)
    return round(matches / len(m1.full_poss), 2)
def poss_in_common(m1, m2, system=1, common=None, keep_duplicates=True):
    """Return only those parts of the possibilities for which the two models
    have clauses in common.

    Arguments:
        m1 {MentalModel} -- model 1
        m2 {MentalModel} -- model 2

    Keyword Arguments:
        system {int} -- system 1 or 2; any other value is treated as 2,
            matching the original behaviour (default: {1})
        common {(str,int,int)} -- (clause, index_1, index_2) (default: {None})
        keep_duplicates {bool} -- if True keep duplicate rows else discard (default: {True})

    Returns:
        (np.array, np.array) -- the reduced possibilities of the models
    """
    if not common:
        common = in_common(m1, m2, system)

    def reduced(m):
        # Build an array holding, for one model, only the columns that
        # correspond to the shared clauses. This was previously duplicated
        # verbatim for m1 and m2.
        if system == 1:
            n_rows = len(m.poss)
            get_column = m.get_column
        else:
            n_rows = len(m.full_poss)
            get_column = m.full_get_column
        poss = np.zeros((n_rows, len(common)), dtype=int)
        for i, cl in enumerate(common):
            poss[:, i] = get_column(cl[0])
        return poss

    poss_1 = reduced(m1)
    poss_2 = reduced(m2)
    if not keep_duplicates:
        poss_1 = remove_duplicates(poss_1)
        poss_2 = remove_duplicates(poss_2)
    return poss_1, poss_2
def matching_poss(poss_1, poss_2):
    """Count how many possibility rows the two tables have in common.

    Every (row_1, row_2) pair that is equal contributes one to the count,
    so duplicate rows are counted multiple times.

    Arguments:
        poss_1 {np.array} -- possibilities 1
        poss_2 {np.array} -- possibilities 2

    Returns:
        int -- the count/matches
    """
    total = 0
    for candidate in poss_2:
        total += sum(1 for reference in poss_1 if np.array_equal(reference, candidate))
    return total
def verify(premises, evidence, system=1):
    """Verify premisses given the evidence.
    Arguments:
        premises {list} -- list of assertion strings
        evidence {str} -- assertion string (it is concatenated into the log
            message below, so it must be a single string, not a list)
    Keyword Arguments:
        system {int} -- system 1 or 2 (default: {1})
    Raises:
        NotImplementedError
        Exception: invalid system
    Returns:
        bool -- True/False
        str -- Undetermined/Possibly True
    """
    logging("Given evidence '" + evidence + "', verify premisses '" + str(premises) + "' (system " + str(system) + ")")
    p = model(premises)
    e = model(evidence)
    common = in_common(p, e, system)
    if system == 1:
        # If the evidence model has clauses the premises never mention,
        # system 1 cannot decide anything.
        if len(common) != len(e.clauses):
            logging("Evidence lacks information in premises")
            return "Undetermined"
        else:
            poss_1, poss_2 = poss_in_common(p, e, system, common, False)
            matches = matching_poss(poss_1, poss_2)
            # Also check the *negated* premises: evidence compatible only
            # with the negation falsifies; compatible with both is
            # undetermined.
            neg_p = not_model(p)
            neg_poss_1, neg_poss_2 = poss_in_common(neg_p, e, system, in_common(neg_p, e), False)
            neg_matches = matching_poss(neg_poss_1, neg_poss_2)
            if neg_matches and not matches:
                return False
            elif neg_matches and matches:
                return "Undetermined"
            elif not neg_matches and matches:
                return True
            else:
                return "Undetermined"
    # if all and only those poss in premisses are supported by evidence, then true
    # if all poss in premisses are supported by evidence but evidence has more models, then undetermined
    # if not all poss in premisses are supported by evidence, then false
    elif system == 2:
        if len(common) != len(e.full_clauses):
            logging("Evidence lacks information in premises")
            return "Undetermined"
        else:
            poss_1, poss_2 = poss_in_common(p, e, system, common, False)
            # Count possibility rows shared between premises and evidence.
            matches = 0
            for row_2 in poss_2:
                for row_1 in poss_1:
                    if np.array_equal(row_1, row_2):
                        matches += 1
            # if all and only those poss in premisses are supported by evidence, then true
            if matches == len(poss_1) and len(poss_1) == len(poss_2):
                return True
            elif matches == len(poss_1):
                return "Undetermined"
            # if some evidence supports some premisses, then possibly true
            elif matches > 0:
                return "Possibly True"
            elif matches == 0:
                return False
            else:
                # Unreachable in practice: matches is a non-negative count.
                raise NotImplementedError
    else:
        raise Exception
def in_common(m1, m2, system=1):
    """Return clauses the two models share.

    Arguments:
        m1 {MentalModel} -- model 1
        m2 {MentalModel} -- model 2

    Keyword Arguments:
        system {int} -- system 1 or 2 (default: {1})

    Raises:
        ValueError: if system is neither 1 nor 2

    Returns:
        list -- tuples (clause, index in model 1, index in model 2)
    """
    if system == 1:
        clauses_1 = m1.clauses
        clauses_2 = m2.clauses
    elif system == 2:
        clauses_1 = m1.full_clauses
        clauses_2 = m2.full_clauses
    else:
        # Was a bare `raise Exception`; ValueError (a subclass) names the
        # problem while remaining catchable by existing handlers.
        raise ValueError("system must be 1 or 2, got {!r}".format(system))
    return [
        (cl1, i1, i2)
        for i1, cl1 in enumerate(clauses_1)
        for i2, cl2 in enumerate(clauses_2)
        if cl1 == cl2]
def necessary(premises, conclusion, system=1, weak=False):
    """Is conclusion necessary given the premises?

    Arguments:
        premises {list} -- list of premise strings
        conclusion {str} -- conclusion string

    Keyword Arguments:
        system {int} -- system 1 or 2 (default: {1})
        weak {bool} -- weak necessity (default: {False})

    Returns:
        bool -- yes or no
    """
    m1 = model(premises)
    m2 = model(conclusion)
    common = in_common(m1, m2, system)
    if not common:
        return False
    poss_1, poss_2 = poss_in_common(m1, m2, system, common, False)
    # Reuse the shared row-matching helper instead of an inline double loop.
    matches = matching_poss(poss_1, poss_2)
    # Every possibility of the conclusion must also be a possibility of the
    # premises.
    if matches != len(poss_2):
        return False
    # Strong necessity additionally requires the premises to allow nothing
    # beyond the conclusion; weak necessity tolerates extra possibilities.
    return weak or matches == len(poss_1)
def possible(premises, conclusion, system=1):
    """Is conclusion possible given the premises?

    Arguments:
        premises {list} -- list of premise strings
        conclusion {str} -- conclusion string

    Keyword Arguments:
        system {int} -- system 1 or 2 (default: {1})

    Returns:
        bool -- yes or no
    """
    m1 = model(premises)
    m2 = model(conclusion)
    common = in_common(m1, m2, system)
    if not common:
        return False
    poss_1, poss_2 = poss_in_common(m1, m2, system, common, False)
    # The conclusion is possible as soon as at least one of its possibilities
    # is also a possibility of the premises. The original four-way branch
    # ladder returned True in every non-zero case, so it collapses to this.
    return matching_poss(poss_1, poss_2) > 0
def defeasance(premises, fact, system=1):
    """Revise premises given the fact.

    Arguments:
        premises {list} -- list of assertion strings
        fact {str} -- assertion string

    Keyword Arguments:
        system {int} -- system 1 or 2 (default: {1})

    Returns:
        MentalModel -- revised model of premisses
    """
    fact_model = model(fact)
    premisses_models = [model(p) for p in premises]
    keep = []
    reject = []
    not_in_common = []
    # Sort each premise into: consistent with the fact (keep), contradicted
    # by it (reject), or sharing no clauses with it (not_in_common).
    for i, m in enumerate(premisses_models):
        common = in_common(m, fact_model, system)
        if common:
            if part_of_model(m, fact_model, common, system):
                logging("fact model MATCHES premisse model")
                keep.append(premises[i])
            else:
                logging("fact model MISMATCHES premisse model")
                reject.append(premises[i])
        else:
            not_in_common.append(premises[i])
    logging("premisses to reject:")
    for p in reject:
        logging(p)
    logging("premisses to keep:")
    for p in keep:
        logging(p)
    logging("premisses not in common:")
    for p in not_in_common:
        logging(p)
    logging("new model that needs explaining:")
    # NOTE(review): unrelated premises are re-added only when something was
    # rejected -- behaviour preserved from the original; confirm intended.
    if reject:
        keep.extend(not_in_common)
    keep.append(fact)
    new_model = model(keep)
    logging(new_model)
    # Try to explain the revised model with background knowledge.
    new_model = match_knowledge(new_model, system)
    return new_model
def part_of_model(m, fact_model, common, system=1):
    """Check whether one model is part of another.

    True iff every possibility row of *fact_model* also occurs among the
    possibility rows of *m* (compared over the shared clauses only).

    Arguments:
        m {MentalModel} -- model
        fact_model {MentalModel} -- fact model
        common {(str,int,int)} -- clauses in common with indices

    Keyword Arguments:
        system {int} -- system 1 or 2 (default: {1})

    Returns:
        bool -- True if fact is part, else False
    """
    poss_1, poss_2 = poss_in_common(m, fact_model, system, common)
    return all(
        any(np.array_equal(row, fact_row) for row in poss_1)
        for fact_row in poss_2)
def match_knowledge(m, system=1):
    """Return knowledge model if it matches the model, else return back the model
    Arguments:
        m {MentalModel} -- the model
    Keyword Arguments:
        system {int} -- system 1 or 2 (default: {1})
    Returns:
        MentalModel -- either the matching knowledge model or the unchanged input model
    """
    # Hard-coded background knowledge (snake-bite examples); the commented
    # entries below are alternatives kept for reference.
    knowledge = []
    # knowledge.append(model(['a poisonous snake bites her & she dies']))
    # knowledge.append(model(['~a poisonous snake bites her & ~she dies']))
    knowledge.append(model(['a poisonous snake bites her & she takes antidote & ~ she dies']))
    knowledge.append(model(['~a poisonous snake bites her & the snake has a weak jaw & ~ she dies']))
    for k in knowledge:
        common = in_common(k, m, system)
        # Return the first knowledge model that contains all of m's
        # possibilities (over their shared clauses).
        if part_of_model(k, m, common, system):
            logging("knowledge did match")
            # print(k)
            return k
    logging("knowledge did not match")
    return m
# ((( a-poisonous-snake-bites-her) ( she-dies))
# ((- a-poisonous-snake-bites-her) (- she-dies))
# ((- a-poisonous-snake-bites-her)(the-snake-has-a-weak-jaw) (- she-dies))
# ((- a-poisonous-snake-bites her)(the-snake-is-blind) (- she-dies)))
# ((( a-poisonous-snake-bites-her) (she-takes-antidote) (- she-dies))
# (( a-poisonous-snake-bites-her) (the-tourniquet-blocks-the-poison)(- she-dies))
# (( a-poisonous-snake-bites-her) (someone-sucks-out-the-poison) (- she-dies))
# (( a-poisonous-snake-bites her) (its-venom-lacks-potency) (- she-dies)))
# ((( she-anticipates-bite) ( she-takes-antidote))
# ((- she-anticipates-bite) (- she-takes-antidote)))
# ((( she-uses-a-tourniquet) ( the-tourniquet-blocks-the-poison))
# ((- she-uses-a-tourniquet) (- the-tourniquet-blocks-the-poison)))
# ((( someone-knows-what-to-do) ( someone-sucks-out-the-poison))
# ((- someone-knows-what-to-do) (- someone-sucks-out-the-poison)))
# ((( the-snake-has-a-disease) ( its-venom-lacks-potency))
# ((- the-snake-has-a-disease) (- its-venom-lacks-potency)))
# ((( the-snake-is-tired) ( the-snake-has-a-weak-jaw))
# ((- the-snake-is-tired) (- the-snake-has-a-weak-jaw)))
# ((( the-snake-is-diseased) ( the-snake-is-blind))
# ((- the-snake-is-diseased) (- the-snake-is-blind)))
def original_mSentential():
    """Replay the inference examples from the original lisp mSentential."""
    # Examples from the original lisp program:
    # (inference '((if a or b then c)(a)))
    # (inference '((God exists or atheism is right)))
    # (inference '((if a or b then c)(a)) 'what-follows?)
    # (inference '((a)(a or b)) 'necessary?)
    # (inference '((if a then b)(not b)(not a)) 'necessary?)
    # (inference '((if a poisonous snake bites her then she dies)(A poisonous snake bites her)(not she dies)) 'necessary?)
    # (inference '((a)(a or b)) 'possible?)
    # (inference '((it is hot or it is humid)(it is hot)) 'probability?)
    # (inference '((if a then b)(not a and not b)) 'verify?)
    print("model(['(a | b) -> c', 'a'])")
    print(model(['(a | b) -> c', 'a']))
    print()
    print()
    print("model(['God exists | atheism is right'])")
    print(model(['God exists | atheism is right']))
    print()
    print()
    print("what_follows(['a | b -> c', 'a'])")
    print(what_follows(['a | b -> c', 'a']))
    print()
    print()
    print("what_follows(['a | b -> c', 'a'], 2)")
    print(what_follows(['a | b -> c', 'a'], 2))
    print()
    print()
    print("necessary(['a'], 'a|b')")
    print(necessary(['a'], 'a|b'))
    print()
    print()
    print("necessary(['a'], 'a|b', 2)")
    print(necessary(['a'], 'a|b', 2))
    print()
    print()
    print("necessary(['a -> b', '~b'], '~a')")
    print(necessary(['a -> b', '~b'], '~a'))
    print()
    print()
    print("necessary(['a -> b', '~b'], '~a', 2)")
    print(necessary(['a -> b', '~b'], '~a', 2))
    print()
    print()
    print("necessary(['a poisonous snake bites her -> she dies', 'a poisonous snake bites her'], '~she dies')")
    print(necessary(['a poisonous snake bites her -> she dies', 'a poisonous snake bites her'], '~she dies'))
    print()
    print()
    print("necessary(['a poisonous snake bites her -> she dies', 'a poisonous snake bites her'], '~she dies', 2)")
    print(necessary(['a poisonous snake bites her -> she dies', 'a poisonous snake bites her'], '~she dies', 2))
    print()
    print()
    print("possible(['a'], 'a|b')")
    print(possible(['a'], 'a|b'))
    print()
    print()
    print("possible(['a'], 'a|b', 2)")
    print(possible(['a'], 'a|b', 2))
    print()
    print()
    print("probability(['it is hot | it is humid'], 'it is hot')")
    print(probability(['it is hot | it is humid'], 'it is hot'))
    print()
    print()
    print("probability(['it is hot | it is humid'], 'it is hot', 2)")
    print(probability(['it is hot | it is humid'], 'it is hot', 2))
    print()
    print()
    print("verify(['a -> b'], '~a & ~b')")
    print(verify(['a -> b'], '~a & ~b'))
    print()
    print()
    print("verify(['a -> b'], '~a & ~b', 2)")
    print(verify(['a -> b'], '~a & ~b', 2))
    print()
    print()
def weak_necessity():
    """Demonstrate strong vs. weak necessity on 'a|b' entailing 'a^b'."""
    # weak necessity
    print("necessary(['a|b'], 'a^b', weak=False)")
    print(necessary(['a|b'], 'a^b', weak=False))
    print()
    print()
    print("necessary(['a|b'], 'a^b', 2, weak=False)")
    print(necessary(['a|b'], 'a^b', 2, weak=False))
    print()
    print()
    print("necessary(['a|b'], 'a^b', weak=True)")
    print(necessary(['a|b'], 'a^b', weak=True))
    print()
    print()
    print("necessary(['a|b'], 'a^b', 2, weak=True)")
    print(necessary(['a|b'], 'a^b', 2, weak=True))
    print()
    print()
def from_paper():
    """Run the example inferences taken from the accompanying paper."""
    # New tests
    print("possible('trump | ~trump', '~trump')")
    print(possible('trump | ~trump', '~trump'))
    print()
    print()
    print("how_possible('<e:0.9> snow', 'snow', 2)")
    print(how_possible('<e:0.9> snow', 'snow', 2))
    print()
    print()
    print("possible('<>pat & <>~viv', 'pat & ~viv')")
    print(possible('<>pat & <>~viv', 'pat & ~viv'))
    print()
    print()
    print("model('<>(Ivanka | Jared)')")
    print(model('<>(Ivanka | Jared)'))
    print()
    print()
    print("probability('<e:0.9> snow', 'snow', 2)")
    print(probability('<e:0.9> snow', 'snow', 2))
    print()
    print()
    print("how_possible('<>pat & <>~viv', 'pat & ~viv', 2)")
    print(how_possible('<>pat & <>~viv', 'pat & ~viv', 2))
    print()
    print()
    # NOTE(review): the label below omits the list brackets used in the
    # actual call; left unchanged because the label is runtime output.
    print("model('pie ^ cake', 'pie ^ ~cake')")
    print(model(['pie ^ cake', 'pie ^ ~cake']))
    print()
    print()
    print("model(['<>A', 'A->B'])")
    print(model(['<>A', 'A->B']))
    print()
    print()
    print("necessary(['cold & (snowing ^ raining)'], 'snowing ^ raining', 2)")
    print(necessary(['cold & (snowing ^ raining)'], 'snowing ^ raining', 2))
    print()
    print()
    print("model(['canal -> [a] flooding'])")
    print(model(['canal -> [a] flooding']))
    print()
    print()
    print("model(['canal -> <a> flooding'])")
    print(model(['canal -> <a> flooding']))
    print()
    print()
    print("model(['children -> [d] taking care', 'taking care -> [d] ~leaving'])")
    print(model(['children -> [d] taking care', 'taking care -> [d] ~leaving']))
    print()
    print()
    print("what_follows(['children -> [d] taking care', 'taking care -> [d] ~leaving'])")
    print(what_follows(['children -> [d] taking care', 'taking care -> [d] ~leaving']))
    print()
    print()
    print("model(['[d] (children -> taking care)', '[d] (taking care -> ~leaving)'])")
    print(model(['[d] (children -> taking care)', '[d] (taking care -> ~leaving)']))
    print()
    print()
    print("what_follows(['[d] (children -> taking care)', '[d] (taking care -> ~leaving)'], 2)")
    print(what_follows(['[d] (children -> taking care)', '[d] (taking care -> ~leaving)'], 2))
    print()
    print()
def open_questions():
    """Probe cases whose intended behaviour is still an open question."""
    print("model(['[d] ich sage immer die wahrheit', '~ich sage immer die wahrheit'])")
    print(model(['[d] ich sage immer die wahrheit', '~ich sage immer die wahrheit']))
    print()
    print()
    print("model(['[e] ich sage immer die wahrheit', '~ich sage immer die wahrheit'])")
    print(model(['[e] ich sage immer die wahrheit', '~ich sage immer die wahrheit']))
    print()
    print()
    print("model(['[a] ich sage immer die wahrheit', '~ich sage immer die wahrheit'])")
    print(model(['[a] ich sage immer die wahrheit', '~ich sage immer die wahrheit']))
    print()
    print()
    print("model(['~<e:0.9>a'])")
    print(model(['~<e:0.9>a']))
    print()
    print()
    print(model('<e:0.9>snow'))
    print(how_possible('<e:0.9>snow', 'snow'))
    print(how_possible('<e:1>snow', 'snow'))
    print(how_possible('<e:0.5>snow', 'snow'))
    print(how_possible('<e:0.2>snow', 'snow'))
    print(probability('<e:0.2>snow', 'snow', 2))
def testing_defeasance():
    """Exercise defeasance() on the snake-bite example for both systems."""
    print("defeasance(['a poisonous snake bites her -> she dies', 'a poisonous snake bites her'], '~she dies')")
    print(defeasance(['a poisonous snake bites her -> she dies', 'a poisonous snake bites her'], '~she dies'))
    print("defeasance(['a poisonous snake bites her -> she dies', 'a poisonous snake bites her'], '~she dies', 2)")
    print(defeasance(['a poisonous snake bites her -> she dies', 'a poisonous snake bites her'], '~she dies', 2))
def testing_verify():
    """Exercise verify() across connectives and both systems."""
    print(verify('a | b', 'a & b'))
    print(verify('a ^ b', 'a & b'))
    print(verify('a -> b', 'a & b'))
    print(verify('a | b', 'a & b', 2))
    print(verify('a ^ b', 'a & b', 2))
    print(verify('a -> b', 'a & b', 2))
    print(verify('a | b', '~a & ~b'))
    print(verify('a ^ b', '~a & ~b'))
    print(verify('a -> b', '~a & ~b'))
    print(verify('a | b', '~a & ~b', 2))
    print(verify('a ^ b', '~a & ~b', 2))
    print(verify('a -> b', '~a & ~b', 2))
    print(verify('a -> b', 'a & ~b'))
    print(verify('a -> b', 'a & ~b', 2))
    print("######################################################")
    print(verify('a -> b', 'a & b', 2))
    print(verify('a -> b', 'a & ~b', 2))
    print(verify('a -> b', '~a & b', 2))
    print(verify('a -> b', '~a & ~b', 2))
    print(verify('a <-> b', 'a & b', 2))
    print(verify('a <-> b', 'a & ~b', 2))
    print(verify('a <-> b', '~a & b', 2))
    print(verify('a <-> b', '~a & ~b', 2))
    print(verify('a | b', 'a & b', 2))
    print(verify('a | b', 'a & ~b', 2))
    print(verify('a | b', '~a & b', 2))
    print(verify('a | b', '~a & ~b', 2))
    print(verify('a ^ b', 'a & b', 2))
    print(verify('a ^ b', 'a & ~b', 2))
    print(verify('a ^ b', '~a & b', 2))
    print(verify('a ^ b', '~a & ~b', 2))
    # NOTE(review): the line below duplicates the previous one -- presumably
    # accidental; left in place to keep the printed output unchanged.
    print(verify('a ^ b', '~a & ~b', 2))
# Demo driver: run every example suite when executed as a script.
if __name__ == "__main__":
    print('############################### original examples ####################################\n\n')
    original_mSentential()
    print('############################### weak necessity ######################################\n\n')
    weak_necessity()
    print('############################### examples from paper ######################################\n\n')
    from_paper()
    print('############################### open questions ######################################\n\n')
    open_questions()
    print('############################### TESTING ######################################\n\n')
    testing_defeasance()
    testing_verify()
| nilq/baby-python | python |
def find(db, user):
    """Return the notelist document for *user*, or None if it does not exist.

    :param db: Mongo database handle
    :param user: user id, used as the document ``_id``
    :return: the matching document or None
    """
    return db.notelist.find_one({"_id": user})
def find_all_lists(db, user):
    """Return every list stored for *user* (empty if the user has none).

    :param db: Mongo database handle
    :param user: user id, used as the document ``_id``
    :return: the "lists" mapping of the user's document, or []
    """
    document = db.notelist.find_one({"_id": user}, {"lists": 1})
    # Bug fix: find_one returns None for an unknown user, which previously
    # crashed with AttributeError on .get (sibling find_list already guards).
    if not document:
        return []
    return document.get("lists", [])
def find_list(db, user, list_name):
    """Return the named list for *user*.

    :param db: Mongo database handle
    :param user: user id, used as the document ``_id``
    :param list_name: name of the list to fetch
    :return: the list's notes, or [] if the user or list is missing
    """
    projection = {"lists.{}".format(list_name): 1}
    document = db.notelist.find_one({"_id": user}, projection)
    return document["lists"].get(list_name, []) if document else []
def find_all_lists_names(db, user):
    """Return the names of all of *user*'s lists (empty if the user has none).

    :param db: Mongo database handle
    :param user: user id, used as the document ``_id``
    :return: list of list names
    """
    document = db.notelist.find_one({"_id": user}, {"lists": 1})
    # Bug fix: guard against an unknown user (find_one returns None), which
    # previously crashed with a TypeError on the subscript below.
    if not document:
        return []
    # Iterating a dict yields its keys; no .keys() comprehension needed.
    return list(document["lists"])
def find_notes(db, user, list_name):
    """Return all the notes of a list.

    :param db: Mongo database handle
    :param user: user id, used as the document ``_id``
    :param list_name: name of the list to read
    :return: the list's notes, or [] for an unknown user
    :raises KeyError: if the user exists but the list does not (unchanged)
    """
    document = db.notelist.find_one({"_id": user}, {"lists": 1})
    # Bug fix: an unknown user previously crashed with a TypeError on the
    # subscript below; return [] like the sibling helpers.
    if not document:
        return []
    return document["lists"][list_name]
def insert_new_notelist(db, user):
    """
    It inserts a new notelist
    Creates the per-user document with an empty "lists" mapping.
    :param db: Mongo database handle
    :param user: user id, used as the document ``_id``
    :return:
    """
    # NOTE(review): Collection.insert is deprecated in pymongo 3+
    # (insert_one is the replacement) -- confirm the pymongo version in use
    # before modernizing, since all siblings use the legacy update() too.
    db.notelist.insert({"_id": user, "lists": {}})
def add_new_list(db, user, list_name):
    """Create an empty list named *list_name* for *user*.

    The per-user notelist document is created first when the user has none.

    :param db: Mongo database handle
    :param user: user id, used as the document ``_id``
    :param list_name: name of the new list
    """
    if not find(db, user):
        insert_new_notelist(db, user)
    db.notelist.update({"_id": user}, {"$set": {"lists.{}".format(list_name): []}})
def remove_list(db, user, list_name):
    """Delete the named list from *user*'s notelist document.

    :param db: Mongo database handle
    :param user: user id, used as the document ``_id``
    :param list_name: name of the list to delete
    """
    field = "lists.{}".format(list_name)
    db.notelist.update({"_id": user}, {"$unset": {field: 1}})
def add_note(db, user, list_name, note):
    """Append *note* to the named list, creating the list if needed.

    $addToSet keeps the list free of duplicate notes.

    :param db: Mongo database handle
    :param user: user id, used as the document ``_id``
    :param list_name: name of the target list
    :param note: the note to store
    :return: True
    """
    if not find_list(db, user, list_name):
        add_new_list(db, user, list_name)
    field = "lists.{}".format(list_name)
    db.notelist.update({"_id": user}, {"$addToSet": {field: note}})
    return True
def remove_note(db, user, list_name, note):
    """Remove a note from the named list.

    *note* may be either the 1-based position of the note in the list or
    the note text itself; positional removal is tried first.

    :param db: Mongo database handle
    :param user: user id, used as the document ``_id``
    :param list_name: name of the target list
    :param note: 1-based index or note value
    :return: True if the list existed, else False
    """
    the_list = find_list(db, user, list_name)
    if not the_list:
        return False
    try:
        index = int(note) - 1
    except (ValueError, TypeError):
        # Not a number: remove by value. Previously a bare `except:` wrapped
        # the DB calls too, silently masking database errors (and even
        # KeyboardInterrupt); only the int() conversion is expected to fail.
        db.notelist.update({"_id": user}, {"$pull": {"lists.{}".format(list_name): note}})
    else:
        # Positional removal: $unset leaves a null placeholder, $pull drops it.
        db.notelist.update({"_id": user}, {"$unset": {"lists.{}.{}".format(list_name, index): 1}})
        db.notelist.update({"_id": user}, {"$pull": {"lists.{}".format(list_name): None}})
    return True
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
from .dataObject import DataObject
from .muscle import Muscle
from .cell import Cell
from .network import Network
class Worm(DataObject):
    """
    A representation of the whole worm.
    All worms with the same name are considered to be the same object.
    Attributes
    ----------
    neuron_network : ObjectProperty
        The neuron network of the worm
    muscle : ObjectProperty
        Muscles of the worm
    """
    def __init__(self,scientific_name=False,**kwargs):
        """Create a Worm, defaulting its scientific name to "C. elegans"."""
        DataObject.__init__(self,**kwargs)
        # Property declarations register themselves on the instance via
        # owner=self (DataObject property machinery).
        self.name = Worm.DatatypeProperty("scientific_name", owner=self)
        Worm.ObjectProperty("neuron_network", owner=self, value_type=Network)
        Worm.ObjectProperty("muscle", owner=self, value_type=Muscle, multiple=True)
        Worm.ObjectProperty("cell", owner=self, value_type=Cell)
        if scientific_name:
            self.scientific_name(scientific_name)
        else:
            self.scientific_name("C. elegans")
    def get_neuron_network(self):
        """
        Return the neuron network of the worm.
        Example::
            # Grabs the representation of the neuronal network
            >>> net = P.Worm().get_neuron_network()
            # Grab a specific neuron
            >>> aval = net.aneuron('AVAL')
            >>> aval.type()
            set([u'interneuron'])
            #show how many connections go out of AVAL
            >>> aval.connection.count('pre')
            77
        :returns: An object to work with the network of the worm
        :rtype: PyOpenWorm.Network
        """
        return self.neuron_network()
    def muscles(self):
        """
        Get all Muscle objects attached to the Worm
        Returns a set of all muscles::
        Example::
            >>> muscles = P.Worm().muscles()
            >>> len(muscles)
            96
        :returns: A set of all muscles
        :rtype: set
        """
        return set(x for x in self._muscles_helper())
    def _muscles_helper(self):
        """Yield each muscle attached to this worm (internal generator)."""
        for x in self.muscle.get():
            yield x
    def get_semantic_net(self):
        """
        Get the underlying semantic network as an RDFLib Graph
        :returns: A semantic network containing information about the worm
        :rtype: rdflib.ConjunctiveGraph
        """
        return self.rdf
    def identifier(self, *args, **kwargs):
        # If the DataObject identifier isn't variable, then self is a specific
        # object and this identifier should be returned. Otherwise, if our name
        # attribute is _already_ set, then we can get the identifier from it and
        # return that. Otherwise, there's no telling from here what our identifier
        # should be, so the variable identifier (from DataObject.identifier() must
        # be returned
        ident = DataObject.identifier(self, *args, **kwargs)
        if 'query' in kwargs and kwargs['query'] == True:
            if not DataObject._is_variable(ident):
                return ident
        if self.name.hasValue():
            # name is already set, so we can make an identifier from it
            n = next(self.name._get())
            return self.make_identifier(n)
        else:
            return ident
| nilq/baby-python | python |
# This code is available under the MIT License.
# (c)2010-2011 Nakatani Shuyo / Cybozu Labs Inc.
# (c)2018-2019 Hiroki Iida / Retrieva Inc.
import nltk
import re
import MeCab
# English stopword list used for vocabulary filtering.
stopwords_list = nltk.corpus.stopwords.words('english')
# WordNet's lemmatizer truncates some words (presumably "was" -> "wa",
# "has" -> "ha"); recover_list restores those known cases -- see lemmatize().
recover_list = {"wa":"was", "ha":"has"}
# Shared lemmatizer instance.
wl = nltk.WordNetLemmatizer()
def load_corpus(ranges):
    """
    load data from corpus
    *ranges* must look like "start:end"; the matching slice of the Brown
    corpus fileids is returned as a list of word sequences.
    Returns None implicitly when *ranges* does not match the pattern --
    callers must handle that case.
    """
    tmp = re.match(r'(\d+):(\d+)$', ranges)
    if tmp:
        start = int(tmp.group(1))
        end = int(tmp.group(2))
        # Imported lazily so nltk corpus data is only needed when used.
        from nltk.corpus import brown as corpus
        return [corpus.words(fileid) for fileid in corpus.fileids()[start:end]]
def load_dataframe(documents):
    """Tokenize each document with a simple word regex, dropping documents
    that produce no tokens.

    Words may contain a single internal apostrophe (e.g. "don't").
    """
    token_pattern = re.compile(r'\w+(?:\'\w+)?')
    tokenized = (token_pattern.findall(doc) for doc in documents)
    return [tokens for tokens in tokenized if tokens]
def load_dataframe_jp(documents):
    """Tokenize Japanese documents with MeCab in wakati (word-split) mode.

    Unlike load_dataframe, empty documents are kept as empty token lists.
    """
    corpus = []
    tagger = MeCab.Tagger('-O wakati')
    # Parsing an empty string first is a common mecab-python workaround for
    # garbled output on subsequent calls -- presumably needed for the
    # installed MeCab version; confirm before removing.
    tagger.parse("")
    for doc in documents:
        tokens = tagger.parse(doc.strip()).split()
        corpus.append(tokens)
    return corpus
def load_file(filename):
    """Load a corpus from one file; each line is one document.

    :param filename: path to the text file
    :return: list of token lists, one per line that yields at least one token
    """
    corpus = []
    # 'with' guarantees the handle is closed even if tokenizing raises;
    # previously the file leaked on any exception before f.close().
    with open(filename, 'r') as f:
        for line in f:
            doc = re.findall(r'\w+(?:\'\w+)?', line)
            if len(doc) > 0:
                corpus.append(doc)
    return corpus
def is_stopword(w):
    """Return True if *w* is an English stopword (per NLTK's list)."""
    return w in stopwords_list
def lemmatize(w0):
    """Lowercase and lemmatize *w0*, undoing known over-truncated lemmas
    (e.g. "wa" is mapped back to "was")."""
    lemma = wl.lemmatize(w0.lower())
    return recover_list.get(lemma, lemma)
class Vocabulary:
    """Bidirectional word/id mapping with per-word document frequencies."""
    def __init__(self, excluds_stopwords=False):
        self.vocas = [] # id to word
        self.vocas_id = dict() # word to id
        self.docfreq = [] # id to document frequency
        self.excluds_stopwords = excluds_stopwords
    def term_to_id(self, term0):
        """Lemmatize *term0* and return its vocabulary id, assigning a new
        id on first sight; returns None for excluded stopwords."""
        term = lemmatize(term0)
        if self.excluds_stopwords and is_stopword(term):
            return None
        if term not in self.vocas_id:
            # First occurrence: assign the next id.
            voca_id = len(self.vocas)
            self.vocas_id[term] = voca_id
            self.vocas.append(term)
            self.docfreq.append(0)
        else:
            voca_id = self.vocas_id[term]
        return voca_id
    def doc_to_ids(self, doc):
        """Convert a token sequence to a list of vocabulary ids, bumping
        each word's document frequency at most once per document."""
        ids_list = []
        words = dict()
        for term in doc:
            id = self.term_to_id(term)
            if id is not None:
                ids_list.append(id)
                if id not in words:
                    # Count each word once per document.
                    words[id] = 1
                    self.docfreq[id] += 1
        # Some corpus readers hand out closable streams; release them.
        if "close" in dir(doc):
            doc.close()
        return ids_list
    def cut_low_freq(self, corpus, threshold=1):
        """Drop words whose document frequency is <= *threshold*, renumber
        the survivors, and return *corpus* remapped to the new ids."""
        new_vocas = []
        new_docfreq = []
        self.vocas_id = dict()
        conv_map = dict()  # old id -> new id for surviving words
        for id, term in enumerate(self.vocas):
            freq = self.docfreq[id]
            if freq > threshold:
                new_id = len(new_vocas)
                self.vocas_id[term] = new_id
                new_vocas.append(term)
                new_docfreq.append(freq)
                conv_map[id] = new_id
        self.vocas = new_vocas
        self.docfreq = new_docfreq
        def conv(doc):
            # Remap a document's ids, silently dropping removed words.
            new_doc = []
            for id in doc:
                if id in conv_map: new_doc.append(conv_map[id])
            return new_doc
        return [conv(doc) for doc in corpus]
    def __getitem__(self, v):
        """Return the word for vocabulary id *v*."""
        return self.vocas[v]
    def size(self):
        """Return the number of distinct words."""
        return len(self.vocas)
    def is_stopword_id(self, id):
        """Return True if the word behind *id* is an English stopword."""
        return self.vocas[id] in stopwords_list
| nilq/baby-python | python |
#coding=utf8
import logging
import logging.handlers
import time
from django.conf import settings
from django.core.management.base import BaseCommand
import django_rq
from redis_cache import get_redis_connection
from dbss.cardspace.models import warp_update_index
from dbss.daemonize import Daemonize
def test():
    # Placeholder action handed to Daemonize below; the real work happens
    # in MyDaemonized.run().
    pass
class MyDaemonized(Daemonize):
    """Daemon loop that periodically enqueues a search-index rebuild."""
    def run(self):
        # Poll forever: whenever the index queue is idle and edits have
        # accumulated (counter kept in redis under settings.INDEX_NAME),
        # enqueue one bulk index update and reset the counter.
        while True:
            self.logger.info('cron update index start')
            index_queue = django_rq.get_queue(settings.INDEX_QUEUE)
            if index_queue.count < 1 :
                index_redis = get_redis_connection('djrq')
                # Counter may be absent on first run; treat that as 0.
                index_count = int(index_redis.get(settings.INDEX_NAME)) if index_redis.get(settings.INDEX_NAME) else 0
                if index_count > 0:
                    self.logger.info('index count is ' + str(index_count) + ', cron update index enqueue')
                    index_redis.set(settings.INDEX_NAME, 0)
                    index_queue.enqueue(warp_update_index)
            self.logger.info('cron update index done, sleep ' + str(settings.INDEX_TIME) + '\n*********************')
            time.sleep(settings.INDEX_TIME)
class Command(BaseCommand):
    help = '''
    crond job to update index'
    cron excute update index action
    configurate time and count in settings.py
    '''
    def handle(self, *args, **kwargs):
        """Set up rotating-file logging and start the index daemon."""
        # Rotating log file: 10 MB per file, 5 backups.
        loghandler = logging.handlers.RotatingFileHandler('/var/log/cronindex.log' , maxBytes=10*1024*1024, backupCount=5)
        formatter = logging.Formatter('%(asctime)s-%(filename)s-[line:%(lineno)d]-%(levelname)s: %(message)s')
        loghandler.setFormatter(formatter)
        cronlog = logging.getLogger('cronindex')
        cronlog.addHandler(loghandler)
        cronlog.setLevel(logging.DEBUG)
        # keep_fds keeps the log file handle open across the daemon fork.
        daemond = MyDaemonized(app='cronindex', pid='/tmp/cronui.pid', action = test, keep_fds=[loghandler.stream.fileno()])
        daemond.start()
| nilq/baby-python | python |
from numpy import interp
def rangeMap(num):
    """Map a value from the signed 16-bit range [-32768, 32768] onto 1..10.

    numpy.interp clamps inputs outside the source range, so the result is
    always within 1..10; the interpolated float is truncated to an int.
    """
    mapped = interp(num, [-32768, 32768], [1, 10])
    return int(mapped)
# Echo loop: read one integer per line from stdin and print its 1..10 mapping.
while 1:
    print(rangeMap(int(input())))
"""
@brief test log(time=0s)
"""
import os
import unittest
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import check_pep8
class TestCodeStyle(unittest.TestCase):
    """Static-analysis tests: run pycodestyle/pylint over the project via
    pyquickhelper's check_pep8 helper."""
    def test_code_style_src(self):
        # Lint the package sources; pylint_ignore suppresses message ids,
        # skip filters known-noisy lines by substring.
        fLOG(
            __file__,
            self._testMethodName,
            OutputPrint=__name__ == "__main__")
        thi = os.path.abspath(os.path.dirname(__file__))
        src_ = os.path.normpath(os.path.join(thi, "..", "..", "src"))
        check_pep8(src_, fLOG=fLOG,
                   pylint_ignore=('C0103', 'C1801',
                                  'E0203',
                                  'R0201', 'R0901', 'R0902', 'R0911', 'R0912',
                                  'R0913', 'R0914', 'R0915', 'R1702', 'R1705',
                                  'W0613', 'C0415', 'R1732', 'W1514', 'R1735',
                                  'W0123', 'W0212', 'W0703', 'W0201', 'C0209'),
                   skip=["_nbconvert_config.py:",
                         #
                         "Redefining name 'fLOG'",
                         "tk_window.py:56",
                         "tk_window.py:68",
                         "function_helper.py:122",
                         "Unable to import 'Tkinter'",
                         "tk_window.py:50: W0603",
                         "tk_window.py:62: W0603",
                         "R1720",
                         ])
    def test_code_style_test(self):
        # Lint the test tree itself with a relaxed line-length limit,
        # excluding generated temp_* folders.
        fLOG(
            __file__,
            self._testMethodName,
            OutputPrint=__name__ == "__main__")
        thi = os.path.abspath(os.path.dirname(__file__))
        test = os.path.normpath(os.path.join(thi, "..", ))
        check_pep8(test, fLOG=fLOG, neg_pattern="temp_.*",
                   max_line_length=200,
                   pylint_ignore=('C0111', 'C0103', 'R0914', 'W0212', 'C0413', 'W0621',
                                  'W0703', 'W0622', 'W0122', 'R0912', 'R0201', 'R1735',
                                  'R0915', 'C1801', 'C0415', 'R1732', 'W1514', 'C0209'),
                   skip=["[E402] module ",
                         "test_windows_autopy3.py:",
                         "R1720",
                         ])
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| nilq/baby-python | python |
#!/usr/bin/env python
# encoding: utf-8
"""
Run the talus worker daemon, specifying the maximum number of cores to use,
the maximum RAM available for the VMs, the AMQP host to connect to, and
a plugins directory.
"""
# system imports
import argparse
import math
import multiprocessing
import os
import sys
# local imports
import slave
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        __file__,
        description=__doc__,
        formatter_class=argparse.RawTextHelpFormatter
    )

    # see #28 - configurable RAM/cpus per VM
    # total_ram / default_ram are in MB; the --ram option itself is in GB.
    total_ram = math.ceil(os.sysconf("SC_PAGE_SIZE") * os.sysconf("SC_PHYS_PAGES") / (1024.0 ** 2))
    default_ram = total_ram - 1024 # leave one gb of ram left by default

    parser.add_argument("--ram",
        # bug fix: the advertised default is now converted to GB to match
        # the unit named in the help text (it previously printed MB).
        help="Maximum amount of ram to use in GB (default {})".format(
            default_ram / 1024.0
        ),
        type=int,
        required=False,
        default=default_ram / 1024.0,
    )

    total_cpus = multiprocessing.cpu_count()
    default_cpus = multiprocessing.cpu_count() - 2
    parser.add_argument("--cpus",
        help="Maximum number of cores to use (default {})".format(
            default_cpus
        ),
        type=int,
        required=False,
        default=default_cpus,
    )
    parser.add_argument("-i", "--intf",
        help="Network interface",
        type=str,
    )
    parser.add_argument("--plugins",
        type=str,
        help="Path to plugins directory"
    )
    parser.add_argument("--amqp",
        help="the hostname of the AMQP server",
        default=None,
    )

    args = parser.parse_args(sys.argv[1:])
    ram = args.ram * 1024  # GB -> MB, matching total_ram's unit

    if args.amqp is None:
        print("ERROR! --amqp must be specified")
        exit(1)

    # Refuse to start when the requested resources exceed the host's.
    # Bug fix: these checks previously printed an error but fell through
    # and launched the worker with the invalid values anyway.
    if ram > total_ram:
        print("ERROR! --ram must be less than total_ram")
        exit(1)
    if args.cpus > total_cpus:
        print("ERROR! --cpu must be less than total_cpu")
        exit(1)

    slave.main(
        amqp_host=args.amqp,
        max_ram=ram,
        max_cpus=args.cpus,
        intf=args.intf,
        plugins_dir=args.plugins
    )
| nilq/baby-python | python |
from __future__ import annotations
from dataclasses import is_dataclass, fields, MISSING
from typing import Any
def nub(x: Any) -> dict[str, Any]:
assert is_dataclass(x)
out: dict[str, Any] = {}
for f in fields(x):
a = getattr(x, f.name)
if (
isinstance(a, dict | set | list)
and not a
and f.default_factory is not MISSING
and not f.default_factory()
):
continue
if a != f.default:
out[f.name] = a
return out
| nilq/baby-python | python |
#! /usr/bin/env python
#
"""CLI wrapper script, ensures that relative imports work correctly in a PyInstaller build"""
from isolyzer.isolyzer import main
# Entry point for the frozen (PyInstaller) build: delegating to the package's
# main() from this top-level wrapper keeps isolyzer's relative imports working.
if __name__ == '__main__':
    main()
| nilq/baby-python | python |
from io import StringIO
from collections import UserDict, UserList
import numpy as np
import confiddler.json as json
def test():
    """
    The drop-in json replacement should serialise custom mappings,
    custom sequences and numpy arrays transparently.
    """
    expected = {'l': [1, 2, 3], 'a': [0, 1, 2, 3, 4], 'o': {}}
    data = UserDict()
    data['l'] = UserList([1, 2, 3])
    data['a'] = np.arange(5)
    data['o'] = {}
    # Round-trip through a file-like object.
    buffer = StringIO()
    json.dump(data, buffer)
    buffer.seek(0)
    assert json.load(buffer) == expected
    # Round-trip through a plain string.
    assert json.loads(json.dumps(data)) == expected
# Allow running this test module directly; delegate to pytest so behaviour
# matches the regular test-runner invocation.
if __name__ == "__main__":
    import pytest
    pytest.main(['-s', '--tb=native', '--pyargs', 'confiddler.tests.test_json'])
| nilq/baby-python | python |
from rick_roll_detector.image import verify_image
import cv2
# vid_cap has to be a cv2.VideoCapture()
def verify_video(vid_cap: cv2.VideoCapture) -> bool:
    """Return True if any frame of the video contains Rick, else False.

    The capture is always released before returning.

    Bug fixes vs. the original:
    - the first frame was read and then immediately overwritten before being
      verified, so frame 0 was never checked;
    - when the very first read failed, the function fell through and returned
      None (instead of False) without releasing the capture.
    """
    success, image = vid_cap.read()
    while success:
        # Return true if the frame contains Rick.
        if verify_image(image):
            vid_cap.release()
            return True
        success, image = vid_cap.read()
    # Video ended (or could not be read at all) without a match.
    vid_cap.release()
    return False
| nilq/baby-python | python |
from src.bsl_python.preprocessing.experiments.experiment import Experiment
from src.bsl_python.preprocessing.processor.spiking_activity import DefaultSpikingActivity, MeanFilteredActivity, \
FilteredActivity
from src.bsl_python.preprocessing.processor.tuning_curve import TuningCurve
from collections import Counter
from numpy import unique
import math
import pandas as pd
import numpy as np
class FMS(Experiment):
    """Frequency-modulated sweep (FMS) experiment preprocessing.

    Stimulus conditions vary along two axes, sound level ("decB") and sweep
    rate ("rate"); the per-condition processing window follows the sweep
    duration computed from the rate.
    """
    def __init__(self, nwb_file):
        super(FMS, self).__init__(nwb_file)
        # Window bounds depend on the per-condition sweep time.
        self.set_processing_window()
    def set_stimuli_conditions(self):
        """Collect the unique (rate, decB) stimulus pairs from self.info."""
        stimuli = [(condition["rate"], condition["decB"]) for condition in self.info]
        unique_stimuli = unique(stimuli, axis=0)
        self.stimuli_conditions = [{'name': 'Level, dB',
                                    'key': 'decB',
                                    'value': unique_stimuli[:, 1]},
                                   {'name': 'Sweep rate, oct/s',
                                    'key': 'rate',
                                    'value': unique_stimuli[:, 0]}]
    def set_repetitions(self):
        # Number of trials per (decB, rate) condition, counted via start_time.
        columns = [stimuli["key"] for stimuli in self.stimuli_conditions]
        self.repetitions = pd.DataFrame(self.info).groupby(columns).count()["start_time"]
    def compute_sweep_time(self):
        """Per-condition sweep duration.

        Sweep spans 2-48 kHz; duration = octaves / rate, divided by 1000
        (rates presumably expressed per millisecond — TODO confirm), plus a
        fixed 0.09 s padding.
        """
        fq_min = 2000
        fq_max = 48000
        sweep_oct = abs(math.log2(fq_max / fq_min))
        return abs(sweep_oct / self.stimuli_conditions[1]["value"]) / 1000 + 0.09
    def set_processing_window(self):
        # Window opens 10 ms after stimulus onset and closes 10 ms after sweep end.
        sweep_time = self.compute_sweep_time()
        self.processing_window = {'min': [0.01] * len(sweep_time), 'max': sweep_time + 0.01}
    def preprocess(self):
        """Build and register the processing chain (activity, filters, tuning curve)."""
        # Sampling frequency in kHz (TDT hardware rate 24414.0625 Hz / 1000).
        fs = 24414.0625 / 1000
        list_trials = range(len(self.info))
        # Electrode ids are stored as negative impedances in the NWB channels table.
        list_electrodes = -self.channels['imp'].data[()].astype(np.int64)
        activity = DefaultSpikingActivity(fs, list_electrodes, self.spikes, list_trials)
        filtered_activity = FilteredActivity(activity)
        mean_filtered_activity = MeanFilteredActivity(filtered_activity, list_trials)
        tuning_curve = TuningCurve(self.get_stim_spikes(), self.get_spontaneous_spikes(), self.stimuli_conditions,
                                   self.channels, self.repetitions, self.spontaneous_window, filtered_activity, self.info)
        self.processors.append(activity)
        self.processors.append(filtered_activity)
        self.processors.append(mean_filtered_activity)
        self.processors.append(tuning_curve)
    def get_stim_spikes(self):
        """Return spikes falling inside each condition's processing window.

        Lazily annotates self.spikes with an "in_processing_range" flag and the
        condition's "sweep_time"; subsequent calls reuse the cached columns.
        """
        if "in_processing_range" not in self.spikes:
            feature_1_key = self.stimuli_conditions[0]["key"]
            feature_1 = self.stimuli_conditions[0]["value"]
            feature_2_key = self.stimuli_conditions[1]["key"]
            feature_2 = self.stimuli_conditions[1]["value"]
            self.spikes["in_processing_range"] = [False] * len(self.spikes)
            self.spikes["sweep_time"] = [np.nan] * len(self.spikes)
            nb_spikes = 0
            # NOTE(review): unique_feat_1 / unique_feat_2 are computed but never
            # used below — candidates for removal; confirm.
            unique_feat_1 = Counter(self.spikes[feature_1_key].values).values()
            unique_feat_2 = Counter(self.spikes[feature_2_key].values).values()
            for condition_index in range(len(feature_2)):
                # Spikes belonging to this (decB, rate) condition...
                filter_spikes = (self.spikes[feature_1_key] == feature_1[condition_index]) & (
                        self.spikes[feature_2_key] == feature_2[condition_index])
                nb_spikes += np.sum(filter_spikes)
                # ...restricted to the condition's processing window.
                filter_spikes = filter_spikes & (
                        self.processing_window['min'][condition_index] < self.spikes["trial_time"]) & (
                        self.spikes["trial_time"] <= self.processing_window['max'][condition_index])
                # filter_spikes = (self.processing_window['min'][condition_index] < self.spikes["trial_time"]) & (
                #             self.spikes["trial_time"] <= self.processing_window['max'][condition_index])
                self.spikes.loc[filter_spikes, ["in_processing_range"]] = True
                self.spikes.loc[filter_spikes, ["sweep_time"]] = self.processing_window['max'][condition_index] - 0.01
        return self.spikes.loc[self.spikes["in_processing_range"]]
| nilq/baby-python | python |
#!/usr/bin/env python
import rospy
from std_msgs.msg import Bool
from dbw_mkz_msgs.msg import ThrottleCmd, SteeringCmd, BrakeCmd, SteeringReport
from geometry_msgs.msg import TwistStamped
import math
from twist_controller import Controller
'''
You can build this node only after you have built (or partially built) the `waypoint_updater` node.
You will subscribe to `/twist_cmd` message which provides the proposed linear and angular velocities.
You can subscribe to any other message that you find important or refer to the document for list
of messages subscribed to by the reference implementation of this node.
One thing to keep in mind while building this node and the `twist_controller` class is the status
of `dbw_enabled`. While in the simulator, its enabled all the time, in the real car, that will
not be the case. This may cause your PID controller to accumulate error because the car could
temporarily be driven by a human instead of your controller.
We have provided two launch files with this node. Vehicle specific values (like vehicle_mass,
wheel_base) etc should not be altered in these files.
We have also provided some reference implementations for PID controller and other utility classes.
You are free to use them or build your own.
Once you have the proposed throttle, brake, and steer values, publish it on the various publishers
that we have created in the `__init__` function.
'''
class DBWNode(object):
    """Drive-by-wire ROS node.

    Subscribes to the commanded and measured velocities, runs the PID-based
    twist controller, and publishes throttle/brake/steering commands.
    Constructing the node blocks: __init__ ends by entering loop().
    """
    def __init__(self):
        # Initialize the node
        rospy.init_node('dbw_node')
        # Setup the Constants (vehicle parameters; defaults match the simulator).
        # NOTE(review): these params are read but not used below — presumably
        # meant to be forwarded to the Controller; confirm.
        vehicle_mass = rospy.get_param('~vehicle_mass', 1736.35)
        fuel_capacity = rospy.get_param('~fuel_capacity', 13.5)
        brake_deadband = rospy.get_param('~brake_deadband', .1)
        decel_limit = rospy.get_param('~decel_limit', -5)
        accel_limit = rospy.get_param('~accel_limit', 1.)
        wheel_radius = rospy.get_param('~wheel_radius', 0.2413)
        wheel_base = rospy.get_param('~wheel_base', 2.8498)
        steer_ratio = rospy.get_param('~steer_ratio', 14.8)
        max_lat_accel = rospy.get_param('~max_lat_accel', 3.)
        max_steer_angle = rospy.get_param('~max_steer_angle', 8.)
        # Setup the global variables: expected (commanded) vs actual velocities.
        self.exp_lin_vel = 0.0
        self.exp_ang_vel = 0.0
        self.act_lin_vel = 0.0
        self.act_ang_vel = 0.0
        # Tracking information
        self.time = None
        # Debug
        self.run_cnt = 0
        self.dir = 1
        # Setup the publishers
        self.steer_pub = rospy.Publisher('/vehicle/steering_cmd', SteeringCmd, queue_size=1)
        self.throttle_pub = rospy.Publisher('/vehicle/throttle_cmd', ThrottleCmd, queue_size=1)
        self.brake_pub = rospy.Publisher('/vehicle/brake_cmd', BrakeCmd, queue_size=1)
        # Create `TwistController` object (one PID parameter set per actuator).
        throttle_params = {
            'kp': 1.0,
            'ki': 0.0,
            'kd': 0.0,
            'max': float('inf'),
            'min': 0.0,
        }
        brake_params = {
            'kp': 1.0,
            'ki': 0.0,
            'kd': 0.0,
            'max': float('inf'),
            'min': 0.0,
        }
        steer_params = {
            'kp': 3.0,
            'ki': 0.0,
            'kd': 0.0,
            'max': float('inf'),
            'min': float('-inf'),
        }
        self.controller = Controller(throttle_params, brake_params, steer_params)
        # Subscribe to all the topics you need to
        self.sub_twist_cmd = rospy.Subscriber('/twist_cmd', TwistStamped, self.__twist_cb, queue_size=1)
        self.sub_cur_vel = rospy.Subscriber('/current_velocity', TwistStamped, self.__vel_cb, queue_size=1)
        self.loop()
    def loop(self):
        """Main control loop: compute PID outputs and publish at a fixed rate."""
        # Lower the rate to avoid performance issues
        # https://carnd.slack.com/archives/C6NVDVAQ3/p1504061507000179
        rate = rospy.Rate(10) # 10 Hz (lowered from 50 Hz, see link above)
        while not rospy.is_shutdown():
            if self.time is None:
                # First tick: just record the start time.
                self.time = rospy.get_time()
                rospy.loginfo(self.time)
            else:
                # NOTE(review): self.time is never updated after the first tick,
                # so sample_time grows each iteration — confirm intended.
                sample_time = rospy.get_time() - self.time
                lin_err = self.exp_lin_vel - self.act_lin_vel
                ang_err = self.exp_ang_vel - self.act_ang_vel
                rospy.loginfo(sample_time)
                rospy.loginfo(lin_err)
                rospy.loginfo(ang_err)
                # Get predicted throttle, brake, and steering using `twist_controller`
                # You should only publish the control commands if dbw is enabled
                throttle, brake, steering = self.controller.control(sample_time, lin_err, ang_err)
                self.publish(throttle, brake, steering)
            rate.sleep()
    def __twist_cb(self, msg):
        # Commanded velocities from the waypoint follower.
        self.exp_lin_vel = msg.twist.linear.x
        self.exp_ang_vel = msg.twist.angular.z
    def __vel_cb(self, msg):
        # Measured vehicle velocities.
        self.act_lin_vel = msg.twist.linear.x
        self.act_ang_vel = msg.twist.angular.z
    def publish(self, throttle, brake, steer):
        """Publish one throttle (percent), steering (angle) and brake (torque) command."""
        tcmd = ThrottleCmd()
        tcmd.enable = True
        tcmd.pedal_cmd_type = ThrottleCmd.CMD_PERCENT
        tcmd.pedal_cmd = throttle
        self.throttle_pub.publish(tcmd)
        scmd = SteeringCmd()
        scmd.enable = True
        scmd.steering_wheel_angle_cmd = steer
        self.steer_pub.publish(scmd)
        bcmd = BrakeCmd()
        bcmd.enable = True
        bcmd.pedal_cmd_type = BrakeCmd.CMD_TORQUE
        bcmd.pedal_cmd = brake
        self.brake_pub.publish(bcmd)
if __name__ == '__main__':
    # Constructing the node starts the control loop (loop() runs in __init__).
    DBWNode()
#!/usr/bin/env python2
"""Utilities for training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import numpy as np
from caffe2.python import utils as c2_py_utils
from caffe2.python import workspace
from detectron.core.config import cfg
from detectron.utils.logging import log_json_stats
from detectron.utils.logging import SmoothedValue
from detectron.utils.timer import Timer
import detectron.utils.net as nu
class TrainingStats(object):
    """Track vital training statistics."""
    def __init__(self, model):
        # Window size for smoothing tracked values (with median filtering)
        self.WIN_SZ = int(1280 / cfg.NUM_GPUS)
        # Output logging period in SGD iterations
        self.LOG_PERIOD = int(1280 / cfg.NUM_GPUS)
        # One smoother per loss/metric blob.
        self.smoothed_losses_and_metrics = {
            key: SmoothedValue(self.WIN_SZ)
            for key in model.losses + model.metrics
        }
        # Raw (unsmoothed) values for the current iteration.
        self.losses_and_metrics = {
            key: 0
            for key in model.losses + model.metrics
        }
        self.smoothed_total_loss = SmoothedValue(self.WIN_SZ)
        self.smoothed_mb_qsize = SmoothedValue(self.WIN_SZ)
        self.iter_total_loss = np.nan
        self.iter_timer = Timer()
        self.model = model
        # Per-blob memory tracking. NOTE(review): the dict is immediately
        # overwritten with None, which disables GetMem()/memory logging below;
        # restore `self.mem = dict()` to re-enable. Confirm this is intentional.
        self.mem = dict()
        self.mem = None
    def IterTic(self):
        """Start timing the current iteration."""
        self.iter_timer.tic()
    def IterToc(self):
        """Stop timing; return this iteration's (unaveraged) duration."""
        return self.iter_timer.toc(average=False)
    def ResetIterTimer(self):
        self.iter_timer.reset()
    def UpdateIterStats(self):
        """Update tracked iteration statistics."""
        # Average each loss/metric blob across GPUs, then feed the smoothers.
        for k in self.losses_and_metrics.keys():
            self.losses_and_metrics[k] = nu.average_multi_gpu_blob(k)
        for k, v in self.smoothed_losses_and_metrics.items():
            v.AddValue(self.losses_and_metrics[k])
        self.iter_total_loss = np.sum(
            np.array([self.losses_and_metrics[k] for k in self.model.losses]))
        self.smoothed_total_loss.AddValue(self.iter_total_loss)
        # Minibatch queue size shows whether data loading keeps up with training.
        self.smoothed_mb_qsize.AddValue(
            self.model.roi_data_loader._minibatch_queue.qsize())
        if self.mem is not None:
            self.GetMem()
    def LogIterStats(self, cur_iter, lr):
        """Log the tracked statistics."""
        if (cur_iter % self.LOG_PERIOD == 0
                or cur_iter == cfg.SOLVER.MAX_ITER - 1):
            stats = self.GetStats(cur_iter, lr)
            log_json_stats(stats)
        if self.mem is not None:
            # Print blobs sorted by size (ascending).
            mem_sorted = sorted(self.mem.items(), key=lambda d: d[1])
            print(mem_sorted)
    def GetStats(self, cur_iter, lr):
        """Assemble the stats dict (iter, lr, timing, loss, ETA, GPU memory)."""
        eta_seconds = self.iter_timer.average_time * (
            cfg.SOLVER.MAX_ITER - cur_iter)
        eta = str(datetime.timedelta(seconds=int(eta_seconds)))
        mem_stats = c2_py_utils.GetGPUMemoryUsageStats()
        mem_usage = np.max(mem_stats['max_by_gpu'][:cfg.NUM_GPUS])
        stats = dict(
            iter=cur_iter,
            lr=float(lr),
            time=self.iter_timer.average_time,
            loss=self.smoothed_total_loss.GetAverageValue(),
            eta=eta,
            mb_qsize=int(np.round(self.smoothed_mb_qsize.GetAverageValue())),
            mem=int(np.ceil(mem_usage / 1024 / 1024)))  # bytes -> MiB
        for k, v in self.smoothed_losses_and_metrics.items():
            stats[k] = v.GetAverageValue()
        return stats
    def is_grad(self, b):
        # Gradient blobs are identified by naming convention.
        name = str(b)
        return name.endswith("_grad")
    def is_shared(self, b):
        # Shared (memonger) blobs are identified by naming convention.
        name = str(b)
        return name.endswith("_shared")
    def GetMem(self):
        """Record the max size seen for every gradient/shared blob."""
        for op_idx in range(len(self.model.net._net.op)):
            op = self.model.net._net.op[op_idx]
            for b in list(op.output):
                # Only track gradient and shared blobs; skip everything else.
                if self.is_grad(b):
                    pass
                elif self.is_shared(b):
                    pass
                else:
                    continue
                blob = workspace.FetchBlob(str(b))
                # NOTE(review): membership is tested with `b` but entries are
                # stored under `str(b)` — if b is not already a str the check
                # never hits; confirm key type.
                if b not in self.mem.keys():
                    self.mem[str(b)] = 0
                self.mem[str(b)] = max(self.mem[str(b)], blob.size)
| nilq/baby-python | python |
import json
# Application-wide settings, loaded once at import time from the JSON config
# file. json.load already returns a dict of the file's top-level object, so
# the manual key-by-key copy loop was redundant; wrap in dict() to guarantee
# AppSettings is a plain mutable dict.
with open('app.config') as json_data:
    AppSettings = dict(json.load(json_data))
| nilq/baby-python | python |
# assign this folder as a namespace package (pkgutil-style): other installed
# distributions can contribute modules under the same package name.
__path__ = __import__('pkgutil').extend_path(__path__, __name__)
# __all__ = ['flow']
| nilq/baby-python | python |
from os.path import expanduser, exists
from ansible.parsing.vault import VaultLib, VaultSecret
from yaml import load, SafeLoader
class VaultReader:
    """
    Read and decrypt data from an Ansible vault file.
    """
    def __init__(self, vault_file, vault_pass):
        """
        Create a vault reader.

        :param vault_file: path to an ansible vault file (``~`` is expanded)
        :param vault_pass: the vault file's password as bytes
        :raises FileNotFoundError: if vault_file does not exist
        :raises TypeError: if vault_pass is not bytes
        """
        # Raise the specific builtin exceptions instead of bare Exception;
        # both subclass Exception, so existing `except Exception` callers
        # keep working.
        if not exists(expanduser(vault_file)):
            raise FileNotFoundError(f"No such file: {vault_file}")
        if not isinstance(vault_pass, bytes):
            raise TypeError("Vault pass must be instance of `bytes`")
        self.vault_file = vault_file
        self.vault_pass = vault_pass
    @property
    def secrets(self):
        # Ansible expects an iterable of (vault_id, secret) pairs; expose the
        # single password under the "default" vault id.
        return dict(
            default=VaultSecret(self.vault_pass),
        )
    def read(self):
        """
        Read vault data as a Python dictionary.

        Decrypts the file with the stored password and parses the plaintext
        as YAML (SafeLoader).
        """
        with open(expanduser(self.vault_file), "rb") as vault_file:
            encrypted = vault_file.read()
        vault_lib = VaultLib(self.secrets.items())
        plaintext = vault_lib.decrypt(encrypted, filename=self.vault_file)
        return load(plaintext, Loader=SafeLoader)
| nilq/baby-python | python |
import pymongo
import os
def get_mongo_config():
    """Build the MongoDB connection settings.

    Credentials come from the MDBUSR / MDBPWD environment variables; host and
    port are fixed for the docker-compose network (service name "mongo").
    """
    return {
        'username': os.environ['MDBUSR'],
        'password': os.environ['MDBPWD'],
        'host': 'mongo',
        'port': 27017,
    }
def connect_to_mongodb():
    """Connect to MongoDB and return the alliance-specific database.

    Calls server_info() to force an immediate round-trip, so an unreachable
    server fails here instead of on first use. Any pymongo error propagates
    to the caller unchanged (the original ``try/except Exception: raise``
    wrapper was a no-op and has been removed).

    :returns: the database named by the ALLIANCE_UID environment variable
    """
    config = get_mongo_config()
    mc = pymongo.MongoClient(**config)
    # This is so that we check that the connection is live
    mc.server_info()
    # TODO: validate that ALLIANCE_UID is set before using it as a DB name.
    mdb = mc[os.environ['ALLIANCE_UID']]
    return mdb
| nilq/baby-python | python |
from typing import List, Sequence
def remove_redundant_fao_area_codes(s: Sequence[str]) -> List[str]:
    """Keep only the smallest, non-overlapping FAO areas of the input.

    An area is dropped when its code occurs inside another code of the input
    (e.g. '27' is contained in '27.8.a'), which marks it as an enclosing,
    less precise area. Useful to prune the result of intersecting a geometry
    with all FAO areas, where only the most precise areas matter.

    Args:
        s (Sequence[str]): list of FAO areas.

    Returns:
        List[str]: subset of the input sequence, e.g. ['27.8.a', '27', '37.1']
        reduces to ['27.8.a', '37.1'] (in set iteration order).
    """
    areas = set(s)
    return [
        area
        for area in areas
        if not any(area in other for other in areas - {area})
    ]
| nilq/baby-python | python |
# Skill to ask the robot to guide us somewhere
import core.robot as robot
from core.skills.ArgSkill import ArgSkill
from core.communication import *
# Map of labelled locations the robot can navigate to.
# NOTE(review): `json` is not imported in this file directly — presumably it
# comes from the `from core.communication import *` wildcard; verify.
with open('core/map.json', encoding='utf-8') as data_file:
    map = json.load(data_file)
# Trigger phrases (French) that mark an utterance as a guidance request.
phrases = [
    "guide moi",
    "amène moi",
    "amene moi",
    "emmene moi",
    "ou est",
    "je veux aller",
    "je dois aller",
    "j'aimerai aller",
    "je voudrais aller",
    "va à",
    "va au"
]
# Single keywords that also trigger the skill.
words = [
    "guide",
    "amene",
    "emmene"
]
# Words that must not appear in the utterance (none for this skill).
badwords = [
]
def response(orderJson):
    """Handle a guidance request.

    Scans the recognised utterance for one of the map's location labels,
    asks the user for confirmation on the first pass, then drives the robot
    to the target once confirmed. Returns the sentence the robot should say
    ("" while waiting for the confirmation round-trip).
    """
    order = orderJson["msg"]
    for l in map["label"]:
        # Compare the normalised label against the raw utterance.
        if cleanString(l["text"]) in order:
            if orderJson["type"]=="confirmation":
                if isConfirmation(orderJson["answer"]):
                    robot.goto(l["text"])
                    return("Je vous amène à "+l["text"])
                else:
                    return "Dommage, j'aime bien me ballader"
            else:
                # First pass: ask for confirmation and reply with silence.
                askConfirmation("Voulez vous que je vous amène à "+l["text"]+" ?", orderJson["msg"], orderJson["client"])
                return ""
    return("Je n'ai pas compris votre destination.")
# Register the guidance skill.
ArgSkill(phrases, words,badwords, response)
# --- Second skill: move forward ---
phrases2 = ["Avance", "Avance d'un mètre", "Avance un peu"]
words2 = ["avance"]
badwords2 = []
def response2(orderJson):
    """Move the robot one step forward and announce it."""
    robot.forward()
    return("Chaud devant !")
ArgSkill(phrases2, words2, badwords2, response2)
| nilq/baby-python | python |
import math
import sys
import time
import pybullet as p
import pybullet_data
import model as m
import util as ut
# Demo modes selectable from the command line.
BOUNCE = "bounce"
ROLL = "roll"
# Physics step length in seconds (also the target wall-clock frame time).
TIME_STEP_S = 0.01
def configPyBullet():
    """Connect to pybullet with a GUI, configure gravity/time step, add a ground plane.

    Returns the physics client id.
    """
    physicsClient = p.connect(p.GUI)
    p.setAdditionalSearchPath(pybullet_data.getDataPath()) # used by loadURDF
    p.resetSimulation()
    p.setGravity(0,0,-9.8) # m/s^2
    p.setTimeStep(TIME_STEP_S) # sec
    p.setRealTimeSimulation(0)
    #planeId = p.loadURDF("plane.urdf")
    # Ground plane built from a collision shape instead of plane.urdf.
    p.createCollisionShape(p.GEOM_PLANE)
    p.createMultiBody(0, 0)
    return physicsClient#, planeId
def reset():
    """Clear the simulation world (all bodies and constraints)."""
    p.resetSimulation()
def step():
    """Advance the physics simulation by one TIME_STEP_S step."""
    p.stepSimulation()
def main():
    """Set up the physics client and build the robot model."""
    physicsClient = configPyBullet()
    m.build()
def run(arg):
    """Run the simulation loop forever, stepping in (approximately) real time.

    :param arg: demo mode; ROLL applies constant wheel torques, anything
        else leaves the model passive.
    """
    action = []
    if ROLL == arg:
        # Torques for the four wheels (left pair forward, right pair reversed).
        action = [.1,.1,-.1,-.1]
    while (1):
        start = time.time()
        m.update()
        m.act(action)
        step()
        # NOTE(review): keyboard events are read but never used — confirm intent.
        keys = p.getKeyboardEvents()
        stop = time.time()
        delta = stop - start
        # Sleep off the remainder of the frame so the sim runs near real time.
        if delta < TIME_STEP_S:
            time.sleep(TIME_STEP_S - delta)
if __name__ == '__main__':
    # Optional command-line mode: "bounce" drops the model from 5 m up.
    arg = None
    if len(sys.argv) > 1:
        arg = sys.argv[1]
    if arg == BOUNCE:
        m.POSITION = (0,0,5)
    main()
    run(arg)
| nilq/baby-python | python |
import datetime as dt
import json
from json import encoder
import xarray as xr
import numpy as np
from pandas import to_datetime
from collections import OrderedDict
import dateutil
import logging
import six
logging.basicConfig()
# Compact float formatting for json output.
# NOTE(review): FLOAT_REPR only affects the pure-Python encoder and is ignored
# by the C-accelerated encoder in modern Python versions — confirm it still
# has an effect on the targeted interpreter.
encoder.FLOAT_REPR = lambda o: format(o, '.4f').rstrip('0').rstrip('.')
# Coordinate/axis variables that are listed first in the JSON output.
AXIS_VAR=['time','lat','latitude','lon','longitude','site']
# Variable attributes that are never copied to the JSON output.
SPECIAL_ATTRS=['missing_value','cell_methods']
@xr.register_dataset_accessor('cfjson')
class CFJSONinterface(object):
    """xarray Dataset accessor (``ds.cfjson``) converting to/from CF-JSON."""
    def __init__(self, xarray_obj):
        self._obj=xarray_obj
    def to_dict(self,mapping):
        """
        Dumps the dataset as an ordered dictionary following the same conventions as ncdump.

        :param mapping: dict renaming dataset variables to output names
        """
        res=OrderedDict()
        try:
            res['dimensions']=OrderedDict()
            for dim in self._obj.dims:
                # Singleton dimensions are squeezed out of the output.
                if self._obj.dims[dim]>1:
                    res['dimensions'][dim]=self._obj.dims[dim]
        except:
            # NOTE(review): bare except — narrow to the expected exception type.
            print('Failed to export dimensions')
            raise
        try:
            res['attributes']=OrderedDict()
            res['attributes'].update(self._obj.attrs)
        except:
            # NOTE(review): `att` is undefined in this scope — this handler
            # itself would raise NameError if ever reached.
            print('Failed to export all global_attribute %s'%(att))
        res['variables']=OrderedDict()
        #Put axis variables first
        for special_var in AXIS_VAR:
            if special_var in self._obj.variables.keys():
                res['variables'][special_var]=None
        # First pass: shapes and attributes.
        for var in self._obj.variables:
            try:
                if var=='dum1': #This is a UDS artefact
                    continue
                if var=='time':
                    # Time is always serialised as ISO8601 strings.
                    res['variables']['time']={
                        'shape':['time'],
                        'attributes':{'units':'ISO8601 datetimes'}
                    }
                    continue
                # Keep only dimensions that survived the >1 filter above.
                vardims=[d for d in self._obj.variables[var].dims if d in res['dimensions']]
                varout=mapping.get(var,var)
                res['variables'][varout]={'attributes':OrderedDict()}
                if vardims:
                    res['variables'][varout]['shape'] = vardims
                else:
                    res['variables'][varout]['shape'] = []
                for att in self._obj.variables[var].attrs:
                    if att not in SPECIAL_ATTRS:
                        newatt=self._obj.variables[var].attrs[att]
                        # Coerce attributes to float when possible, else str.
                        try:
                            newatt=float(newatt)
                        except:
                            newatt=str(newatt)
                        res['variables'][varout]['attributes'][att]=newatt
            except:
                print('Failed to export variable %s description or attributes'%(var))
                raise
        # Second pass: data values.
        for var in self._obj.variables:
            varout=mapping.get(var,var)
            try:
                if var=='dum1':
                    continue
                rawvals=np.atleast_1d(self._obj.variables[var].values.squeeze())
                if var == 'time':
                    vals=[t.strftime('%Y-%m-%dT%H:%M:%SZ') for t in to_datetime(rawvals)]
                    res['variables'][varout]['data']=vals
                else:
                    res['variables'][varout]['data']=rawvals.tolist()
            except:
                print('Failed to export values for variable %s'%(var))
                raise
        return res
    def json_dumps(self, indent=2, separators=None, mapping={}, attributes={}):
        """
        Dumps a JSON representation of the Dataset following the same conventions as ncdump.
        Assumes the Dataset is CF complient.

        NOTE(review): `mapping`/`attributes` are mutable default arguments,
        shared across calls; safe only while they are never mutated here.
        """
        dico=self.to_dict(mapping)
        try:
            dico['attributes'].update(attributes)
        except:
            print('Failed to set global attributes %s'%(attributes))
        # JSON has no NaN literal; emit null instead.
        return json.dumps(dico, indent=indent, separators=separators).replace('NaN','null')
    def from_json(self, js):
        """Convert CF-JSON string or dictionary to xarray Dataset
        Example:
            import xarray as xr
            from cfjson import xrdataset
            cfjson_string = '{"dimensions": {"time": 1}, "variables": {"x": {"shape": ["time"], "data": [1], "attributes": {}}}}'
            dataset = xr.Dataset()
            dataset.cfjson.from_json(cfjson_string)
        """
        if isinstance(js, six.string_types):
            try:
                # Preserve key order from the JSON document.
                dico = json.JSONDecoder(object_pairs_hook=OrderedDict).decode(js)
            except:
                print('Could not decode JSON string')
                raise
        else:
            dico = js
        if 'attributes' in dico.keys():
            # Copy global attributes
            logging.debug('copying global attributes: {}'.format(dico['attributes'].items()))
            for k,v in six.iteritems(dico['attributes']):
                self._obj.attrs[k] = v
        else:
            logging.debug('no global attributes found')
        # Copy variables and their attributes and dimensions
        for varname,var in six.iteritems(dico['variables']):
            logging.debug('copying variable "{}" data'.format(varname))
            if "shape" not in var.keys():
                logging.warning("missing shape (required by CF-JSON spec >=0.2)")
                var["shape"] = []
            # Ideally we'd use udunits to find "time" variables, but tricky in
            # Python (cf_units doesn't seem to provide utScan or utIsTime)...
            if 'units' in var['attributes'] and 'ISO8601' in var['attributes']['units']:
                logging.debug('found "ISO8601" in units string, guessing time variable, converting to datetime64')
                time_strings = var['data']
                time_dt = [dateutil.parser.parse(tx) for tx in time_strings]
                # If timezone information was provided (e.g., "Z")
                if any([t.utcoffset() is not None for t in time_dt]):
                    if all([t.utcoffset() == dt.timedelta(0) for t in time_dt]):
                        # Timezone-aware, even if they're all the same timezone, would lead to dtype=object
                        time_dt = [t.replace(tzinfo=None) for t in time_dt]
                    else:
                        logging.warning('Mixed timezones (or mixed naive / aware) in input, may lead to dtype=object in output')
                self._obj[varname] = (var['shape'], time_dt)
                logging.debug('copying variable "{}" attributes: {}'.format(varname, var['attributes'].items()))
                self._obj[varname].attrs = var['attributes']
                self._obj[varname].attrs['units'] = 'Python datetime64 objects'
            else:
                if var['shape']:
                    # shape=['dim1'] (in contrast to shape=[])
                    self._obj[varname] = (var['shape'], var['data'])
                else:
                    # shape=[] is allowed, but a bit more tricky...
                    if isinstance(var['data'], list):
                        if len(var['data']) > 1:
                            msg = 'len(data) > 1 not allowed with empty / missing shape; varname: {}'.format(varname)
                            raise Exception(msg)
                        # shape=[] with data=[1.2] (in contrast to data=1.2)
                        self._obj[varname] = (var['shape'], var['data'][0])
                    else:
                        # shape=[] with data=1.2 (in contrast to data=[1.2])
                        self._obj[varname] = (var['shape'], var['data'])
                    # TODO: is shape=[] with data=[] allowed and needs to be handled?
                logging.debug('copying variable "{}" attributes: {}'.format(varname, var['attributes'].items()))
                # Some cases result in a dtype=object array with None elements,
                # but if this is just due to a mix of "null" and numeric values,
                # we can avoid some downstream problems by casting now, which
                # should also convert any None values to numpy NaN.
                if self._obj[varname].dtype == 'O':
                    dtype_set = set([type(el) for el in self._obj[varname].data.flatten()])
                    if str not in dtype_set:
                        if float not in dtype_set:
                            logging.warning('casting variable "{}" to float to preserve None / NaN, but no floats in original data'.format(varname))
                        self._obj[varname] = self._obj[varname].astype(float)
                self._obj[varname].attrs = var['attributes']
if __name__ == '__main__':
    # CLI: convert a netCDF file to CF-JSON, printing to stdout or writing
    # to the optional output path.
    import sys
    if len(sys.argv)<2:
        print('Usage: xarray.py netcdf_file [json_file]')
    else:
        nc=xr.open_dataset(sys.argv[1])
        s=nc.cfjson.json_dumps(indent=2)
        if len(sys.argv)<3:
            print(s)
        else:
            f=open(sys.argv[2],'w')
            f.write(s)
            f.close()
| nilq/baby-python | python |
# One-shot stdin/stdout script: read two integers A and B (one per line).
# NOTE(review): appears to compute a minimal round-trip length to reach two
# points on a number line starting from the origin — confirm against the
# original problem statement.
A, B = int(input()), int(input())
if A * B > 0:
    # Same side of the origin: the farther point covers the nearer one.
    print(2 * max(abs(A), abs(B)))
else:
    # Opposite sides (or one at zero): both excursions out and back.
    print(2 * abs(A) + 2 * abs(B))
| nilq/baby-python | python |
from Crypto.Cipher import AES
from base64 import b64encode, b64decode
# AES block size in bytes; plaintext (and keys) are padded to a multiple of it.
BLOCK_SIZE = 16
PADDING = '{'
# Right-pad a string with PADDING up to the next BLOCK_SIZE boundary (always
# adds at least one block). NOTE(review): pad() is also applied to keys in
# encrypt()/decrypt(); that only yields a valid AES key length (16/24/32)
# for some input key lengths — confirm the expected key sizes.
pad = lambda s: s + (BLOCK_SIZE - len(s) % BLOCK_SIZE) * PADDING
def encrypt(key, msg):
    """Encrypt msg with the padded key and return the ciphertext base64-encoded."""
    # NOTE(security): AES.new with only a key uses ECB mode — deterministic and
    # pattern-leaking; prefer an authenticated mode (e.g. GCM) with a random nonce.
    cipher = AES.new(pad(key).encode('utf-8'))
    return b64encode(cipher.encrypt(pad(msg))).decode('utf-8')
def decrypt(key, msg):
    """Mirror of encrypt(): base64-decode, AES-decrypt, strip the '{' padding."""
    # NOTE(review): rstrip(PADDING) also removes genuine trailing '{' characters
    # from the original plaintext — acceptable only if messages never end in '{'.
    cipher = AES.new(pad(key).encode('utf-8'))
    return cipher.decrypt(b64decode(msg)).decode('utf-8').rstrip(PADDING)
| nilq/baby-python | python |
"""Doxygen module.
Create project's documentation.
Website: http://www.doxygen.org
"""
import os
def doxygen(loader, project=None, variant=None, *args): #pylint:disable=keyword-arg-before-vararg
    """Run doxygen on the project's configured Doxyfile.

    Extra positional *args are passed through to the doxygen binary.
    Replaces the current process via os.execvp, so this never returns.
    """
    loader.setup_project_env(project, variant)
    loader.setup_virtualenv()
    loader.setup_shell_env()
    config = loader.get_project_config()
    # 'doxygen.config' points at the Doxyfile inside the project configuration.
    binargs = ['doxygen', config['doxygen.config']] + list(args)
    os.execvp(binargs[0], binargs)

# Commands exported by this module to the task loader.
commands = (doxygen,)
| nilq/baby-python | python |
import sys
sys.path.insert(0, '..')
import numpy as np
import pandas as pd
from itertools import combinations
from scipy.stats import binom
import scipy.special
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from IPython.display import display, HTML
#sys.path.append("../")
from FrameBuilder.eigenstepsbuilder import *
from decimal import *
from copy import deepcopy
import matplotlib.lines as mlines
import matplotlib.transforms as mtransforms
from env.numerical_analysis_dpp import *
from env.plot_functions import *
def swap_elements_of_list(list_1,indices):
    """Return the elements of ``list_1`` selected by ``indices``, in that order.

    The input list is deep-copied first, so the returned elements never share
    mutable state with the caller's objects.
    """
    snapshot = deepcopy(list_1)
    return [snapshot[position] for position in indices]
def extract_first_elements(list_1):
    """Return the distinct values of ``list_1`` sorted ascending, together with
    the index of each value's first occurrence (in the same sorted order).

    NOTE(review): assumes integer values and that every integer in
    [min(list_1), max(list_1)] actually occurs; if the range has gaps, the
    while loop below never terminates. Confirm with callers.
    """
    list_1_ = deepcopy(list_1)
    max_list_1 = max(list_1_)
    min_list_1 = min(list_1_)
    # Number of distinct values expected, assuming a dense integer range.
    effective_number = max_list_1 - min_list_1+1
    N = len(list_1_)
    index_of_firsts = [0]*effective_number
    index_of_firsts[0] = 0
    counter = 1
    counter_on_N = 1
    # Values already seen during the scan.
    hit_indices = [list_1_[0]]
    # Scan until one first-occurrence index is recorded per distinct value.
    while counter<effective_number:
        if not(list_1_[counter_on_N] in hit_indices):
            index_of_firsts[counter] = counter_on_N
            counter = counter +1
            hit_indices.append(list_1_[counter_on_N])
        counter_on_N = counter_on_N +1
    # Values at the first-occurrence positions, then sort both lists by value.
    list_2_ = [list_1_[i] for i in index_of_firsts]
    I_arg_sort = np.argsort(list_2_)
    list_3_ = []
    sorted_index_of_firsts = []
    for i in I_arg_sort:
        sorted_index_of_firsts.append(index_of_firsts[i])
        list_3_.append(list_1_[index_of_firsts[i]])
    return list_3_,sorted_index_of_firsts
#
#def extract_first_elements(list_1,list_2):
#
#
# list_1_ = deepcopy(list_1)
# list_2_ = deepcopy(list_2)
# max_list_1 = max(list_1_)
# min_list_1 = min(list_1_)
#
# array = np.array(list_1_)
# effective_number = max_list_1 - min_list_1
# N = len(list_1_)
# index_of_firsts = [0]*effective_number
# index_of_firsts[0] = 0
# counter = 1
# counter_on_N = 1
# while counter<effective_number-2:
# if list_1_[counter_on_N] != list_1_[counter_on_N-1]:
# index_of_firsts[counter] = counter_on_N
# counter = counter +1
# counter_on_N = counter_on_N +1
# return [list_1_[i] for i in index_of_firsts],[list_2_[i] for i in index_of_firsts]
#
def generate_list_of_list_from_list(list_1):
    """Wrap every element of ``list_1`` in its own single-element list.

    Elements are deep-copied, so the result shares no mutable state with
    the input.
    """
    return [[element] for element in deepcopy(list_1)]
def plot_results_of_multi_experiments(N,real_dim,r,T_,k_,mean,cov_,static_list_,activate_correction_factor,file_name_comment):
#print(np.diag(cov_))
lv_scores_vector = k_/real_dim*np.ones(real_dim) # The vector of leverage scores (the last one)
T = deepcopy(T_) # The number of experiments
versions_number = 1
epsilon_vizualisation = 0.01
k = deepcopy(k_)
cov_1 = deepcopy(cov_)
volume_sampling_fro_list = []
projection_dpp_fro_list = []
p_eff_list = []
cardinal_list = []
cardinal_global_list_list = []
avoiding_proba_list = []
static_list = deepcopy(static_list_)
volume_sampling_fro_list = []
projection_dpp_fro_list = []
#derandomized_projection_dpp_fro_list = []
greedy_selection_fro_list = []
effective_kernel_fro_list = []
p_eff_list = []
p_eff_list_list = []
cardinal_global_list = []
theta_list = []
theta_complete_list = []
theoretical_bound_avoiding_probability_list = []
static_list_len = len(static_list)
static_list_counter = 0
matrix_rank = min(np.count_nonzero(cov_),N)
correction_factor = 1
if activate_correction_factor == 1:
beta_factor = cov_[k,k]/cov_[matrix_rank-1,matrix_rank-1]
dimension_factor = (real_dim - k_)/(matrix_rank - k_)
correction_factor = np.float(beta_factor)**2*np.float(dimension_factor)
for t in range(T):
print("Matrix number")
print(t)
#print(correction_factor)
#print("real_dim")
#print(real_dim)
cardinal_list_element = static_list[static_list_counter] #list(np.random.choice(static_list, 1))
cardinal_list = [static_list[static_list_counter]] #list(np.random.choice(static_list, 1))
static_list_counter = static_list_counter +1
if static_list_counter == static_list_len:
static_list_counter = 0
NAL_1 = Numrerical_Analysis_DPP(N,real_dim,r,k,versions_number,mean,cov_1,lv_scores_vector,cardinal_list)
#print("NAL")
projection_DPP_res_fro_1 = (1-epsilon_vizualisation)*NAL_1.get_expected_error_fro_for_projection_DPP()
volume_sampling_res_fro_1 = (1-epsilon_vizualisation)*NAL_1.get_expected_error_fro_for_volume_sampling()
#derandomized_DPP_res_fro_1 = NAL_1.get_error_fro_for_derandomized_projection_DPP_selection()
greedy_selection_res_fro_1 = NAL_1.get_error_fro_for_deterministic_selection()
effective_kernel_sampling_res_fro_1 = NAL_1.get_expected_error_fro_for_effective_kernel_sampling()
# upper_tight_bound_projection_DPP_res_fro_1 = NAL_1.get_tight_upper_bound_error_fro_for_projection_DPP()
# alpha_sum_res_1 = NAL_1.get_alpha_sum_k_leverage_scores(1)
# sum_U_res_1 = NAL_1.get_sum_k_leverage_scores()
p_eff_res_1 = NAL_1.get_p_eff_leverage_scores()
avoiding_proba_res_1,theta_list,avoiding_proba_theoretical_list = NAL_1.get_avoiding_probability()
avoiding_proba_list.append(avoiding_proba_res_1)
greedy_selection_fro_list.append(greedy_selection_res_fro_1)
theta_complete_list.append(theta_list)
#theoretical_bound_avoiding_probability_list.append(avoiding_proba_theoretical_list)
#derandomized_projection_dpp_fro_list.append(derandomized_DPP_res_fro_1)
effective_kernel_fro_list.append(list(effective_kernel_sampling_res_fro_1))
volume_sampling_fro_list.append(list(volume_sampling_res_fro_1))
projection_dpp_fro_list.append(list(projection_DPP_res_fro_1))
p_eff_list_list.append(list(p_eff_res_1))
p_eff_list_element = int(p_eff_res_1[0])
p_eff_list.append(p_eff_list_element)
cardinal_global_list.append(cardinal_list_element)
cardinal_global_list_list.append(cardinal_list)
#print("next")
for theta in theta_list:
theoretical_bound_avoiding_probability_list.append(1/theta)
#avoiding_proba_list,theta_list = NAL_1.get_avoiding_probability()
#versions_number = int(len(avoiding_proba_list)/len(theta_list))
#ones_list = [1]*versions_number
#theta_complete_list = list(np.kron(ones_list,theta_list))
flattened_cardinal_list= [item for items in cardinal_global_list_list for item in items]
flattened_p_eff_list= [item for items in p_eff_list_list for item in items]
theoretical_projection_DPP_error_bound_list_pre_factor = from_p_eff_to_error_bound(flattened_cardinal_list,k,real_dim)
theoretical_projection_DPP_error_bound_list = [correction_factor * i for i in theoretical_projection_DPP_error_bound_list_pre_factor]
theoretical_effective_kernel_error_bound_list_pre_factor = from_p_eff_to_error_bound_2(flattened_p_eff_list,k,real_dim)
theoretical_effective_kernel_error_bound_list = [correction_factor * i for i in theoretical_effective_kernel_error_bound_list_pre_factor]
cardinal_global_list_len = len(cardinal_global_list_list)
volume_sampling_fro_bound_list = [k+1]*cardinal_global_list_len
error_lists = []
error_lists.append(volume_sampling_fro_bound_list)
error_lists.append(volume_sampling_fro_list)
error_lists.append(projection_dpp_fro_list)
error_lists.append(theoretical_projection_DPP_error_bound_list)
legends_list = []
legends_list.append("Borne th. VS")
legends_list.append("VS")
legends_list.append("PPD")
legends_list.append("Borne th. PPD")
axislabel_list = []
axislabel_list.append(r'$\mathrm{p}$')
filename_list = []
filename_list.append("dpp_k_")
filename_list.append(str(k))
filename_list.append(str(T))
filename_list.append(str(N))
filename_list.append(file_name_comment)
plot_approximation_errors_on_toy_datasets(cardinal_global_list,cardinal_global_list_list,error_lists,legends_list,axislabel_list,filename_list)
# palette_paired = plt.get_cmap('Paired')
# #palette_PuBuGn = plt.get_cmap('PuBuGn')
#
#
# plt.scatter(cardinal_global_list,volume_sampling_fro_bound_list,label="Volume sampling bound",marker='_',color=palette_paired(1))
# plt.scatter(cardinal_global_list,volume_sampling_fro_list,label="Volume sampling",marker='_',color=palette_paired(0))
# plt.scatter(cardinal_global_list,projection_dpp_fro_list,label="Projection DPP",marker='_',color=palette_paired(4))
# #plt.scatter(cardinal_list,derandomized_projection_dpp_fro_list,label="derandomized projection dpp", marker='_')
# plt.scatter(cardinal_global_list,theoretical_projection_DPP_error_bound_list,marker='_',label="Projection DPP bound",color=palette_paired(5))
# plt.xlabel(r'$\mathrm{p}$', fontsize=12)
# plt.ylabel(r'$\mathrm{\mathbb{E} \|\| X- \pi_{C} X \|\| _{Fr}^{2}}$', fontsize=12)
# plt.title('The case k = '+str(k)+', '+str(T)+' matrices')
# #plt.xticks(map(int, Y_cov[:-1]))
# plt.legend(bbox_to_anchor=(0.495,0.34), loc="upper left")
# plt.xticks(range(4, 21, 1), fontsize=12)
# figfile_title= "dpp_k_"+str(k)+"_matrices_number_"+str(T)+"_N_"+str(N)+"_"+file_name_comment+".pdf"
# plt.savefig(figfile_title)
# plt.show()
#####
#####
#####
legends_list = []
legends_list.append("V.S. bound")
legends_list.append("V.S.")
legends_list.append("R.P. DPP")
legends_list.append("R.P. DPP")
error_lists = []
axislabel_list = []
axislabel_list.append(r'$\mathrm{p_{eff}}(\frac{1}{2})$')
#print(np.shape(volume_sampling_fro_bound_list))
#print(p_eff_list)
#print(error_lists[0])
#print(volume_sampling_fro_bound_list)
p_eff_list_len = len(p_eff_list)
#error_list_len = len(error_lists[0])
p_eff_list_temp,indices_list = extract_first_elements(p_eff_list)
p_eff_list = swap_elements_of_list(p_eff_list,indices_list)
#print(p_eff_list)
#p_eff_list = p_eff_list_temp + p_eff_list
#p_eff_list = p_eff_list[0:error_list_len-p_eff_list_len]
#print(len(p_eff_list))
p_eff_list_list_temp = generate_list_of_list_from_list(p_eff_list)
#p_eff_list_list = p_eff_list_list_temp + p_eff_list_list
volume_sampling_fro_bound_list_ = swap_elements_of_list(volume_sampling_fro_bound_list,indices_list)
theoretical_effective_kernel_error_bound_list_ = swap_elements_of_list(theoretical_effective_kernel_error_bound_list,indices_list)
error_lists.append(volume_sampling_fro_bound_list_)
error_lists.append(volume_sampling_fro_list)
error_lists.append(effective_kernel_fro_list)
error_lists.append(theoretical_effective_kernel_error_bound_list_)
filename_list = []
filename_list.append("effective_kernel_k_")
filename_list.append(str(k))
filename_list.append(str(T))
filename_list.append(str(N))
filename_list.append(file_name_comment)
plot_approximation_errors_effective_kernel_on_toy_datasets(p_eff_list,p_eff_list_list,error_lists,legends_list,axislabel_list,filename_list)
# plt.scatter(p_eff_list,volume_sampling_fro_bound_list,label="Volume sampling bound",marker='_',color=palette_paired(1))
# plt.scatter(p_eff_list,volume_sampling_fro_list,label="Volume Sampling",marker='_',color=palette_paired(0))
# #plt.scatter(p_eff_list,derandomized_projection_dpp_fro_list,label="derandomized projection dpp", marker='_')
# plt.scatter(p_eff_list,effective_kernel_fro_list,label="Effective kernel",marker='_',color=palette_paired(4))
# plt.scatter(p_eff_list,theoretical_effective_kernel_error_bound_list,marker='_',label="Effective kernel bound",color=palette_paired(5))
# plt.xlabel(r'$\mathrm{p_{eff}(\frac{1}{2})}$', fontsize=12)
# plt.ylabel(r'$\mathrm{\mathbb{E} \|\| X- \pi_{C} X \|\| _{Fr}^{2}}$', fontsize=12)
# plt.title('The case k = '+str(k)+', '+str(T)+' matrices')
# plt.legend(bbox_to_anchor=(0.495,0.34), loc="upper left")
# plt.xticks(range(2, 13, 1), fontsize=12)
# figfile_title= "effective_kernel_k_"+str(k)+"_matrices_number_"+str(T)+"_N_"+str(N)+"_"+file_name_comment+".pdf"
# plt.savefig(figfile_title)
# plt.show()
#####
#####
#####
plt.scatter(theta_complete_list,avoiding_proba_list,label="Avoiding Probability",marker='x')
plt.plot(theta_list,theoretical_bound_avoiding_probability_list,color='red',label="Theoretical bound")#)
plt.xlabel(r'$\mathrm{\theta}$', fontsize=16)
plt.ylabel(r'$\mathrm{\mathbb{P}(S\cap T_{eff} = \emptyset)}$', fontsize=16)
#plt.title('The case k = '+str(k)+', '+str(T)+' matrices')
plt.legend(bbox_to_anchor=(0.55,1), loc="upper left")
plt.xticks(fontsize=12)
#plt.tight_layout()
figfile_title= "avoid_proba_k_"+str(k)+"_matrices_number_"+str(T)+"_N_"+str(N)+"_"+file_name_comment+".pdf"
plt.savefig(figfile_title)
plt.show()
| nilq/baby-python | python |
#!/usr/bin/env python3
import math
import torch
from torch.distributions import MultivariateNormal as TMultivariateNormal
from torch.distributions.kl import register_kl
from torch.distributions.utils import _standard_normal, lazy_property
from .. import settings
from ..lazy import LazyTensor, lazify
from .distribution import Distribution
from ..utils.broadcasting import _mul_broadcast_shape
class _MultivariateNormalBase(TMultivariateNormal, Distribution):
    """
    Constructs a multivariate Normal random variable, based on mean and covariance.
    Can be multivariate, or a batch of multivariate Normals.

    Passing a vector mean corresponds to a multivariate Normal.
    Passing a matrix mean corresponds to a batch of multivariate Normals.

    Args:
        mean (Tensor or LazyTensor): vector n or matrix b x n mean of MVN distribution
        covar (Tensor or LazyTensor): matrix n x n or batch matrix b x n x n covariance of
            MVN distribution
    """

    def __init__(self, mean, covariance_matrix, validate_args=False):
        # Take the lazy code path whenever either argument is a LazyTensor, so
        # the covariance never has to be evaluated into a dense tensor up front.
        self._islazy = isinstance(mean, LazyTensor) or isinstance(covariance_matrix, LazyTensor)
        if self._islazy:
            if validate_args:
                # TODO: add argument validation
                raise NotImplementedError()
            self.loc = mean
            self._covar = covariance_matrix
            self.__unbroadcasted_scale_tril = None
            self._validate_args = validate_args
            batch_shape, event_shape = self.loc.shape[:-1], self.loc.shape[-1:]
            # TODO: Integrate argument validation for LazyTensors into torch.distribution validation logic
            # Deliberately skip TMultivariateNormal.__init__ (it requires a dense
            # covariance); initialize torch's base Distribution directly.
            super(TMultivariateNormal, self).__init__(batch_shape, event_shape, validate_args=False)
        else:
            super().__init__(loc=mean, covariance_matrix=covariance_matrix, validate_args=validate_args)

    @property
    def _unbroadcasted_scale_tril(self):
        # Lazily compute (and cache) a root decomposition of the covariance in
        # place of torch's Cholesky-based scale_tril.
        if self.islazy and self.__unbroadcasted_scale_tril is None:
            # cache root decomposition
            with settings.fast_computations(covar_root_decomposition=False):
                ust = self.lazy_covariance_matrix.root_decomposition().root.evaluate()
            self.__unbroadcasted_scale_tril = ust
        return self.__unbroadcasted_scale_tril

    @_unbroadcasted_scale_tril.setter
    def _unbroadcasted_scale_tril(self, ust):
        if self.islazy:
            raise NotImplementedError("Cannot set _unbroadcasted_scale_tril for lazy MVN distributions")
        else:
            self.__unbroadcasted_scale_tril = ust

    def expand(self, batch_size):
        """Return a new distribution with mean/covariance expanded to the given batch shape."""
        new_loc = self.loc.expand(torch.Size(batch_size) + self.loc.shape[-1:])
        new_covar = self._covar.expand(torch.Size(batch_size) + self._covar.shape[-2:])
        res = self.__class__(new_loc, new_covar)
        return res

    def confidence_region(self):
        """
        Returns 2 standard deviations above and below the mean.

        Returns:
            Tuple[Tensor, Tensor]: pair of tensors of size (b x d) or (d), where
                b is the batch size and d is the dimensionality of the random
                variable. The first (second) Tensor is the lower (upper) end of
                the confidence region.
        """
        # mul_ mutates the tensor returned by self.stddev; assumes that property
        # returns a fresh tensor on each access — TODO confirm.
        std2 = self.stddev.mul_(2)
        mean = self.mean
        return mean.sub(std2), mean.add(std2)

    @lazy_property
    def covariance_matrix(self):
        # Dense covariance; evaluates the LazyTensor in the lazy case.
        if self.islazy:
            return self._covar.evaluate()
        else:
            return super().covariance_matrix

    def get_base_samples(self, sample_shape=torch.Size()):
        """Get i.i.d. standard Normal samples (to be used with rsample(base_samples=base_samples))"""
        with torch.no_grad():
            shape = self._extended_shape(sample_shape)
            base_samples = _standard_normal(shape, dtype=self.loc.dtype, device=self.loc.device)
        return base_samples

    @lazy_property
    def lazy_covariance_matrix(self):
        """
        The covariance_matrix, represented as a LazyTensor.
        """
        if self.islazy:
            return self._covar
        else:
            return lazify(super().covariance_matrix)

    def log_prob(self, value):
        """Log density of `value`, using lazy inv_quad_logdet when fast computations are on."""
        if settings.fast_computations.log_prob.off():
            return super().log_prob(value)

        if self._validate_args:
            self._validate_sample(value)

        mean, covar = self.loc, self.lazy_covariance_matrix
        diff = value - mean

        # Repeat the covar to match the batch shape of diff
        if diff.shape[:-1] != covar.batch_shape:
            if len(diff.shape[:-1]) < len(covar.batch_shape):
                diff = diff.expand(covar.shape[:-1])
            else:
                padded_batch_shape = (*(1 for _ in range(diff.dim() + 1 - covar.dim())), *covar.batch_shape)
                covar = covar.repeat(
                    *(diff_size // covar_size for diff_size, covar_size in zip(diff.shape[:-1], padded_batch_shape)),
                    1, 1
                )

        # Get log determinant and first part of quadratic form in one solve.
        inv_quad, logdet = covar.inv_quad_logdet(inv_quad_rhs=diff.unsqueeze(-1), logdet=True)

        res = -0.5 * sum([inv_quad, logdet, diff.size(-1) * math.log(2 * math.pi)])
        return res

    def rsample(self, sample_shape=torch.Size(), base_samples=None):
        """Reparameterized sampling; optionally reuses caller-provided standard-Normal base samples."""
        covar = self.lazy_covariance_matrix
        if base_samples is None:
            # Create some samples
            num_samples = sample_shape.numel() or 1

            # Get samples
            res = covar.zero_mean_mvn_samples(num_samples) + self.loc.unsqueeze(0)
            res = res.view(sample_shape + self.loc.shape)
        else:
            # Make sure that the base samples agree with the distribution
            if self.loc.shape != base_samples.shape[-self.loc.dim() :]:
                raise RuntimeError(
                    "The size of base_samples (minus sample shape dimensions) should agree with the size "
                    "of self.loc. Expected ...{} but got {}".format(self.loc.shape, base_samples.shape)
                )

            # Determine what the appropriate sample_shape parameter is
            sample_shape = base_samples.shape[: base_samples.dim() - self.loc.dim()]

            # Reshape samples to be batch_size x num_dim x num_samples
            # or num_dim x num_samples
            base_samples = base_samples.view(-1, *self.loc.shape)
            base_samples = base_samples.permute(*tuple(range(1, self.loc.dim() + 1)), 0)

            # Now reparameterize those base samples
            covar_root = covar.root_decomposition().root
            # If necessary, adjust base_samples for rank of root decomposition
            if covar_root.shape[-1] < base_samples.shape[-2]:
                base_samples = base_samples[..., : covar_root.shape[-1], :]
            elif covar_root.shape[-1] > base_samples.shape[-2]:
                raise RuntimeError("Incompatible dimension of `base_samples`")
            res = covar_root.matmul(base_samples) + self.loc.unsqueeze(-1)

            # Permute and reshape new samples to be original size
            res = res.permute(-1, *tuple(range(self.loc.dim()))).contiguous()
            res = res.view(sample_shape + self.loc.shape)

        return res

    def sample(self, sample_shape=torch.Size(), base_samples=None):
        """Non-differentiable sampling (rsample under no_grad)."""
        with torch.no_grad():
            return self.rsample(sample_shape=sample_shape, base_samples=base_samples)

    @property
    def variance(self):
        if self.islazy:
            # overwrite this since torch MVN uses unbroadcasted_scale_tril for this
            diag = self.lazy_covariance_matrix.diag()
            diag = diag.view(diag.shape[:-1] + self._event_shape)
            return diag.expand(self._batch_shape + self._event_shape)
        else:
            return super().variance

    def __add__(self, other):
        # Sum of independent Gaussians: means and covariances add.
        if isinstance(other, _MultivariateNormalBase):
            return self.__class__(
                # BUG FIX: was `self._mean`, which is not an attribute of this
                # class and raised AttributeError; the public `mean` is correct.
                mean=self.mean + other.mean,
                covariance_matrix=(self.lazy_covariance_matrix + other.lazy_covariance_matrix),
            )
        elif isinstance(other, int) or isinstance(other, float):
            # Adding a constant shifts the mean only.
            return self.__class__(self.mean + other, self.lazy_covariance_matrix)
        else:
            raise RuntimeError("Unsupported type {} for addition w/ MultivariateNormal".format(type(other)))

    def __radd__(self, other):
        # Supports sum([...]) which starts from 0.
        if other == 0:
            return self
        return self.__add__(other)

    def __mul__(self, other):
        # Scaling a Gaussian scales the mean by c and the covariance by c^2.
        if not (isinstance(other, int) or isinstance(other, float)):
            raise RuntimeError("Can only multiply by scalars")
        if other == 1:
            return self
        return self.__class__(mean=self.mean * other, covariance_matrix=self.lazy_covariance_matrix * (other ** 2))

    def __truediv__(self, other):
        return self.__mul__(1.0 / other)
try:
    # If pyro is installed, add the TorchDistributionMixin
    from pyro.distributions.torch_distribution import TorchDistributionMixin

    class MultivariateNormal(_MultivariateNormalBase, TorchDistributionMixin):
        pass

except ImportError:
    # Pyro is not available: fall back to the plain base class (same public API,
    # just without pyro's mixin helpers).
    class MultivariateNormal(_MultivariateNormalBase):
        pass
@register_kl(MultivariateNormal, MultivariateNormal)
def kl_mvn_mvn(p_dist, q_dist):
    """Compute KL(p || q) between two (possibly lazy) multivariate Normals."""
    # Broadcast both distributions to a common batch shape first.
    common_shape = _mul_broadcast_shape(p_dist.batch_shape, q_dist.batch_shape)
    if p_dist.batch_shape != common_shape:
        p_dist = p_dist.expand(common_shape)
    if q_dist.batch_shape != common_shape:
        q_dist = q_dist.expand(common_shape)

    p_mean, q_mean = p_dist.loc, q_dist.loc
    p_covar = p_dist.lazy_covariance_matrix
    q_covar = q_dist.lazy_covariance_matrix

    delta = p_mean - q_mean
    p_root = p_covar.root_decomposition().root.evaluate()
    if isinstance(p_root, LazyTensor):
        # evaluate() may still hand back a LazyTensor (e.g. a DiagLazyTensor);
        # force a dense tensor before concatenation. We may want to be smarter
        # about this in the future.
        p_root = p_root.evaluate()

    # A single inv_quad_logdet call yields both tr(q^-1 p) + delta^T q^-1 delta
    # and logdet(q).
    rhs = torch.cat([delta.unsqueeze(-1), p_root], -1)
    trace_plus_quad, logdet_q = q_covar.inv_quad_logdet(inv_quad_rhs=rhs, logdet=True)
    logdet_p = p_covar.logdet()

    # KL = 0.5 * (logdet(q) - logdet(p) + tr(q^-1 p) + delta^T q^-1 delta - n)
    return 0.5 * sum([logdet_q, logdet_p.mul(-1), trace_plus_quad, -float(delta.size(-1))])
| nilq/baby-python | python |
import torch
from torch.utils.data import DataLoader
import torchvision
from torchvision.transforms import ToTensor
from appfl.misc.data import *
DataSet_name = "MNIST"
num_channel = 1  # 1 if gray, 3 if color
num_classes = 10  # number of the image classes
num_pixel = 28  # image size = (num_pixel, num_pixel)

""" Data """
# Load the raw MNIST test split; download=False assumes the data is already on
# disk under ../datasets/RawData. eval() here only resolves the dataset class
# name from the constant above.
test_data_raw = eval("torchvision.datasets." + DataSet_name)(
    f"../datasets/RawData", download=False, train=False, transform=ToTensor()
)

# Materialize every test image/label into plain Python lists.
test_data_input = []
test_data_label = []
for idx in range(len(test_data_raw)):
    test_data_input.append(test_data_raw[idx][0].tolist())
    test_data_label.append(test_data_raw[idx][1])

# Wrap as an appfl Dataset of float images and integer labels.
test_dataset = Dataset(
    torch.FloatTensor(test_data_input), torch.tensor(test_data_label)
)

dataloader = server_dataloader = DataLoader(
    test_dataset,
    num_workers=0,
    batch_size=64,
    shuffle=False,
)

""" Model """
device = "cpu"
# TorchScript model saved by a previous training run.
file = "./resulting_models/MNIST_CNN_Iter_10.pt"
model = torch.jit.load(file)
model.eval()

loss_fn = torch.nn.CrossEntropyLoss()
model.to(device)

test_loss = 0
correct = 0
tmpcnt = 0    # number of batches seen
tmptotal = 0  # number of samples seen
with torch.no_grad():
    for img, target in dataloader:
        tmpcnt += 1
        tmptotal += len(target)
        img = img.to(device)
        target = target.to(device)
        output = model(img)
        test_loss += loss_fn(output, target).item()
        # argmax over class scores -> predicted label
        pred = output.argmax(dim=1, keepdim=True)
        correct += pred.eq(target.view_as(pred)).sum().item()

# Mean per-batch loss and overall accuracy in percent.
test_loss = test_loss / tmpcnt
accuracy = 100.0 * correct / tmptotal
print("test_loss=", test_loss, " accuracy=", accuracy)
| nilq/baby-python | python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
from shutil import copy
import pyshanb
from pyshanb.helper import windows, home, default_configfile
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
# Shortcut: `python setup.py publish` builds and uploads a source dist.
if sys.argv[-1] == 'publish':
    os.system('python setup.py sdist upload')
    sys.exit()

requirements = [
    'requests>=1.1.0',
    'beautifulsoup4',
    'html5lib',
    'shanbay',
]
# argparse only entered the standard library in Python 2.7.
if sys.version_info[:2] < (2, 7):
    requirements.append('argparse')
# Windows-only extras: mp3 playback and console colors.
if windows:
    requirements.extend(['mp3play', 'colorama'])

# copy setting file to home directory.
current_dir = os.path.dirname(os.path.realpath(__file__))
if not os.path.exists(default_configfile):
    copy(os.path.join(current_dir, 'pyshanb.conf'), home)

packages = [
    'pyshanb',
    'pyshanb.plugins',
]
def long_description():
    """Build the PyPI long description from README.rst and CHANGELOG.rst.

    Uses context managers so both file handles are closed promptly (the
    original left them open).
    """
    with open('README.rst') as readme, open('CHANGELOG.rst') as changelog:
        return readme.read() + '\n\n' + changelog.read()
# Package metadata; the `shanbay` console command maps to pyshanb.__main__:main.
setup(
    name='pyshanb',
    version=pyshanb.__version__,
    description=pyshanb.__doc__.strip(),
    long_description=long_description(),
    url='https://github.com/mozillazg/PyShanb',
    download_url='https://github.com/mozillazg/PyShanb',
    author=pyshanb.__author__,
    author_email='mozillazg101@gmail.com',
    license=pyshanb.__license__,
    packages=packages,
    package_data={'': ['LICENSE.txt', '*.conf']},
    package_dir={'pyshanb': 'pyshanb'},
    include_package_data=True,
    install_requires=requirements,
    # setup_requires=['sphinx'],
    zip_safe=False,
    entry_points={
        'console_scripts': [
            'shanbay = pyshanb.__main__:main',
        ],
    },
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Intended Audience :: Education',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 2 :: Only',
        'Topic :: Terminals',
        'Topic :: Text Processing',
        'Topic :: Utilities',
    ],
)
| nilq/baby-python | python |
import os
import time
import queue
import demomgr.constants as CNST
from demomgr.filterlogic import process_filterstring, FILTERFLAGS
from demomgr.helpers import readdemoheader
from demomgr.threads.read_folder import ThreadReadFolder
from demomgr.threads._threadsig import THREADSIG
from demomgr.threads._base import _StoppableBaseThread
class ThreadFilter(_StoppableBaseThread):
    """
    Thread to filter a directory of demos.

    Delegates directory reading to a ThreadReadFolder, then applies the
    predicates parsed from the user's filter string to each demo, emitting
    progress/result signals on the output queue.
    """

    REQUIRED_CFG_KEYS = ThreadReadFolder.REQUIRED_CFG_KEYS

    def __init__(self, queue_out, filterstring, curdir, cfg, silent = False):
        """
        Thread requires output queue and the following args:
        filterstring <Str>: Raw user input from the entry field
        curdir <Str>: Absolute path to current directory
        cfg <Dict>: Program configuration, reduced to cls.REQUIRED_CFG_KEYS
        silent <Bool>: If True, thread will not drop progress messages
        """
        self.filterstring = filterstring
        self.curdir = curdir
        self.cfg = cfg
        self.silent = silent

        super().__init__(None, queue_out)

    def run(self):
        starttime = time.time()
        self.queue_out_put(THREADSIG.INFO_STATUSBAR, ("Filtering demos; Parsing filter...", ))
        # Parse the raw filter string into predicate lambdas plus filter flags.
        try:
            filters, flags = process_filterstring(self.filterstring)
        except Exception as error:
            self.queue_out_put(
                THREADSIG.INFO_STATUSBAR, (f"Error parsing filter request: {error}", 4000)
            )
            self.queue_out_put(THREADSIG.FAILURE); return

        if self.stoprequest.is_set():
            self.queue_out_put(THREADSIG.ABORTED); return

        if not self.silent:
            self.queue_out_put(
                THREADSIG.INFO_STATUSBAR, ("Filtering demos; Reading information...", )
            )

        # Delegate reading of the demo directory to a ThreadReadFolder that
        # reports back on its own queue.
        self.datafetcherqueue = queue.Queue()
        self.datafetcherthread = ThreadReadFolder(
            self.datafetcherqueue, targetdir = self.curdir, cfg = self.cfg
        )
        self.datafetcherthread.start()
        # NOTE: Can't really wait for join to this thread here.
        self.datafetcherthread.join(None, nostop = True)
        if self.stoprequest.is_set():
            self.queue_out_put(THREADSIG.ABORTED); return

        # Drain the fetcher's queue: pick up the demo data and react to the
        # fetcher's finish signal (values below 0x100).
        demo_data = None
        while True:
            try:
                queueobj = self.datafetcherqueue.get_nowait()
                if queueobj[0] == THREADSIG.RESULT_DEMODATA:
                    demo_data = queueobj[1]
                elif queueobj[0] < 0x100: # Finish signal
                    if queueobj[0] == THREADSIG.FAILURE:
                        self.queue_out_put(
                            THREADSIG.INFO_STATUSBAR,
                            ("Demo fetching thread failed unexpectedly during filtering.", 4000)
                        )
                        self.queue_out_put(THREADSIG.FAILURE); return
                    break
            except queue.Empty:
                break

        if self.stoprequest.is_set():
            self.queue_out_put(THREADSIG.ABORTED); return

        filtered_demo_data = {
            "col_filename": [], "col_ks": [], "col_bm": [], "col_ctime": [], "col_filesize": []
        }
        file_amnt = len(demo_data["col_filename"])
        for i, j in enumerate(demo_data["col_filename"]): # Filter
            if not self.silent:
                self.queue_out_put(
                    THREADSIG.INFO_STATUSBAR, (f"Filtering demos; {i+1} / {file_amnt}", )
                )
            # Per-demo record the filter lambdas operate on; None killstreak/
            # bookmark columns are normalized to empty tuples.
            curdataset = {
                "name": j,
                "killstreaks": () if demo_data["col_ks"][i] is None else demo_data["col_ks"][i],
                "bookmarks": () if demo_data["col_bm"][i] is None else demo_data["col_bm"][i],
                "header": None,
                "filedata": {
                    "filesize": demo_data["col_filesize"][i],
                    "modtime": demo_data["col_ctime"][i],
                },
            }
            # Only read the (comparatively expensive) demo header when a
            # filter actually requires it.
            if flags & FILTERFLAGS.HEADER:
                try:
                    curdataset["header"] = readdemoheader(os.path.join(self.curdir, j))
                except (FileNotFoundError, PermissionError, OSError):
                    # NOTE(review): a failed header read aborts filtering of ALL
                    # remaining demos (break, not continue) — confirm intended.
                    break
            # Keep the demo only when every predicate accepts it.
            if all(lambda_(curdataset) for lambda_ in filters):
                filtered_demo_data["col_filename"].append(j)
                filtered_demo_data["col_ks"      ].append(demo_data["col_ks"][i])
                filtered_demo_data["col_bm"      ].append(demo_data["col_bm"][i])
                filtered_demo_data["col_ctime"   ].append(demo_data["col_ctime"][i])
                filtered_demo_data["col_filesize"].append(demo_data["col_filesize"][i])

        if self.stoprequest.is_set():
            self.queue_out_put(THREADSIG.ABORTED); return

        self.queue_out_put(
            THREADSIG.INFO_STATUSBAR,
            (f"Filtered {file_amnt} demos in {round(time.time() - starttime, 3)} seconds.", 3000)
        )
        self.queue_out_put(THREADSIG.RESULT_DEMODATA, filtered_demo_data)
        self.queue_out_put(THREADSIG.SUCCESS)
| nilq/baby-python | python |
"""
writen by stephen
"""
import os
import numpy as np
import tensorflow as tf
from alexnet import AlexNet
from datagenerator import ImageDataGenerator
from datetime import datetime
import glob
from tensorflow.contrib.data import Iterator
learning_rate = 1e-4
num_epochs = 100 # 代的个数
batch_size = 1024
dropout_rate = 0.5
num_classes = 2 # 类别标签
train_layers = ['fc8', 'fc7', 'fc6']
display_step = 20
filewriter_path = "tensorboard" # 存储tensorboard文件
checkpoint_path = "checkpoints" # 训练好的模型和参数存放目录
if not os.path.isdir(checkpoint_path):
os.mkdir(checkpoint_path)
train_image_path = 'train/' # 指定训练集数据路径(根据实际情况指定训练数据集的路径)
test_image_cat_path = 'test/cat/' # 指定测试集数据路径(根据实际情况指定测试数据集的路径)
test_image_dog_path = 'test/dog/' # 指定测试集数据路径(根据实际情况指定测试数据集的路径)
label_path = []
test_label = []
# 打开训练数据集目录,读取全部图片,生成图片路径列表
image_path = np.array(glob.glob(train_image_path + 'cat.*.jpg')).tolist()
image_path_dog = np.array(glob.glob(train_image_path + 'dog.*.jpg')).tolist()
image_path[len(image_path):len(image_path)] = image_path_dog
for i in range(len(image_path)):
if 'dog' in image_path[i]:
label_path.append(1)
else:
label_path.append(0)
# 打开测试数据集目录,读取全部图片,生成图片路径列表
test_image = np.array(glob.glob(test_image_cat_path + '*.jpg')).tolist()
test_image_path_dog = np.array(glob.glob(test_image_dog_path + '*.jpg')).tolist()
test_image[len(test_image):len(test_image)] = test_image_path_dog
for i in range(len(test_image)):
if i < 1500:
test_label.append(0)
else:
test_label.append(1)
# 调用图片生成器,把训练集图片转换成三维数组
tr_data = ImageDataGenerator(
images=image_path,
labels=label_path,
batch_size=batch_size,
num_classes=num_classes)
# 调用图片生成器,把测试集图片转换成三维数组
test_data = ImageDataGenerator(
images=test_image,
labels=test_label,
batch_size=batch_size,
num_classes=num_classes,
shuffle=False)
with tf.name_scope('input'):
# 定义迭代器
iterator = Iterator.from_structure(tr_data.data.output_types,
tr_data.data.output_shapes)
training_initalize=iterator.make_initializer(tr_data.data)
testing_initalize=iterator.make_initializer(test_data.data)
# 定义每次迭代的数据
next_batch = iterator.get_next()
x = tf.placeholder(tf.float32, [batch_size, 227, 227, 3])
y = tf.placeholder(tf.float32, [batch_size, num_classes])
keep_prob = tf.placeholder(tf.float32)
# 图片数据通过AlexNet网络处理
model = AlexNet(x, keep_prob, num_classes, train_layers)
# List of trainable variables of the layers we want to train
var_list = [v for v in tf.trainable_variables() if v.name.split('/')[0] in train_layers]
# 执行整个网络图
score = model.fc8
with tf.name_scope('loss'):
# 损失函数
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=score,
labels=y))
gradients = tf.gradients(loss, var_list)
gradients = list(zip(gradients, var_list))
with tf.name_scope('optimizer'):
# 优化器,采用梯度下降算法进行优化
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
train_op = optimizer.apply_gradients(grads_and_vars=gradients)
# 定义网络精确度
with tf.name_scope("accuracy"):
correct_pred = tf.equal(tf.argmax(score, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# 把精确度加入到Tensorboard
tf.summary.scalar('loss', loss)
tf.summary.scalar('accuracy', accuracy)
merged_summary = tf.summary.merge_all()
writer = tf.summary.FileWriter(filewriter_path)
saver = tf.train.Saver()
# 定义一代的迭代次数
train_batches_per_epoch = int(np.floor(tr_data.data_size / batch_size))
test_batches_per_epoch = int(np.floor(test_data.data_size / batch_size))
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
# 把模型图加入Tensorboard
writer.add_graph(sess.graph)
# 把训练好的权重加入未训练的网络中
model.load_initial_weights(sess)
print("{} Start training...".format(datetime.now()))
print("{} Open Tensorboard at --logdir {}".format(datetime.now(),
filewriter_path))
# 总共训练10代
for epoch in range(num_epochs):
sess.run(training_initalize)
print("{} Epoch number: {} start".format(datetime.now(), epoch + 1))
#开始训练每一代
for step in range(train_batches_per_epoch):
img_batch, label_batch = sess.run(next_batch)
sess.run(train_op, feed_dict={x: img_batch,
y: label_batch,
keep_prob: dropout_rate})
if step % display_step == 0:
s = sess.run(merged_summary, feed_dict={x: img_batch,
y: label_batch,
keep_prob: 1.})
writer.add_summary(s, epoch * train_batches_per_epoch + step)
# 测试模型精确度
print("{} Start validation".format(datetime.now()))
sess.run(testing_initalize)
test_acc = 0.
test_count = 0
for _ in range(test_batches_per_epoch):
img_batch, label_batch = sess.run(next_batch)
acc = sess.run(accuracy, feed_dict={x: img_batch,
y: label_batch,
keep_prob: 1.0})
test_acc += acc
test_count += 1
test_acc /= test_count
print("{} Validation Accuracy = {:.4f}".format(datetime.now(), test_acc))
# 把训练好的模型存储起来
print("{} Saving checkpoint of model...".format(datetime.now()))
checkpoint_name = os.path.join(checkpoint_path, 'model_epoch' + str(epoch + 1) + '.ckpt')
save_path = saver.save(sess, checkpoint_name)
print("{} Epoch number: {} end".format(datetime.now(), epoch + 1))
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
# © 2016 Danimar Ribeiro, Trustcode
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from decimal import Decimal
from datetime import date
from datetime import datetime
from unicodedata import normalize
def normalize_str(string):
    """Strip accents/special characters, returning a plain-ASCII string.

    Falsy input (None, '') yields ''. Bytes input is decoded as UTF-8
    (invalid sequences replaced) before normalization. (The original
    docstring also claimed spaces were stripped — they never were.)
    """
    if not string:
        return ''
    if not isinstance(string, str):
        string = str(string, 'utf-8', 'replace')
    # NFKD splits accented characters into base + combining mark; the ASCII
    # encode with errors='ignore' then drops the marks. The original also did
    # a redundant UTF-8 encode/decode round-trip, removed here.
    return normalize('NFKD', string).encode('ASCII', 'ignore').decode()
def strip_line_feed(string):
    """Collapse tab/newline/form-feed into single spaces, delete carriage
    returns, and trim surrounding whitespace.

    Falsy input is returned unchanged; bytes input is decoded as UTF-8
    (invalid sequences replaced) first.
    """
    if not string:
        return string
    if not isinstance(string, str):
        string = str(string, 'utf-8', 'replace')
    translation = {
        ord('\t'): ' ',
        ord('\n'): ' ',
        ord('\f'): ' ',
        ord('\r'): None,  # delete entirely
    }
    return string.translate(translation).strip()
def format_percent(value):
    """Convert a percentage (e.g. 17) to its Decimal fraction (0.17).

    Falsy values (None, 0, '') yield None, matching the original
    truthiness check.
    """
    return Decimal(value) / 100 if value else None
def format_datetime(value):
    """Format a datetime as ISO-8601-style 'YYYY-MM-DDTHH:MM:SS'.

    Non-datetime values pass through unchanged.

    BUG FIX: the original format string was '%Y-%m-%dT%H:%M:%I', using
    '%I' (12-hour clock hour) in the seconds position; '%S' is correct.
    """
    dt_format = '%Y-%m-%dT%H:%M:%S'
    if isinstance(value, datetime):
        return value.strftime(dt_format)
    return value
def format_date(value):
    """Render a date (or datetime, which subclasses date) as 'YYYY-MM-DD';
    any other value passes through unchanged."""
    return value.strftime('%Y-%m-%d') if isinstance(value, date) else value
| nilq/baby-python | python |
"""
Test Runner class. Lets you setup testrail and run a bunch of tests one after the other
"""
import os,subprocess
class Test_Runner_Class:
    """
    Test Runner class.

    Lets you set up TestRail and run a bunch of test scripts one after the
    other by shelling out to a Python interpreter.
    """

    def __init__(self, base_url='http://qxf2.com', testrail_flag='N', browserstack_flag='N', os_name='Windows', os_version='7', browser='firefox', browser_version='33'):
        "Constructor: compute script locations and store the run options"
        self.python_executable = "python"
        # The utils directory is wherever this file lives; tests sit in ../tests.
        self.util_directory = os.path.abspath((os.path.dirname(__file__)))
        self.test_directory = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'tests'))
        self.setup_testrail_script = os.path.join(self.util_directory, "setup_testrail.py")
        self.reset(base_url=base_url,
                   testrail_flag=testrail_flag,
                   browserstack_flag=browserstack_flag,
                   os_name=os_name,
                   os_version=os_version,
                   browser=browser,
                   browser_version=browser_version)

    def check_file_exists(self, file_path):
        "Return True only when file_path exists and is a regular file"
        file_exist_flag = True
        if os.path.exists(file_path):
            if not os.path.isfile(file_path):
                print('\n****')
                print('Script file provided is not a file: ')
                print(file_path)
                print('****')
                file_exist_flag = False
        else:
            print('\n****')
            print('Unable to locate the provided script file: ')
            print(file_path)
            print('****')
            # BUG FIX: this branch previously assigned an unused variable
            # (conf_flag), so missing files were still reported as present.
            file_exist_flag = False
        return file_exist_flag

    def reset(self, base_url=None, testrail_flag=None, browserstack_flag=None, os_name=None, os_version=None, browser=None, browser_version=None):
        "Reset the stored run options; a None argument leaves that option untouched"
        if base_url is not None:
            self.base_url = base_url
        if testrail_flag is not None:
            self.testrail_flag = testrail_flag
        if browserstack_flag is not None:
            self.browserstack_flag = browserstack_flag
        if os_name is not None:
            self.os_name = os_name
        if os_version is not None:
            self.os_version = os_version
        if browser is not None:
            self.browser = browser
        if browser_version is not None:
            self.browser_version = browser_version

    def run_test(self, test_name):
        "Run the test script with the currently configured command line options"
        testscript_args_list = self.setup_test_script_args_list(test_name)
        self.run_script(testscript_args_list)

    def run_setup_testrail(self, test_name=None, test_run_name='', case_ids_list=None, name_override_flag=True):
        "Run setup_testrail.py with the given command line options (no-op unless testrail_flag is 'y'/'Y')"
        if self.testrail_flag.lower() == 'y':
            testrail_args_list = self.setup_testrail_args_list(test_name, test_run_name, case_ids_list, name_override_flag)
            self.run_script(testrail_args_list)

    def run_script(self, args_list):
        "Run the script on the command line with the given args_list"
        print("\nWill be running the following script:")
        print(' '.join(args_list))
        print("Starting..")
        # NOTE(review): shell=True combined with an args list is
        # platform-dependent (effectively Windows-only behavior) — confirm.
        subprocess.call(args_list, shell=True)
        print("Done!")

    def setup_testrail_args_list(self, test_name=None, test_run_name='', case_ids_list=None, name_override_flag=True):
        "Build the command line argument list for setup_testrail.py"
        args_list = []
        # python setup_testrail.py -r test_run_name -d test_run_description
        if self.check_file_exists(self.setup_testrail_script):
            args_list = [self.python_executable, self.setup_testrail_script]
            if test_run_name != '':
                args_list.append("-r")
                args_list.append(test_run_name)
            if test_name is not None:
                args_list.append("-d")
                args_list.append(test_name)
            if name_override_flag is False:
                args_list.append("-n")
                args_list.append("N")
            if case_ids_list is not None:
                args_list.append("-c")
                case_ids_list = ','.join(case_ids_list)
                args_list.append(case_ids_list)
        return args_list

    def setup_test_script_args_list(self, test_name):
        "Build the command line argument list for the test script itself"
        args_list = []
        # python test_script.py -x Y
        test_script_name = test_name + ".py"
        test_script_name = os.path.join(self.test_directory, test_script_name)
        if self.check_file_exists(test_script_name):
            args_list = [self.python_executable, test_script_name, "-b", self.browser, "-u", self.base_url, "-x", self.testrail_flag, "-s", self.browserstack_flag, "-o", self.os_version, "-v", self.browser_version, "-p", self.os_name]
        return args_list
| nilq/baby-python | python |
# NOTE(review): the original first line read "import settings as S*", which is
# a syntax error; "import settings as S" is the minimal repair -- confirm the
# intended form (possibly "from settings import *").
import settings as S
from util import *
import stress
from genrig import *
from graph import *
from dist import *
from numpy import *
from scipy.linalg.basic import *
from scipy.linalg.decomp import *

# Experiment: estimate the stress space / stress kernel of a random 2-D
# realization of an 8-vertex framework and detect linked components.
v = 8  # number of vertices
d = 2  # embedding dimension
# Edge list of the framework (integer dtype).
E = array([[0,1],[0,2],[1,2],[1,4],[2,3],[2,5],[3,4],[3,7],[4,5],[5,6],[5,7],[6,7],[3,6],[0,4]], 'i')
e = len(E)
gr = GenericRigidity(v, d, E)
g = Graph(random_p(v, d, None), E)
g.gr = gr
dim_T = locally_rigid_rank(v, d)
n_samples = int(dim_T * 16)  # oversample the tangent-space dimension
L_rhos, L_rho = measure_L_rho(g, 1e-5, 0, n_samples)
S_basis, cov_spec = estimate_stress_space(L_rhos, dim_T)
stress_samples = stress.sample(S_basis)
K_basis, stress_spec = estimate_stress_kernel(g, stress_samples)
vcc, cc = stress.detect_LC_from_kernel(g, K_basis)
# BUGFIX: the original used a Python 2 print statement ("print vcc, cc");
# the call form works under both Python 2 and 3.
print(vcc, cc)
## D = rigidity_matrix(v, d, E)
## t = matrix_rank(D)
## sb = asmatrix(svd(D)[0][:,t:])
## print e
## print t
## print sb.shape
## w = sb * asmatrix(random.random((e-t,1)))
## #w = sb[:,0]
## w /= norm(w)
## omega = stress.matrix_from_vector(w, E, v)
## eigval, eigvec = eig(omega)
## eigval = abs(eigval)
## order = range(v)
## order.sort(key = lambda i: eigval[i])
## print eigval[order]
## skd = len(eigval[eigval <= EPS])
## K = eigvec[:,order[:skd]]
| nilq/baby-python | python |
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    def hasCycle(self, head):
        """
        Detect whether a singly-linked list contains a cycle.

        Uses Floyd's tortoise-and-hare algorithm: a slow pointer advances one
        node per step, a fast pointer two; they can only meet if the list
        loops. O(n) time, O(1) space.

        BUGFIX: the original compared ``p1.next != p2.next`` with both
        pointers starting at ``head``, so the loop body never executed and the
        method returned True for every non-empty acyclic list; it also raised
        AttributeError for an empty list.

        :type head: ListNode
        :rtype: bool
        """
        slow = fast = head
        while fast is not None and fast.next is not None:
            slow = slow.next
            fast = fast.next.next
            if slow is fast:
                # The pointers met: a cycle exists.
                return True
        # fast ran off the end of the list: no cycle.
        return False
| nilq/baby-python | python |
import os

# Dates
DATE_FORMAT: str = "%Y-%m-%d"  # Default date format
TIMESTAMP_FORMAT: str = '%Y-%m-%d %H:%M:%S.%f'  # Default timestamp format

# Paths
# <project root>/data (one level above this module's directory).
DATA_PATH: str = os.path.join(
    os.path.dirname(os.path.dirname(__file__)), "data")

# Order
VAT: float = 1.21  # VAT tax applied in Order
# Recognized order types.
ORDER_TYPES: list = ['sale',
                     'consignment',
                     'consignment sale',
                     'return',
                     'credit',
                     'discount',
                     'stock refill']

# Data loader
SORT_COLUMN: str = "timestamp"
ID_SUFFIX: str = "id"
TABLE_FORMAT: str = "csv"
ENTITY_NAME_COLUMN_SUFFIX: str = 'name'
COLUMN_NAME_SEPARATOR: str = '_'

# Other
SEP: str = ";"  # column separator in csv files

# Main
DATASOURCES: list = ['customer', 'product', 'manager', 'order']
# has to be added to DB as first entry
DEFAULT_VALUE: str = 'default'

# Database
# <project root>/database/sandeliapp.db
# (os.path.join over multiple components replaces the previous
# os.sep.join(['database', 'sandeliapp.db']) -- identical result, standard idiom.)
DATABASE: str = os.path.join(
    os.path.dirname(os.path.dirname(__file__)), 'database', 'sandeliapp.db')
| nilq/baby-python | python |
###################################################################################
# #
# Workflow logic will be coded here, just to get rid of dirty code in the models. #
# #
###################################################################################
from __future__ import unicode_literals
from contextlib import contextmanager
from django.apps import apps as registry
from django.core.exceptions import ValidationError
from django.db.transaction import atomic
from django.utils.translation import ugettext_lazy as _
from django.utils.six import string_types
from django.contrib.contenttypes.models import ContentType
from cantrips.iteration import iterable, items
from . import exceptions, models
import json
@contextmanager
def wrap_validation_error(obj):
    """Re-raise Django ``ValidationError`` as ``WorkflowInvalidState`` for *obj*.

    Usage::

        with wrap_validation_error(instance):
            instance.full_clean()

    A ``WorkflowInvalidState`` raised inside the body passes through untouched
    so already-wrapped errors are not double-wrapped.
    """
    try:
        yield
    except exceptions.WorkflowInvalidState:
        # Already the domain-specific error -- propagate as-is.
        raise
    except ValidationError as e:
        # Wrap the Django validation error, keeping the offending object.
        raise exceptions.WorkflowInvalidState(obj, e)
class Workflow(object):
"""
Workflow helpers. When used directly, we refer to instances, like calling:
- workflow = Workflow.get(a document)
- workflow = Workflow.create(a user, a wrapped spec, a document)
- workflow.start(a user[, 'path.to.course'])
- workflow.cancel(a user[, 'path.to.course'])
- workflow.execute(a user, an action[, 'path.to.course'])
- dict_ = workflow.get_available_actions()
When using its namespaced class Workflow.Spec, we refer to specs, like calling:
- workflow_spec = Workflow.Spec.install(a workflow spec data)
- workflow_spec = Workflow.Spec.get(a workflow spec code)
- workflow = workflow_spec.instantiate(a user, a document) # Calls Workflow.create() with this spec
- dict_ = workflow.serialized()
"""
class Spec(object):
    """
    Wrapper around a ``models.WorkflowSpec`` instance. Provides
    serialization (``serialized``), installation from serialized data
    (``install``) and instantiation (``instantiate``).
    """

    def __init__(self, workflow_spec):
        self._spec = workflow_spec

    @property
    def spec(self):
        """The wrapped ``models.WorkflowSpec`` model instance."""
        return self._spec

    def document_class(self):
        """
        Document class for this spec.
        :return: The document class.
        """
        return self.spec.document_type.model_class()

    def serialized(self, dump=False):
        """
        Serialized representation of this spec.
        :param dump: If True, the returned value is a json-parseable string. Otherwise [default]
          the returned value is a nested dictionary/list structure.
        :return: A dict with the specification data for this spec, or a json string, depending on
          whether `dump` is False or True.
        """
        spec = self.spec
        course_specs_data = []
        workflow_spec_data = {
            'code': spec.code,
            'name': spec.name,
            'description': spec.description,
            'create_permission': spec.create_permission,
            'cancel_permission': spec.cancel_permission,
            'courses': course_specs_data
        }
        for course_spec in spec.course_specs.all():
            node_specs_data = []
            transition_specs_data = []
            course_specs_data.append({
                'code': course_spec.code,
                'name': course_spec.name,
                'description': course_spec.description,
                'cancel_permission': course_spec.cancel_permission,
                'nodes': node_specs_data,
                'transitions': transition_specs_data
            })
            for node_spec in course_spec.node_specs.all():
                node_specs_data.append({
                    'type': node_spec.type,
                    'code': node_spec.code,
                    'name': node_spec.name,
                    'description': node_spec.description,
                    'landing_handler': node_spec.landing_handler and node_spec.landing_handler.path,
                    'exit_value': node_spec.exit_value,
                    # BUGFIX: this key previously emitted
                    # ``node_spec.execute_permission and node_spec.execute_permission.path``
                    # (copy-paste), losing the joiner on a serialize/install
                    # round-trip; install() reads this key back as the joiner
                    # callable path.
                    'joiner': node_spec.joiner and node_spec.joiner.path,
                    'execute_permission': node_spec.execute_permission,
                    'branches': list(node_spec.branches.values_list('code', flat=True))
                })
            for transition_spec in models.TransitionSpec.objects.filter(origin__course_spec=course_spec):
                transition_specs_data.append({
                    'origin': transition_spec.origin.code,
                    'destination': transition_spec.destination.code,
                    'action_name': transition_spec.action_name,
                    'name': transition_spec.name,
                    'description': transition_spec.description,
                    # NOTE(review): 'permission' is emitted as-is (a string
                    # elsewhere in this module), but 'condition' is invoked as
                    # a callable by the runner -- confirm whether it should be
                    # serialized via its ``.path`` like landing_handler/joiner.
                    'permission': transition_spec.permission,
                    'condition': transition_spec.condition,
                    'priority': transition_spec.priority
                })
        return json.dumps(workflow_spec_data) if dump else workflow_spec_data

    def instantiate(self, user, document):
        """
        Instantiates the spec.
        :param user: The user trying to instantiate the workflow.
        :param document: The document instance to associate to the workflow instance.
        :return: A wrapped workflow instance.
        """
        return Workflow.create(user, self, document)

    @classmethod
    def install(cls, spec_data):
        """
        Takes a json specification (either as string or python dict) which includes the model to associate,
        and tries to create a new workflow spec.
        :param spec_data: The data used to install the spec. Either json or a dict.
        :return: The new spec, wrapped by this class.
        """
        if isinstance(spec_data, string_types):
            spec_data = json.loads(spec_data)
        if not isinstance(spec_data, dict):
            raise TypeError('Spec data to install must be a valid json evaluating as a dict, or a dict itself')
        model = registry.get_model(spec_data['model'])
        if not issubclass(model, models.Document) or model._meta.abstract:
            raise TypeError('Model to associate must be a strict concrete descendant class of Document')
        with atomic():
            # Install the workflow spec itself.
            code = spec_data.get('code')
            name = spec_data.get('name')
            description = spec_data.get('description', '')
            create_permission = spec_data.get('create_permission')
            cancel_permission = spec_data.get('cancel_permission')
            workflow_spec = models.WorkflowSpec(code=code, name=name, description=description,
                                                create_permission=create_permission,
                                                cancel_permission=cancel_permission,
                                                document_type=ContentType.objects.get_for_model(model))
            with wrap_validation_error(workflow_spec):
                workflow_spec.full_clean()
            workflow_spec.save()
            course_specs_data = spec_data.get('courses') or []
            # Branch links are deferred until all courses exist:
            # node_spec => [course__code, ...]
            branches_map = {}

            def install_course(course_spec_data):
                # Installs one course spec with its nodes and transitions.
                code = course_spec_data.get('code')
                name = course_spec_data.get('name')
                description = course_spec_data.get('description', '')
                cancel_permission = course_spec_data.get('cancel_permission')
                node_specs_data = course_spec_data.get('nodes') or []
                transitions_specs_data = course_spec_data.get('transitions') or []
                # Install the course
                course_spec = models.CourseSpec(workflow_spec=workflow_spec, code=code, name=name,
                                                description=description, cancel_permission=cancel_permission)
                with wrap_validation_error(course_spec):
                    course_spec.full_clean()
                course_spec.save()
                # Install the course nodes
                for node_spec_data in node_specs_data:
                    type_ = node_spec_data.get('type')
                    code = node_spec_data.get('code')
                    name = node_spec_data.get('name')
                    description = node_spec_data.get('description', '')
                    landing_handler = node_spec_data.get('landing_handler')
                    exit_value = node_spec_data.get('exit_value')
                    joiner = node_spec_data.get('joiner')
                    execute_permission = node_spec_data.get('execute_permission')
                    node_spec = models.NodeSpec(type=type_, code=code, name=name, description=description,
                                                landing_handler=landing_handler, exit_value=exit_value,
                                                joiner=joiner, execute_permission=execute_permission,
                                                course_spec=course_spec)
                    with wrap_validation_error(node_spec):
                        node_spec.full_clean()
                    node_spec.save()
                    # Deferring branches installation
                    branches_map[node_spec] = node_spec_data.get('branches') or []
                # Install the node transitions
                for transition_spec_data in transitions_specs_data:
                    origin_code = transition_spec_data.get('origin')
                    destination_code = transition_spec_data.get('destination')
                    action_name = transition_spec_data.get('action_name')
                    name = transition_spec_data.get('name')
                    description = transition_spec_data.get('description', '')
                    permission = transition_spec_data.get('permission')
                    condition = transition_spec_data.get('condition')
                    priority = transition_spec_data.get('priority')
                    try:
                        origin = course_spec.node_specs.get(code=origin_code)
                    except models.NodeSpec.DoesNotExist:
                        raise exceptions.WorkflowCourseNodeDoesNotExist(course_spec, origin_code)
                    try:
                        destination = course_spec.node_specs.get(code=destination_code)
                    except models.NodeSpec.DoesNotExist:
                        raise exceptions.WorkflowCourseNodeDoesNotExist(course_spec, destination_code)
                    transition = models.TransitionSpec(origin=origin, destination=destination, name=name,
                                                       action_name=action_name, description=description,
                                                       permission=permission, condition=condition,
                                                       priority=priority)
                    with wrap_validation_error(transition):
                        transition.full_clean()
                    transition.save()

            # Install the courses
            for course_spec_data in course_specs_data:
                install_course(course_spec_data)
            # Link the branches
            for node_spec, branches in items(branches_map):
                for branch in branches:
                    try:
                        node_spec.branches.add(workflow_spec.course_specs.get(code=branch))
                    except models.CourseSpec.DoesNotExist:
                        raise exceptions.WorkflowCourseDoesNotExist(
                            workflow_spec, _('No course exists in the workflow spec with such code'), branch
                        )
            #
            # Massive final validation
            #
            # Workflow (one main course; acyclic)
            with wrap_validation_error(workflow_spec):
                workflow_spec.full_clean()
            # Courses (having required nodes; having SPLIT parents, if any; having valid code)
            for course_spec in workflow_spec.course_specs.all():
                with wrap_validation_error(course_spec):
                    course_spec.full_clean()
                # Nodes (inbounds, outbounds, and attributes)
                for node_spec in course_spec.node_specs.all():
                    with wrap_validation_error(node_spec):
                        node_spec.full_clean()
                # Transitions (consistency, attributes, wrt origin node)
                for transition_spec in models.TransitionSpec.objects.filter(origin__course_spec=course_spec):
                    with wrap_validation_error(transition_spec):
                        transition_spec.full_clean()
            # Everything is valid, so we return the wrapped instance
            return cls(workflow_spec)
class PermissionsChecker(object):
    """
    Permissions checks raise different subclasses of PermissionDenied.
    These checks are all performed against the associated document (since
    each workflow instance must be tied to a specific model or, say, document,
    these points can be addressed easily).
    """
    @classmethod
    def can_instantiate_workflow(cls, workflow_instance, user):
        """
        Verifies the user can create a workflow instance, given the instance and user.
        :param workflow_instance: The instance to check (will be already valid).
        :param user: The user to check
        :return: nothing
        :raises exceptions.WorkflowCreateDenied: when the spec declares a create
          permission the user lacks on the document.
        """
        permission = workflow_instance.workflow_spec.create_permission
        document = workflow_instance.document
        # An empty/None permission means "anyone may create".
        if permission and not user.has_perm(permission, document):
            raise exceptions.WorkflowCreateDenied(workflow_instance)

    @classmethod
    def can_cancel_course(cls, course_instance, user):
        """
        Verifies the user can cancel a course instance, given the instance and user.
        Both the workflow permission AND the course permission, if any, must be
        satisfied by the user.
        :param course_instance: The instance to check (will be already valid).
        :param user: The user to check
        :return: nothing
        :raises exceptions.WorkflowCourseCancelDeniedByWorkflow: workflow-level denial.
        :raises exceptions.WorkflowCourseCancelDeniedByCourse: course-level denial.
        """
        wf_permission = course_instance.course_spec.workflow_spec.cancel_permission
        cs_permission = course_instance.course_spec.cancel_permission
        document = course_instance.workflow_instance.document
        # Workflow-level permission is checked first, then course-level.
        if wf_permission and not user.has_perm(wf_permission, document):
            raise exceptions.WorkflowCourseCancelDeniedByWorkflow(course_instance)
        if cs_permission and not user.has_perm(cs_permission, document):
            raise exceptions.WorkflowCourseCancelDeniedByCourse(course_instance)

    @classmethod
    def course_available_actions(cls, course_instance, user):
        """
        Returns the available actions given a course instance, for a
        specific user.
        :return: None, if the associated course spec has a permission
          the user does not satisfy (or if there is no INPUT node).
          Otherwise, a possibly empty list, filled with the available
          actions (i.e. actions without required permission or actions
          with a permission the user satisfies; outbounds without an
          action name will also be discarded).
        """
        try:
            node_spec = course_instance.node_instance.node_spec
            document = course_instance.workflow_instance.document
            # Only INPUT nodes expose user-triggerable actions.
            if node_spec.type != models.NodeSpec.INPUT:
                return None
            if node_spec.execute_permission and not user.has_perm(node_spec.execute_permission, document):
                return None
            results = []
            for transition in node_spec.outbounds.all():
                action_name = transition.action_name
                permission = transition.permission
                # Keep only named transitions whose permission (if any) is satisfied.
                if action_name and (not permission or user.has_perm(permission, document)):
                    results.append({
                        'action_name': action_name,
                        'display_name': transition.display_name
                    })
            return results
        except models.NodeInstance.DoesNotExist:
            # Course has no current node (not started / inconsistent): no actions.
            return None

    @classmethod
    def can_advance_course(cls, course_instance, transition, user):
        """
        Verifies the user can advance a course instance, given the instance and user.
        This check involves several cases:
        - The course instance is started and waiting on an Input node: the user
          satisfies the node's permission (if any) and the transition's permission
          (if any).
        - The course instance is starting and trying to execute the only transition
          from the only starting node: the user satisfies the transition's permission
          (if any).
        - The user is standing on a different node (not ENTER, not INPUT): we ignore
          this case.
        :raises exceptions.WorkflowCourseAdvanceDeniedByNode: INPUT node permission denied.
        :raises exceptions.WorkflowCourseAdvanceDeniedByTransition: transition permission denied.
        """
        document = course_instance.workflow_instance.document
        node_spec = transition.origin
        # Only INPUT and ENTER origins are user-driven; any other node type is
        # advanced internally by the runner, so no permission check applies.
        if node_spec.type not in (models.NodeSpec.INPUT, models.NodeSpec.ENTER):
            return
        elif node_spec.type == models.NodeSpec.INPUT:
            # INPUT nodes may carry their own execute permission.
            node_permission = node_spec.execute_permission
            if node_permission and not user.has_perm(node_permission, document):
                raise exceptions.WorkflowCourseAdvanceDeniedByNode(course_instance)
        # For both INPUT and ENTER origins, the transition permission applies.
        transition_permission = transition.permission
        if transition_permission and not user.has_perm(transition_permission, document):
            raise exceptions.WorkflowCourseAdvanceDeniedByTransition(course_instance)
class CourseHelpers(object):
    """
    Helpers to get information from a course (instance or spec).
    """
    @classmethod
    def _check_status(cls, course_instance, types, invert=False):
        """
        Checks whether the instance's current node has a specific type or list of types.
        The condition can be inverted to see whether the instance's current node does
        not have that/those type(s). If the node does not exist, this method returns
        False. If the node does not exist AND the condition is requested to be inverted,
        this method returns True.
        :param course_instance: Instance to ask for.
        :param types: Node type or iterable with Node types to ask for.
        :param invert: Whether this condition is inverted or not.
        :return: Boolean indicating whether the course instance's node's type is among the
          given types.
        """
        try:
            # XOR with `invert` flips the membership result when requested.
            return (course_instance.node_instance.node_spec.type in iterable(types)) ^ bool(invert)
        except models.NodeInstance.DoesNotExist:
            # No current node instance: membership is vacuously False, so the
            # (possibly inverted) result is just `invert`.
            return bool(invert)

    @classmethod
    def is_empty(cls, course_instance):
        # True when the course has no current node instance at all
        # (inverted membership test against the empty tuple).
        return cls._check_status(course_instance, (), True)

    @classmethod
    def is_waiting(cls, course_instance):
        # Standing on an INPUT node: waiting for a user action.
        return cls._check_status(course_instance, (models.NodeSpec.INPUT,))

    @classmethod
    def is_cancelled(cls, course_instance):
        return cls._check_status(course_instance, (models.NodeSpec.CANCEL,))

    @classmethod
    def is_ended(cls, course_instance):
        return cls._check_status(course_instance, (models.NodeSpec.EXIT,))

    @classmethod
    def is_splitting(cls, course_instance):
        # Standing on a SPLIT node: child branch courses are running.
        return cls._check_status(course_instance, (models.NodeSpec.SPLIT,))

    @classmethod
    def is_joined(cls, course_instance):
        return cls._check_status(course_instance, (models.NodeSpec.JOINED,))

    @classmethod
    def is_terminated(cls, course_instance):
        # Terminal states: joined, exited, or cancelled.
        return cls._check_status(course_instance, (models.NodeSpec.JOINED, models.NodeSpec.EXIT,
                                                   models.NodeSpec.CANCEL))

    @classmethod
    def get_exit_code(cls, course_instance):
        """
        Gets the exit code from a given course instance.
        :param course_instance: The course instance to get the exit code from.
        :return: None for non-terminated courses. -1 for joined and cancelled courses, and a non-negative
          integer for courses reaching an exit node (actually, the exit_value field of the reached exit node).
        """
        if not cls.is_terminated(course_instance):
            return None
        if cls.is_joined(course_instance) or cls.is_cancelled(course_instance):
            return -1
        return course_instance.node_instance.node_spec.exit_value

    @classmethod
    def find_course(cls, course_instance, path):
        """
        Finds a specific course instance given a starting course instance and traversing the tree. The path
        will be broken by separating dot and the descendants will be searched until one course instance is
        found as described (by course codes) or an exception telling no element was found (or no element
        can be found) is triggered.
        :param course_instance: The course instance to check.
        :param path: The path to check under the course instance.
        :return: A descendant, or the same given, course instance.
        """
        if path == '':
            # Empty path: the current course is the target.
            return course_instance
        elif not cls.is_splitting(course_instance):
            # Only a SPLIT node has child branch courses to descend into.
            raise exceptions.WorkflowCourseInstanceDoesNotExist(
                course_instance, _('Course does not have children')
            )
        else:
            course_instance.verify_consistency()
            # Split "a.b.c" into head "a" and tail "b.c" (tail may be empty).
            parts = path.split('.', 1)
            if len(parts) == 1:
                head, tail = parts[0], ''
            else:
                head, tail = parts
            try:
                # Recurse into the branch whose course code matches `head`.
                return cls.find_course(course_instance.node_instance.branches.get(course_spec__code=head), tail)
            except models.NodeInstance.DoesNotExist:
                raise exceptions.WorkflowCourseInstanceDoesNotExist(
                    course_instance, _('There is no children course with this path/code'), path, head
                )
            except models.NodeInstance.MultipleObjectsReturned:
                raise exceptions.WorkflowNoSuchElement(course_instance, _('Multiple children courses exist '
                                                                          'with course code in path'), head)
            except models.CourseInstance.DoesNotExist:
                raise exceptions.WorkflowCourseInstanceDoesNotExist(
                    course_instance, _('There is no children course with this path/code'), path, head
                )
            except models.CourseInstance.MultipleObjectsReturned:
                raise exceptions.WorkflowNoSuchElement(
                    course_instance, _('There are multiple children courses with the same path/code'), path, head
                )
class WorkflowHelpers(object):
    """
    Helpers to get information from a node (instance or spec).
    """
    @classmethod
    def find_course(cls, workflow_instance, path):
        """
        Finds a specific course instance given a target workflow instance and traversing the tree. The path
        will be broken by separating dot and the descendants will be searched until one course instance is
        found as described (by course codes) or an exception telling no element was found (or no element
        can be found) is triggered.
        :param workflow_instance: The workflow instance to query.
        :param path: The path to check under the course instance.
        :return: A descendant, or the first (root), course instance.
        """
        # Ensure the workflow has exactly one root course, then delegate the
        # dotted-path traversal to the course-level helper.
        workflow_instance.verify_exactly_one_parent_course()
        root_course = workflow_instance.courses.get(parent__isnull=True)
        return Workflow.CourseHelpers.find_course(root_course, path)
class WorkflowRunner(object):
    """
    Internal engine: instantiates courses, moves them across nodes, runs
    transitions, and resolves split/join semantics. All methods are
    classmethods invoked from the Workflow wrapper, never directly by users.
    """
    @classmethod
    def _instantiate_course(cls, workflow_instance, course_spec, parent, user):
        """
        Creates a new course instance for a workflow instance.
        :param workflow_instance: Workflow instance to tie the course instance to.
        :param course_spec: Course spec to base the course instance on.
        :param parent: The parent node, or None, to make this instance dependent on.
        :param user: The user triggering the action.
        :return: The created course instance.
        """
        course_instance = workflow_instance.courses.create(course_spec=course_spec, parent=parent)
        enter_node = course_spec.node_specs.get(type=models.NodeSpec.ENTER)
        enter_node.full_clean()
        cls._move(course_instance, enter_node, user)
        # An ENTER node has a single outbound transition; run it immediately.
        transition = enter_node.outbounds.get()
        transition.full_clean()
        cls._run_transition(course_instance, transition, user)
        return course_instance

    @classmethod
    def _move(cls, course_instance, node, user):
        """
        Moves the course to a new node. Checks existence (if node code specified) or consistency
        (if node instance specified).
        :param course_instance: The course instance to move.
        :param node: The node instance or code to move this course instance.
        :param user: The user invoking the action that caused this movement.
        """
        if isinstance(node, string_types):
            # Resolve a node code against this course's spec.
            try:
                node_spec = course_instance.course_spec.node_specs.get(code=node)
            except models.NodeSpec.DoesNotExist:
                raise exceptions.WorkflowCourseNodeDoesNotExist(course_instance, node)
        else:
            # A node spec object must belong to this course's spec.
            if node.course_spec != course_instance.course_spec:
                raise exceptions.WorkflowCourseInstanceDoesNotAllowForeignNodes(course_instance, node)
            node_spec = node
        # We run validations on node_spec.
        node_spec.clean()
        # Now we must run the callable, if any.
        handler = node_spec.landing_handler
        if handler:
            handler(course_instance.workflow_instance.document, user)
        # Nodes of type INPUT, EXIT, SPLIT, JOINED and CANCEL are not intermediate execution nodes but
        # they end the advancement of a course (EXIT, JOINED and CANCEL do that permanently, while
        # INPUT and SPLIT will continue by running other respective workflow calls).
        #
        # Nodes of type ENTER, MULTIPLEXER and STEP are temporary and so they should not be saved like that.
        if node_spec.type in (models.NodeSpec.INPUT, models.NodeSpec.SPLIT, models.NodeSpec.EXIT,
                              models.NodeSpec.CANCEL, models.NodeSpec.JOINED):
            # Replace any previous resting node instance with the new one.
            try:
                course_instance.node_instance.delete()
            except models.NodeInstance.DoesNotExist:
                pass
            node_instance = models.NodeInstance.objects.create(course_instance=course_instance, node_spec=node_spec)
            # We must log the step.
            # NOTE(review): persisting and logging happen only for resting node
            # types; transient ENTER/STEP/MULTIPLEXER hops are not recorded --
            # confirm this matches history/audit requirements.
            models.CourseInstanceLog.objects.create(user=user, course_instance=course_instance, node_spec=node_spec)
            # For split nodes, we also need to create the pending courses as branches.
            if node_spec.type == models.NodeSpec.SPLIT:
                for branch in node_spec.branches.all():
                    cls._instantiate_course(course_instance.workflow_instance, branch, node_instance, user)

    @classmethod
    def _cancel(cls, course_instance, user, level=0):
        """
        Moves the course recursively (if this course has children) to a cancel node.
        For more information see the _move method in this class.
        :param course_instance: The course instance being cancelled.
        :param user: The user invoking the action leading to this call.
        :param level: The cancellation level. Not directly useful except as information for the
          user, later in the database.
        :return:
        """
        if Workflow.CourseHelpers.is_terminated(course_instance):
            # Already terminal: nothing to cancel.
            return
        node_spec = course_instance.course_spec.verify_has_cancel_node()
        course_instance.clean()
        if Workflow.CourseHelpers.is_splitting(course_instance):
            # Cancel child branches first, depth-first, one level deeper.
            next_level = level + 1
            for branch in course_instance.node_instance.branches.all():
                cls._cancel(branch, user, next_level)
        cls._move(course_instance, node_spec, user)
        course_instance.term_level = level
        course_instance.save()

    @classmethod
    def _join(cls, course_instance, user, level=0):
        """
        Moves the course recursively (if this course has children) to a joined node.
        For more information see the _move method in this class.
        :param course_instance: The course instance being joined.
        :param user: The user invoking the action leading to this call.
        :param level: The joining level. Not directly useful except as information for the
          user, later in the database.
        :return:
        """
        if Workflow.CourseHelpers.is_terminated(course_instance):
            # Already terminal: nothing to join.
            return
        node_spec = course_instance.course_spec.verify_has_joined_node()
        if not node_spec:
            raise exceptions.WorkflowCourseInstanceNotJoinable(course_instance, _('This course is not joinable'))
        course_instance.clean()
        if Workflow.CourseHelpers.is_splitting(course_instance):
            # Join child branches first, depth-first, one level deeper.
            next_level = level + 1
            for branch in course_instance.node_instance.branches.all():
                cls._join(branch, user, next_level)
        cls._move(course_instance, node_spec, user)
        course_instance.term_level = level
        course_instance.save()

    @classmethod
    def _run_transition(cls, course_instance, transition, user):
        """
        Runs a transition in a course instance. Many things are ensured already:
        - The course has a valid origin (one which can have outbounds).
        - The transition's origin is the course instance's current node instance's
          node spec.
        :param course_instance: The course instance to run the transition on.
        :param transition: The transition to execute.
        :param user: The user trying to run by this transition.
        :return:
        """
        ####
        # course_instance and transition are already clean by this point
        ####
        # Obtain and validate elements to interact with
        origin = transition.origin
        origin.clean()
        destination = transition.destination
        destination.clean()
        course_spec = course_instance.course_spec
        course_spec.clean()
        # Check if we have permission to do this
        Workflow.PermissionsChecker.can_advance_course(course_instance, transition, user)
        # We move to the destination node
        cls._move(course_instance, destination, user)
        # We must see what happens next.
        # ENTER, CANCEL and JOINED types are not valid destination types.
        # INPUT, SPLIT are types which expect user interaction and will not
        # continue the execution.
        # While...
        # STEP nodes will continue the execution from the only transition they have.
        # EXIT nodes MAY continue the execution by triggering a parent joiner or completing
        # parallel branches (if the parent SPLIT has no joiner and only one outbound).
        # MULTIPLEXER nodes will continue from a picked transition, depending on which
        # one satisfies the condition. It will be an error if no transition satisfies
        # the multiplexer condition.
        if destination.type == models.NodeSpec.EXIT:
            if course_instance.parent:
                # This course is a branch: notify the parent SPLIT course.
                course_instance.parent.clean()
                parent_course_instance = course_instance.parent.course_instance
                parent_course_instance.clean()
                cls._test_split_branch_reached(parent_course_instance, user, course_instance)
        elif destination.type == models.NodeSpec.STEP:
            # After cleaning destination, we know that it has exactly one outbound.
            transition = destination.outbounds.get()
            # Clean the transition.
            transition.clean()
            # Run the transition.
            cls._run_transition(course_instance, transition, user)
        elif destination.type == models.NodeSpec.MULTIPLEXER:
            # After cleaning destination, we know that it has more than one outbound.
            transitions = list(destination.outbounds.order_by('priority').all())
            # Clean all the transitions.
            for transition in transitions:
                transition.clean()
            # Evaluate the conditions and take the transition satisfying the first.
            # If no transition is picked, an error is thrown.
            for transition in transitions:
                condition = transition.condition
                # Condition will be set since we cleaned the transition.
                if condition(course_instance.workflow_instance.document, user):
                    cls._run_transition(course_instance, transition, user)
                    break
            else:
                raise exceptions.WorkflowCourseNodeMultiplexerDidNotSatisfyAnyCondition(
                    destination, _('No condition was satisfied when traversing a multiplexer node')
                )

    @classmethod
    def _test_split_branch_reached(cls, course_instance, user, reaching_branch):
        """
        Decides on a parent course instance what to do when a child branch has reached and end.
        :param course_instance: The parent course instance being evaluated. This instance will have
          a node instance referencing a SPLIT node.
        :param user: The user causing this action by running a transition or cancelling a course.
        :param reaching_branch: The branch reaching this end. It will be a branch of the
          `course_instance` argument.
        :return:
        """
        # We validate the SPLIT node spec
        node_spec = course_instance.node_instance.node_spec
        node_spec.clean()
        joiner = node_spec.joiner
        branches = course_instance.node_instance.branches.all()
        if not joiner:
            # By cleaning we know we will be handling only one transition
            transition = node_spec.outbounds.get()
            transition.clean()
            # If any branch is not terminated, then we do nothing.
            # Otherwise we will execute the transition.
            if all(Workflow.CourseHelpers.is_terminated(branch) for branch in branches):
                cls._run_transition(course_instance, transition, user)
        else:
            # By cleaning we know we will be handling at least one transition
            transitions = node_spec.outbounds.all()
            one_transition = transitions.count() == 1
            # We call the joiner with its arguments
            reaching_branch_code = reaching_branch.course_spec.code
            # Making a dictionary of branch statuses
            branch_statuses = {branch.course_spec.code: Workflow.CourseHelpers.get_exit_code(branch)
                               for branch in branches}
            # Execute the joiner with (document, branch statuses, and current branch being joined) and
            # get the return value.
            returned = joiner(course_instance.workflow_instance.document, branch_statuses, reaching_branch_code)
            if (one_transition and not returned) or returned is None:
                # The joiner did not decide an action.
                # If all the branches have ended (i.e. they have non-None values), this
                # is an error.
                # Otherwise, we do nothing.
                if all(bool(status) for status in branch_statuses.values()):
                    raise exceptions.WorkflowCourseNodeNoTransitionResolvedAfterCompleteSplitJoin(
                        node_spec, _('The joiner callable returned None -not deciding any action- but '
                                     'all the branches have terminated')
                    )
            elif not one_transition and isinstance(returned, string_types):
                # The joiner picked a transition by action name.
                # The transitions will have unique and present action codes.
                # We validate they have unique codes and all codes are present.
                # IF the count of distinct action_names is not the same as the count
                # of transitions, this means that either some transitions do not
                # have action name, or have a repeated one.
                count = transitions.count()
                transition_codes = {transition.action_name for transition in transitions if transition.action_name}
                if len(transition_codes) != count:
                    raise exceptions.WorkflowCourseNodeBadTransitionActionNamesAfterSplitNode(
                        node_spec, _('Split node transitions must all have a unique action name')
                    )
                try:
                    # We get the transition by its code.
                    transition = transitions.get(action_name=returned)
                except models.TransitionSpec.DoesNotExist:
                    raise exceptions.WorkflowCourseNodeTransitionDoesNotExist(
                        node_spec, _('No transition has the specified action name'), returned
                    )
                # We clean the transition
                transition.clean()
                # We force a join in any non-terminated branch (i.e. status in None)
                for code, status in items(branch_statuses):
                    if status is None:
                        cls._join(branches.get(course_spec__code=code), user)
                # And THEN we execute our picked transition
                cls._run_transition(course_instance, transition, user)
            elif not one_transition:
                # Invalid joiner return value type
                raise exceptions.WorkflowCourseNodeInvalidSplitResolutionCode(
                    node_spec, _('Invalid joiner resolution code type. Expected string or None'), returned
                )
            else:
                # We know we have one transition, and the returned joiner value was bool(x) == True
                transition = transitions.first()
                transition.clean()
                # We force a join in any non-terminated branch (i.e. status in None)
                for code, status in items(branch_statuses):
                    if status is None:
                        cls._join(branches.get(course_spec__code=code), user)
                # And THEN we execute our picked transition
                cls._run_transition(course_instance, transition, user)
    def __init__(self, workflow_instance):
        """
        In the end, this whole class is just a Wrapper of a workflow instance,
        and provides all the related methods.

        :param workflow_instance: Instance being wrapped.
        """
        # Validate first: if clean() raises, the wrapper is never constructed.
        workflow_instance.clean()
        self._instance = workflow_instance
    @property
    def instance(self):
        """The wrapped workflow instance (read-only)."""
        return self._instance
@classmethod
def get(cls, document):
"""
Gets an existent workflow for a given document.
:param document:
:return:
"""
content_type = ContentType.objects.get_for_model(type(document))
object_id = document.id
try:
return cls(models.WorkflowInstance.objects.get(content_type=content_type, object_id=object_id))
except models.WorkflowInstance.DoesNotExist:
raise exceptions.WorkflowInstanceDoesNotExist(
None, _('No workflow instance exists for given document'), document
)
@classmethod
def create(cls, user, workflow_spec, document):
"""
Tries to create a workflow instance with this workflow spec, the document, and
on behalf of the specified user.
:param user: The user requesting this action. Permission will be checked for him
against the document.
:param workflow_spec: The workflow spec to be tied to.
:param document: The document to associate.
:return: A wrapper for the newly created instance.
"""
# We only care about the actual spec here, which is already cleaned.
workflow_spec = workflow_spec.spec
with atomic():
workflow_instance = models.WorkflowInstance(workflow_spec=workflow_spec, document=document)
cls.PermissionsChecker.can_instantiate_workflow(workflow_instance, user)
workflow_instance.full_clean()
workflow_instance.save()
return cls(workflow_instance)
def start(self, user):
"""
Starts the workflow by its main course, or searches a course and starts it.
:param user: The user starting the course or workflow.
:return:
"""
with atomic():
try:
self.instance.courses.get(parent__isnull=True)
raise exceptions.WorkflowInstanceNotPending(
self.instance, _('The specified course instance cannot be started because it is not pending')
)
except models.CourseInstance.DoesNotExist:
course_spec = self.instance.workflow_spec.course_specs.get(callers__isnull=True)
course_spec.full_clean()
course_instance = self.WorkflowRunner._instantiate_course(self.instance, course_spec, None, user)
def execute(self, user, action_name, path=''):
"""
Executes an action in the workflow by its main course, or searches a course and executes an action on it.
:param user: The user executing an action in the course or workflow.
:param action_name: The name of the action (transition) to execute.
:param path: Optional path to a course in this instance.
:return:
"""
with atomic():
course_instance = self.CourseHelpers.find_course(self.instance.courses.get(parent__isnull=True), path)
if self.CourseHelpers.is_waiting(course_instance):
course_instance.clean()
course_instance.course_spec.clean()
node_spec = course_instance.node_instance.node_spec
node_spec.clean()
transitions = node_spec.outbounds.all()
# Since we cleaned course_spec and due to the elaborated clean it performs
# which also includes cleaning each outbound, we know each outbound has
# an action_name and it is unique
# We get the transition or fail with non-existence
try:
transition = transitions.get(action_name=action_name)
except models.TransitionSpec.DoesNotExist:
raise exceptions.WorkflowCourseNodeTransitionDoesNotExist(node_spec, action_name)
# We clean the transition
transition.clean()
# And THEN we execute our picked transition
self.WorkflowRunner._run_transition(course_instance, transition, user)
else:
raise exceptions.WorkflowCourseInstanceNotWaiting(
course_instance, _('No action can be executed in the specified course instance because it is not '
'waiting for an action to be taken')
)
    def cancel(self, user, path=''):
        """
        Cancels a workflow entirely (by its main course), or searches a course and cancels it.

        :param user: The user cancelling the course or workflow.
        :param path: Optional path to a course in this instance.
        :return:
        :raises exceptions.WorkflowCourseInstanceDoesNotExist: If no main course exists.
        :raises exceptions.WorkflowCourseInstanceMultipleMatchingElements: If several
          main courses exist (inconsistent data).
        :raises exceptions.WorkflowCourseInstanceAlreadyTerminated: If the target
          course already terminated.
        """
        with atomic():
            try:
                course_instance = self.CourseHelpers.find_course(self.instance.courses.get(parent__isnull=True), path)
            except models.CourseInstance.DoesNotExist:
                raise exceptions.WorkflowCourseInstanceDoesNotExist(
                    self.instance, _('No main course exists for this workflow instance')
                )
            except models.CourseInstance.MultipleObjectsReturned:
                raise exceptions.WorkflowCourseInstanceMultipleMatchingElements(
                    self.instance, _('Multiple main courses exist for this workflow instance')
                )
            # Cancelling an already-terminated course is an error.
            if self.CourseHelpers.is_terminated(course_instance):
                raise exceptions.WorkflowCourseInstanceAlreadyTerminated(
                    course_instance, _('Cannot cancel this instance because it is already terminated')
                )
            # Check permission on workflow AND on course.
            course_instance.clean()
            course_instance.course_spec.clean()
            self.PermissionsChecker.can_cancel_course(course_instance, user)
            # Cancel (recursively).
            self.WorkflowRunner._cancel(course_instance, user)
            # Trigger the parent joiner, if any.
            if course_instance.parent:
                course_instance.parent.clean()
                parent_course_instance = course_instance.parent.course_instance
                parent_course_instance.clean()
                # Let the parent split re-evaluate now that this branch terminated.
                self.WorkflowRunner._test_split_branch_reached(parent_course_instance, user, course_instance)
def get_workflow_status(self):
"""
Get the status of each course in the workflow.
:return: A dictionary with 'course.path' => ('status', code), where code is the exit code
(-1 for cancelled, >= 0 for exit, a node spec's code for waiting, and None for other statuses).
"""
self.instance.clean()
course_instance = self.instance.courses.get(parent__isnull=True)
result = {}
def traverse_actions(course_instance, path=''):
course_instance.clean()
if self.CourseHelpers.is_splitting(course_instance):
result[path] = ('splitting', self.CourseHelpers.get_exit_code(course_instance))
for branch in course_instance.node_instance.branches.all():
code = branch.course_spec.code
new_path = code if not path else "%s.%s" % (path, code)
traverse_actions(branch, new_path)
elif self.CourseHelpers.is_waiting(course_instance):
result[path] = ('waiting', course_instance.node_instance.node_spec.code)
elif self.CourseHelpers.is_cancelled(course_instance):
result[path] = ('cancelled', self.CourseHelpers.get_exit_code(course_instance))
elif self.CourseHelpers.is_ended(course_instance):
result[path] = ('ended', self.CourseHelpers.get_exit_code(course_instance))
elif self.CourseHelpers.is_joined(course_instance):
result[path] = ('joined', self.CourseHelpers.get_exit_code(course_instance))
traverse_actions(course_instance)
return result
def get_workflow_available_actions(self, user):
"""
Get all the waiting courses metadata (including available actions) for the
courses in this workflow for a specific user.
:param: The given user.
:return: A dictionary with 'course.path' => {'display_name': _('Course Name'), 'actions': [{
'action_name': 'list',
'display_name': 'List'
}, {
'action_name': 'of',
'display_name': _('of') # i18n-enabled proxies may appear
}, {
'action_name': 'available',
'display_name': _('Available') # i18n-enabled proxies may appear
}, {
'action_name': 'actions',
'display_name': 'Actions'
}]}
"""
self.instance.clean()
course_instance = self.instance.courses.get(parent__isnull=True)
result = {}
def traverse_actions(course_instance, path=''):
course_instance.clean()
if self.CourseHelpers.is_splitting(course_instance):
# Splits do not have available actions on their own.
# They can only continue traversal on their children
# branches.
for branch in course_instance.node_instance.branches.all():
code = branch.course_spec.code
new_path = code if not path else "%s.%s" % (path, code)
traverse_actions(branch, new_path)
elif self.CourseHelpers.is_waiting(course_instance):
# Waiting courses will enumerate actions by their transitions.
actions = self.PermissionsChecker.course_available_actions(course_instance, user)
if actions:
result[path] = {'display_name': course_instance.course_spec.display_name, 'actions': actions}
traverse_actions(course_instance)
return result
| nilq/baby-python | python |
# Standard HTTP reason phrases keyed by status code. Built once at import time
# instead of being rebuilt on every getStatusMessage() call.
_STATUS_MESSAGES = {
    100: 'Continue',
    101: 'Switching Protocols',
    102: 'Processing',
    200: 'OK',
    201: 'Created',
    202: 'Accepted',
    203: 'Non-authoritative Information',
    204: 'No Content',
    205: 'Reset Content',
    206: 'Partial Content',
    207: 'Multi-Status',
    208: 'Already Reported',
    226: 'IM Used',
    300: 'Multiple Choices',
    301: 'Moved Permanently',
    302: 'Found',
    303: 'See Other',
    304: 'Not Modified',
    305: 'Use Proxy',
    307: 'Temporary Redirect',
    308: 'Permanent Redirect',
    400: 'Bad Request',
    401: 'Unauthorized',
    402: 'Payment Required',
    403: 'Forbidden',
    404: 'Not Found',
    405: 'Method Not Allowed',
    406: 'Not Acceptable',
    407: 'Proxy Authentication Required',
    408: 'Request Timeout',
    409: 'Conflict',
    410: 'Gone',
    411: 'Length Required',
    412: 'Precondition Failed',
    413: 'Payload Too Large',
    414: 'Request-URI Too Long',
    415: 'Unsupported Media Type',
    416: 'Requested Range Not Satisfiable',
    417: 'Expectation Failed',
    418: "I'm a teapot",
    421: 'Misdirected Request',
    422: 'Unprocessable Entity',
    423: 'Locked',
    424: 'Failed Dependency',
    426: 'Upgrade Required',
    428: 'Precondition Required',
    429: 'Too Many Requests',
    431: 'Request Header Fields Too Large',
    444: 'Connection Closed Without Response',
    451: 'Unavailable For Legal Reasons',
    499: 'Client Closed Request',
    500: 'Internal Server Error',
    501: 'Not Implemented',
    502: 'Bad Gateway',
    503: 'Service Unavailable',
    504: 'Gateway Timeout',
    505: 'HTTP Version Not Supported',
    506: 'Variant Also Negotiates',
    507: 'Insufficient Storage',
    508: 'Loop Detected',
    510: 'Not Extended',
    511: 'Network Authentication Required',
    599: 'Network Connect Timeout Error'
}


def getStatusMessage(statusCode, default="Unknown status"):
    """
    Return the standard HTTP reason phrase for *statusCode*.

    :param statusCode: The HTTP status code to look up (must be an int).
    :param default: Value returned when the code has no known reason phrase.
    :return: The reason phrase string, or *default* for unknown codes.
    :raises TypeError: If *statusCode* is not an int.
    """
    if not isinstance(statusCode, int):
        raise TypeError("Status code must be int")
    return _STATUS_MESSAGES.get(statusCode, default)
def isOKStatus(statusCode):
    """
    Tell whether *statusCode* denotes success, i.e. lies in the 2xx range.

    :param statusCode: The HTTP status code to check (must be an int).
    :return: True for 200-299, False otherwise.
    :raises TypeError: If *statusCode* is not an int.
    """
    if not isinstance(statusCode, int):
        raise TypeError("Status code must be int")
    return statusCode in range(200, 300)
class PrintableException(Exception):
    """
    Exception that stores its keyword arguments in a dict and exposes them both
    as items (``exc['key']``) and as attributes (``exc.key``), with a readable
    ``str()``/``repr()``.
    """

    def __init__(self, **kwargs):
        """Store every keyword argument as the single Exception args payload."""
        super().__init__(kwargs)

    @property
    def _json(self):
        # JSON-ish view: the concrete class, the printable cause (if any), and
        # every stored key/value.
        json = {
            "type": self.__class__,  # NOTE(review): the class object itself, not its name — confirm intended
        }
        if isinstance(self.__cause__, PrintableException):
            json["cause"] = self.__cause__
        json = {**json, **self.dict}
        return json

    def __getitem__(self, name):
        # Item access proxies to the stored kwargs dict (KeyError if missing).
        return self.dict[name]

    def __getattr__(self, name):
        # Fall back to the stored kwargs when normal attribute lookup fails.
        # Exception defines no __getattr__, so the super() access itself raises
        # AttributeError, which routes us into the dict lookup below.
        try:
            return super().__getattr__(name)
        except AttributeError as e:
            try:
                return self[name]
            except KeyError:
                # Re-raise the original AttributeError, suppressing the KeyError chain.
                raise e from None

    @staticmethod
    def naifDictDescription(dict):
        """Render a dict as ``k1=v1, k2='v2'`` (string values are quoted)."""
        desc = ""
        for key, value in dict.items():
            if desc != "":
                desc += ", "
            if isinstance(value, str):
                value = f"'{value}'"
            desc += f"{key}={value}"
        return desc

    @property
    def dict(self):
        # The kwargs dict passed to __init__ (stored as the only Exception arg).
        return self.args[0]

    def __str__(self):
        desc = PrintableException.naifDictDescription(self.dict)
        txt = self.__class__.__name__
        if desc != "":
            txt += ": "
            txt += desc
        return txt

    def __repr__(self):
        return f"{self.__class__.__name__}(**{repr(self.dict)})"
class HttpException(PrintableException):
    """
    PrintableException carrying an HTTP status code and reason message.

    Concrete subclasses set ``statusCode`` (and optionally ``statusMessage``)
    as class attributes; ``build`` creates ad-hoc subclasses at runtime.
    """

    # Resolved in __init__ from statusCode when left as None.
    statusMessage = None

    @staticmethod
    def build(statusCode, statusMessage=None, **kwargs):
        """Create and instantiate an ad-hoc HttpException subclass with the given status."""
        fields = {"statusCode": statusCode}
        if statusMessage is not None:
            fields["statusMessage"] = statusMessage
        return type("HttpException", (HttpException,), fields)(**kwargs)

    def __init__(self, **kwargs):
        # statusCode is expected to exist as a class attribute on the subclass.
        if not isinstance(self.statusCode, int):
            raise TypeError("Status code must be int")
        if self.statusMessage is None:
            # Derive the message from the standard reason-phrase table.
            self.statusMessage = getStatusMessage(self.statusCode)
        elif not isinstance(self.statusMessage, str):
            raise TypeError("Status message must be str or None")
        super().__init__(statusCode=self.statusCode, statusMessage=self.statusMessage, **kwargs)
# Concrete HttpException subclasses for the common HTTP error statuses.
class AuthorizationException(HttpException):
    statusCode = 401


class BadRequestException(HttpException):
    statusCode = 400


class ForbiddenException(HttpException):
    statusCode = 403


class NotFoundException(HttpException):
    statusCode = 404


class ServerErrorException(HttpException):
    statusCode = 500


class ServiceUnavailableException(HttpException):
    statusCode = 503


class NotImplementedException(HttpException):
    statusCode = 501
| nilq/baby-python | python |
# Read a length and two strings, then print the number of positions in the
# first n characters where they differ (Hamming distance).
n = int(input())
S = input()
T = input()
print(sum(S[i] != T[i] for i in range(n)))
| nilq/baby-python | python |
from django.core import validators
from django.db import models
from django.db.models.aggregates import Max
from django.db.models.base import Model
from django.core.validators import RegexValidator
class entry(models.Model):
    """A single pilot-logbook entry describing one flight."""

    # Who flew: pilot (required) plus optional copilot.
    pilot_name = models.CharField(max_length=200, blank=False, default="John Doe")
    pilot_identifier = models.CharField(max_length=20, blank=False, default='null')
    copilot_name = models.CharField(max_length=200, blank=True)
    # Optional operator details.
    rental_company = models.CharField(max_length=200, blank=True)
    airline = models.CharField(max_length=200, blank=True)
    # Set automatically when the entry is first saved.
    flight_date = models.DateField(auto_now_add=True)
    # Aircraft details.
    manufacturer = models.CharField(max_length=200, blank=False)
    aircraft_model = models.CharField(max_length=200, blank=False)
    # Type designator: one uppercase letter followed by 1-3 letters/digits.
    aircraft_icao = models.CharField(validators=[RegexValidator(regex=r'^[A-Z]{1}[A-Z0-9]{1,3}$')], max_length=4, blank=True)
    aircraft_reg = models.CharField(max_length=10, blank=True)
    # Flight number: airline designator (3 letters, or letter+digit in either order) plus digits.
    flight_number = models.CharField(validators=[RegexValidator(regex=r'^([A-Za-z]{3}|[A-Za-z][0-9]|[0-9][A-Za-z])([0-9]+)$')], max_length=10, blank=True)
    # Departure/arrival aerodromes as four uppercase letters.
    from_dest = models.CharField(validators=[RegexValidator(regex=r'[A-Z][A-Z][A-Z][A-Z]')], max_length=4, blank=False)
    to_dest = models.CharField(validators=[RegexValidator(regex=r'[A-Z][A-Z][A-Z][A-Z]')], max_length=4, blank=False)
    duration = models.DurationField()
    category_and_class = models.CharField(max_length=100)
    remarks_and_endorsements = models.CharField(max_length=1000)
    picture_with_plane = models.ImageField(upload_to='aircraft_images', blank=True)

    def __str__(self):
        # Readable summary: pilot, route, and date.
        return "{}, {}-{}, {}".format(self.pilot_name, self.from_dest, self.to_dest, self.flight_date)
# The following is pseudocode of the model, "entry".
# Copilot Name: Only if applicable, requirements same as Pilot Name
# Rental Company: Only if applicable, requirements same as Pilot Name
# Date: American standard (MM/DD/YYYY), cannot be left blank
# Manufacturer: 200 Characters, cannot be left blank, if self/independently-made, write name of person who made it
# Model: 100 Characters, cannot be left blank
# Aircraft Identification Number: 10 character MAX
# From (Destination): 4 letters all uppercase, must match existing DB of ICAO codes
# To (Destination): 4 letters all uppercase, must match existing DB of ICAO codes
# Flight Duration: 1 time input in HH:MM format
# Airplane Category and Class: CharField, pilots should know how to fill this in, max is 100 characters
# Remarks/Endorsements: 1000 Character Max | nilq/baby-python | python |
# Generated by Django 3.1.13 on 2021-11-16 14:57
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
    """
    Auto-generated migration: make AnalysisPlan.release_info nullable
    (SET_NULL on delete) and add the AuxiliaryFileDepositRecord model, which
    tracks deposits of auxiliary files (dpJSON/dpPDF) to Dataverse.
    """

    dependencies = [
        ('analysis', '0016_auto_20211115_0020'),
    ]

    operations = [
        migrations.AlterField(
            model_name='analysisplan',
            name='release_info',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='analysis.releaseinfo'),
        ),
        migrations.CreateModel(
            name='AuxiliaryFileDepositRecord',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('object_id', models.UUIDField(default=uuid.uuid4, editable=False)),
                ('name', models.CharField(blank=True, help_text='auto-filled on save', max_length=255)),
                ('deposit_success', models.BooleanField(default=False)),
                ('dv_auxiliary_type', models.CharField(choices=[('dpJSON', 'dpJSON'), ('dpPDF', 'dpPDF')], max_length=100)),
                ('dv_auxiliary_version', models.CharField(default='v1', help_text='e.g. "v1", "v2", etc', max_length=50)),
                ('http_status_code', models.IntegerField(default=-1, help_text='HTTP code')),
                ('http_resp_text', models.TextField(blank=True)),
                ('http_resp_json', models.JSONField(blank=True, null=True)),
                ('user_msg', models.TextField(blank=True)),
                ('dv_download_url', models.URLField(blank=True)),
                ('release_info', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='analysis.releaseinfo')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| nilq/baby-python | python |
from __init__ import __version__ as version
from __init__ import __author__ as author
import requests
# Fetch the expected author name from the remote API and warn when the local
# package metadata has been tampered with.
author_name = requests.get("https://estra-api.herokuapp.com/ShowroomPY").json()["Author"]
if author != author_name:
    print("Error: Incorrect author. Please do not change any of these attributes.")
else:
    pass
class ShowroomData:
    """Simple holder for showroom package metadata (author and version)."""

    def __init__(self, author, version):
        self.author = author
        self.version = version

    def perfecto(self):
        """Print a banner containing the version followed by the author."""
        banner = f"[ ShowroomPY {self.version}] " + self.author
        print(banner)
# Print the startup banner with the fetched author and the package version.
logs = ShowroomData(f"- {author_name}", f"{version}")
logs.perfecto()
| nilq/baby-python | python |
# Yes python, it's a package
| nilq/baby-python | python |
from typing import NoReturn
from cryspy.A_functions_base.function_1_objects import \
form_items_by_dictionary
from cryspy.B_parent_classes.cl_1_item import ItemN
from cryspy.B_parent_classes.cl_2_loop import LoopN
class Chi2(ItemN):
    """
    Choise of the experimental data for the refinement procedure.

    Attributes
    ----------
        - sum True
        - diff True
        - up False
        - down False
        - asymmetry False # For flip ratios
    """
    # No mandatory CIF attributes for this item.
    ATTR_MANDATORY_NAMES = ()
    ATTR_MANDATORY_TYPES = ()
    ATTR_MANDATORY_CIF = ()

    # Optional boolean flags selecting which chi2 contributions are used.
    ATTR_OPTIONAL_NAMES = ("sum", "diff", "up", "down", "asymmetry")
    ATTR_OPTIONAL_TYPES = (bool, bool, bool, bool, bool)
    ATTR_OPTIONAL_CIF = ("sum", "diff", "up", "down", "asymmetry")

    ATTR_NAMES = ATTR_MANDATORY_NAMES + ATTR_OPTIONAL_NAMES
    ATTR_TYPES = ATTR_MANDATORY_TYPES + ATTR_OPTIONAL_TYPES
    ATTR_CIF = ATTR_MANDATORY_CIF + ATTR_OPTIONAL_CIF

    ATTR_INT_NAMES = ()
    ATTR_INT_PROTECTED_NAMES = ()

    # parameters considered are refined parameters
    ATTR_REF = ()
    ATTR_SIGMA = tuple([f"{_h:}_sigma" for _h in ATTR_REF])
    ATTR_CONSTR_FLAG = tuple([f"{_h:}_constraint" for _h in ATTR_REF])
    ATTR_REF_FLAG = tuple([f"{_h:}_refinement" for _h in ATTR_REF])
    ATTR_CONSTR_MARK = tuple([f"{_h:}_mark" for _h in ATTR_REF])

    # constraints on the parameters
    D_CONSTRAINTS = {}

    # default values for the parameters
    D_DEFAULT = {}  # 'sum': True, 'diff': True, 'up': False, 'down': False
    for key in ATTR_SIGMA:
        D_DEFAULT[key] = 0.
    for key in (ATTR_CONSTR_FLAG + ATTR_REF_FLAG):
        D_DEFAULT[key] = False
    for key in ATTR_CONSTR_MARK:
        D_DEFAULT[key] = ""

    PREFIX = "chi2"

    def __init__(self, **kwargs) -> NoReturn:
        """Create the item, applying defaults first and then the given kwargs."""
        super(Chi2, self).__init__()

        # defined for any integer and float parameters
        D_MIN = {}

        # defined for any integer and float parameters
        D_MAX = {}

        # Stored through __dict__ to bypass any attribute interception in ItemN.
        self.__dict__["D_MIN"] = D_MIN
        self.__dict__["D_MAX"] = D_MAX
        for key, attr in self.D_DEFAULT.items():
            setattr(self, key, attr)
        for key, attr in kwargs.items():
            setattr(self, key, attr)
class Chi2L(LoopN):
    """
    Description of chi2 in loop.
    """
    ITEM_CLASS = Chi2
    ATTR_INDEX = None

    def __init__(self, loop_name: str = None, **kwargs) -> NoReturn:
        """Build the loop, creating the contained Chi2 items from keyword columns."""
        super(Chi2L, self).__init__()
        # Stored through __dict__ to bypass any attribute interception in LoopN.
        self.__dict__["items"] = form_items_by_dictionary(self.ITEM_CLASS, kwargs)
        self.__dict__["loop_name"] = loop_name
# s_cont = """
# loop_
# _chi2_sum
# _chi2_diff
# _chi2_up
# _chi2_down
# False True False False
# False False True True
# """
# """
# val_2 = Cell()
# val_2.length_a = 3.
# val_2.angle_alpha = 750
# """
# obj = Chi2L.from_cif(s_cont)
# print(obj, end="\n\n")
# print(obj.get_variable_names(), end="\n\n")
| nilq/baby-python | python |
#!/usr/bin/python3
import sys
import json
import queue
import requests
import threading
import subprocess
from .logger import log
from .cmd_args import args
from .test_utils import last_message_as_json, ws_to_http
class CliWalletException(Exception):
    """Raised when the cli_wallet process is missing or fails to start/respond."""

    def __init__(self, _message):
        # Forward the message to Exception so args/repr carry it as well
        # (previously Exception.__init__ was never called with the message).
        super().__init__(_message)
        self.message = _message

    def __str__(self):
        return self.message
class CliWallet(object):
    """
    Wrapper that spawns a ``cli_wallet`` subprocess and proxies method calls
    to it: any unknown attribute access becomes a wallet command sent over
    the process's stdin, with output collected by a reader thread.
    """

    class CliWalletArgs(object):
        """Collects and renders the command-line arguments used to launch cli_wallet."""

        def __init__(self, _path_to_executable,
                     _server_rpc_endpoint,
                     _cert_auth,
                     #_rpc_endpoint,
                     _rpc_tls_endpoint,
                     _rpc_tls_cert,
                     _rpc_http_endpoint,
                     _deamon,
                     _rpc_allowip,
                     _wallet_file,
                     _chain_id):
            self.path = _path_to_executable + '/cli_wallet'
            self.server_rpc_endpoint = _server_rpc_endpoint
            self.cert_auth = _cert_auth
            #self.rpc_endpoint = _rpc_endpoint
            self.rpc_tls_endpoint = _rpc_tls_endpoint
            self.rpc_tls_cert = _rpc_tls_cert
            self.rpc_http_endpoint = _rpc_http_endpoint
            self.deamon = _deamon
            self.rpc_allowip = _rpc_allowip
            self.wallet_file = _wallet_file
            self.chain_id = _chain_id

        def args_to_list(self):
            """Render the truthy arguments as a single '--key value ...' string."""
            test_args = []
            args = {"server_rpc_endpoint": self.server_rpc_endpoint}
            args["cert_auth"] = self.cert_auth
            #args["rpc_endpoint"] = self.rpc_endpoint
            args["rpc_tls_endpoint"] = self.rpc_tls_endpoint
            args["rpc_tls_cert"] = self.rpc_tls_cert
            args["rpc_http_endpoint"] = self.rpc_http_endpoint
            args["deamon"] = self.deamon
            args["rpc_allowip"] = self.rpc_allowip
            args["wallet_file"] = self.wallet_file
            args["chain_id"] = self.chain_id
            for key, val in args.items():
                # Falsy values (empty strings, False, empty lists) are omitted.
                if val:
                    test_args.append("--" + key.replace("_", "-") + " ")
                    test_args.append(val)
            test_args = " ".join(test_args)
            return test_args

    def __init__(self, _path_to_executable,
                 _server_rpc_endpoint="ws://127.0.0.1:8090",
                 _cert_auth="_default",
                 #_rpc_endpoint="127.0.0.1:8091",
                 _rpc_tls_endpoint="127.0.0.1:8092",
                 _rpc_tls_cert="server.pem",
                 _rpc_http_endpoint="127.0.0.1:8093",
                 _deamon=False,
                 _rpc_allowip=[],
                 _wallet_file="wallet.json",
                 _chain_id="18dcf0a285365fc58b71f18b3d3fec954aa0c141c44e4e5cb4cf777b9eab274e"):
        self.cli_args = CliWallet.CliWalletArgs(_path_to_executable, _server_rpc_endpoint, _cert_auth, #_rpc_endpoint,
                                                _rpc_tls_endpoint, _rpc_tls_cert,
                                                _rpc_http_endpoint, _deamon, _rpc_allowip, _wallet_file, _chain_id)
        self.cli_proc = None      # subprocess.Popen handle, set by set_and_run_wallet
        self.response = ""        # accumulated stdout of the last command
        self.q = queue.Queue()    # lines read from the subprocess by the reader thread
        self.t = threading.Thread(target=self.output_reader, args=())

    def __getattr__(self, name):
        # Any unknown attribute becomes a pending wallet command name;
        # the actual send happens in __call__.
        if self.cli_proc:
            self.method_name = name
            return self
        else:
            log.error("Cli_wallet is not set")
            raise CliWalletException("Cli_wallet is not set")

    def __call__(self, *_args):
        """Send the previously selected command with *_args* and return its output."""
        try:
            self.response = ""
            self.send_and_read(self.prepare_args(*_args))
            return self.response
        except Exception as _ex:
            log.exception("Exception `{0}` occuress while calling `{1}` with `{2}` args.".format(str(_ex), self.method_name, list(_args)))

    def set_and_run_wallet(self):
        """Spawn the cli_wallet process, start the reader thread and unlock the wallet."""
        try:
            log.info("Calling cli_wallet with args `{0}`".format([self.cli_args.path + " " + self.cli_args.args_to_list()]))
            self.cli_proc = subprocess.Popen([self.cli_args.path + " " + self.cli_args.args_to_list()], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=1, shell=True)
            if not self.cli_proc:
                raise CliWalletException("Failed to run cli_wallet")
            self.t.daemon = True  # don't keep the interpreter alive for the reader
            self.t.start()
            self.set_password("{0}".format("testpassword"))
            self.unlock("{0}".format("testpassword"))
            self.import_key("{0}".format(args.wif))
        except Exception as _ex:
            log.exception("Exception `{0}` occuress while while running cli_wallet.".format(str(_ex)))

    #we dont have stansaction status api, so we need to combine...
    def wait_for_transaction_approwal(self):
        """Poll blocks (starting one before the reported one) until the last transaction id appears."""
        json_resp = last_message_as_json(self.response)
        block_num = json_resp["result"]["block_num"]
        trans_id = json_resp["result"]["id"]
        url = ws_to_http(self.cli_args.server_rpc_endpoint)
        idx = -1
        while True:
            param = {"jsonrpc": "2.0", "method": "block_api.get_block", "params": {"block_num": block_num + idx}, "id": 1}
            resp = requests.post(url, json=param)
            data = resp.json()
            if "result" in data and "block" in data["result"]:
                block_transactions = data["result"]["block"]["transaction_ids"]
                if trans_id in block_transactions:
                    log.info("Transaction `{0}` founded in block `{1}`".format(trans_id, block_num + idx))
                    break
            idx += 1

    def check_if_transaction(self):
        """Return True when the last response carries a transaction id."""
        json_resp = last_message_as_json(self.response)
        if "result" in json_resp:
            if "id" in json_resp["result"]:
                return True
        return False

    def read_output(self, _timeout):
        # Drain the reader-thread queue into self.response until it stays
        # empty for _timeout seconds.
        while True:
            try:
                self.response += self.q.get(block=True, timeout=_timeout)
            except queue.Empty:
                break

    def send(self, _data):
        """Write *_data* to the wallet's stdin and flush."""
        self.cli_proc.stdin.write(_data.encode("utf-8"))
        self.cli_proc.stdin.flush()

    def send_and_read(self, _data):
        """Send a command, collect its output, and wait for transaction inclusion if needed."""
        log.info("Sending {0}".format(_data))
        self.send(_data)
        self.read_output(3)

        #asserions does not occures after above flush, so we need to send additiona `Enter`
        self.send("\n")
        self.read_output(0.5)

        if self.check_if_transaction():
            self.wait_for_transaction_approwal()

        return self.response

    def exit_wallet(self):
        """Wait for the wallet process to finish and return its Popen handle."""
        try:
            if not self.cli_proc:
                log.info("Cannot exit wallet, because wallet was not set - please run it first by using `run_wallet` metode.")
            self.cli_proc.communicate()
            return self.cli_proc
        except Exception as _ex:
            log.exception("Exception `{0}` occuress while while running cli_wallet.".format(str(_ex)))

    def output_reader(self):
        # Reader-thread loop: forward each stdout line into the queue.
        while True:
            try:
                for line in iter(self.cli_proc.stdout.readline, b''):
                    self.q.put_nowait(line.decode('utf-8'))
            except queue.Full:
                pass

    def prepare_args(self, *_args):
        """Format the pending command name and its arguments as one input line."""
        name = self.method_name
        args = _args
        prepared_args = name + " "
        for arg in args:
            if isinstance(arg, int):
                prepared_args += str(arg) + " "
            elif isinstance(arg, str):
                # Strings are double-quoted; empty strings become "".
                if arg:
                    prepared_args += "\"{0}\"".format(arg) + " "
                else:
                    prepared_args += '\"\"' + " "
            else:
                prepared_args += "{0}".format(arg) + " "
        return prepared_args + "\n"
| nilq/baby-python | python |
def escreva(mens):
    """
    Print *mens* framed above and below by a line of '~' matching its length.

    :param mens: The message to display.
    """
    # Fix: the border length must come from the parameter, not the global `m`
    # the script happens to define (the function broke for any other caller).
    border = '~' * len(mens)
    print(border)
    print(mens)
    print(border)
# Ask the user for a message and print it framed.
m=str(input('Quala Mensagem ? : '))
escreva(m)
from django.shortcuts import render
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status, viewsets
from profiles_api import serializer, models
class HelloApiView(APIView):
    """Test API View demonstrating the basic HTTP verb handlers."""

    serializer_class = serializer.HelloSerializer

    def get(self, request, format=None):
        """Return a fixed list describing APIView features."""
        an_apiview = [
            'Uses HTTP method get post del put and push ',
            "lalalla",
            'blalala',
        ]
        return Response({'message': 'Hello, I"m API', 'an_apiview': an_apiview})

    def post(self, request):
        """Validate the posted name and echo a greeting, or return the errors."""
        serializer = self.serializer_class(data=request.data)

        if serializer.is_valid():
            name = serializer.validated_data.get('name')
            message = f'hello {name}'
            return Response({'message': message})
        else:
            return Response(
                serializer.errors,
                status=status.HTTP_400_BAD_REQUEST
            )

    def put(self, request, pk=None):
        """Handle replacing the object."""
        return Response({'message': 'PUT'})

    def patch(self, request, pk=None):
        """Handle a partial update of fields."""
        return Response({'message': 'Patch'})

    def delete(self, request, pk=None):
        """Handle deleting the object."""
        return Response({'message': 'delete '})
class HelloViewSet(viewsets.ViewSet):
    """Test ViewSet demonstrating the standard viewset actions."""

    serializer_class = serializer.HelloSerializer

    def list(self, request):
        """Return a fixed list describing ViewSet features."""
        a_viewset = [
            'Uses HTTP method get post del put and push ',
            "lalalla",
            'blalala',
            'sam',
        ]
        return Response({'message': 'Hello', 'a_viewset': a_viewset})

    def create(self, request):
        """Validate the posted name and echo a greeting, or return the errors."""
        serializer = self.serializer_class(data=request.data)

        if serializer.is_valid():
            name = serializer.validated_data.get('name')
            message = f'Hello {name}!'
            return Response({'message': message})
        else:
            return Response(
                serializer.errors,
                status=status.HTTP_400_BAD_REQUEST
            )

    def retrieve(self, request, pk=None):
        """Handle getting an object by its id."""
        return Response({'http_method': 'GET'})

    def update(self, request, pk=None):
        """Handle replacing an object."""
        return Response({'http_method': 'put'})

    def partial_update(self, request, pk=None):
        """Handle a partial update of an object."""
        return Response({'http_method': 'patch'})

    def destroy(self, request, pk=None):
        """Handle deleting an object."""
        return Response({'http_method': 'delete'})
class UserProfileViewSet(viewsets.ModelViewSet):
    """Handle creating and updating profiles via the standard ModelViewSet actions."""

    serializer_class = serializer.UserProfileSerializer
    queryset = models.UserProfile.objects.all()
| nilq/baby-python | python |
"""
[caption]
def=Cutting URL parameters
ja=URLパラメータの切り取り
"""
import sys
import io
import tkinter
import tkinter.ttk
import tkinter.simpledialog
import ctypes
import ctypes.wintypes
from urllib.parse import urlparse, parse_qsl, urlencode, urlunparse
sys.stdin = io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8')
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8')
class CuttingDialog(tkinter.simpledialog.Dialog):
    """Modal dialog with one checkbox per URL query parameter, letting the user pick which ones to keep."""

    def __init__(self, parent, title, url) -> None:
        # The URL must be stored before Dialog.__init__ runs, because it
        # immediately calls body(), which reads self.url.
        self.url = url
        super().__init__(parent, title=title)

    def body(self, master) -> None:
        """Build the dialog body: the host/path label plus one checkbox per query parameter."""
        parts = urlparse(self.url)
        queries = parse_qsl(parts.query)
        self.boollist = []  # one BooleanVar per query parameter, in query order
        self.attributes("-toolwindow", 1)
        self.attributes("-topmost", 1)
        self.focus_force()
        lf = tkinter.LabelFrame(master, text="URL")
        tkinter.Label(lf, text=f"{parts.netloc}{parts.path}").pack(pady=8, padx=4)
        lf.pack(side=tkinter.TOP)
        for query in queries:
            bv = tkinter.BooleanVar()
            tkinter.Checkbutton(master, variable=bv, text=f"{query[0]}={query[1]}").pack(side = tkinter.TOP, anchor=tkinter.W)
            self.boollist.append(bv)
        return super().body(master)

    def grab_set(self) -> None:
        # Center the dialog on the current mouse position (Windows-only ctypes call).
        p = ctypes.wintypes.POINT()
        ctypes.windll.user32.GetCursorPos(ctypes.byref(p))
        self.geometry(f"+{p.x - self.winfo_width() // 2}+{p.y - self.winfo_height() // 2}")
        return super().grab_set()

    # Record whether the dialog was confirmed (True) or dismissed (False).
    def ok(self, event=None): super().ok(event); self.result = True
    def cancel(self, event=None): super().cancel(event); self.result = False
# Read the candidate URL from stdin and emit it with only the selected
# query parameters kept.
text = sys.stdin.read()
if text != "":
    result = True
    bools = []
    p = urlparse(text)
    if "params" in globals():
        # Parameter selection supplied externally (no GUI shown).
        bools = globals()["params"]["bools"]
    elif p.scheme:
        # Interactive mode: ask the user which parameters to keep.
        owner = tkinter.Tk()
        owner.withdraw()
        dlg = CuttingDialog(owner, 'Cutting URL Params', text)
        bools = dlg.boollist
        result = dlg.result
    if result:
        # Rebuild the URL keeping only the parameters whose flag is set.
        url = urlparse(text)
        qsls = parse_qsl(url.query)
        qsla = []
        for b, q in zip(bools, qsls):
            # bools holds tkinter.BooleanVar in GUI mode, plain bools otherwise.
            if b.get() if type(b) is tkinter.BooleanVar else b:
                qsla.append((q[0], q[1]))
        print(urlunparse(url._replace(query=urlencode(qsla))))
    else:
        # Dialog cancelled (or input was not a URL): pass the text through unchanged.
        print(text)
| nilq/baby-python | python |
# coding: utf-8
from __future__ import unicode_literals
from django.test import TestCase
class ComputeIntersectionsTestCase(TestCase):
    """Placeholder test case for the compute-intersections command."""

    def test_command(self):
        pass  # @todo
| nilq/baby-python | python |
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright 2015-2016 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Train a Fast-RCNN model on the PASCAL VOC dataset.
This Fast-RCNN is based on VGG16 that was pre-trained using ImageI1K.
By default, the script will download the pre-trained VGG16 from neon model zoo
and seed the convolution and pooling layers. And Fast R-CNN starts training from
that. If the script is given --model_file, it will continue training the
Fast R-CNN from the given model file.
Reference:
"Fast R-CNN"
http://arxiv.org/pdf/1504.08083v2.pdf
https://github.com/rbgirshick/fast-rcnn
Usage:
python examples/fast-rcnn/train.py -e 20 --save_path frcn_vgg.pkl
Notes:
1. For VGG16 based Fast R-CNN model, we can support training/testing with small
batch size such as, 2 or 3 images per batch. The model training will converge
around 20 epochs. With 3 images per batch, and 64 ROIs per image, the training
consumes about 11G memory.
2. The original caffe model goes through 40000 iteration (mb) of training, with
2 images per minibatch.
3. The dataset will cache the preprocessed file and re-use that if the same
configuration of the dataset is used again. The cached file by default is in
~/nervana/data/VOCDevkit/VOC<year>/train_< >.pkl or
~/nervana/data/VOCDevkit/VOC<year>/inference_< >.pkl
"""
from neon import logger as neon_logger
from neon.backends import gen_backend
from neon.data import PASCALVOCTrain
from neon.transforms import CrossEntropyMulti, SmoothL1Loss, ObjectDetection
from neon.util.argparser import NeonArgparser, extract_valid_args
from neon.optimizers import GradientDescentMomentum, MultiOptimizer
from neon.callbacks.callbacks import Callbacks
from neon.layers import Multicost, GeneralizedCostMask
from neon.util.persist import save_obj
from util import load_vgg_weights, create_frcn_model, scale_bbreg_weights
# main script
# parse the command line arguments
parser = NeonArgparser(__doc__, default_overrides=dict(batch_size=4))
parser.add_argument('--subset_pct', type=float, default=100,
                    help='subset of training dataset to use (percentage)')
args = parser.parse_args(gen_be=False)
# Override save path if None
if args.save_path is None:
    args.save_path = 'frcn_vgg.pkl'
if args.callback_args['save_path'] is None:
    args.callback_args['save_path'] = args.save_path
if args.callback_args['serialize'] is None:
    # Serialize checkpoints every 10 epochs (or every epoch for shorter runs).
    args.callback_args['serialize'] = min(args.epochs, 10)
# hyperparameters
# NOTE(review): batch_size is forced to 4 here even though the parser default
# already is 4 -- any user-supplied --batch_size value is silently overridden.
args.batch_size = 4
num_epochs = args.epochs
n_mb = None
img_per_batch = args.batch_size
rois_per_img = 64
frcn_fine_tune = False
learning_rate_scale = 1.0 / 10
if frcn_fine_tune is True:
    learning_rate_scale = 1.0 / 16
# setup backend
be = gen_backend(**extract_valid_args(args, gen_backend))
if args.backend == 'gpu':
    be.enable_winograd = 4
    if be.gpu_memory_size < 11 * 1024 * 1024 * 1024:
        exit("ERROR: This model requires at least 11GB GPU memory to be run.")
# setup training dataset
train_set = PASCALVOCTrain('trainval', '2007', path=args.data_dir, n_mb=n_mb,
                           img_per_batch=img_per_batch, rois_per_img=rois_per_img,
                           rois_random_sample=True,
                           add_flipped=False, subset_pct=args.subset_pct)
test_set = PASCALVOCTrain('test', '2007', path=args.data_dir, n_mb=n_mb,
                          img_per_batch=img_per_batch, rois_per_img=rois_per_img,
                          rois_random_sample=True,
                          add_flipped=False)
# setup model
model = create_frcn_model(frcn_fine_tune)
# setup optimizer: separate schedules for weights (with weight decay) and biases.
opt_w = GradientDescentMomentum(
    0.001 * learning_rate_scale, 0.9, wdecay=0.0005)
opt_b = GradientDescentMomentum(0.002 * learning_rate_scale, 0.9)
optimizer = MultiOptimizer({'default': opt_w, 'Bias': opt_b})
# if training a new model, seed the image model conv layers with pre-trained weights
# otherwise, just load the model file
if args.model_file is None:
    load_vgg_weights(model, args.data_dir)
# Two-headed cost: classification (cross entropy) + bbox regression (smooth L1),
# equally weighted.
cost = Multicost(costs=[GeneralizedCostMask(costfunc=CrossEntropyMulti()),
                        GeneralizedCostMask(costfunc=SmoothL1Loss())],
                 weights=[1, 1])
callbacks = Callbacks(model, eval_set=test_set, **args.callback_args)
model.fit(train_set, optimizer=optimizer,
          num_epochs=num_epochs, cost=cost, callbacks=callbacks)
# Fast R-CNN model requires scale the bbox regression branch linear layer weights
# before saving the model
model = scale_bbreg_weights(
    model, train_set.bbtarget_means, train_set.bbtarget_stds)
save_obj(model.serialize(keep_states=True), args.save_path)
neon_logger.display('running eval...')
metric_train = model.eval(train_set, metric=ObjectDetection())
neon_logger.display(
    'Train: label accuracy - {}%, object detection logloss - {}'.format(metric_train[0] * 100,
                                                                        metric_train[1]))
metric_test = model.eval(test_set, metric=ObjectDetection())
neon_logger.display(
    'Test: label accuracy - {}%, object detection logloss - {}'.format(metric_test[0] * 100,
                                                                       metric_test[1]))
| nilq/baby-python | python |
"""Define resize, blur, and related constants."""
from . import io
from collections import namedtuple
from numba import guvectorize
import math
import numpy as np
# Plan for a 1D convolution pass: three parallel lists of target indices,
# source indices, and normalized filter weights.
RowOps = namedtuple('RowOps', 'tindices sindices fweights'.split())
# Scale factor 1/sqrt(pi/2) applied to the Gaussian kernel below.
GAUSSIAN_SCALE = 1.0 / np.sqrt(0.5 * np.pi)
def hermite(x):
    """Hermite cubic kernel: smooth falloff from 1 at x=0 to 0 at x=1."""
    t = np.clip(x, 0, 1)
    return t * t * (2 * t - 3) + 1
def triangle(x):
    """Tent kernel: linear ramp from 1 at x=0 down to 0 at x>=1."""
    return 1.0 - np.clip(x, 0, 1)
def gaussian(x):
    """Gaussian kernel truncated at x=2, scaled by GAUSSIAN_SCALE."""
    t = np.clip(x, 0, 2)
    return GAUSSIAN_SCALE * np.exp(-2 * t * t)
def nearest(x):
    """Box kernel: 1.0 where x <= 0.5, otherwise 0.0."""
    return (np.asarray(x) <= 0.5) * 1.0
def sinc(x):
    """Normalized sinc; returns 1.0 near zero to avoid 0/0."""
    if x > 0.00001:
        t = np.pi * x
        return np.sin(t) / t
    return 1.0
def lanczos(x):
    """Lanczos kernel (a=1): sinc squared over the clipped domain [0, 1]."""
    t = np.clip(x, 0, 1)
    s = sinc(t)
    return s * s
def mitchell(x):
    """Mitchell-Netravali cubic with B = C = 1/3, supported on [0, 2)."""
    B = 1.0 / 3.0
    C = 1.0 / 3.0
    if x >= 2.0:
        # Outside the filter support.
        return 0.0
    if x >= 1.0:
        # Outer lobe: 1 <= x < 2.
        Q0 = (8*B + 24*C) / 6.0
        Q1 = (-12*B - 48*C) / 6.0
        Q2 = (6*B + 30*C) / 6.0
        Q3 = (-1*B - 6*C) / 6.0
        return Q0 + Q1*x + Q2*x*x + Q3*x*x*x
    # Inner lobe: 0 <= x < 1 (the linear coefficient is identically zero).
    P0 = (6 - 2*B) / 6.0
    P2 = (-18 + 12*B + 6*C) / 6.0
    P3 = (12 - 9*B - 6*C) / 6.0
    return P0 + P2*x*x + P3*x*x*x
class Filter:
    """Pairs a 1D kernel function with its support radius."""

    def __init__(self, fn, radius):
        self.function = fn
        self.radius = radius
# Ready-made filters pairing each kernel with its support radius
# (in source samples).
HERMITE = Filter(hermite, 1)
TRIANGLE = Filter(triangle, 1)
GAUSSIAN = Filter(gaussian, 2)
NEAREST = Filter(nearest, 0)
LANCZOS = Filter(lanczos, 1)
MITCHELL = Filter(mitchell, 2)
def resize(source, width=None, height=None, filter=None, radius=1,
           wrapx=False, wrapy=False):
    """Create a new numpy image with the desired size.

    Either width or height can be None, in which case its value
    is inferred from the aspect ratio of the source image.

    Filter can be HERMITE, TRIANGLE, GAUSSIAN, NEAREST, LANCZOS, or
    MITCHELL. When omitted, a filter is chosen automatically: MITCHELL
    when magnifying, LANCZOS when minifying.
    """
    assert len(source.shape) == 3, 'Shape is not rows x cols x channels'
    # Fixed: compare against None with `is` / `is not`, not ==/!=.
    assert width is not None or height is not None, 'Missing target size'
    aspect = source.shape[1] / source.shape[0]
    if width is None:
        width = height * aspect
    if height is None:
        height = width / aspect
    magnifying = width > source.shape[1]
    if filter is None:
        filter = MITCHELL if magnifying else LANCZOS
    return resample(source, width, height, filter, radius, wrapx, wrapy)
def resample(source, width, height, filter, radius, wrapx, wrapy):
    """Two-pass separable resampling: horizontal pass, then vertical."""
    trows, tcols = int(height), int(width)
    srows, scols = source.shape[0], source.shape[1]
    nchans = source.shape[2]

    def scaled_fn(t):
        return filter.function(t / radius)

    kernel = Filter(scaled_fn, radius * filter.radius)
    # Pass 1: resample every row to the target width.
    horizontal = np.zeros([srows, tcols, nchans])
    convolve(horizontal, source, create_ops(tcols, scols, kernel, wrapx))
    # Pass 2: transpose so columns become rows, resample to the target
    # height, then transpose back.
    flipped = transpose(horizontal)
    vertical = np.zeros([tcols, trows, nchans])
    convolve(vertical, flipped, create_ops(trows, srows, kernel, wrapy))
    return transpose(vertical)
def blur(image, filter=GAUSSIAN, radius=4, wrapx=False, wrapy=False):
    """Resample an image and produce a new image with the same size.

    For a list of available filters, see <a href="#resize">resize</a>.
    """
    rows, cols = image.shape[0], image.shape[1]
    return resize(image, cols, rows, filter, radius, wrapx, wrapy)
def transpose(source: np.ndarray):
    """Swap the row and column axes, leaving the channel axis in place."""
    return source.swapaxes(0, 1)
def create_ops(ntarget, nsource, filter: Filter, wrap) -> RowOps:
    # Generate a sequence of operations to perform a 1D convolution
    # where each operation is represented by 3-tuple of: target index,
    # source index, weight.  Weights for each target index are
    # normalized so they sum to 1.
    tindices, sindices, fweights = [], [], []
    # Work in a normalized [0, 1] coordinate space shared by source and
    # target; dtarget/dsource are the per-sample step sizes.
    dtarget = 1.0 / ntarget
    dsource = 1.0 / nsource
    minifying = ntarget < nsource
    # When minifying, the filter footprint scales with the target step so
    # the kernel covers multiple source samples; when magnifying it stays
    # at source resolution.
    fextent = dtarget if minifying else dsource
    fdomain = float(ntarget if minifying else nsource)
    # x is the center of the current target sample in [0, 1].
    x = dtarget / 2
    for tindex in range(ntarget):
        # Source-index window touched by the kernel centered at x.
        minx = x - filter.radius * fextent
        maxx = x + filter.radius * fextent
        minsi = int(minx * float(nsource))
        maxsi = int(math.ceil(maxx * float(nsource)))
        localops = []
        weightsum = 0.0
        for sindex in range(minsi, maxsi+1):
            wrapped = sindex
            if sindex < 0 or sindex >= nsource:
                # Out-of-range taps either wrap around (tiling) or are
                # skipped entirely (the remaining weights renormalize).
                if wrap:
                    wrapped = sindex % nsource
                else:
                    continue
            # Center of this source sample, and its distance from x in
            # filter-domain units.
            sx = (0.5 + sindex) * dsource
            t = fdomain * abs(sx - x)
            weight = filter.function(t)
            if weight != 0:
                localops.append((tindex, wrapped, weight))
                weightsum += weight
        # Emit the ops with weights normalized to unit sum.
        if weightsum > 0.0:
            for op in localops:
                tindices.append(op[0])
                sindices.append(op[1])
                fweights.append(op[2] / weightsum)
        x += dtarget
    return RowOps(tindices, sindices, fweights)
# numba guvectorize signature: target/source are float64 arrays of shape
# (rows, cols, channels); the index and weight vectors share one length (i).
SIG0 = "void(f8[:,:,:], f8[:,:,:], i4[:], i4[:], f8[:])"
SIG1 = "(r0,c0,d),(r0,c1,d),(i),(i),(i)"
@guvectorize([SIG0], SIG1, target='parallel')
def jit_convolve(target, source, tinds, sinds, weights):
    # Scatter-add each (target index, source index, weight) op into every
    # row and channel; duplicate target indices accumulate.
    nrows, nchan, nops = target.shape[0], target.shape[2], len(tinds)
    for c in range(nchan):
        for row in range(nrows):
            for op in range(nops):
                tind, sind, weight = tinds[op], sinds[op], weights[op]
                target[row][tind][c] += source[row][sind][c] * weight
def convolve(target, source, rowops: RowOps):
    # Perform highly generalized 1D convolution. This is almost
    # equivalent to:
    #
    #     for row in range(len(target)):
    #         target[row][tindices] += source[row][sindices] * fweights
    #
    # ...but with the crucial feature of allowing the same index to
    # appear multiple times in tindices.
    #
    # Note that standard numpy convolution assumes a stationary kernel,
    # whereas this function could possibly be used to apply a varying
    # kernel.
    tindices, sindices, fweights = rowops
    assert len(tindices) == len(sindices) == len(fweights)
    assert len(target) == len(source)
    # Convert the op lists to the dtypes declared in jit_convolve's
    # guvectorize signature (i4 indices, f8 weights).
    jit_convolve(target, source,
                 np.int32(tindices), np.int32(sindices),
                 np.double(fweights))
| nilq/baby-python | python |
# Generic imports
import os
import random
import shutil
from datetime import datetime
# Imports with probable installation required
try:
import progress.bar
except ImportError:
print('*** Missing required packages, I will install them for you ***')
os.system('pip3 install progress')
import progress.bar
# Custom imports
from python_tools.shapes.shapes_utils import *
from python_tools.meshes.meshes_utils import *
### ************************************************
### Generate full dataset
# Parameters
n_sampling_pts = 5
mesh_domain = False
plot_pts = True
n_shapes = 200
time = datetime.now().strftime('%Y-%m-%d_%H_%M_%S')
dataset_dir = 'dataset_'+time+'/'
mesh_dir = dataset_dir+'meshes/'
img_dir = dataset_dir+'images/'
filename = 'shape'
# Magnification handed to Shape.generate(). Fixed: the call below used to
# hard-code 2.0 while this variable sat unused at 1.0; the variable now
# carries the value actually in effect.
magnify = 2.0
xmin =-2.0
xmax = 2.0
ymin =-2.0
ymax = 2.0
n_tri_max = 5000
# Create directories if necessary
os.makedirs(mesh_dir, exist_ok=True)
os.makedirs(img_dir, exist_ok=True)
# Generate dataset
bar = progress.bar.Bar('Generating shapes', max=n_shapes)
for i in range(n_shapes):
    generated = False
    while not generated:
        # Draw a random shape spec: 3-7 control points with random radii
        # and edginess values.
        n_pts = random.randint(3, 7)
        radius = np.random.uniform(0.0, 1.0, size=n_pts)
        edgy = np.random.uniform(0.0, 1.0, size=n_pts)
        shape = Shape(filename+'_'+str(i),
                      None, n_pts, n_sampling_pts, radius, edgy)
        shape.generate(magnify=magnify,
                       xmin=xmin,
                       xmax=xmax,
                       ymin=ymin,
                       ymax=ymax)
        # Keep the shape only if meshing succeeded and the mesh is small
        # enough; otherwise retry with a fresh random shape.
        meshed, n_tri = shape.mesh()
        if meshed and (n_tri < n_tri_max):
            shape.generate_image(plot_pts=plot_pts,
                                 xmin=xmin,
                                 xmax=xmax,
                                 ymin=ymin,
                                 ymax=ymax)
            img = filename+'_'+str(i)+'.png'
            mesh = filename+'_'+str(i)+'.mesh'
            shutil.move(img, img_dir)
            shutil.move(mesh, mesh_dir)
            generated = True
    bar.next()
# End bar
bar.finish()
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 2 10:54:54 2021
@author: po-po
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os
#filename = r'C:\Users\po-po\Desktop\DOC\Fibras\Programas\data\dr2todr4e01121121.csv'
filename = r'C:\Users\po-po\Desktop\DOC\Fibras\Programas\data\drgodet\r5pa1dr2e0f10.csv'
clean_arr = pd.read_csv(filename)
file = str(os.path.splitext(os.path.basename(filename))[0])
#plot formating
params = {'figure.figsize': (6, 4),
          'font.size': 18,
          'font.sans-serif': 'Arial',
          'lines.linewidth': 2.0,
          'axes.linewidth': 1.5,
          'axes.formatter.use_mathtext': True,
          'axes.formatter.min_exponent': False,
          'axes.formatter.useoffset': False,
          'axes.grid': False,
          'axes.grid.axis': 'both',
          'xtick.minor.visible': True,
          'ytick.minor.visible': True,
          'xtick.direction': 'in',
          'xtick.top': True,
          'ytick.direction': 'in',
          'ytick.right': True,
          'xtick.major.size': 10,
          'xtick.minor.size': 5,
          'xtick.major.width': 1,
          'ytick.major.size': 10,
          'ytick.minor.size': 5,
          'ytick.major.width': 1,
          'legend.frameon': True,
          }
plt.rcParams.update(params)
fig = plt.figure()
#perform rolling average on pandas dataframe of clean data
interval = 100
clean_arr['Average'] = clean_arr['Diameter'].rolling(window = interval, center = True, min_periods = 1).mean()
clean_arr['Std'] = clean_arr['Diameter'].rolling(window = interval, center = True, min_periods = 1).std()
clean_arr['Clean'] = clean_arr.Diameter[(clean_arr['Diameter'] >= clean_arr['Average']-clean_arr['Std']) & (clean_arr['Diameter'] <= clean_arr['Average']+clean_arr['Std'])]
clean_arr['Dirty'] = clean_arr.Diameter[(clean_arr['Diameter'] <= clean_arr['Average']-clean_arr['Std']) | (clean_arr['Diameter'] >= clean_arr['Average']+clean_arr['Std'])]
clean_arr['CAverage'] = clean_arr['Clean'].rolling(window = interval, center = True, min_periods = 1).mean()
clean_arr['Marked'] = clean_arr.Time[clean_arr['Event Flag'] == 1]
#plot diameter array
# stflag == 1 plots only the cleaned data; any other value also overlays
# the +/- 1 std rejection band and the rejected samples.
stflag = 1
# Layers common to both modes (previously duplicated in both branches):
# cleaned samples, their rolling mean, and the event markers.
plt.plot(clean_arr['Time'],clean_arr['Clean'],'kx')
plt.plot(clean_arr['Time'],clean_arr['CAverage'],'b-')
plt.plot(clean_arr['Marked'], clean_arr['Event Flag'], 'go')
if stflag != 1:
    plt.plot(clean_arr['Time'],clean_arr['Average']-clean_arr['Std'],'r--')
    plt.plot(clean_arr['Time'],clean_arr['Average']+clean_arr['Std'],'r--')
    plt.plot(clean_arr['Time'],clean_arr['Dirty'],'rx')
plt.xlabel('Time (s)')
plt.ylabel('Fiber Diameter (um)')
plt.title('%s'%file)
plt.show()
| nilq/baby-python | python |
# Copyright (c) 2022 Aiven, Helsinki, Finland. https://aiven.io/
import sys
from unittest import mock
import pytest
from pghoard import postgres_command
def test_restore_command_error():
    """restore_command raises PGCError when the HTTP request returns status 500."""
    with mock.patch("pghoard.postgres_command.http_request", return_value=500):
        with pytest.raises(postgres_command.PGCError, match="Restore failed with HTTP status 500"):
            postgres_command.restore_command("foo", "123", "/tmp/xxx")
def test_postgres_command_archive_error():
    """A SystemExit from archive_command maps to EXIT_UNEXPECTED in archive mode."""
    args = ["postgres_command", "--site", "foo", "--xlog", "bar", "--mode", "archive"]
    with mock.patch.object(sys, "argv", args):
        with mock.patch("pghoard.postgres_command.archive_command", side_effect=SystemExit):
            assert postgres_command.main() == postgres_command.EXIT_UNEXPECTED
def test_postgres_command_restore_error():
    """A SystemExit from restore_command maps to EXIT_ABORT in restore mode."""
    args = ["postgres_command", "--site", "foo", "--xlog", "bar", "--mode", "restore"]
    with mock.patch.object(sys, "argv", args):
        with mock.patch("pghoard.postgres_command.restore_command", side_effect=SystemExit):
            assert postgres_command.main() == postgres_command.EXIT_ABORT
def test_postgres_command_archive_pgcerror():
    """A PGCError from archive_command propagates its exit_code as main()'s return value."""
    args = ["postgres_command", "--site", "foo", "--xlog", "bar", "--mode", "archive"]
    with mock.patch.object(sys, "argv", args):
        with mock.patch(
            "pghoard.postgres_command.archive_command", side_effect=postgres_command.PGCError(message="howdy", exit_code=42)
        ):
            assert postgres_command.main() == 42
| nilq/baby-python | python |
# This file is part of the Reference Data Repository (refdata).
#
# Copyright (C) 2021 New York University.
#
# refdata is free software; you can redistribute it and/or modify it under the
# terms of the MIT License; see LICENSE file for more details.
"""Loader implementation for datasets that are given in Json format."""
from typing import Any, Dict, IO, List
import json
from refdata.base import FormatDescriptor
from refdata.dataset.consumer import DataConsumer
from refdata.dataset.loader import DatasetLoader
class JsonLoader(DatasetLoader):
    """Dataset loader for Json files. The dataset is assumed to be a list of
    dictionaries, where each dictionary represents one row in the dataset.
    This list of dictionaries may be contained in another dictionary. In this
    case the target path in the format settings references the list element.
    For each column, the column identifier from the dataset schema is
    expected to be the query path to extract the respective cell value from
    a dictionary representing a dataset row. This default behavior can be
    overridden by including an object {'id': 'column-id', 'path': 'query path'}
    for that column in the 'sources' element of the format settings.
    The Json loader considers the following settings:
    - target (string): Path to the list element containing the data row
      dictionaries (default='').
    - sources (list): List of {'id', 'path'}-pairs defining the query path
      used to extract cell values for individual columns.
    """
    def __init__(self, parameters: FormatDescriptor):
        """Initialize the format settings.
        Parameters
        ----------
        parameters: refdata.base.FormatDescriptor
            Dataset format specification.
        """
        # Set the target query to extract the dataset rows from the document.
        self.target = JQuery(parameters.get('target', ''))
        # Create mapping of column identifiers to their source path for the
        # columns that have a source path that is different from their
        # identifier. For columns with no entry in the 'sources' list
        # the source path is expected to be the column identifier.
        self.source_map = {s['id']: s['path'] for s in parameters.get('sources', dict())}
    def read(self, file: IO, columns: List[str], consumer: DataConsumer) -> DataConsumer:
        """Read dataset rows from a given file handle.
        Assumes that the file contains a Json object. This method first extracts
        the list of dataset row objects from the Json object in the file. It
        then creates a dataset row from each object based on the source path for
        each column in the given column list.
        If no source path was specified for a column, the column identifier
        itself is used as the query path. Rows are drawn from the list of
        objects identified by the target path that was defined in the
        dataset format.
        Parameters
        ----------
        file: file object
            Open file object.
        columns: list of string
            Column identifier defining the content and the schema of the
            returned data.
        consumer: refdata.dataset.consumer.DataConsumer
            Consumer for data rows that are being read.
        Returns
        -------
        list of list
        """
        # Create the list of source queries for each column in the resulting
        # dataset rows. Use the column to source mapping that was created from
        # the format parameters when the object was instantiated. By default,
        # the column identifier is used as the query path.
        sources = list()
        for col in columns:
            sources.append(JQuery(self.source_map.get(col, col)))
        for doc in self.target.find(json.load(file)):
            consumer.consume([q.find(doc) for q in sources])
        return consumer
return consumer
# -- Helper Functions ---------------------------------------------------------
class JQuery:
    """Evaluate '/'-separated path expressions against nested dictionaries."""

    def __init__(self, path: str):
        """Initialize the query path. The path is a string with individual
        path components separated by '/'.

        Parameters
        ----------
        path: string
            Query path expression.
        """
        # Normalize away trailing slashes, then split into components.
        # An empty path yields an empty component list (identity query).
        normalized = path.rstrip('/')
        self.path = normalized.split('/') if normalized else []

    def find(self, doc: Dict[str, Any]) -> Any:
        """Get the element at the query path in the given nested dictionary.
        Returns None if the query path does not identify an element in the
        given dictionary.

        Parameters
        ----------
        doc: dict
            Nested dictionary object.

        Returns
        -------
        any
        """
        # Walk the path one component at a time. Hitting a non-dictionary
        # value before the path is exhausted means the path does not match.
        current = doc
        for key in self.path:
            if not isinstance(current, dict):
                return None
            current = current.get(key)
        return current
| nilq/baby-python | python |
from django.test import TestCase
from whats_fresh.whats_fresh_api.models import Video
from django.contrib.gis.db import models
class VideoTestCase(TestCase):
    """Schema tests for the Video model: the exact set of fields, their
    Django field types, auto-managed timestamps, and optional fields."""
    def setUp(self):
        # Every field the model is expected to declare, mapped to the
        # Django field class implementing it. 'story' is the reverse side
        # of a relation, hence RelatedObject.
        self.expected_fields = {
            'video': models.URLField,
            'caption': models.TextField,
            'name': models.TextField,
            'created': models.DateTimeField,
            'modified': models.DateTimeField,
            'story': models.related.RelatedObject,
            'id': models.AutoField
        }
        # Fields that must be declared with blank=True.
        self.optional_fields = {
            'caption'
        }
    def test_fields_exist(self):
        """Every expected field exists with the expected field type."""
        model = models.get_model('whats_fresh_api', 'Video')
        for field, field_type in self.expected_fields.items():
            self.assertEqual(
                field_type, type(model._meta.get_field_by_name(field)[0]))
    def test_no_additional_fields(self):
        """The model declares no fields beyond the expected set."""
        fields = Video._meta.get_all_field_names()
        self.assertTrue(sorted(fields) == sorted(self.expected_fields.keys()))
    def test_created_modified_fields(self):
        """'modified' updates on save; 'created' is set only on insert."""
        self.assertTrue(Video._meta.get_field('modified').auto_now)
        self.assertTrue(Video._meta.get_field('created').auto_now_add)
    def test_optional_fields(self):
        """All optional fields are declared blank=True."""
        for field in self.optional_fields:
            self.assertEqual(
                Video._meta.get_field_by_name(field)[0].blank, True)
| nilq/baby-python | python |
import os
import json
import torch
import numpy as np
from PIL import Image
import copy
import os
import logging
from detectron2.data import detection_utils as utils
from ..registry import DATASOURCES
from .load_coco import load_coco_json
@DATASOURCES.register_module
class COCO_BOXES(object):
    """Data source over COCO-style annotations loaded from a json file."""

    def __init__(self, root, json_file, max_box_num, image_format='RGB', *args, **kwargs):
        if json_file.endswith('instances_train2017.json'):
            logging.critical('Using ground-truth for pre-training, please use selective search result!')
        self.data_dicts = load_coco_json(json_file, root)
        self.image_format = image_format
        self.max_box_num = max_box_num

    def get_length(self):
        return len(self.data_dicts)

    def __len__(self):
        return self.get_length()

    def get_sample(self, idx):
        # Deep-copy so the cached record is never mutated below.
        record = copy.deepcopy(self.data_dicts[idx])
        # Drop crowd annotations, then convert the rest to Instances.
        annos = [a for a in record.pop("annotations") if a.get("iscrowd", 0) == 0]
        instances = utils.annotations_to_instances(
            annos, (record['height'], record['width']))
        record["instances"] = utils.filter_empty_instances(instances)
        return record
from __future__ import absolute_import, division, print_function
import os
from pdfx import cli
# import pytest
# Absolute path of the directory containing this test file.
curdir = os.path.dirname(os.path.realpath(__file__))
def test_cli():
    """The CLI parser accepts -j (json output) plus a positional pdf path."""
    parser = cli.create_parser()
    parsed = parser.parse_args(['-j', 'pdfs/valid.pdf'])
    assert parsed.json
    assert parsed.pdf == "pdfs/valid.pdf"
| nilq/baby-python | python |
#!/usr/bin/env python
#
# This script is experimental.
#
# Liang Wang @ Dept. Computer Science, University of Helsinki
# 2011.09.21
#
import os, sys
import socket
import pickle
import random
import Queue
import time
import threading
import resource
from khash import *
from bencode import bencode, bdecode
from common import *
MYPORT = 6882            # UDP port this maintainer binds and communicates on
ACTIVE_THRESHOLD = 2000  # Minimum number of nodes to keep in nodePool
REFRESH_LIMIT = 60       # Seconds before a node must be refreshed (pinged)
class Maintainer(object):
    """Maintains a pool of live DHT nodes.

    A listener thread collects KRPC responses, a sender thread probes
    queued candidate nodes with find_node, and the main loop periodically
    bootstraps, evicts dead nodes and caches stable ones to disk.

    NOTE(review): this is Python 2 code (``except Exception, err`` syntax,
    the ``Queue`` module, print statements) and must run under Python 2.
    """
    def __init__(self, id = None):
        self.id = id if id else newID()         # Maintainer's ID
        self.noisy = True                       # Output extra info or not
        self.krpc = KRPC()                      # Simple KRPC translator
        self.nodePool = {}                      # Dict of the nodes collected
        self.nodePool_lock = threading.Lock()
        self.nodeQueue = Queue.Queue(0)         # Queue of the nodes to scan
        self.startTime = time.time()            # Time start the crawler
        self.respondent = 0                     # Number of respondent
        self.isock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.isock.bind( ("",MYPORT) )
        self.isock_lock = threading.Lock()
        pass
    def addNode(self, node, ip):
        """Insert *node* into the pool, or refresh its lastupdate stamp.

        New nodes are only accepted if neither their id nor their IP is
        already present; existing nodes are only refreshed when the
        message really came from the node's own address.
        """
        self.nodePool_lock.acquire()
        try:
            now = time.time()
            # Generate Ip pool
            IPs = [ x["host"] for x in self.nodePool.values() ]
            if node["id"] not in self.nodePool:
                if ip not in IPs:
                    node["timestamp"] = now
                    # Backdate lastupdate so the node is ping-eligible
                    # on the next scan.
                    node["lastupdate"] = now - REFRESH_LIMIT
                    self.nodePool[node['id']] = node
            else:
                node = self.nodePool[node['id']]
                # only update the lastupdate if the message is from node itself
                if ip==node["host"]:
                    node["lastupdate"] = now
                    self.nodePool[node['id']] = node
        except Exception, err:
            print "Exception:Maintainer.addNode()", err
        self.nodePool_lock.release()
        pass
    def bootstrap(self):
        """Whenever the number of nodes in nodePool drops below the threshold,
        use this function to get more nodes."""
        self.nodePool_lock.acquire()
        try:
            if len(self.nodePool) == 0:
                # Empty pool: fall back to the well-known bootstrap router.
                self.findNode("router.bittorrent.com", 6881, self.id)
            else:
                # Ask every known node for nodes close to a random target.
                for n in self.nodePool.values():
                    self.findNode(n["host"], n["port"], newID(), n["id"])
        except Exception, err:
            print "Exception:Maintainer.bootstrap()", err
        self.nodePool_lock.release()
        pass
    def findNode(self, host, port, target, rID = None):
        """Send a KRPC find_node query for *target* to host:port."""
        msg = self.krpc.encodeReq("find_node", {"id":self.id, "target":target})
        self.sendMsg(msg, (host,port))
        pass
    def ping(self, host, port):
        """Send a KRPC ping query to host:port."""
        msg = self.krpc.encodeReq("ping", {"id":self.id})
        self.sendMsg(msg, (host,port))
        pass
    def pingNodes(self, nodes):
        """Ping every node in *nodes*, ignoring individual failures."""
        for node in nodes:
            try:
                self.ping(node['host'], node['port'])
            except Exception, err:
                print "Exception:Maintainer.pingNodes():", err
        pass
    def processNodes(self, nodes):
        """Queue newly discovered nodes for scanning and add them to the pool."""
        timestamp = time.time()
        for node in nodes:
            id = node["id"]
            if id not in self.nodePool:
                if id != self.id:
                    self.nodeQueue.put(node)
                    self.addNode(node, node["host"])
        pass
    def scan_nodePool(self):
        """Kick out the dead nodes"""
        print "scan the nodePool"
        now = time.time()
        self.nodePool_lock.acquire()
        for n in self.nodePool.values():
            try:
                t = now - n["lastupdate"]
                # Stale for one interval: ping it; stale for two: evict it.
                if t >= REFRESH_LIMIT and t < 2*REFRESH_LIMIT:
                    self.ping(n["host"], n["port"])
                elif t >= 2*REFRESH_LIMIT:
                    self.nodePool.pop(n["id"])
                    print "kick out %s:%i" % (n["host"], n["port"])
            except Exception, err:
                print "Exception:Maintainer.scan_nodePool():", err, n
        self.nodePool_lock.release()
        pass
    def sendMsg(self, msg, addr):
        """Send a raw datagram under the socket lock; errors are swallowed."""
        self.isock_lock.acquire()
        try:
            self.isock.sendto(msg, addr)
        except:
            pass
        self.isock_lock.release()
        pass
    def serialize(self):
        """Cache a random sample of the oldest (most stable) nodes to disk."""
        tmp = []
        obj = []
        self.nodePool_lock.acquire()
        try:
            # Choose those stable nodes to cache
            tmp = self.nodePool.values()
            tmp.sort(key=lambda x: x["timestamp"])
            tmp = tmp[:500]
            tmp = random.sample(tmp, min(100,len(tmp)))
            # Cache the nodes
            obj = []
            for v in tmp:
                try:
                    n = {}
                    n["id"] = v["id"]
                    n["host"] = v["host"]
                    n["port"] = v["port"]
                    n["timestamp"] = v["timestamp"]
                    n["lastupdate"] = v["lastupdate"]
                    obj.append(n)
                except Exception, err:
                    print "Exception:Maintainer.serialize():loop:", err
        except Exception, err:
            print "Exception:Maintainer.serialize():", err
        self.nodePool_lock.release()
        # NOTE(review): raises IndexError if tmp is empty (empty pool).
        print "longest", time.time()-tmp[0]["timestamp"]
        f = open("nodescache", "w")
        pickle.Pickler(f).dump(obj)
        f.close()
        pass
    def start_listener(self):
        """Receive loop: decode KRPC responses, harvest nodes and ids."""
        while True:
            try:
                msg, addr = self.isock.recvfrom(PACKET_LEN)
                msgTID, msgType, msgContent = self.krpc.decodeRsp(msg)
                # Harvest compact node lists while the pool is not saturated.
                if msgType==RSP and "nodes" in msgContent:
                    if len(self.nodePool) < 2*ACTIVE_THRESHOLD:
                        self.processNodes(unpackNodes(msgContent["nodes"]))
                # Any response carrying an id counts as a live respondent.
                if msgType==RSP and "id" in msgContent:
                    id = msgContent["id"]
                    if id != self.id:
                        if id in self.nodePool or len(self.nodePool) < 2*ACTIVE_THRESHOLD:
                            self.addNode( {"id":id, "host":addr[0], "port":addr[1]}, addr[0] )
                        self.respondent += 1
            except Exception, err:
                print "Exception:Maintainer.listener():", err
        pass
    def start_sender(self):
        """Drain the node queue, probing candidates while the pool has room."""
        while True:
            try:
                # Check are there any nodes in the queue waiting for processing
                node = self.nodeQueue.get(True)
                if node and len(self.nodePool)<1.5*ACTIVE_THRESHOLD:
                    self.findNode(node["host"], node["port"], newID(), node["id"])
            except Exception, err:
                print "Exception:Maintainer.start_sender()", err
        pass
    def start_service(self):
        """Run listener and sender as daemon threads plus the periodic
        bootstrap/scan/cache loop; Ctrl-C exits the loop."""
        t1 = threading.Thread(target=self.start_listener, args=())
        t1.daemon = True
        t1.start()
        t2 = threading.Thread(target=self.start_sender, args=())
        t2.daemon = True
        t2.start()
        while True:
            try:
                now = time.time()
                # Should we request more nodes?
                if int(now)%10==0 and len(self.nodePool)<ACTIVE_THRESHOLD:
                    self.bootstrap()
                # Scan nodePool, kick out the dead node
                if int(now)%15==0:
                    self.scan_nodePool()
                # Cache the nodes to file
                if int(now)%300==0:
                    self.serialize()
                self.info()
                time.sleep(1)
            except KeyboardInterrupt:
                break
            except Exception, err:
                print "Exception:Maintainer.start_service()", err
        pass
    def info(self):
        """Print a one-line pool/queue/respondent status summary."""
        print "[NodeSet]:%i\t\t[Queue]:%i\t\t[Response]:%i" % \
            (len(self.nodePool), self.nodeQueue.qsize(), self.respondent)
        pass
    def convergeSpeed(self,node):
        # NOTE(review): self.tn, self.tnold and self.tntold are never
        # initialized in __init__, so calling this would raise
        # AttributeError -- the method appears vestigial.
        if (distance(self.id, node["id"])>>148)==0:
            self.tn += 1
        if (time.time()-self.tntold) >= 5:
            self.tnspeed = int((self.tn-self.tnold)/(time.time()-self.tntold))
            self.tnold = self.tn
            self.tntold = time.time()
        pass
# Entry point: run the maintainer service until interrupted, then report
# the total runtime in minutes.
if __name__=="__main__":
    now = time.time()
    maintainer = Maintainer()
    maintainer.start_service()
    print "%.2f minutes" % ((time.time() - now)/60.0)
    pass
| nilq/baby-python | python |
import itertools
import json
import logging
import os
import subprocess
import sys
import warnings
from collections import OrderedDict
from pathlib import Path
import click
import pip_api
import requests
from cachecontrol import CacheControl
# from pipdownload.settings import SETTINGS_FILE
from pipdownload import logger, settings
from pipdownload.utils import (
TempDirectory,
download as normal_download,
get_file_links,
mkurl_pypi_url,
quiet_download,
resolve_package_file,
)
from tzlocal import get_localzone
# Shared HTTP session wrapped with CacheControl so repeated index requests
# can be served from the HTTP cache.
sess = requests.Session()
session = CacheControl(sess)
@click.command()
@click.argument("packages", nargs=-1)
@click.option(
"-i",
"--index-url",
"index_url",
default="https://pypi.org/simple",
type=click.STRING,
help="Pypi index.",
)
@click.option(
"-r",
"--requirement",
"requirement_file",
type=click.Path(exists=True, file_okay=True, resolve_path=True),
help="Requirements File.",
)
@click.option(
"-d",
"--dest",
"dest_dir",
type=click.Path(exists=False, file_okay=False, writable=True, resolve_path=True),
help="Destination directory.",
)
@click.option(
"-s",
"--suffix",
"whl_suffixes",
type=click.STRING,
multiple=True,
hidden=True,
help="Suffix of whl packages except `none-any` `tar.gz` `zip`.\n"
'Deprecated, Using "-p/--platform-tag instead!"',
)
@click.option(
"-p",
"--platform-tag",
"platform_tags",
type=click.STRING,
multiple=True,
help="Suffix of whl packages except 'none-any', like 'win_amd64', 'manylinux1_x86_64', 'linux_i386' "
"and so on. It can be specified multiple times. This is an option to replace option 'suffix'. "
"You can even specify 'manylinux' to download packages contain 'manylinux1_x86_64', "
"'manylinux2010_x84_64', 'manylinux2014_x86_64'.",
)
@click.option(
"-py",
"--python-version",
"python_versions",
type=click.STRING,
multiple=True,
help="Version of python to be downloaded. More specifically, this is the abi tag of the Python package. "
"It can be specified multiple times. Like: 'cp38', 'cp37', 'cp36', 'cp35', 'cp27' and so on.",
)
@click.option(
"-q",
"--quiet",
is_flag=True,
help="When specified, logs and progress bar will not be shown.",
)
@click.option(
"--no-source",
"no_source",
is_flag=True,
help="When specified, the source package of the project that provides wheel package will not be "
"downloaded.",
)
@click.option(
"--show-config",
"show_config",
is_flag=True,
help="When specified, the config file will be created if not exists and the path will be shown later.",
)
@click.option(
"--show-urls",
"show_urls",
is_flag=True,
help=("When specified, all of downloaded urls will be printed as an report list, with library name before them. " +
"For use in other tools for checking the libraries."),
)
def pipdownload(
    packages,
    index_url,
    requirement_file,
    dest_dir,
    whl_suffixes,
    platform_tags,
    python_versions,
    quiet,
    no_source,
    show_config,
    show_urls
):
    """
    pip-download is a tool which can be used to download python projects and their dependencies listed on
    pypi's `download files` page. It can be used to download Python packages across system platforms and
    Python versions.

    Returns the list of entries collected for the --show-urls report.
    """
    # --show-config: ensure the JSON config file exists, print its path, exit.
    if show_config:
        if not Path(settings.SETTINGS_FILE).exists():
            Path(settings.SETTINGS_FILE).parent.mkdir(parents=True, exist_ok=True)
            # Path(SETTINGS_FILE).touch()
            with open(settings.SETTINGS_FILE, "w", encoding="utf8") as f:
                json.dump({}, f)
        click.echo(f"The config file is {settings.SETTINGS_FILE}.")
        sys.exit(0)
    # Load defaults from the config file, but only for options the user
    # did not pass on the command line.
    if Path(settings.SETTINGS_FILE).exists():
        with open(settings.SETTINGS_FILE, "r") as f:
            try:
                settings_dict = json.loads(f.read(), object_pairs_hook=OrderedDict)
            except json.decoder.JSONDecodeError:
                logger.error(
                    f"The config file {settings.SETTINGS_FILE} is not correct, it should be a json object."
                )
                sys.exit(-2)
        if not python_versions:
            python_versions = settings_dict.get("python-versions", None)
            if python_versions:
                click.echo(f"Using `python-versions` in config file.")
        if not (platform_tags or whl_suffixes):
            platform_tags = settings_dict.get("platform-tags", None)
            if platform_tags:
                click.echo(f"Using `platform-tags` in config file.")
    # Network optimization: prefer the Aliyun PyPI mirror for mainland-China
    # timezones.
    tz = get_localzone()
    if tz.zone in ["Asia/Shanghai", "Asia/Chongqing"]:
        index_url = "https://mirrors.aliyun.com/pypi/simple/"
    # Back-compat: the deprecated -s/--suffix values are treated as
    # platform tags.
    if whl_suffixes:
        warnings.warn(
            "Option '-s/--suffix' has been deprecated. Please use '-p/--platform-tag' instead."
        )
        platform_tags = whl_suffixes
    if quiet:
        logger.setLevel(logging.ERROR)
        download = quiet_download
    else:
        download = normal_download
    url_list = []
    if not dest_dir:
        dest_dir = os.getcwd()
    else:
        if not os.path.exists(dest_dir):
            os.makedirs(dest_dir)
        # dest_dir = os.path.abspath(dest_dir)
    # Merge packages from the -r requirements file with positional args.
    if requirement_file:
        packages_extra_dict = pip_api.parse_requirements(requirement_file)
        packages_extra = {str(value) for value in packages_extra_dict.values()}
    else:
        packages_extra = set()
    for package in itertools.chain(packages_extra, packages):
        # Let `pip download` resolve the dependency closure into a temp dir,
        # then re-fetch every matching distribution file per resolved package.
        with TempDirectory(delete=True) as directory:
            logger.info(
                "We are using pip download command to download package %s" % package
            )
            logger.info("-" * 50)
            try:
                command = [
                    sys.executable,
                    "-m",
                    "pip",
                    "download",
                    "-i",
                    index_url,
                    "--dest",
                    directory.path,
                    package,
                ]
                if quiet:
                    command.extend(["--progress-bar", "off", "-qqq"])
                subprocess.check_call(command)
            except subprocess.CalledProcessError as e:
                logger.error(
                    "Sorry, we can not use pip download to download the package %s,"
                    " and Exception is below" % package
                )
                logger.error(e)
                raise
            file_names = os.listdir(directory.path)
            for file_name in file_names:
                python_package = resolve_package_file(file_name)
                # NOTE(review): this appends the resolved package object (not
                # a URL string) to url_list, which otherwise holds URL
                # strings — the --show-urls report mixes types; confirm intent.
                url_list.append(python_package)
                if python_package.name is None:
                    logger.warning(
                        "Can not resolve a package's name and version from a downloaded package. You shuold "
                        "create an issue maybe."
                    )
                    continue
                url = mkurl_pypi_url(index_url, python_package.name)
                try:
                    r = session.get(url)
                    for file in get_file_links(r.text, url, python_package):
                        url_list.append(file)
                        # Pure-python wheels: download unless filtered by abi.
                        if "none-any" in file:
                            # NOTE(review): `file_name` below is the loop
                            # variable of the listdir loop above, not `file`
                            # — looks like it should be `file`; confirm.
                            if "py2.py3" in file_name or not python_versions:
                                download(file, dest_dir)
                            elif [1 for x in python_versions if "-"+x+"-" in file]:
                                download(file, dest_dir)
                            continue
                        # Source distributions, unless --no-source was given.
                        if ".tar.gz" in file or ".zip" in file:
                            if not no_source:
                                download(file, dest_dir)
                            continue
                        # Platform-specific wheels: must match one requested
                        # platform tag AND (if given) one python abi tag.
                        eligible = True
                        if platform_tags:
                            for tag in platform_tags:
                                if tag in file:
                                    eligible = True
                                    break
                                else:
                                    eligible = False
                        if not eligible:
                            continue
                        if python_versions:
                            for version in python_versions:
                                if version in file:
                                    eligible = True
                                    break
                                else:
                                    eligible = False
                        if eligible:
                            download(file, dest_dir)
                except ConnectionError as e:
                    logger.error(
                        "Can not get information about package %s, and the Exception is below.",
                        python_package.name,
                    )
                    logger.error(e)
                    raise
    logger.info("All packages have been downloaded successfully!")
    # --show-urls: raise the log level back to INFO (even under --quiet)
    # and dump everything collected; error() is used for the header so it
    # is visible at any level.
    if show_urls:
        logger.setLevel(logging.INFO)
        logger.error("List of files downloaded :")
        for entry in url_list:
            logger.info(entry)
    return url_list
if __name__ == "__main__":
    # Click injects the CLI arguments at call time.
    pipdownload()
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
# Copyright (C) 2013-2017 Oliver Ainsworth
# Modifications (remove py2) by (C) Stefan Tapper 2021
import enum
import itertools
import rf2settings.valve
from rf2settings.valve import messages, util
# Numeric region codes used by the Source master-server query protocol.
REGION_US_EAST_COAST = 0x00
REGION_US_WEST_COAST = 0x01
REGION_SOUTH_AMERICA = 0x02
REGION_EUROPE = 0x03
REGION_ASIA = 0x04
REGION_AUSTRALIA = 0x05
REGION_MIDDLE_EAST = 0x06
REGION_AFRICA = 0x07
# Catch-all for servers not classified into any region above.
REGION_REST = 0xFF

# Default Valve master server endpoint as a (host, port) pair.
MASTER_SERVER_ADDR = ("hl2master.steampowered.com", 27011)
class Duplicates(enum.Enum):
    """How duplicate addresses from the master server are handled.

    Pass one of these members (or its string value) as the ``duplicates``
    argument of :meth:`MasterServerQuerier.find`.
    """

    KEEP = "keep"  # yield every address, duplicates included
    SKIP = "skip"  # silently drop addresses already seen
    STOP = "stop"  # end iteration at the first duplicate
class MasterServerQuerier(rf2settings.valve.BaseQuerier):
    """Implements the Source master server query protocol

    https://developer.valvesoftware.com/wiki/Master_Server_Query_Protocol

    .. note::
        Instantiating this class creates a socket. Be sure to close the
        querier once finished with it. See :class:`rf2settings.valve.BaseQuerier`.
    """

    def __init__(self, address=MASTER_SERVER_ADDR, timeout=10.0):
        super(MasterServerQuerier, self).__init__(address, timeout)

    def __iter__(self):
        """An unfiltered iterator of all Source servers

        This will issue a request for an unfiltered set of server addresses
        for each region. Addresses are received in batches but returning
        a completely unfiltered set will still take a long time and be
        prone to timeouts.

        .. note::
            If a request times out then the iterator will terminate early.
            Previous versions would propagate a :exc:`NoResponseError`.

        See :meth:`.find` for making filtered requests.
        """
        return self.find(region="all")

    def _query(self, region, filter_string):
        """Issue a request to the master server

        Returns a generator which yields ``(host, port)`` addresses as
        returned by the master server.

        Addresses are returned in batches therefore multiple requests may be
        dispatched. Because of this any of these requests may result in a
        :exc:`NoResponseError` raised. In such circumstances the iterator
        will exit early. Otherwise the iteration continues until the final
        address is reached which is indicated by the master server returning
        a 0.0.0.0:0 address.

        .. note::
            The terminating 0.0.0.0:0 is not yielded by the iterator.

        ``region`` should be a valid numeric region identifier and
        ``filter_string`` should be a formatted filter string as described
        on the Valve developer wiki:

        https://developer.valvesoftware.com/wiki/Master_Server_Query_Protocol#Filter
        """
        last_addr = "0.0.0.0:0"
        first_request = True
        # Keep paging until the server sends the 0.0.0.0:0 terminator; each
        # request resumes from the last address of the previous batch.
        while first_request or last_addr != "0.0.0.0:0":
            first_request = False
            self.request(messages.MasterServerRequest(
                region=region, address=last_addr, filter=filter_string))
            try:
                raw_response = self.get_response()
            except rf2settings.valve.NoResponseError:
                # A timed-out batch ends the iteration early rather than
                # propagating the error.
                return
            else:
                response = messages.MasterServerResponse.decode(raw_response)
                for address in response["addresses"]:
                    last_addr = "{}:{}".format(
                        address["host"], address["port"])
                    # The null (terminator) address is consumed but never
                    # yielded to the caller.
                    if not address.is_null:
                        yield address["host"], address["port"]

    def _deduplicate(self, method, query):
        """Deduplicate addresses in a :meth:`._query`.

        The given ``method`` should be a :class:`Duplicates` object. The
        ``query`` is an iterator as returned by :meth:`._query`.
        """
        seen = set()
        if method is Duplicates.KEEP:
            # No tracking needed; pass everything through.
            for address in query:
                yield address
        else:
            for address in query:
                if address in seen:
                    if method is Duplicates.SKIP:
                        continue
                    elif method is Duplicates.STOP:
                        break
                yield address
                seen.add(address)

    def _map_region(self, region):
        """Convert string to numeric region identifier

        If given a non-string then a check is performed to ensure it is a
        valid region identifier. If it's not, ValueError is raised.

        Returns a list of numeric region identifiers.
        """
        if isinstance(region, str):
            try:
                regions = {
                    "na-east": [REGION_US_EAST_COAST],
                    "na-west": [REGION_US_WEST_COAST],
                    "na": [REGION_US_EAST_COAST, REGION_US_WEST_COAST],
                    "sa": [REGION_SOUTH_AMERICA],
                    "eu": [REGION_EUROPE],
                    "as": [REGION_ASIA, REGION_MIDDLE_EAST],
                    "oc": [REGION_AUSTRALIA],
                    "af": [REGION_AFRICA],
                    "rest": [REGION_REST],
                    "all": [REGION_US_EAST_COAST,
                            REGION_US_WEST_COAST,
                            REGION_SOUTH_AMERICA,
                            REGION_EUROPE,
                            REGION_ASIA,
                            REGION_AUSTRALIA,
                            REGION_MIDDLE_EAST,
                            REGION_AFRICA,
                            REGION_REST],
                }[region.lower()]
            except KeyError:
                # (sic: "identifer" typo is in the runtime message and is
                # preserved byte-for-byte.)
                raise ValueError(
                    "Invalid region identifer {!r}".format(region))
        else:
            # Just assume it's an integer identifier, we'll validate below
            regions = [region]
        for reg in regions:
            if reg not in {REGION_US_EAST_COAST,
                           REGION_US_WEST_COAST,
                           REGION_SOUTH_AMERICA,
                           REGION_EUROPE,
                           REGION_ASIA,
                           REGION_AUSTRALIA,
                           REGION_MIDDLE_EAST,
                           REGION_AFRICA,
                           REGION_REST}:
                raise ValueError("Invalid region identifier {!r}".format(reg))
        return regions

    def find(self, region="all", duplicates=Duplicates.SKIP, **filters):
        """Find servers for a particular region and set of filtering rules

        This returns an iterator which yields ``(host, port)`` server
        addresses from the master server.

        ``region`` specifies what regions to restrict the search to. It can
        either be a ``REGION_`` constant or a string identifying the region.
        Alternately a list of the strings or ``REGION_`` constants can be
        used for specifying multiple regions.

        The following region identification strings are supported:

        +---------+-----------------------------------------+
        | String  | Region(s)                               |
        +=========+=========================================+
        | na-east | East North America                      |
        +---------+-----------------------------------------+
        | na-west | West North America                      |
        +---------+-----------------------------------------+
        | na      | East North American, West North America |
        +---------+-----------------------------------------+
        | sa      | South America                           |
        +---------+-----------------------------------------+
        | eu      | Europe                                  |
        +---------+-----------------------------------------+
        | as      | Asia, the Middle East                   |
        +---------+-----------------------------------------+
        | oc      | Oceania/Australia                       |
        +---------+-----------------------------------------+
        | af      | Africa                                  |
        +---------+-----------------------------------------+
        | rest    | Unclassified servers                    |
        +---------+-----------------------------------------+
        | all     | All of the above                        |
        +---------+-----------------------------------------+

        .. note::
            "``rest``" corresponds to all servers that don't fit with any
            other region. What causes a server to be placed in this region
            by the master server isn't entirely clear.

        The region strings are not case sensitive. Specifying an invalid
        region identifier will raise a ValueError.

        As well as region-based filtering, alternative filters are supported
        which are documented on the Valve developer wiki.

        https://developer.valvesoftware.com/wiki/Master_Server_Query_Protocol#Filter

        This method accepts keyword arguments which are used for building the
        filter string that is sent along with the request to the master server.
        Below is a list of all the valid keyword arguments:

        +------------+-------------------------------------------------------+
        | Filter     | Description                                           |
        +============+=======================================================+
        | type       | Server type, e.g. "dedicated". This can be a          |
        |            | ``ServerType`` instance or any value that can be      |
        |            | converted to a ``ServerType``.                        |
        +------------+-------------------------------------------------------+
        | secure     | Servers using Valve anti-cheat (VAC). This should be  |
        |            | a boolean.                                            |
        +------------+-------------------------------------------------------+
        | gamedir    | A string specifying the mod being ran by the server.  |
        |            | For example: ``tf``, ``cstrike``, ``csgo``, etc..     |
        +------------+-------------------------------------------------------+
        | map        | Which map the server is running.                      |
        +------------+-------------------------------------------------------+
        | linux      | Servers running on Linux. Boolean.                    |
        +------------+-------------------------------------------------------+
        | empty      | Servers which are not empty. Boolean.                 |
        +------------+-------------------------------------------------------+
        | full       | Servers which are full. Boolean.                      |
        +------------+-------------------------------------------------------+
        | proxy      | SourceTV relays only. Boolean.                        |
        +------------+-------------------------------------------------------+
        | napp       | Servers not running the game specified by the given   |
        |            | application ID. E.g. ``440`` would exclude all TF2    |
        |            | servers.                                              |
        +------------+-------------------------------------------------------+
        | noplayers  | Servers that are empty. Boolean                       |
        +------------+-------------------------------------------------------+
        | white      | Whitelisted servers only. Boolean.                    |
        +------------+-------------------------------------------------------+
        | gametype   | Server which match *all* the tags given. This should  |
        |            | be set to a list of strings.                          |
        +------------+-------------------------------------------------------+
        | gamedata   | Servers which match *all* the given hidden tags.      |
        |            | Only applicable for L4D2 servers.                     |
        +------------+-------------------------------------------------------+
        | gamedataor | Servers which match *any* of the given hidden tags.   |
        |            | Only applicable to L4D2 servers.                      |
        +------------+-------------------------------------------------------+

        .. note::
            Your mileage may vary with some of these filters. There's no
            real guarantee that the servers returned by the master server will
            actually satisfy the filter. Because of this it's advisable to
            explicitly check for compliance by querying each server
            individually. See :mod:`rf2settings.valve.a2s`.

        The master server may return duplicate addresses. By default, these
        duplicates are excluded from the iterator returned by this method.
        See :class:`Duplicates` for controlling this behaviour.
        """
        # Normalise the region argument to a flat list of numeric codes.
        if isinstance(region, (int, str)):
            regions = self._map_region(region)
        else:
            regions = []
            for reg in region:
                regions.extend(self._map_region(reg))
        # Convert the keyword filters into the protocol's string values.
        filter_ = {}
        for key, value in filters.items():
            if key in {"secure", "linux", "empty",
                       "full", "proxy", "noplayers", "white"}:
                # Boolean filters are encoded as 0/1.
                value = int(bool(value))
            elif key in {"gametype", "gamedata", "gamedataor"}:
                # Tag-list filters become a comma-separated string; an
                # empty list drops the filter entirely.
                value = [str(elt)
                         for elt in value if str(elt)]
                if not value:
                    continue
                value = ",".join(value)
            elif key == "napp":
                value = int(value)
            elif key == "type":
                # Accept either a ServerType or anything coercible to one;
                # the wire format wants the single-character code.
                if not isinstance(value, util.ServerType):
                    value = util.ServerType(value).char
                else:
                    value = value.char
            filter_[key] = str(value)
        # Order doesn't actually matter, but it makes testing easier
        filter_ = sorted(filter_.items(), key=lambda pair: pair[0])
        filter_string = "\\".join([part for pair in filter_ for part in pair])
        if filter_string:
            filter_string = "\\" + filter_string
        # One paged query per region, chained into a single iterator and
        # deduplicated according to the requested policy.
        queries = []
        for region in regions:
            queries.append(self._query(region, filter_string))
        query = self._deduplicate(
            Duplicates(duplicates), itertools.chain.from_iterable(queries))
        for address in query:
            yield address
| nilq/baby-python | python |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import ast
import os
import platform
import re

from setuptools import setup, Extension
# Detect the interpreter and OS the extension is being built for.
python_version = platform.python_version()
system_name = platform.system()
print("build for python{} on {}".format(python_version, system_name))

# Arguments: overridable source locations, resolved to defaults below
# when left empty.
actrie_dir = ""
alib_dir = ""
def get_root_dir():
    """Return the absolute directory that contains this setup script."""
    script_path = os.path.realpath(__file__)
    return os.path.dirname(script_path)
# Fall back to defaults when the locations were not overridden above.
if not actrie_dir:
    actrie_dir = get_root_dir()
if not alib_dir:
    alib_dir = os.path.join(actrie_dir, 'deps', 'alib')
def build_library():
    """Invoke the shell script that builds the native actrie/alib libraries."""
    build_script = os.path.join(actrie_dir, "utils", "build.sh")
    os.system(build_script)
# build_library()

# Sources for the thin CPython wrapper; the matching engine itself is
# linked in from the pre-built static libraries configured below.
warp_sources = [
    os.path.join(actrie_dir, 'actrie', 'src', 'wrap.c')
]

compile_args = []
if system_name == "Windows":
    # MSVC: force UTF-8 source and execution character set.
    compile_args.append("/utf-8")
else:
    compile_args.append("-fno-strict-aliasing")

library_dirs = [
    # os.path.join(alib_dir, 'lib'),
    os.path.join(actrie_dir, 'lib')
]
libraries = ['actrie', 'alib']

include_dirs = [
    os.path.join(alib_dir, 'include'),
    os.path.join(actrie_dir, 'include')
]

actrie = Extension('actrie._actrie',
                   sources=warp_sources,
                   extra_compile_args=compile_args,
                   include_dirs=include_dirs,
                   library_dirs=library_dirs,
                   libraries=libraries)
kwds = {}

# Read version from actrie/__init__.py without importing the package
# (importing would require the C extension to already be built).
pat = re.compile(r'__version__\s*=\s*(\S+)', re.M)
init_path = os.path.join(actrie_dir, 'actrie', '__init__.py')
with open(init_path) as version_file:  # `with` fixes the leaked handle
    data = version_file.read()
match = pat.search(data)
if match is None:
    # Fail with a clear message instead of an AttributeError on None.
    raise RuntimeError('cannot find __version__ in ' + init_path)
# ast.literal_eval only accepts literals, unlike eval() which would
# execute arbitrary code found in the file.
kwds['version'] = ast.literal_eval(match.group(1))

setup(name="actrie",
      description="Aho-Corasick automation for large-scale multi-pattern matching.",
      author="James Yin",
      author_email="ywhjames@hotmail.com",
      url="https://github.com/ifplusor/actrie",
      license="BSD",
      packages=['actrie', 'actrie.example'],
      ext_modules=[actrie],
      classifiers=[
          "Programming Language :: C",
          "Programming Language :: Python :: 2",
          "Programming Language :: Python :: 3",
          "Programming Language :: Python :: Implementation :: CPython",
          "License :: OSI Approved :: BSD License",
          "Operating System :: OS Independent",
          "Topic :: Utilities"
      ],
      keywords=["matcher", "trie", "aho-corasick automation", "ac-automation",
                "string matching", "string search", "string matcher"],
      zip_safe=False,
      **kwds)
| nilq/baby-python | python |
# @author kingofthenorth
# @filename problemsearch.py
# @description Assignment 2
# @class CS 550
# @instructor Roch
# @notes N/A
from collections import deque
from basicsearch_lib02.queues import PriorityQueue
from basicsearch_lib02.searchrep import (Node, Problem)
from explored import Explored
def graph_search(problem: Problem, verbose=False, debug=False):
    """graph_search(problem, verbose, debug) - Given a problem representation
    (instance of basicsearch_lib02.representation.Problem or derived class),
    attempt to solve the problem.

    The search strategy is selected from the first expanded child of the
    initial node: a negative g cost selects depth-first search, an h cost
    below 2 selects breadth-first search, and anything else keeps the
    Manhattan (priority-queue) search.

    If debug is True, each dequeued node is printed.
    If verbose is True, the number of moves and every move/state of the
    solution are printed (see print_solution); "No solution found" is
    printed when the search exhausts the frontier.

    Returns a tuple (path, nodes_explored) where:
        path - list of nodes from initial state to goal, or None if no
               solution was found
        nodes_explored - number of nodes dequeued from the frontier
    """
    # Probe the problem to pick a strategy; start from a priority queue.
    frontier = PriorityQueue()
    frontier.append(Node(problem, problem.initial))
    node = frontier.pop()
    popping = True  # True -> pop() (DFS/Manhattan), False -> popleft() (BFS)
    if node.expand(node.problem)[0].g < 0:
        # Depth First Search: LIFO stack.
        frontier = deque()
        frontier.append(Node(problem, problem.initial))
    elif node.expand(node.problem)[0].h < 2:
        # Breadth First Search: FIFO queue.
        popping = False
        frontier = deque()
        frontier.append(Node(problem, problem.initial))
    else:
        # Manhattan Search: keep the priority queue, re-insert the probe.
        frontier.append(node)

    # Hash of state tuples currently on the frontier, so the same state is
    # never queued twice.
    frontier_hash = Explored()
    frontier_hash.add(problem.initial.state_tuple())
    finished = False
    nodes_explored = 0
    explored = Explored()
    while not finished:
        node = frontier.pop() if popping else frontier.popleft()
        if debug:
            print("Node popped:", str(node))
        explored.add(node.state.state_tuple())
        nodes_explored += 1
        if node.state.solved():
            if debug:
                print("Solution found!")
            solution_path = node.path()
            if verbose:
                print_solution(solution_path)
            return solution_path, nodes_explored
        for child in node.expand(node.problem):
            child_state = child.state.state_tuple()
            if not explored.exists(child_state) and not frontier_hash.exists(child_state):
                frontier.append(child)
                # BUG FIX: the original added the Node object itself
                # (frontier_hash.add(child)), so the exists() check on the
                # state tuple never matched and duplicate states were
                # re-queued. Add the state tuple instead.
                frontier_hash.add(child_state)
            elif debug:
                print("Skipping...", child)
        finished = len(frontier) == 0
        if debug:
            print("")
    if verbose:
        print("No solution found")
    return None, nodes_explored
def print_solution(path: tuple):
    """Print a solution: move count, the initial state, then every move."""
    moves_taken = len(path) - 1
    print("Amount of moves taken: %d" % moves_taken)
    print("Initial State...")
    print(path[0])
    for step in range(1, len(path)):
        node = path[step]
        print("Move %d - %s" % (step, node.action))
        print(node.state)
        print("")
| nilq/baby-python | python |
# @name: Katana-DorkScanner
# @repo: https://github.com/adnane-X-tebbaa/Katana
# @author: Adnane-X-tebbaa (AXT)
# Scada-file V2.2
# I used dorks for the most used PLCs
"""
MIT License
Copyright (c) 2020 adnane tebbaa
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import sys
import os
import time
from googlesearch import search
import sys
from termcolor import colored, cprint
import random
def clear():
    """Clear the terminal screen ('cls' on Windows, 'clear' elsewhere).

    BUG FIX: the original referenced bare ``name`` and ``system``, which
    are never imported in this script (only ``import os`` is) and raised
    NameError when called; both are attributes of ``os``.
    """
    if os.name == 'nt':
        _ = os.system('cls')
    else:
        _ = os.system('clear')
from http import cookiejar
class BlockAll(cookiejar.CookiePolicy):
    """A cookie policy that refuses every cookie.

    Every accept/return hook answers False, so nothing is ever stored in
    or sent from a jar using this policy.
    """

    def _refuse(self, *args, **kwargs):
        # Single shared implementation for all four policy hooks.
        return False

    return_ok = _refuse
    set_ok = _refuse
    domain_return_ok = _refuse
    path_return_ok = _refuse
    netscape = True
    rfc2965 = hide_cookie2 = False
NPP = """
) ) )
( ( (
) ) )
(~~~~~~~~~)
| POWER | Katana-ds V1.5.3
| | Find online PLCs
| _._ by AXT (adnane-X-tebbaa)
| / `\
| | N |
| | |~~~~~~~~~~~~~~|
/ | ||~~~~~~~~| |
__/_____|___||__|||___|____|__________________________________________
Note: That will take some time
"""
print (NPP)
TLD = ["com","com.tw","co.in"]
beta = random.choice(TLD)
betax = random.choice(TLD)
print (" ")
print(colored('[+] Searching... ', 'green'))
B = """ intitle:"Rockwell Automation" "Device Name" "Uptime" """
query = B
# ****
def spinning_cursor():
    """Endlessly yield the frames of a four-character text spinner."""
    frames = ('|', '/', '-', '\\')
    pos = 0
    while True:
        yield frames[pos]
        pos = (pos + 1) % len(frames)
# Show the spinner for ~10 seconds (100 frames x 0.1s) before querying.
spinner = spinning_cursor()
for _ in range(100):
    sys.stdout.write(next(spinner))
    sys.stdout.flush()
    time.sleep(0.1)
    sys.stdout.write('\b')
#*****
# First pass: walk the Google results for the Rockwell dork.
for gamma in search(query, tld=beta,stop=50, num=10,pause=2):
    print(colored ('[+] Found > ' ,'yellow') + (gamma) )
print(colored('[+] 20% done ', 'green'))

# Next dork: Allen-Bradley SLC-5/05 (1747-L551) diagnostic pages.
B = """ inurl:dtm.html intitle:1747-L551 """
query = B
# ****
def spinning_cursor():
    """Endlessly yield the frames of a four-character text spinner."""
    pos = 0
    frames = '|/-\\'
    while True:
        yield frames[pos % len(frames)]
        pos += 1
# Spin again while the second dork round runs.
spinner = spinning_cursor()
for _ in range(100):
    sys.stdout.write(next(spinner))
    sys.stdout.flush()
    time.sleep(0.1)
    sys.stdout.write('\b')
#*****
# Second pass, issued against a different random TLD.
for gamma in search(query, tld=betax, num=10,stop=50,pause=2):
    print(colored ('[+] Found > ' ,'yellow') + (gamma) )
print(colored('[+] 40% done ', 'green' )) # more scada dorks will be added here
from Modes import Scada2
| nilq/baby-python | python |
import sqlite3
from functools import partial
import multiprocessing as mp
def create(args):
    """Execute a CREATE statement against one sqlite partition file.

    Parameters
    ----------
    args : tuple
        ``(partition_id, db_filename, create_sql)``. The partition id is
        unused here; it is kept so the tuple matches the pool.map payload
        built by :class:`mydb`.
    """
    _partition, name, sql = args
    db = sqlite3.connect(name)
    try:
        db.execute(sql)
        db.commit()  # make the DDL durable before the worker returns
    finally:
        db.close()   # the original leaked the connection
class mydb:
    """Run DDL across partitioned sqlite files in parallel.

    A process pool fans one CREATE statement out to every partition file.
    """

    def __init__(self, w):
        # w: number of worker processes used for parallel DDL.
        self.pool = mp.Pool(w)

    def create(self, tab, name_tmpl, parts=(0,)):
        """Create table ``tab`` in every partition file.

        tab       -- table definition, e.g. ``'main(a,b,c)'``.
        name_tmpl -- filename template with a ``{}`` slot for the partition id.
        parts     -- iterable of partition ids. (Immutable tuple default
                     replaces the original mutable-default-argument ``[0]``.)
        """
        sql = 'create table if not exists {}'.format(tab)
        args = [(p, name_tmpl.format(p), sql) for p in parts]
        self.pool.map(create, args)

    def close(self):
        """Release the worker pool (the original never shut it down)."""
        self.pool.close()
        self.pool.join()

    def insert_iter(self): pass # TODO API
if __name__ == "__main__":
    # Demo: create table `main` in five partition files under data/.
    # NOTE(review): assumes the data/ directory already exists — confirm.
    db = mydb(4)
    db.create('main(a,b,c)','data/v4_{}.sqlite',[1,2,3,4,5])
| nilq/baby-python | python |
#!/usr/bin/env python
"""
Hacky script for comparing output set to gold set.
Usage:
just run python compare_solution.py -h
"""
import argparse
import fileinput
import sys
import re
def compare_sets(s1, s2):
    """Return the elements of ``s1`` that are missing from ``s2``.

    Returns ``False`` when ``s1`` is empty — a quirk of the original
    interface that callers rely on as a falsy "nothing to compare" signal,
    so it is preserved.
    """
    if s1:  # idiomatic truthiness instead of len(s1) != 0
        # return s1 == s2
        return s1 - s2
    return False
def read_from_stdin():
    """Collect the stripped lines piped on stdin into a set."""
    return {line.strip() for line in fileinput.input()}
def order_output(s):
    """Return the items of `s` ordered by the time step embedded as "N)."."""
    keyed = []
    for item in s:
        match = re.search("(\d{1,4})\)\.", item)
        if match:
            keyed.append((int(match.group(1)), item))
    # Stable sort on the time step only, so ties keep their input order
    # exactly as the original sorted(..., key=...) did.
    keyed.sort(key=lambda pair: pair[0])
    return [item for _, item in keyed]
def file2set(file_obj):
    """Return the set of stripped lines read from an open file object."""
    return {line.strip() for line in file_obj}
def read_from_file(file_name):
    """Read a set of stripped lines from the named file."""
    with open(file_name, "r") as handle:
        return file2set(handle)
if __name__ == "__main__":
    prs = argparse.ArgumentParser()
    prs.add_argument('expected', help="Name of gold standard file")
    prs.add_argument(
        'ours',
        nargs="?",
        help="Name of our output. "
        "If not given, stdin is used.")
    args = prs.parse_args()

    expected_set = read_from_file(args.expected)
    # Our output comes either from a file or piped via stdin.
    if args.ours:
        our_set = read_from_file(args.ours)
    else:
        our_set = file2set(sys.stdin)

    # print("\ncorrect solution: {}\n".format(compare_sets(test, gold)))
    # print("\ndifferences in set 1 and set 2:\n\n {}\n".format(compare_sets(test, gold)))

    # Write both symmetric differences, ordered by time step, to report files.
    test_ordered = order_output(our_set - expected_set)
    gold_ordered = order_output(expected_set - our_set)
    with open("our-output.lp", "w") as f:
        f.write("\n".join(test_ordered))
    with open("expected-output.lp", "w") as f:
        f.write("\n".join(gold_ordered))
| nilq/baby-python | python |
import logging
from airflow.decorators import dag, task
from datetime import datetime, timedelta
from airflow.utils.dates import days_ago
from airflow.providers.amazon.aws.operators.dms_create_task import DmsCreateTaskOperator
from airflow.providers.amazon.aws.operators.dms_start_task import DmsStartTaskOperator
from airflow.providers.amazon.aws.operators.dms_stop_task import DmsStopTaskOperator
from airflow.providers.amazon.aws.operators.dms_delete_task import DmsDeleteTaskOperator
from airflow.providers.amazon.aws.sensors.dms_task import DmsTaskCompletedSensor
# Task-level defaults applied to every task in the DAG.
default_args = {
    'owner': 'crn-data',
    "retries": 2,
    "retry_delay": timedelta(seconds=30),
}

# Deployment environment; used to suffix connection ids and DAG tags.
env = 'dev'

# Identifiers of pre-provisioned AWS DMS resources.
REPLICATION_TASK_ID = 'rds-to-crm-redshift-test'
SOURCE_ENDPOINT_ARN = 'arn:aws:dms:us-east-1:341484775232:endpoint:STD2AIN4MHPTLCYRLKNGYPHDUSQM7SQLGDKZDHY'
TARGET_ENDPOINT_ARN = 'arn:aws:dms:us-east-1:341484775232:endpoint:4L3AIBD3U4PW37TNROXLBCLDRTDPVI5MO2RG2CA'
REPLICATION_INSTANCE_ARN = 'arn:aws:dms:us-east-1:341484775232:rep:JZ6JLH3PSJN4HZK7AXWYZ22YKLGEKWEO7QUE52Q'

# DMS table mapping: replicate only treat.points_type, renaming the table
# prefix points_ -> crn_points_ on the target.
TABLE_MAPPINGS = {
    "rules": [
        {
            "rule-type": "transformation",
            "rule-id": "1",
            "rule-name": "1",
            "rule-target": "table",
            "object-locator": {
                "schema-name": "treat",
                "table-name": "points_type"
            },
            "rule-action": "replace-prefix",
            "value": "crn_points_",
            "old-value": "points_"
        },
        {
            "rule-type": "selection",
            "rule-id": "8",
            "rule-name": "8",
            "object-locator": {
                "schema-name": "treat",
                "table-name": "points_type"
            },
            "rule-action": "include",
            "filters": []
        }
    ]
}
# TABLE_MAPPINGS = {
# "rules": [
# {
# "rule-type": "transformation",
# "rule-id": "1",
# "rule-name": "1",
# "rule-target": "table",
# "object-locator": {
# "schema-name": "treat",
# "table-name": "treat_offer"
# },
# "rule-action": "replace-prefix",
# "value": "crn_treat_",
# "old-value": "treat_"
# },
# {
# "rule-type": "transformation",
# "rule-id": "2",
# "rule-name": "2",
# "rule-target": "table",
# "object-locator": {
# "schema-name": "treat",
# "table-name": "points_used"
# },
# "rule-action": "replace-prefix",
# "value": "crn_points_",
# "old-value": "points_"
# },
# {
# "rule-type": "transformation",
# "rule-id": "3",
# "rule-name": "3",
# "rule-target": "table",
# "object-locator": {
# "schema-name": "treat",
# "table-name": "points_type"
# },
# "rule-action": "replace-prefix",
# "value": "crn_points_",
# "old-value": "points_"
# },
# {
# "rule-type": "transformation",
# "rule-id": "4",
# "rule-name": "4",
# "rule-target": "table",
# "object-locator": {
# "schema-name": "treat",
# "table-name": "cust_loyalty_tier"
# },
# "rule-action": "replace-prefix",
# "value": "crn_cust_",
# "old-value": "cust_"
# },
# {
# "rule-type": "transformation",
# "rule-id": "5",
# "rule-name": "5",
# "rule-target": "table",
# "object-locator": {
# "schema-name": "crn",
# "table-name": "menu_item_xref"
# },
# "rule-action": "replace-prefix",
# "value": "crn_menu_",
# "old-value": "menu_"
# },
# {
# "rule-type": "transformation",
# "rule-id": "6",
# "rule-name": "6",
# "rule-target": "schema",
# "object-locator": {
# "schema-name": "treat"
# },
# "rule-action": "replace-prefix",
# "value": "crm",
# "old-value": "treat"
# },
# {
# "rule-type": "transformation",
# "rule-id": "7",
# "rule-name": "7",
# "rule-target": "schema",
# "object-locator": {
# "schema-name": "crn"
# },
# "rule-action": "replace-prefix",
# "value": "crm",
# "old-value": "crn"
# },
# {
# "rule-type": "selection",
# "rule-id": "8",
# "rule-name": "8",
# "object-locator": {
# "schema-name": "treat",
# "table-name": "treat_offer"
# },
# "rule-action": "include",
# "filters": []
# },
# {
# "rule-type": "selection",
# "rule-id": "9",
# "rule-name": "9",
# "object-locator": {
# "schema-name": "treat",
# "table-name": "points_used"
# },
# "rule-action": "include",
# "filters": []
# },
# {
# "rule-type": "selection",
# "rule-id": "10",
# "rule-name": "10",
# "object-locator": {
# "schema-name": "treat",
# "table-name": "points_type"
# },
# "rule-action": "include",
# "filters": []
# },
# {
# "rule-type": "selection",
# "rule-id": "11",
# "rule-name": "11",
# "object-locator": {
# "schema-name": "treat",
# "table-name": "cust_loyalty_tier"
# },
# "rule-action": "include",
# "filters": []
# },
# {
# "rule-type": "selection",
# "rule-id": "12",
# "rule-name": "12",
# "object-locator": {
# "schema-name": "crn",
# "table-name": "customer_activity"
# },
# "rule-action": "include",
# "filters": []
# },
# {
# "rule-type": "selection",
# "rule-id": "13",
# "rule-name": "13",
# "object-locator": {
# "schema-name": "crn",
# "table-name": "menu_item_xref"
# },
# "rule-action": "include",
# "filters": []
# },
# {
# "rule-type": "transformation",
# "rule-id": "14",
# "rule-name": "14",
# "rule-target": "table",
# "object-locator": {
# "schema-name": "crn",
# "table-name": "customer_activity"
# },
# "rule-action": "replace-prefix",
# "value": "crn_customer_",
# "old-value": "customer_"
# }
# ]
# }
# Connection ids are environment-suffixed so one DAG file serves all envs.
redshift_conn_id = f'crm_redshift_{env}'
aws_connection = 'aws-default'


@dag(dag_id='rds_to_redshift',
     default_args=default_args,
     schedule_interval='0 10 * * *',
     start_date=days_ago(1),
     catchup=True,
     tags=[f'crn-data-{env}'])
def rds_to_redshift():
    """
    Copies RTS RDS data to CRN Redshift

    Pipeline: create a DMS replication task, start it, wait for it to
    complete, then stop it. The task ARN flows between operators through
    XCom via ``create_task.output``.
    """
    # [START howto_dms_operators]
    create_task = DmsCreateTaskOperator(
        task_id='create_task',
        replication_task_id=REPLICATION_TASK_ID,
        source_endpoint_arn=SOURCE_ENDPOINT_ARN,
        target_endpoint_arn=TARGET_ENDPOINT_ARN,
        replication_instance_arn=REPLICATION_INSTANCE_ARN,
        table_mappings=TABLE_MAPPINGS,
    )

    start_task = DmsStartTaskOperator(
        task_id='start_task',
        replication_task_arn=create_task.output,
    )

    wait_for_completion = DmsTaskCompletedSensor(
        task_id='wait_for_completion',
        replication_task_arn=create_task.output,
    )

    # NOTE(review): this operator STOPS the task but its task_id says
    # 'delete_task' — confirm whether DmsDeleteTaskOperator (imported but
    # unused) was intended here.
    stop_task = DmsStopTaskOperator(
        task_id='delete_task',
        replication_task_arn=create_task.output,
    )

    # create_task runs first implicitly, as an XCom upstream of the others.
    start_task >> wait_for_completion >> stop_task


rds_to_redshift = rds_to_redshift()
import numpy as np
import math
# Project Euler #8: find the greatest product of K adjacent digits.
s = '''73167176531330624919225119674426574742355349194934
96983520312774506326239578318016984801869478851843
85861560789112949495459501737958331952853208805511
12540698747158523863050715693290963295227443043557
66896648950445244523161731856403098711121722383113
62229893423380308135336276614282806444486645238749
30358907296290491560440772390713810515859307960866
70172427121883998797908792274921901699720888093776
65727333001053367881220235421809751254540594752243
52584907711670556013604839586446706324415722155397
53697817977846174064955149290862569321978468622482
83972241375657056057490261407972968652414535100474
82166370484403199890008895243450658541227588666881
16427171479924442928230863465674813919123162824586
17866458359124566529476545682848912883142607690042
24219022671055626321111109370544217506941658960408
07198403850962455444362981230987879927244284909188
84580156166097919133875499200524063689912560717606
05886116467109405077541002256983155200055935729725
71636269561882670428252483600823257530420752963450'''
s = s.replace('\n', '').replace(' ', '')
s = [int(ch) for ch in s]
N = len(s)
K = 13
# math.prod keeps everything in arbitrary-precision Python ints (np.prod
# would silently wrap on int64 overflow for larger windows); default=0
# preserves the original behavior (answer stays 0) when N < K.
answer = max((math.prod(s[i - K:i]) for i in range(K, N + 1)), default=0)
# 23514624000
print(answer)
# step: build the vectorizer for year_month + general, f > 2, ngram = 3
#
import os
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
import numpy as np
import pickle
from sklearn.metrics import classification_report, f1_score
from scipy.sparse import lil_matrix
from imblearn.over_sampling import RandomOverSampler
from multiprocessing import Pool
def data_batch_loader(
        data_name, test_time_label, file_type='year',
        batch_size=100, mode='train'):
    """Yield batches of (text, label, time_label) lists for one data split.

    Parameters
    ----------
    data_name : str
        Dataset name; rows come from ``./data/<name>/<name>_<file_type>_sample.tsv``
        (tab separated: column 0 = text, column 1 = time label, column 2 = label).
    test_time_label : str or None
        Time label reserved for the test split.  Falsy -> the 2nd-latest
        label found under ``./vects1/<data_name>/`` is used.
    file_type : str
        'year' or 'month'; selects which vectorizer files define the time labels.
    batch_size : int
        Maximum number of rows per yielded batch.
    mode : str
        'train' (everything except the test label, oversampled),
        'test' (only the test label) or 'valid' (only the 3rd-latest label).

    Yields
    ------
    dict with parallel lists under keys 'data', 'label', 'time_label'.
    """
    data_path = './data/'+data_name+'/'+data_name+'_'+file_type+'_sample.tsv'
    time_labels = sorted(
        [
            file_name.split('.')[0].split('_')[1].strip()
            for file_name in os.listdir('./vects1/' + data_name + '/')
            if file_type in file_name]
    )
    valid_time_label = time_labels[-3]
    if not test_time_label:
        test_time_label = time_labels[-2]  # the latest year
    batch_data = {
        'data': [], 'label': [], 'time_label': []
    }
    all_data = []
    all_label = []
    all_time_label = []
    with open(data_path) as datafile:
        datafile.readline()  # skip header row
        for line in datafile:
            infos = line.strip().split('\t')
            if mode == 'train' and infos[1] == test_time_label:
                continue
            if mode == 'test':
                if infos[1] != test_time_label:
                    continue
            if mode == 'valid':
                if infos[1] != valid_time_label:
                    continue
            all_data.append(infos[0])
            all_label.append(infos[2])
            all_time_label.append(infos[1])
    if mode == 'train':  # over sampling
        print('\t\tOver Sampling.......')
        sampler = RandomOverSampler(random_state=0)
        indices = [[i] for i in range(len(all_data))]
        # NOTE(review): fit_sample was renamed fit_resample in newer
        # imbalanced-learn releases; keep in sync with the pinned version.
        indices, all_label = sampler.fit_sample(indices, all_label)
        all_data = [all_data[item[0]] for item in indices]
        all_time_label = [all_time_label[item[0]] for item in indices]
    for data, label, time_label in zip(all_data, all_label, all_time_label):
        batch_data['data'].append(data)
        # BUG FIX: 'label' and 'time_label' were swapped here (item[2] was
        # appended as the label and item[1] as the time label), corrupting
        # every batch this loader produced.
        batch_data['label'].append(label)
        batch_data['time_label'].append(time_label)
        if len(batch_data['data']) >= batch_size:
            yield batch_data
            batch_data = {
                'data': [], 'label': [], 'time_label': [],
            }
    if len(batch_data['data']) > 0:
        yield batch_data
def create_domain_vects(data_name, mode='year'):
    """Fit one TF-IDF vectorizer per time domain plus a 'general' one.

    Reads ``./data/<name>/<name>_<mode>_sample.tsv`` (tab separated, header
    skipped; column 0 = document, column 1 = time label), groups documents
    by time label, fits a TfidfVectorizer (min_df=2, 1-3 grams, English
    stop words) per group and pickles each vectorizer to
    ``./vects1/<name>/<mode>_<domain>.pkl``.

    Returns the list of fitted domain names (including 'general').
    """
    data_path = './data/' + data_name + '/' + data_name + '_' + mode + '_sample.tsv'
    time_idx = 1  # column holding the time label
    domain_docs = {'general': []}
    # load the data for domain data
    print('\t\tLoading domain data')
    with open(data_path) as datafile:
        datafile.readline()  # header
        for line in datafile:
            infos = line.strip().split('\t')
            doc, time_label = infos[0], infos[time_idx]
            domain_docs['general'].append(doc)
            domain_docs.setdefault(time_label, []).append(doc)
    print('\t\tFitting domain data')
    for domain_name, docs in domain_docs.items():
        print('\t\t\tWorking on: ' + domain_name)
        da_vect = TfidfVectorizer(min_df=2, ngram_range=(1, 3), stop_words='english')
        da_vect.fit(docs)
        # BUG FIX: the pickle was previously written through an open() call
        # whose handle was never closed; use a context manager so the file
        # is flushed and closed deterministically.
        out_path = './vects1/' + data_name + '/' + mode + '_' + str(domain_name) + '.pkl'
        with open(out_path, 'wb') as out_file:
            pickle.dump(da_vect, out_file)
    return list(domain_docs.keys())
def create_domain_clfs(data_name, test_time_label, file_type='year'):
    """Train an SGD logistic-regression classifier over stacked domain features.

    Feature layout: the 'general' TF-IDF block first, then one block per
    time domain (only rows belonging to that domain are filled).  The
    fitted classifier is pickled to ``./clfs1/<data_name>_<file_type>.pkl``
    and also returned.
    """
    domains = {file_type: []}
    sum_fea_size = 0
    fea_size = {file_type: dict()}
    # get feature size of each vectorizer:
    print('\t\tGet domain information.....')
    for file_name in os.listdir('./vects1/' + data_name + '/'):
        if file_type not in file_name:
            continue
        with open('./vects1/' + data_name + '/' + file_name, 'rb') as vect_pkl_f:
            vect_pkl = pickle.load(vect_pkl_f)
        cur_domain = file_name.split('.')[0].split('_')[1].strip()
        sum_fea_size += len(vect_pkl.vocabulary_)
        domains[file_type].append(cur_domain)
        fea_size[file_type][cur_domain] = len(vect_pkl.vocabulary_)
    print('Total feature size: ' + str(sum_fea_size))
    # BUG FIX: the original unconditionally sorted BOTH domains['year'] and
    # domains['month'], but only domains[file_type] exists -> KeyError for
    # whichever key was not file_type.  Reverse sort keeps 'general' first.
    domains[file_type] = sorted(domains[file_type], reverse=True)
    clf = SGDClassifier(
        loss='log', penalty='elasticnet', max_iter=2000,
        l1_ratio=0.1, n_jobs=-1, tol=0.0001)
    # load the data
    batch_size = 1000
    train_iter = data_batch_loader(
        data_name, test_time_label=test_time_label, file_type=file_type)
    # load the general vect (BUG FIX: handle was previously never closed)
    with open('./vects1/' + data_name + '/' + file_type + '_general.pkl', 'rb') as gen_f:
        general_vect = pickle.load(gen_f)
    print('\t\tBacth fit............')
    batch_count = 0
    for train_batch in train_iter:
        # partial_fit is meaningless on a single-class batch; skip it
        if len(np.unique(train_batch['label'])) == 1:
            continue
        print('Working on batch #' + str(batch_count))
        batch_count += 1
        # transform the data
        train_data = lil_matrix((len(train_batch['data']), sum_fea_size))
        train_data[:, :fea_size[file_type]['general']] = general_vect.transform(train_batch['data'])
        # BUG FIX: was fea_size['year']['general'], which hard-coded 'year'
        # and broke (KeyError) for file_type='month'.
        start_idx = fea_size[file_type]['general']
        for domain_name in domains[file_type]:
            if domain_name == 'general':
                continue
            with open('./vects1/' + data_name + '/' + file_type + '_' + str(domain_name) + '.pkl', 'rb') as vect_pkl_f:
                vect_pkl = pickle.load(vect_pkl_f)
            transformed_data = vect_pkl.transform(train_batch['data'])
            # only rows whose time label matches this domain get its block
            for label_idx in range(len(train_batch['time_label'])):
                if train_batch['time_label'][label_idx] == domain_name:
                    train_data[label_idx, start_idx:start_idx + fea_size[file_type][domain_name]] = transformed_data[
                        label_idx, :]
            start_idx += fea_size[file_type][domain_name]  # update the start index
        # partial training
        train_data = train_data.tocsr()
        clf.partial_fit(train_data, train_batch['label'], classes=['0', '1'])
    # save the clf
    print('\t\tSaving classifier............')
    with open('./clfs1/' + data_name + '_' + file_type + '.pkl', 'wb') as clf_file:
        pickle.dump(
            clf,
            clf_file
        )
    return clf
def run_exp(data_name, file_type, create_vects=False, create_clfs=False):
    """Run the train / validate / test pipeline for one dataset.

    Validation sweeps a multiplicative 'flip lambda' over the general
    features and keeps the value with the best weighted F1; testing applies
    the best lambda and appends results to results.txt.

    BUG FIXES relative to the original:
      * create_domain_clfs was called without its required test_time_label
        argument (TypeError at runtime).
      * the classifier was loaded from './clfs1/<name>.pkl' although
        create_domain_clfs saves './clfs1/<name>_<file_type>.pkl'.
      * the general-vectorizer path hard-coded 'year_general.pkl' and the
        data_batch_loader calls omitted required arguments; both now honor
        file_type.
    """
    print('Working on: ' + data_name + '..............................')
    if not os.path.exists('./vects1/' + data_name):
        os.mkdir('./vects1/' + data_name)
    if create_vects:
        print('\tCreating vects.........')
        domain_list = create_domain_vects(data_name, mode=file_type)
        print(domain_list)
    print('Creating logistic regression classifier------------')
    if create_clfs:
        # None -> let the loader pick its default (2nd-latest) test label
        clf = create_domain_clfs(data_name, None, file_type=file_type)
    else:
        with open('./clfs1/' + data_name + '_' + file_type + '.pkl', 'rb') as clf_f:
            clf = pickle.load(clf_f)
    # only load general vectorizer
    with open('./vects1/' + data_name + '/' + file_type + '_general.pkl', 'rb') as vect_f:
        gen_vect = pickle.load(vect_f)
    fea_size = clf.coef_.shape[1]  # feature size
    print('Validation.....')  # validation choose the 2nd latest year as the validation
    lambdas = [1, 10, 100, 200, 300]
    best_valid_f1 = 0
    best_lambda = 1
    for flip_l in lambdas:
        valid_iter = data_batch_loader(
            data_name, None, file_type=file_type, mode='valid')
        y_valids = []
        valid_preds = []
        for valid_batch in valid_iter:
            for label in valid_batch['label']:
                y_valids.append(label)
            # only the general block is filled; domain blocks stay zero
            valid_data = lil_matrix((len(valid_batch['data']), fea_size))
            valid_data[:, :len(gen_vect.vocabulary_)] = gen_vect.transform(valid_batch['data'])
            if flip_l != 1:
                valid_data = valid_data * flip_l
            predictions = clf.predict(valid_data)
            for label in predictions:
                valid_preds.append(label)
        tmp_f1 = f1_score(y_true=y_valids, y_pred=valid_preds, average='weighted')
        if tmp_f1 > best_valid_f1:
            best_valid_f1 = tmp_f1
            best_lambda = flip_l
    print(data_name + ' lambda: ' + str(best_lambda))
    print(data_name + ' valid f1: ' + str(best_valid_f1))
    print('Testing .....')
    test_iter = data_batch_loader(
        data_name, None, file_type=file_type, mode='test')
    y_preds = []
    y_truth = []
    print('Test by each batch')
    for test_batch in test_iter:
        for label in test_batch['label']:
            y_truth.append(label)
        # transform the test data:
        test_data = lil_matrix((len(test_batch['data']), fea_size))
        test_data[:, :len(gen_vect.vocabulary_)] = gen_vect.transform(test_batch['data'])
        # flip lambda
        test_data = test_data * best_lambda
        # prediction
        predictions = clf.predict(test_data)
        for label in predictions:
            y_preds.append(label)
    my_f1 = str(f1_score(y_true=y_truth, y_pred=y_preds, average='weighted'))
    my_report = classification_report(y_true=y_truth, y_pred=y_preds)
    print(data_name + '----- F1-score: ' + my_f1)
    with open('results.txt', 'a') as result_file:
        result_file.write('Working on ' + data_name + '--------------------\n')
        result_file.write(
            'Best valid result: ' + str(best_valid_f1) +
            ', lambda flip: ' + str(best_lambda) + '\n')
        result_file.write('F1: ' + my_f1 + '\n')
        result_file.write(my_report)
        result_file.write('\n----------------------------------------\n')
# Script entry point: run every dataset with both time granularities.
if __name__ == '__main__':
    data_list = [
        'amazon',
        'economy',
        'vaccine',
        'yelp_hotel',
        'yelp_rest',
        'parties',
    ]
    # multiprocess:
    # NOTE(review): the commented-out Pool usage below is broken as written --
    # Pool.map expects (func, iterable-of-args), not (func, 'year').
    # p = Pool(5)
    # p.map(run_exp, 'year')
    # p.map(run_exp, 'month')
    for file_type in ['year', 'month']:
        for data in data_list:
            run_exp(data, file_type=file_type, create_vects=False, create_clfs=False)
| nilq/baby-python | python |
# --------------------------------------------------------------------------- #
import os
import filecmp
from arroyo import utils
import pytest
# --------------------------------------------------------------------------- #
# Asymmetric Key Tests
from arroyo.crypto import KeyAlgorithmType, EncodingType
from arroyo.crypto import asymmetric
# --------------------------------------------------------------------------- #
# Passphrase used by the pre-generated encrypted key fixtures.
PASSWORD = b'password'
# Directory containing this test module; key fixtures live under ./keys.
HERE = os.path.dirname(__file__)
# --------------------------------------------------------------------------- #
def get_public_key_filename(key_type, key_encoding):
    """Return the path of the pre-generated public-key fixture for the
    given algorithm and encoding (enum members or plain strings)."""
    # Enum members expose .value; plain strings pass through unchanged.
    key_type = getattr(key_type, 'value', key_type).lower()
    key_encoding = getattr(key_encoding, 'value', key_encoding).lower()
    key_name = "{}_public_{}.key".format(key_type, key_encoding)
    return os.path.join(HERE, "keys", key_name)
def get_private_key_filename(key_type, key_encoding, encrypted=False):
    """Return the path of the pre-generated private-key fixture for the
    given algorithm and encoding; encrypted=True selects the
    password-protected variant."""
    key_type = getattr(key_type, 'value', key_type).lower()
    key_encoding = getattr(key_encoding, 'value', key_encoding).lower()
    suffix = "_encrypted" if encrypted else ""
    key_name = "{}_private_{}{}.key".format(key_type, key_encoding, suffix)
    return os.path.join(HERE, "keys", key_name)
class FakeTestKey(asymmetric.AsymmetricKey):
    # Minimal concrete AsymmetricKey subclass used to exercise the base
    # class's error paths (invalid backing key, bad to_jwk return type).
    def __eq__(self, other):
        pass
    def to_bytes(self, *, encoding: EncodingType, fmt: str):
        pass
    def to_jwk(self):
        # Deliberately returns bytes (not a dict) so jwk_thumbprint fails.
        return b'\x00\x01'
# --------------------------------------------------------------------------- #
@pytest.fixture(scope="session", params=EncodingType)
def public_key_encoding(request):
    # Parametrized over every supported encoding, including OpenSSH.
    return request.param
@pytest.fixture(scope="session",
                params=[e for e in EncodingType if e != EncodingType.OpenSSH])
def private_key_encoding(request):
    # OpenSSH is excluded here -- presumably private keys are not
    # serialized in that encoding; confirm against arroyo.crypto.
    return request.param
# --------------------------------------------------------------------------- #
def test_load_public_key_files(key_algorithm, public_key_encoding):
    # Every (algorithm, encoding) fixture must load with metadata preserved.
    key_file = get_public_key_filename(key_algorithm, public_key_encoding)
    key = asymmetric.PublicKey.from_file(key_file)
    assert isinstance(key, asymmetric.PublicKey)
    assert key.algorithm == key_algorithm
    assert key.encoding == public_key_encoding
def test_load_private_key_files(key_algorithm, private_key_encoding):
    key_file = get_private_key_filename(key_algorithm, private_key_encoding)
    key = asymmetric.PrivateKey.from_file(key_file)
    assert isinstance(key, asymmetric.PrivateKey)
    assert key.algorithm == key_algorithm
    assert key.encoding == private_key_encoding
def test_load_encrypted_private_key_files(key_algorithm, private_key_encoding):
    # Password supplied as bytes.
    key_file = get_private_key_filename(key_algorithm, private_key_encoding,
                                        encrypted=True)
    key = asymmetric.PrivateKey.from_file(key_file, password=PASSWORD)
    assert isinstance(key, asymmetric.PrivateKey)
    assert key.algorithm == key_algorithm
    assert key.encoding == private_key_encoding
def test_load_encrypted_private_key_files_str_pass(key_algorithm,
                                                   private_key_encoding):
    # Password supplied as str must also be accepted.
    key_file = get_private_key_filename(key_algorithm, private_key_encoding,
                                        encrypted=True)
    key = asymmetric.PrivateKey.from_file(key_file, password=PASSWORD.decode())
    assert isinstance(key, asymmetric.PrivateKey)
    assert key.algorithm == key_algorithm
    assert key.encoding == private_key_encoding
def test_load_encrypted_private_key_files_inv_pass_type(key_algorithm,
                                                        private_key_encoding):
    # A non-str/bytes password is rejected with TypeError.
    key_file = get_private_key_filename(key_algorithm, private_key_encoding,
                                        encrypted=True)
    with pytest.raises(TypeError):
        asymmetric.PrivateKey.from_file(key_file, password=12345)
def test_unsupported_key_algorithm():
    # Even a fully concrete subclass cannot wrap an unrecognized backing
    # key object (None here).
    class FakeSubclass(asymmetric.AsymmetricKey):
        def to_bytes(self, *, encoding: EncodingType, fmt: str) -> bytes:
            pass
        def __eq__(self, other):
            return True
    with pytest.raises(TypeError):
        FakeSubclass(key=None)
def test_private_key_bytes():
    # bytes(key) must delegate to to_bytes().
    key_file = get_private_key_filename(KeyAlgorithmType.RSA, EncodingType.PEM)
    key = asymmetric.PrivateKey.from_file(key_file)
    assert isinstance(bytes(key), bytes)
    assert bytes(key) == key.to_bytes()
def test_public_key_bytes():
    key_file = get_public_key_filename(KeyAlgorithmType.RSA, EncodingType.PEM)
    key = asymmetric.PublicKey.from_file(key_file)
    assert isinstance(bytes(key), bytes)
    assert bytes(key) == key.to_bytes()
def test_private_key_size():
    # len(key) must mirror the .size property.
    key_file = get_private_key_filename(KeyAlgorithmType.RSA, EncodingType.PEM)
    key = asymmetric.PrivateKey.from_file(key_file)
    assert isinstance(len(key), int)
    assert len(key) == key.size
def test_public_key_size():
    key_file = get_public_key_filename(KeyAlgorithmType.RSA, EncodingType.PEM)
    key = asymmetric.PublicKey.from_file(key_file)
    assert isinstance(len(key), int)
    assert len(key) == key.size
def test_private_key_equality():
    # Two loads of the same file give distinct but equal objects; comparing
    # with an unrelated type is simply unequal (no exception).
    key_file = get_private_key_filename(KeyAlgorithmType.RSA, EncodingType.PEM)
    key1 = asymmetric.PrivateKey.from_file(key_file)
    key2 = asymmetric.PrivateKey.from_file(key_file)
    assert key1 is not key2
    assert key1 == key2
    assert not key1 != key2
    assert key1 != 12345
def test_public_key_equality():
    key_file = get_public_key_filename(KeyAlgorithmType.RSA, EncodingType.PEM)
    pub_key = asymmetric.PublicKey.from_file(key_file)
    key_file = get_private_key_filename(KeyAlgorithmType.RSA, EncodingType.PEM)
    priv_key = asymmetric.PrivateKey.from_file(key_file)
    # The derived public key is a fresh object but compares equal.
    assert priv_key.public_key is not pub_key
    assert priv_key.public_key == pub_key
    assert not priv_key.public_key != pub_key
    assert pub_key != 12345
    # Test the __contains__ Operator
    assert pub_key in priv_key
def test_size_in_repr(key_algorithm):
    # repr(key) should surface the key size.
    key_file = get_private_key_filename(key_algorithm, EncodingType.PEM)
    key = asymmetric.PrivateKey.from_file(key_file)
    assert str(key.size) in repr(key)
def test_algorithm_in_repr(key_algorithm):
    # repr(key) should surface the algorithm name.
    key_file = get_private_key_filename(key_algorithm, EncodingType.PEM)
    key = asymmetric.PrivateKey.from_file(key_file)
    assert str(key_algorithm.value) in repr(key)
def test_set_invalid_encoding():
    # Assigning something that is not an EncodingType must raise.
    key_file = get_private_key_filename(KeyAlgorithmType.RSA, EncodingType.PEM)
    key = asymmetric.PrivateKey.from_file(key_file)
    with pytest.raises(ValueError):
        key.encoding = b'NotValid'
def test_private_key_to_file(key_algorithm, private_key_encoding, empty_file):
    # Unencrypted round-trip through to_file must be byte-identical.
    key_file = get_private_key_filename(key_algorithm, private_key_encoding)
    key = asymmetric.PrivateKey.from_file(key_file)
    key.to_file(empty_file)
    assert filecmp.cmp(key_file, empty_file)
def test_private_key_to_file_encrypted(key_algorithm, private_key_encoding,
                                       empty_file):
    # Encrypted output is compared by reloading -- a byte comparison would
    # presumably fail because encryption output is not deterministic.
    key_file = get_private_key_filename(key_algorithm, private_key_encoding)
    key1 = asymmetric.PrivateKey.from_file(key_file)
    key1.to_file(empty_file, password=PASSWORD)
    key2 = asymmetric.PrivateKey.from_file(empty_file, password=PASSWORD)
    assert key1 == key2
@pytest.mark.xfail
def test_public_key_to_file(key_algorithm, public_key_encoding, empty_file):
    # XXX: Currently this fails because we are not using sane defaults
    # when writing out Public Keys, specifically ECDSA keys.
    key_file = get_public_key_filename(key_algorithm, public_key_encoding)
    key = asymmetric.PublicKey.from_file(key_file)
    key.to_file(empty_file)
    assert filecmp.cmp(key_file, empty_file)
def test_rsa_private_key_to_jwk():
    # An RSA private JWK must carry the full parameter set (RFC 7518 names).
    key_file = get_private_key_filename(KeyAlgorithmType.RSA, EncodingType.PEM)
    key = asymmetric.PrivateKey.from_file(key_file)
    jwk = key.to_jwk()
    assert jwk['kty'] == 'RSA'
    assert 'n' in jwk
    assert 'e' in jwk
    assert 'd' in jwk
    assert 'p' in jwk
    assert 'q' in jwk
    assert 'dp' in jwk
    assert 'dq' in jwk
    assert 'qi' in jwk
def test_dsa_private_key_to_jwk():
    """Test to ensure that attempting to convert a DSA key to a JWK results
    in an exception thrown, since DSA keys cannot be represented as JWKs."""
    key_file = get_private_key_filename(KeyAlgorithmType.DSA, EncodingType.PEM)
    key = asymmetric.PrivateKey.from_file(key_file)
    with pytest.raises(TypeError):
        key.to_jwk()
def test_ecdsa_private_key_to_jwk():
    # ECDSA -> JWK is recognized but not implemented yet.
    key_file = get_private_key_filename(KeyAlgorithmType.ECDSA,
                                        EncodingType.PEM)
    key = asymmetric.PrivateKey.from_file(key_file)
    with pytest.raises(NotImplementedError):
        key.to_jwk()
def test_rsa_private_key_jwk_thumbprint():
    key_file = get_private_key_filename(KeyAlgorithmType.RSA, EncodingType.PEM)
    key = asymmetric.PrivateKey.from_file(key_file)
    jwk_thumbprint = key.jwk_thumbprint
    assert isinstance(jwk_thumbprint, str)
    # Ensure the result can be decoded as JOSE base64 and appears to be a
    # SHA256 result
    decoded = utils.jose_b64decode(jwk_thumbprint)
    assert len(decoded) * 8 == 256
def test_invalid_key_type():
    # The base class must reject arbitrary objects as the backing key.
    with pytest.raises(TypeError):
        FakeTestKey(key=25)
def test_invalid_to_jwk():
    # FakeTestKey.to_jwk returns bytes, which jwk_thumbprint must reject.
    key_file = get_private_key_filename(KeyAlgorithmType.RSA, EncodingType.PEM)
    key = asymmetric.PrivateKey.from_file(key_file)
    new_key = FakeTestKey(key=key._key)
    with pytest.raises(TypeError):
        new_key.jwk_thumbprint
def test_direct_public_key_creation_as_str(key_algorithm):
    # The constructor should accept PEM data as str, not only bytes.
    key_file = get_public_key_filename(key_algorithm, EncodingType.PEM)
    with open(key_file, 'r') as f:
        key_data = f.read()
    asymmetric.PublicKey(data=key_data)
def test_direct_public_key_invalid_data():
    # Non-str/bytes data must raise TypeError.
    with pytest.raises(TypeError):
        asymmetric.PublicKey(data=54321)
def test_direct_private_key_creation_as_str(key_algorithm):
    key_file = get_private_key_filename(key_algorithm, EncodingType.PEM)
    with open(key_file, 'r') as f:
        key_data = f.read()
    asymmetric.PrivateKey(data=key_data)
def test_direct_private_key_invalid_data():
    with pytest.raises(TypeError):
        asymmetric.PrivateKey(data=54321)
def test_invalid_public_key_file(nonempty_file):
    # A file that is not a key should fail to parse with ValueError.
    with pytest.raises(ValueError):
        asymmetric.PublicKey.from_file(nonempty_file)
def test_invalid_private_key_file(nonempty_file):
    with pytest.raises(ValueError):
        asymmetric.PrivateKey.from_file(nonempty_file)
# --------------------------------------------------------------------------- #
# Key Generation Tests
def test_strong_key_generation(recwarn, key_algorithm):
    key = asymmetric.PrivateKey.generate(key_algorithm)
    # Ensure that the default parameters generate a "strong" key
    # (thus no warnings were raised)
    assert len(recwarn) == 0
    assert key.algorithm is key_algorithm
def test_weak_rsa_key_generation(recwarn):
    key = asymmetric.PrivateKey.generate(KeyAlgorithmType.RSA, size=1024)
    # Ensure that a warning was raised since the key size will generate a
    # "weak" key
    assert len(recwarn) > 0
    assert key.algorithm is KeyAlgorithmType.RSA
def test_weak_dsa_key_generation(recwarn):
    key = asymmetric.PrivateKey.generate(KeyAlgorithmType.DSA, size=1024)
    # Ensure that a warning was raised since the key size will generate a
    # "weak" key
    assert len(recwarn) > 0
    assert key.algorithm is KeyAlgorithmType.DSA
def test_invalid_ecdsa_curve_size():
    # Too-small ECDSA sizes are rounded UP to a valid curve with a warning.
    with pytest.warns(UserWarning) as record:
        asymmetric.PrivateKey.generate(KeyAlgorithmType.ECDSA, size=1)
    # Ensure that a warning was raised about the key size being too small
    # and that it was rounded up.
    assert len(record) == 1
    assert "Rounding up" in str(record[0].message)
def test_too_large_ecdsa_curve_size():
    # Oversized ECDSA sizes are rounded DOWN to the largest valid curve.
    with pytest.warns(UserWarning) as record:
        asymmetric.PrivateKey.generate(KeyAlgorithmType.ECDSA, size=9999999999)
    assert len(record) == 1
    assert "Rounding down" in str(record[0].message)
| nilq/baby-python | python |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import unittest
from collections import OrderedDict
import numpy as np
from oneflow.test_utils.test_util import GenArgList
import oneflow as flow
import oneflow.unittest
from oneflow.test_utils.automated_test_util import *
def do_test_dropout_numpy_p0(test_case, shape, device, dtype):
    # p=0: dropout is the identity; d(sum)/dx is all ones.
    np_x = np.random.randn(*shape).astype(dtype)
    np_one_mask = np.ones_like(np_x)
    x_tensor = flow.tensor(np_x, requires_grad=True, device=device)
    out = flow._C.dropout(x_tensor, p=0.0)
    test_case.assertTrue(np.allclose(out.numpy(), np_x, atol=1e-5, rtol=1e-5))
    out_sum = out.sum()
    out_sum.backward()
    test_case.assertTrue(
        np.allclose(x_tensor.grad.numpy(), np_one_mask, atol=1e-5, rtol=1e-5)
    )
def do_test_dropout_numpy_p1(test_case, shape, device, dtype):
    # p=1: output and input-gradient are all zeros.
    np_x = np.random.randn(*shape).astype(dtype)
    np_zero_mask = np.zeros_like(np_x)
    x_tensor = flow.tensor(np_x, requires_grad=True, device=device)
    out = flow._C.dropout(x_tensor, p=1.0)
    test_case.assertTrue(np.allclose(out.numpy(), np_zero_mask, atol=1e-5, rtol=1e-5))
    out_sum = out.sum()
    out_sum.backward()
    test_case.assertTrue(
        np.allclose(x_tensor.grad.numpy(), np_zero_mask, atol=1e-5, rtol=1e-5)
    )
def do_test_dropout_numpy_fp16_p0(test_case, shape):
    # Same identity check as p=0 but through a float16 cast on CUDA;
    # the expected value is the fp16-rounded input.
    np_x = np.random.randn(*shape).astype(np.float32)
    np_x_fp16 = np_x.astype(np.float16)
    x_tensor = flow.tensor(np_x, requires_grad=True, device="cuda")
    x_tensor_fp16 = flow.cast(x_tensor, flow.float16)
    np_one_mask = np.ones_like(np_x)
    out = flow._C.dropout(x_tensor_fp16, p=0.0)
    out_fp32 = flow.cast(out, flow.float32)
    test_case.assertTrue(np.allclose(out_fp32.numpy(), np_x_fp16, atol=1e-5, rtol=1e-5))
    out_sum = out_fp32.sum()
    out_sum.backward()
    test_case.assertTrue(
        np.allclose(x_tensor.grad.numpy(), np_one_mask, atol=1e-5, rtol=1e-5)
    )
def do_test_dropout_numpy_fp16_p1(test_case, shape):
    # fp16 path with p=1: zero output, zero input-gradient.
    np_x = np.random.randn(*shape).astype(np.float32)
    x_tensor = flow.tensor(np_x, requires_grad=True, device="cuda")
    x_tensor_fp16 = flow.cast(x_tensor, flow.float16)
    np_zero_mask = np.zeros_like(np_x)
    out = flow._C.dropout(x_tensor_fp16, p=1.0)
    out_fp32 = flow.cast(out, flow.float32)
    test_case.assertTrue(
        np.allclose(out_fp32.numpy(), np_zero_mask, atol=1e-5, rtol=1e-5)
    )
    out_sum = out_fp32.sum()
    out_sum.backward()
    test_case.assertTrue(
        np.allclose(x_tensor.grad.numpy(), np_zero_mask, atol=1e-5, rtol=1e-5)
    )
def do_test_dropout_addend_numpy_p0(test_case, shape, device, dtype):
    # Dropout with an addend, p=0: out == x + addend; both grads are ones.
    np_x = np.random.randn(*shape).astype(dtype)
    np_addend = np.random.randn(*shape).astype(dtype)
    np_one_mask = np.ones_like(np_x)
    x_tensor = flow.tensor(np_x, requires_grad=True, device=device)
    addend_tensor = flow.tensor(np_addend, requires_grad=True, device=device)
    DropoutModule = flow.nn.Dropout(p=0.0)
    out = DropoutModule(x_tensor, addend_tensor)
    test_case.assertTrue(
        np.allclose(out.numpy(), np_x + np_addend, atol=1e-5, rtol=1e-5)
    )
    out_sum = out.sum()
    out_sum.backward()
    test_case.assertTrue(
        np.allclose(x_tensor.grad.numpy(), np_one_mask, atol=1e-5, rtol=1e-5)
    )
    test_case.assertTrue(
        np.allclose(addend_tensor.grad.numpy(), np_one_mask, atol=1e-5, rtol=1e-5)
    )
def do_test_dropout_addend_numpy_p1(test_case, shape, device, dtype):
    # p=1: x is fully dropped, so out == addend; x gets zero grad, the
    # addend still gets ones.
    np_x = np.random.randn(*shape).astype(dtype)
    np_addend = np.random.randn(*shape).astype(dtype)
    np_one_mask = np.ones_like(np_x)
    np_zero_mask = np.zeros_like(np_x)
    x_tensor = flow.tensor(np_x, requires_grad=True, device=device)
    addend_tensor = flow.tensor(np_addend, requires_grad=True, device=device)
    DropoutModule = flow.nn.Dropout(p=1.0)
    out = DropoutModule(x_tensor, addend_tensor)
    test_case.assertTrue(np.allclose(out.numpy(), np_addend, atol=1e-5, rtol=1e-5))
    out_sum = out.sum()
    out_sum.backward()
    test_case.assertTrue(
        np.allclose(x_tensor.grad.numpy(), np_zero_mask, atol=1e-5, rtol=1e-5)
    )
    test_case.assertTrue(
        np.allclose(addend_tensor.grad.numpy(), np_one_mask, atol=1e-5, rtol=1e-5)
    )
def do_test_dropout_addend_numpy_fp16_p0(test_case, shape):
    # fp16 variant of the p=0 addend test; expectations use fp16-rounded inputs.
    np_x = np.random.randn(*shape).astype(np.float32)
    np_x_fp16 = np_x.astype(np.float16)
    np_addend = np.random.randn(*shape).astype(np.float32)
    np_addend_fp16 = np_addend.astype(np.float16)
    x_tensor = flow.tensor(np_x, requires_grad=True, device="cuda")
    x_tensor_fp16 = flow.cast(x_tensor, flow.float16)
    addend_tensor = flow.tensor(np_addend, requires_grad=True, device="cuda")
    addend_tensor_fp16 = flow.cast(addend_tensor, flow.float16)
    np_one_mask = np.ones_like(np_x)
    DropoutModule = flow.nn.Dropout(p=0.0)
    out = DropoutModule(x_tensor_fp16, addend_tensor_fp16)
    out_fp32 = flow.cast(out, flow.float32)
    test_case.assertTrue(
        np.allclose(out_fp32.numpy(), np_x_fp16 + np_addend_fp16, atol=1e-5, rtol=1e-5)
    )
    out_sum = out_fp32.sum()
    out_sum.backward()
    test_case.assertTrue(
        np.allclose(x_tensor.grad.numpy(), np_one_mask, atol=1e-5, rtol=1e-5)
    )
    test_case.assertTrue(
        np.allclose(addend_tensor.grad.numpy(), np_one_mask, atol=1e-5, rtol=1e-5)
    )
def do_test_dropout_addend_numpy_fp16_p1(test_case, shape):
    # fp16 variant of the p=1 addend test.
    np_x = np.random.randn(*shape).astype(np.float32)
    np_addend = np.random.randn(*shape).astype(np.float32)
    np_addend_fp16 = np_addend.astype(np.float16)
    x_tensor = flow.tensor(np_x, requires_grad=True, device="cuda")
    x_tensor_fp16 = flow.cast(x_tensor, flow.float16)
    addend_tensor = flow.tensor(np_addend, requires_grad=True, device="cuda")
    addend_tensor_fp16 = flow.cast(addend_tensor, flow.float16)
    np_zero_mask = np.zeros_like(np_x)
    np_one_mask = np.ones_like(np_x)
    DropoutModule = flow.nn.Dropout(p=1.0)
    out = DropoutModule(x_tensor_fp16, addend_tensor_fp16)
    out_fp32 = flow.cast(out, flow.float32)
    test_case.assertTrue(
        np.allclose(out_fp32.numpy(), np_addend_fp16, atol=1e-5, rtol=1e-5)
    )
    out_sum = out_fp32.sum()
    out_sum.backward()
    test_case.assertTrue(
        np.allclose(x_tensor.grad.numpy(), np_zero_mask, atol=1e-5, rtol=1e-5)
    )
    test_case.assertTrue(
        np.allclose(addend_tensor.grad.numpy(), np_one_mask, atol=1e-5, rtol=1e-5)
    )
def fixed_cpu_seed_dropout_test(test_case):
    # With a fixed generator seed the CPU dropout mask is deterministic;
    # the expected arrays below encode that mask (kept values are scaled
    # by 1/(1-p)).  NOTE(review): these fixtures depend on oneflow's RNG
    # implementation staying stable across versions.
    gen1 = flow.Generator()
    gen1.manual_seed(5)
    dropped_array1 = np.array(
        [
            [0.000000, 0.000000, 1.333333],
            [1.333333, 0.000000, 1.333333],
            [1.333333, 1.333333, 1.333333],
        ]
    ).astype(np.float32)
    dropout1 = flow.nn.Dropout(p=0.25, generator=gen1)
    x = flow.ones((3, 3), dtype=flow.float32)
    out1 = dropout1(x)
    test_case.assertTrue(
        np.allclose(out1.numpy(), dropped_array1, atol=1e-4, rtol=1e-4)
    )
    gen2 = flow.Generator()
    gen2.manual_seed(7)
    dropout2 = flow.nn.Dropout(p=0.5, generator=gen2)
    dropped_array2 = np.array(
        [[0.0, 0.0, 2.0], [0.0, 0.0, 2.0], [2.0, 0.0, 2.0]]
    ).astype(np.float32)
    out2 = dropout2(x)
    test_case.assertTrue(
        np.allclose(out2.numpy(), dropped_array2, atol=1e-4, rtol=1e-4)
    )
def fixed_gpu_seed_dropout_test(test_case):
    # GPU counterpart of the fixed-seed test; same caveat about RNG stability.
    gen1 = flow.Generator()
    gen1.manual_seed(5)
    dropped_array1 = np.array(
        [[1.2500, 0.0000, 1.2500], [1.2500, 1.2500, 1.2500], [1.2500, 1.2500, 1.2500]]
    ).astype(np.float32)
    dropout1 = flow.nn.Dropout(p=0.2, generator=gen1).to("cuda")
    x = flow.ones((3, 3), dtype=flow.float32).to("cuda")
    out1 = dropout1(x)
    test_case.assertTrue(
        np.allclose(out1.numpy(), dropped_array1, atol=1e-4, rtol=1e-4)
    )
    gen2 = flow.Generator()
    gen2.manual_seed(7)
    dropout2 = flow.nn.Dropout(p=0.7, generator=gen2).to("cuda")
    dropped_array2 = np.array(
        [
            [3.333333, 3.333333, 0.000000],
            [0.000000, 0.000000, 0.000000],
            [0.000000, 0.000000, 0.000000],
        ]
    ).astype(np.float32)
    out2 = dropout2(x)
    test_case.assertTrue(
        np.allclose(out2.numpy(), dropped_array2, atol=1e-4, rtol=1e-4)
    )
@flow.unittest.skip_unless_1n1d()
class TestModule(flow.unittest.TestCase):
    # Driver class: each test_* method builds an OrderedDict of argument
    # lists and runs every combination through the matching do_test_* helper.
    # NOTE(review): the @autotest methods below are named 'autotest_*', not
    # 'test_*', so default unittest/pytest discovery will NOT run them --
    # confirm whether that is intentional.
    def test_dropout_numpy_case(test_case):
        arg_dict = OrderedDict()
        arg_dict["test_fun"] = [do_test_dropout_numpy_p0, do_test_dropout_numpy_p1]
        arg_dict["shape"] = [[4, 127, 256], [2, 1024, 1024]]
        arg_dict["device"] = ["cuda"]
        if os.getenv("ONEFLOW_TEST_CPU_ONLY"):
            arg_dict["device"] = ["cpu"]
        arg_dict["dtype"] = [np.float32, np.float64]
        for arg in GenArgList(arg_dict):
            arg[0](test_case, *arg[1:])
    @unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
    def test_dropout_fp16_numpy_case(test_case):
        # fp16 helpers are CUDA-only, hence the skip guard.
        arg_dict = OrderedDict()
        arg_dict["test_fun"] = [
            do_test_dropout_numpy_fp16_p0,
            do_test_dropout_numpy_fp16_p1,
        ]
        arg_dict["shape"] = [[4, 127, 256], [5, 63, 49], [7, 32, 64], [16, 512, 512]]
        for arg in GenArgList(arg_dict):
            arg[0](test_case, *arg[1:])
    def test_dropout_addend_numpy_case(test_case):
        arg_dict = OrderedDict()
        arg_dict["test_fun"] = [
            do_test_dropout_addend_numpy_p0,
            do_test_dropout_addend_numpy_p1,
        ]
        arg_dict["shape"] = [[4, 47, 156], [5, 33, 65], [3, 132, 94], [9, 256, 63]]
        arg_dict["device"] = ["cpu", "cuda"]
        arg_dict["dtype"] = [np.float32, np.float64]
        for arg in GenArgList(arg_dict):
            arg[0](test_case, *arg[1:])
    @unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
    def test_dropout_addend_fp16_numpy_case(test_case):
        arg_dict = OrderedDict()
        arg_dict["test_fun"] = [
            do_test_dropout_addend_numpy_fp16_p0,
            do_test_dropout_addend_numpy_fp16_p1,
        ]
        arg_dict["shape"] = [[2, 44, 66], [1, 2, 7], [5, 32, 74], [8, 125, 63]]
        for arg in GenArgList(arg_dict):
            arg[0](test_case, *arg[1:])
    def test_cpu_fixed_dropout(test_case):
        arg_dict = OrderedDict()
        arg_dict["test_fun"] = [
            fixed_cpu_seed_dropout_test,
        ]
        for arg in GenArgList(arg_dict):
            arg[0](test_case)
    @unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
    def test_gpu_fixed_dropout(test_case):
        arg_dict = OrderedDict()
        arg_dict["test_fun"] = [
            fixed_gpu_seed_dropout_test,
        ]
        for arg in GenArgList(arg_dict):
            arg[0](test_case)
    @autotest()
    def autotest_dropout_p0(test_case):
        device = random_device()
        x = random_tensor(ndim=random(), dim0=random(1, 8)).to(device)
        m = torch.nn.Dropout(p=0, inplace=random_bool())
        return m(x)
    @autotest()
    def autotest_dropout_p1(test_case):
        device = random_device()
        x = random_tensor(ndim=random(), dim0=random(1, 8)).to(device)
        m = torch.nn.Dropout(p=1.0, inplace=random_bool())
        return m(x)
    @autotest()
    def autotest_dropout_eval(test_case):
        # eval() disables dropout entirely, even with p=1.0.
        device = random_device()
        x = random_tensor(ndim=random(), dim0=random(1, 8)).to(device)
        m = torch.nn.Dropout(p=1.0, inplace=random_bool())
        m.eval()
        return m(x)
    @autotest()
    def autotest_0dim_dropout_eval(test_case):
        device = random_device()
        x = random_tensor(ndim=0).to(device)
        m = torch.nn.Dropout(p=1.0, inplace=random_bool())
        m.eval()
        return m(x)
# Standard unittest entry point.
if __name__ == "__main__":
    unittest.main()
| nilq/baby-python | python |
def solution(arrows):
answer = 0
coorL = [[0,0]]
for each in arrows:
if each == 0:
a = [int(coorL[-1][0]), int(coorL[-1][1])+1]
elif each == 1:
a = [int(coorL[-1][0])+1, int(coorL[-1][1])+1]
elif each == 2:
a = [int(coorL[-1][0])+1, int(coorL[-1][1])]
elif each == 3:
a = [int(coorL[-1][0])+1, int(coorL[-1][1])-1]
elif each == 4:
a = [int(coorL[-1][0]), int(coorL[-1][1])-1]
elif each == 5:
a = [int(coorL[-1][0])-1, int(coorL[-1][1])-1]
elif each == 6:
a = [int(coorL[-1][0])-1, int(coorL[-1][1])]
elif each == 7:
a = [int(coorL[-1][0])-1, int(coorL[-1][1])+1]
if a in coorL:
answer += 1
coorL.append(a)
return answer
print(solution([6, 6, 6, 4, 4, 4, 2, 2, 2, 0, 0, 0, 1, 6, 5, 5, 3, 6, 0, 2, 4]))
#ans = 5 | nilq/baby-python | python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.