1664274
|
import numpy as np
import tensorflow as tf
import sys
import gc
import time
#from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops.rnn import bidirectional_dynamic_rnn
from tf_utils import variable_summaries, _batch_norm
from custom_ops import atrous_pool2d
def weight_variable(shape, name=None):
initial = tf.truncated_normal(shape, stddev=0.1)
w_var = tf.Variable(initial, name=name)
    if name is not None:
variable_summaries(w_var, name)
return w_var
def bias_variable(shape, name=None):
initial = tf.constant(0.1, shape=shape)
b_var = tf.Variable(initial, name=name)
    if name is not None:
variable_summaries(b_var, name)
return b_var
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def atrous_conv2d(x, W, rate):
return tf.nn.atrous_conv2d(x, W, rate, padding='SAME')
def max_pool(x, kH):
return tf.nn.max_pool(x, ksize=[1, kH, 1, 1],
strides=[1, 1, 1, 1], padding='SAME')
def atrous_pool(x, kH, dilation_rate):
return atrous_pool2d(x, ksize=[1, kH, 1, 1], rate=dilation_rate, padding="SAME", pooling_type="MAX")
# return tf.nn.pool(x, dilation_rate=[dilation_rate, 1], window_shape=[kH, 1],
# padding='VALID', pooling_type="MAX")
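# Note: the helpers above assume NHWC tensors in which the sequence runs along the
# height axis and the width axis is a singleton (the models below insert it with
# tf.expand_dims(embed, 2)), so kernels, strides and pooling windows only ever span
# the height dimension.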
def dilated_convolution_model(x, y_, dropout_keep_prob, batch_size, noutputs, dim_reduction=10, nkernels=[128, 240, 50], hidden=125):
#dim_reduction = 10
#nkernels = [128, 240, 50]
#hidden = 125
embedding_size = 4
W = {
"conv1": weight_variable([1*dim_reduction, 1, embedding_size, nkernels[0]], "W_conv1"),
"conv2": weight_variable([dim_reduction, 1, nkernels[0], nkernels[1]], "W_conv2"),
"conv3": weight_variable([dim_reduction, 1, nkernels[1], nkernels[2]]),
"conv4": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]]),
"conv5": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]]),
"conv6": weight_variable([1*dim_reduction, 1, nkernels[2], nkernels[2]], "W_conv6"),
#"conv7": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]]),
"hid1": weight_variable([nkernels[2], hidden]),
"hid2": weight_variable([hidden, noutputs]),
}
b = {
"conv1": bias_variable([nkernels[0]]),
"conv2": bias_variable([nkernels[1]]),
"conv3": bias_variable([nkernels[2]]),
"conv4": bias_variable([nkernels[2]]),
"conv5": bias_variable([nkernels[2]]),
"conv6": bias_variable([nkernels[2]]),
#"conv7": bias_variable([nkernels[2]]),
"hid1": bias_variable([hidden]),
"hid2": bias_variable([noutputs]),
}
embeddings = tf.Variable(tf.random_uniform([5, embedding_size]))
embed = tf.nn.embedding_lookup(embeddings, x)
    # and an empty axis for the height
embedded_image = tf.expand_dims(embed, 2)
# add the convolutional layers
h_conv1 = tf.nn.dropout(tf.nn.relu(tf.nn.conv2d(embedded_image, W["conv1"], strides=[1, 1*dim_reduction, 1, 1], padding='SAME') + b["conv1"]), dropout_keep_prob)
h_conv2 = tf.nn.dropout(tf.nn.relu(atrous_conv2d(h_conv1, W["conv2"], 3) + b["conv2"]), dropout_keep_prob)
h_conv3 = tf.nn.dropout(tf.nn.relu(atrous_conv2d(h_conv2, W["conv3"], 9) + b["conv3"]), dropout_keep_prob)
h_conv4 = tf.nn.dropout(tf.nn.relu(atrous_conv2d(h_conv3, W["conv4"], 27) + b["conv4"]), dropout_keep_prob)
h_conv5 = tf.nn.dropout(tf.nn.relu(atrous_conv2d(h_conv4, W["conv5"], 81) + b["conv5"]), dropout_keep_prob)
h_conv6 = tf.nn.dropout(tf.nn.relu(tf.nn.conv2d_transpose(h_conv5, W["conv6"], [batch_size, 25000, 1, nkernels[2]], [1, 1*dim_reduction, 1, 1]) + b["conv6"]), dropout_keep_prob)
flattened = tf.reshape(tf.squeeze(h_conv6), [batch_size*25000, nkernels[2]])
hid1 = tf.nn.dropout(tf.nn.relu(tf.matmul(flattened, W["hid1"]) + b["hid1"]), dropout_keep_prob)
outputs = tf.matmul(hid1, W["hid2"]) + b["hid2"]
return outputs, W, b, embed, [h_conv6]
def dilated_convolution_model_batchnorm(x, y_, dropout_keep_prob, batch_size, noutputs, is_training, decay_rate, dim_reduction=10, nkernels=[128, 240, 50], hidden=125):
# dim_reduction = 10
# nkernels = [128, 240, 50]
# hidden = 125
embedding_size = 4
W = {
"conv1": weight_variable([1*dim_reduction, 1, embedding_size, nkernels[0]], "W_conv1"),
"conv2": weight_variable([dim_reduction, 1, nkernels[0], nkernels[1]]),
"conv3": weight_variable([dim_reduction, 1, nkernels[1], nkernels[2]], "W_conv3"),
"conv4": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]]),
"conv5": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]]),
"conv6": weight_variable([1**dim_reduction, 1, nkernels[2], nkernels[2]], "W_conv6"),
#"conv7": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]]),
"hid1": weight_variable([nkernels[2], hidden]),
"hid2": weight_variable([hidden, noutputs]),
}
b = {
"conv1": bias_variable([nkernels[0]]),
"conv2": bias_variable([nkernels[1]]),
"conv3": bias_variable([nkernels[2]]),
"conv4": bias_variable([nkernels[2]]),
"conv5": bias_variable([nkernels[2]]),
"conv6": bias_variable([nkernels[2]]),
#"conv7": bias_variable([nkernels[2]]),
"hid1": bias_variable([hidden]),
"hid2": bias_variable([noutputs]),
}
embeddings = tf.Variable(tf.random_uniform([5, embedding_size]))
embed = tf.nn.embedding_lookup(embeddings, x)
    # and an empty axis for the height
embedded_image = tf.expand_dims(embed, 2)
# add the convolutional layers
h_conv1 = tf.nn.conv2d(embedded_image, W["conv1"], strides=[1, 1*dim_reduction, 1, 1], padding='SAME') + b["conv1"]
h_conv1_norm = tf.nn.dropout(tf.nn.relu(_batch_norm(h_conv1, is_training, decay_rate, "conv1-norm")), dropout_keep_prob)
h_conv2 = atrous_conv2d(h_conv1_norm, W["conv2"], 3) + b["conv2"]
h_conv2_norm = tf.nn.dropout(tf.nn.relu(_batch_norm(h_conv2, is_training, decay_rate, "conv2-norm")), dropout_keep_prob)
h_conv3 = atrous_conv2d(h_conv2_norm, W["conv3"], 9) + b["conv3"]
h_conv3_norm = tf.nn.dropout(tf.nn.relu(_batch_norm(h_conv3, is_training, decay_rate, "conv3-norm")), dropout_keep_prob)
h_conv4 = atrous_conv2d(h_conv3_norm, W["conv4"], 27) + b["conv4"]
h_conv4_norm = tf.nn.dropout(tf.nn.relu(_batch_norm(h_conv4, is_training, decay_rate, "conv4-norm")), dropout_keep_prob)
h_conv5 = atrous_conv2d(h_conv4_norm, W["conv5"], 81) + b["conv5"]
h_conv5_norm = tf.nn.dropout(tf.nn.relu(_batch_norm(h_conv5, is_training, decay_rate, "conv5-norm")), dropout_keep_prob)
h_conv6 = tf.nn.conv2d_transpose(h_conv5_norm, W["conv6"], [batch_size, 25000, 1, nkernels[2]], [1, 1*dim_reduction, 1, 1]) + b["conv6"]
h_conv6_norm = tf.nn.dropout(tf.nn.relu(_batch_norm(h_conv6, is_training, decay_rate, "conv6-norm")), dropout_keep_prob)
flattened = tf.reshape(tf.squeeze(h_conv6_norm), [batch_size*25000, nkernels[2]])
hid1 = tf.nn.dropout(tf.nn.relu(tf.matmul(flattened, W["hid1"]) + b["hid1"]), dropout_keep_prob)
outputs = tf.matmul(hid1, W["hid2"]) + b["hid2"]
return outputs, W, b, embed, [h_conv6_norm]
def dilated_convolution_with_pooling(x, y_, dropout_keep_prob, batch_size, noutputs, dim_reduction=10, nkernels=[128, 240, 50], hidden=125):
# dim_reduction = 10
# nkernels = [128, 240, 50]
# hidden = 125
embedding_size = 4
filter_height = 5
W = {
"conv1": weight_variable([dim_reduction, 1, embedding_size, nkernels[0]], "W_conv1"),
"conv2": weight_variable([dim_reduction, 1, nkernels[0], nkernels[1]]),
"conv3": weight_variable([dim_reduction, 1, nkernels[1], nkernels[2]], "W_conv3"),
"conv4": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]]),
"conv5": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]]),
"conv6": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]], "W_conv6"),
# "conv7": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]]),
"hid1": weight_variable([nkernels[2], hidden]),
"hid2": weight_variable([hidden, noutputs]),
}
b = {
"conv1": bias_variable([nkernels[0]]),
"conv2": bias_variable([nkernels[1]]),
"conv3": bias_variable([nkernels[2]]),
"conv4": bias_variable([nkernels[2]]),
"conv5": bias_variable([nkernels[2]]),
"conv6": bias_variable([nkernels[2]]),
"conv7": bias_variable([nkernels[2]]),
"hid1": bias_variable([hidden]),
"hid2": bias_variable([noutputs]),
}
embeddings = tf.Variable(tf.random_uniform([5, embedding_size]))
embed = tf.nn.embedding_lookup(embeddings, x)
    # and an empty axis for the height
embedded_image = tf.expand_dims(embed, 2)
# add the convolutional layers
h_conv1 = tf.nn.dropout(tf.nn.relu(tf.nn.conv2d(embedded_image, W["conv1"], strides=[1, dim_reduction, 1, 1], padding='SAME') + b["conv1"]), dropout_keep_prob)
h_conv1_pooled = max_pool(h_conv1, filter_height)
h_conv2 = tf.nn.dropout(tf.nn.relu(atrous_conv2d(h_conv1_pooled, W["conv2"], 3) + b["conv2"]), dropout_keep_prob)
h_conv2_pooled = max_pool(h_conv2, filter_height)
h_conv3 = tf.nn.dropout(tf.nn.relu(atrous_conv2d(h_conv2_pooled, W["conv3"], 9) + b["conv3"]), dropout_keep_prob)
h_conv4 = tf.nn.dropout(tf.nn.relu(atrous_conv2d(h_conv3, W["conv4"], 27) + b["conv4"]), dropout_keep_prob)
h_conv4_pooled = max_pool(h_conv4, filter_height)
h_conv5 = tf.nn.dropout(tf.nn.relu(atrous_conv2d(h_conv4_pooled, W["conv5"], 81) + b["conv5"]), dropout_keep_prob)
h_conv6 = tf.nn.dropout(tf.nn.relu(tf.nn.conv2d_transpose(h_conv5, W["conv6"], [batch_size, 25000, 1, nkernels[2]], [1, dim_reduction, 1, 1]) + b["conv6"]), dropout_keep_prob)
flattened = tf.reshape(tf.squeeze(h_conv6), [batch_size*25000, nkernels[2]])
hid1 = tf.nn.dropout(tf.nn.relu(tf.matmul(flattened, W["hid1"]) + b["hid1"]), dropout_keep_prob)
outputs = tf.matmul(hid1, W["hid2"]) + b["hid2"]
return outputs, W, b, embed, [h_conv6]
def dilated_convolution_with_pooling_batchnorm(x, y_, dropout_keep_prob, batch_size, noutputs, is_training, decay_rate, dim_reduction=10, nkernels=[128, 240, 50], hidden=125):
# dim_reduction = 10
# nkernels = [128, 240, 50]
# hidden = 125
embedding_size = 4
filter_height = 5
W = {
"conv1": weight_variable([dim_reduction, 1, embedding_size, nkernels[0]], "W_conv1"),
"conv2": weight_variable([dim_reduction, 1, nkernels[0], nkernels[1]]),
"conv3": weight_variable([dim_reduction, 1, nkernels[1], nkernels[2]], "W_conv3"),
"conv4": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]]),
"conv5": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]]),
"conv6": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]], "W_conv6"),
# "conv7": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]]),
"hid1": weight_variable([nkernels[2], hidden]),
"hid2": weight_variable([hidden, noutputs]),
}
b = {
"conv1": bias_variable([nkernels[0]]),
"conv2": bias_variable([nkernels[1]]),
"conv3": bias_variable([nkernels[2]]),
"conv4": bias_variable([nkernels[2]]),
"conv5": bias_variable([nkernels[2]]),
"conv6": bias_variable([nkernels[2]]),
"conv7": bias_variable([nkernels[2]]),
"hid1": bias_variable([hidden]),
"hid2": bias_variable([noutputs]),
}
embeddings = tf.Variable(tf.random_uniform([5, embedding_size]))
embed = tf.nn.embedding_lookup(embeddings, x)
    # and an empty axis for the height
embedded_image = tf.expand_dims(embed, 2)
# add the convolutional layers
h_conv1 = tf.nn.conv2d(embedded_image, W["conv1"], strides=[1, dim_reduction, 1, 1], padding='SAME') + b["conv1"]
h_conv1_norm = tf.nn.dropout(tf.nn.relu(_batch_norm(h_conv1, is_training, decay_rate, "conv1-norm")), dropout_keep_prob)
h_conv1_pooled = max_pool(h_conv1_norm, filter_height)
h_conv2 = atrous_conv2d(h_conv1_pooled, W["conv2"], 3) + b["conv2"]
h_conv2_norm = tf.nn.dropout(tf.nn.relu(_batch_norm(h_conv2, is_training, decay_rate, "conv2-norm")), dropout_keep_prob)
h_conv2_pooled = max_pool(h_conv2_norm, filter_height)
h_conv3 = atrous_conv2d(h_conv2_pooled, W["conv3"], 9) + b["conv3"]
h_conv3_norm = tf.nn.dropout(tf.nn.relu(_batch_norm(h_conv3, is_training, decay_rate, "conv3-norm")), dropout_keep_prob)
h_conv4 = atrous_conv2d(h_conv3_norm, W["conv4"], 27) + b["conv4"]
h_conv4_norm = tf.nn.dropout(tf.nn.relu(_batch_norm(h_conv4, is_training, decay_rate, "conv4-norm")), dropout_keep_prob)
h_conv4_pooled = max_pool(h_conv4_norm, filter_height)
h_conv5 = atrous_conv2d(h_conv4_pooled, W["conv5"], 81) + b["conv5"]
h_conv5_norm = tf.nn.dropout(tf.nn.relu(_batch_norm(h_conv5, is_training, decay_rate, "conv5-norm")), dropout_keep_prob)
h_conv6 = tf.nn.dropout(tf.nn.relu(tf.nn.conv2d_transpose(h_conv5_norm, W["conv6"], [batch_size, 25000, 1, nkernels[2]], [1, dim_reduction, 1, 1]) + b["conv6"]), dropout_keep_prob)
h_conv6_norm = tf.nn.dropout(tf.nn.relu(_batch_norm(h_conv6, is_training, decay_rate, "conv6-norm")), dropout_keep_prob)
flattened = tf.reshape(tf.squeeze(h_conv6_norm), [batch_size*25000, nkernels[2]])
hid1 = tf.nn.dropout(tf.nn.relu(tf.matmul(flattened, W["hid1"]) + b["hid1"]), dropout_keep_prob)
outputs = tf.matmul(hid1, W["hid2"]) + b["hid2"]
return outputs, W, b, embed, [h_conv6_norm]
def dilated_convolution_with_dilated_pooling(x, y_, dropout_keep_prob, batch_size, noutputs, dim_reduction=10, nkernels=[128, 240, 50], hidden=125):
# dim_reduction = 10
# nkernels = [128, 240, 50]
# hidden = 125
embedding_size = 4
filter_height = 5
W = {
"conv1": weight_variable([dim_reduction, 1, embedding_size, nkernels[0]], "W_conv1"),
"conv2": weight_variable([dim_reduction, 1, nkernels[0], nkernels[1]]),
"conv3": weight_variable([dim_reduction, 1, nkernels[1], nkernels[2]], "W_conv3"),
"conv4": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]]),
"conv5": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]]),
"conv6": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]], "W_conv6"),
# "conv7": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]]),
"hid1": weight_variable([nkernels[2], hidden]),
"hid2": weight_variable([hidden, noutputs]),
}
b = {
"conv1": bias_variable([nkernels[0]]),
"conv2": bias_variable([nkernels[1]]),
"conv3": bias_variable([nkernels[2]]),
"conv4": bias_variable([nkernels[2]]),
"conv5": bias_variable([nkernels[2]]),
"conv6": bias_variable([nkernels[2]]),
"conv7": bias_variable([nkernels[2]]),
"hid1": bias_variable([hidden]),
"hid2": bias_variable([noutputs]),
}
embeddings = tf.Variable(tf.random_uniform([5, embedding_size]))
embed = tf.nn.embedding_lookup(embeddings, x)
    # and an empty axis for the height
embedded_image = tf.expand_dims(embed, 2)
# add the convolutional layers
h_conv1 = tf.nn.dropout(tf.nn.relu(tf.nn.conv2d(embedded_image, W["conv1"], strides=[1, dim_reduction, 1, 1], padding='SAME') + b["conv1"]), dropout_keep_prob)
h_conv2 = tf.nn.dropout(tf.nn.relu(atrous_conv2d(h_conv1, W["conv2"], 3) + b["conv2"]), dropout_keep_prob)
h_conv3 = tf.nn.dropout(tf.nn.relu(atrous_conv2d(h_conv2, W["conv3"], 9) + b["conv3"]), dropout_keep_prob)
h_conv4 = tf.nn.dropout(tf.nn.relu(atrous_conv2d(h_conv3, W["conv4"], 27) + b["conv4"]), dropout_keep_prob)
h_conv4_pooled = atrous_pool(h_conv4, filter_height, 27)
h_conv5 = tf.nn.dropout(tf.nn.relu(atrous_conv2d(h_conv4_pooled, W["conv5"], 81) + b["conv5"]), dropout_keep_prob)
h_conv5_pooled = atrous_pool(h_conv5, filter_height, 81)
h_conv6 = tf.nn.dropout(tf.nn.relu(tf.nn.conv2d_transpose(h_conv5_pooled, W["conv6"], [batch_size, 25000, 1, nkernels[2]], [1, dim_reduction, 1, 1]) + b["conv6"]), dropout_keep_prob)
flattened = tf.reshape(tf.squeeze(h_conv6), [batch_size*25000, nkernels[2]])
hid1 = tf.nn.dropout(tf.nn.relu(tf.matmul(flattened, W["hid1"]) + b["hid1"]), dropout_keep_prob)
outputs = tf.matmul(hid1, W["hid2"]) + b["hid2"]
return outputs, W, b, embed, [h_conv6]
def dilated_convolution_with_dilated_pooling_batchnorm(x, y_, dropout_keep_prob, batch_size, noutputs, is_training, decay_rate, dim_reduction=10, nkernels=[128, 240, 50], hidden=125):
# dim_reduction = 10
# nkernels = [128, 240, 50]
# hidden = 125
embedding_size = 4
filter_height = 5
W = {
"conv1": weight_variable([dim_reduction, 1, embedding_size, nkernels[0]], "W_conv1"),
"conv2": weight_variable([dim_reduction, 1, nkernels[0], nkernels[1]]),
"conv3": weight_variable([dim_reduction, 1, nkernels[1], nkernels[2]], "W_conv3"),
"conv4": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]]),
"conv5": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]]),
"conv6": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]], "W_conv6"),
# "conv7": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]]),
"hid1": weight_variable([nkernels[2], hidden]),
"hid2": weight_variable([hidden, noutputs]),
}
b = {
"conv1": bias_variable([nkernels[0]]),
"conv2": bias_variable([nkernels[1]]),
"conv3": bias_variable([nkernels[2]]),
"conv4": bias_variable([nkernels[2]]),
"conv5": bias_variable([nkernels[2]]),
"conv6": bias_variable([nkernels[2]]),
"conv7": bias_variable([nkernels[2]]),
"hid1": bias_variable([hidden]),
"hid2": bias_variable([noutputs]),
}
embeddings = tf.Variable(tf.random_uniform([5, embedding_size]))
embed = tf.nn.embedding_lookup(embeddings, x)
    # and an empty axis for the height
embedded_image = tf.expand_dims(embed, 2)
# add the convolutional layers
h_conv1 = tf.nn.dropout(tf.nn.relu(_batch_norm(tf.nn.conv2d(embedded_image, W["conv1"], strides=[1, dim_reduction, 1, 1], padding='SAME') + b["conv1"], is_training, decay_rate, "conv1-norm")), dropout_keep_prob)
h_conv2 = tf.nn.dropout(tf.nn.relu(_batch_norm(atrous_conv2d(h_conv1, W["conv2"], 3) + b["conv2"], is_training, decay_rate, "conv2-norm")), dropout_keep_prob)
h_conv3 = tf.nn.dropout(tf.nn.relu(_batch_norm(atrous_conv2d(h_conv2, W["conv3"], 9) + b["conv3"], is_training, decay_rate, "conv3-norm")), dropout_keep_prob)
h_conv4 = tf.nn.dropout(tf.nn.relu(_batch_norm(atrous_conv2d(h_conv3, W["conv4"], 27) + b["conv4"], is_training, decay_rate, "conv4-norm")), dropout_keep_prob)
h_conv4_pooled = atrous_pool(h_conv4, filter_height, 27)
h_conv5 = tf.nn.dropout(tf.nn.relu(_batch_norm(atrous_conv2d(h_conv4_pooled, W["conv5"], 81) + b["conv5"], is_training, decay_rate, "conv5-norm")), dropout_keep_prob)
h_conv5_pooled = atrous_pool(h_conv5, filter_height, 81)
h_conv6 = tf.nn.dropout(tf.nn.relu(_batch_norm(tf.nn.conv2d_transpose(h_conv5_pooled, W["conv6"], [batch_size, 25000, 1, nkernels[2]], [1, dim_reduction, 1, 1]) + b["conv6"], is_training, decay_rate, "conv6-norm")), dropout_keep_prob)
flattened = tf.reshape(tf.squeeze(h_conv6), [batch_size*25000, nkernels[2]])
hid1 = tf.nn.dropout(tf.nn.relu(tf.matmul(flattened, W["hid1"]) + b["hid1"]), dropout_keep_prob)
outputs = tf.matmul(hid1, W["hid2"]) + b["hid2"]
return outputs, W, b, embed, [h_conv6]
def convolution_7_layer_resizing(x, y_, dropout_keep_prob, batch_size, noutputs, dim_reduction=10, nkernels=[128, 240, 50], hidden=125):
# dim_reduction = 10
# nkernels = [128, 240, 50]
# hidden = 125
embedding_size = 4
W = {
"conv1": weight_variable([dim_reduction, 1, embedding_size, nkernels[0]], "W_conv1"),
"conv2": weight_variable([dim_reduction, 1, nkernels[0], nkernels[1]]),
"conv3": weight_variable([dim_reduction, 1, nkernels[1], nkernels[2]], "W_conv3"),
"conv4": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]]),
"conv5": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]]),
"conv6": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]], "W_conv6"),
"conv7": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]], "W_conv7"),
"hid1": weight_variable([nkernels[2], hidden]),
"hid2": weight_variable([hidden, noutputs]),
}
b = {
"conv1": bias_variable([nkernels[0]]),
"conv2": bias_variable([nkernels[1]]),
"conv3": bias_variable([nkernels[2]]),
"conv4": bias_variable([nkernels[2]]),
"conv5": bias_variable([nkernels[2]]),
"conv6": bias_variable([nkernels[2]]),
"conv7": bias_variable([nkernels[2]]),
"hid1": bias_variable([hidden]),
"hid2": bias_variable([noutputs]),
}
embeddings = tf.Variable(tf.random_uniform([5, embedding_size]))
embed = tf.nn.embedding_lookup(embeddings, x)
    # and an empty axis for the height
embedded_image = tf.expand_dims(embed, 2)
# add the convolutional layers
h_conv1 = tf.nn.dropout(tf.nn.relu(tf.nn.conv2d(embedded_image, W["conv1"], strides=[1, dim_reduction, 1, 1], padding='SAME') + b["conv1"]), dropout_keep_prob)
h_conv2 = tf.nn.dropout(tf.nn.relu(conv2d(h_conv1, W["conv2"]) + b["conv2"]), dropout_keep_prob)
h_conv3 = tf.nn.dropout(tf.nn.relu(conv2d(h_conv2, W["conv3"]) + b["conv3"]), dropout_keep_prob)
h_conv4 = tf.nn.dropout(tf.nn.relu(conv2d(h_conv3, W["conv4"]) + b["conv4"]), dropout_keep_prob)
h_conv5 = tf.nn.dropout(tf.nn.relu(conv2d(h_conv4, W["conv5"]) + b["conv5"]), dropout_keep_prob)
h_conv6 = tf.nn.dropout(tf.nn.relu(conv2d(h_conv5, W["conv6"]) + b["conv6"]), dropout_keep_prob)
h_conv7 = tf.nn.dropout(tf.nn.relu(tf.nn.conv2d_transpose(h_conv6, W["conv7"], [batch_size, 25000, 1, nkernels[2]], [1, dim_reduction, 1, 1]) + b["conv7"]), dropout_keep_prob)
flattened = tf.reshape(tf.squeeze(h_conv7), [batch_size*25000, nkernels[2]])
hid1 = tf.nn.dropout(tf.nn.relu(tf.matmul(flattened, W["hid1"]) + b["hid1"]), dropout_keep_prob)
outputs = tf.matmul(hid1, W["hid2"]) + b["hid2"]
return outputs, W, b, embed, [h_conv7]
def convolution_7_layer_resizing_batchnorm(x, y_, dropout_keep_prob, batch_size, noutputs, is_training, decay_rate, dim_reduction=10, nkernels=[128, 240, 50], hidden=125):
# dim_reduction = 10
# nkernels = [128, 240, 50]
# hidden = 125
embedding_size = 4
W = {
"conv1": weight_variable([dim_reduction, 1, embedding_size, nkernels[0]], "W_conv1"),
"conv2": weight_variable([dim_reduction, 1, nkernels[0], nkernels[1]]),
"conv3": weight_variable([dim_reduction, 1, nkernels[1], nkernels[2]], "W_conv3"),
"conv4": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]]),
"conv5": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]]),
"conv6": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]], "W_conv6"),
"conv7": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]], "W_conv7"),
"hid1": weight_variable([nkernels[2], hidden]),
"hid2": weight_variable([hidden, noutputs]),
}
b = {
"conv1": bias_variable([nkernels[0]]),
"conv2": bias_variable([nkernels[1]]),
"conv3": bias_variable([nkernels[2]]),
"conv4": bias_variable([nkernels[2]]),
"conv5": bias_variable([nkernels[2]]),
"conv6": bias_variable([nkernels[2]]),
"conv7": bias_variable([nkernels[2]]),
"hid1": bias_variable([hidden]),
"hid2": bias_variable([noutputs]),
}
embeddings = tf.Variable(tf.random_uniform([5, embedding_size]))
embed = tf.nn.embedding_lookup(embeddings, x)
    # and an empty axis for the height
embedded_image = tf.expand_dims(embed, 2)
# add the convolutional layers
h_conv1 = tf.nn.dropout(tf.nn.relu(_batch_norm(tf.nn.conv2d(embedded_image, W["conv1"], strides=[1, dim_reduction, 1, 1], padding='SAME') + b["conv1"], is_training, decay_rate, "conv1-norm")), dropout_keep_prob)
h_conv2 = tf.nn.dropout(tf.nn.relu(_batch_norm(conv2d(h_conv1, W["conv2"]) + b["conv2"], is_training, decay_rate, "conv2-norm")), dropout_keep_prob)
h_conv3 = tf.nn.dropout(tf.nn.relu(_batch_norm(conv2d(h_conv2, W["conv3"]) + b["conv3"], is_training, decay_rate, "conv3-norm")), dropout_keep_prob)
h_conv4 = tf.nn.dropout(tf.nn.relu(_batch_norm(conv2d(h_conv3, W["conv4"]) + b["conv4"], is_training, decay_rate, "conv4-norm")), dropout_keep_prob)
h_conv5 = tf.nn.dropout(tf.nn.relu(_batch_norm(conv2d(h_conv4, W["conv5"]) + b["conv5"], is_training, decay_rate, "conv5-norm")), dropout_keep_prob)
h_conv6 = tf.nn.dropout(tf.nn.relu(_batch_norm(conv2d(h_conv5, W["conv6"]) + b["conv6"], is_training, decay_rate, "conv6-norm")), dropout_keep_prob)
h_conv7 = tf.nn.dropout(tf.nn.relu(_batch_norm(tf.nn.conv2d_transpose(h_conv6, W["conv7"], [batch_size, 25000, 1, nkernels[2]], [1, dim_reduction, 1, 1]) + b["conv7"], is_training, decay_rate, "conv7-norm")), dropout_keep_prob)
flattened = tf.reshape(tf.squeeze(h_conv7), [batch_size*25000, nkernels[2]])
hid1 = tf.nn.dropout(tf.nn.relu(tf.matmul(flattened, W["hid1"]) + b["hid1"]), dropout_keep_prob)
outputs = tf.matmul(hid1, W["hid2"]) + b["hid2"]
return outputs, W, b, embed, [h_conv7]
def convolution_3_layer_resizing(x, y_, dropout_keep_prob, batch_size, noutputs, dim_reduction=10, nkernels=[128, 240, 50], hidden=125):
# dim_reduction = 10
# nkernels = [128, 240, 50]
# hidden = 125
embedding_size = 4
W = {
"conv1": weight_variable([dim_reduction, 1, embedding_size, nkernels[0]], "W_conv1"),
"conv2": weight_variable([dim_reduction, 1, nkernels[0], nkernels[1]], "W_conv2"),
"conv3": weight_variable([dim_reduction, 1, nkernels[2], nkernels[1]], "W_conv3"),
"hid1": weight_variable([nkernels[2], hidden], "W_hid1"),
"hid2": weight_variable([hidden, noutputs], "W_hid2"),
}
b = {
"conv1": bias_variable([nkernels[0]]),
"conv2": bias_variable([nkernels[1]]),
"conv3": bias_variable([nkernels[2]]),
"hid1": bias_variable([hidden]),
"hid2": bias_variable([noutputs]),
}
embeddings = tf.Variable(tf.random_uniform([5, embedding_size]))
embed = tf.nn.embedding_lookup(embeddings, x)
    # and an empty axis for the height
embedded_image = tf.expand_dims(embed, 2)
# add the convolutional layers
h_conv1 = tf.nn.dropout(tf.nn.relu(tf.nn.conv2d(embedded_image, W["conv1"], strides=[1, dim_reduction, 1, 1], padding='SAME') + b["conv1"]), dropout_keep_prob)
h_conv2 = tf.nn.dropout(tf.nn.relu(conv2d(h_conv1, W["conv2"]) + b["conv2"]), dropout_keep_prob)
h_conv3 = tf.nn.dropout(tf.nn.relu(tf.nn.conv2d_transpose(h_conv2, W["conv3"], [batch_size, 25000, 1, nkernels[2]], [1, dim_reduction, 1, 1]) + b["conv3"]), dropout_keep_prob)
flattened = tf.reshape(tf.squeeze(h_conv3), [batch_size*25000, nkernels[2]])
hid1 = tf.nn.dropout(tf.nn.relu(tf.matmul(flattened, W["hid1"]) + b["hid1"]), dropout_keep_prob)
outputs = tf.matmul(hid1, W["hid2"]) + b["hid2"]
return outputs, W, b, embed, [h_conv3]
def convolution_3_layer_resizing_batchnorm(x, y_, dropout_keep_prob, batch_size, noutputs, is_training, decay_rate, dim_reduction=10, nkernels=[128, 240, 50], hidden=125):
# dim_reduction = 10
# nkernels = [128, 240, 50]
# hidden = 125
embedding_size = 4
W = {
"conv1": weight_variable([dim_reduction, 1, embedding_size, nkernels[0]], "W_conv1"),
"conv2": weight_variable([dim_reduction, 1, nkernels[0], nkernels[1]], "W_conv2"),
"conv3": weight_variable([dim_reduction, 1, nkernels[2], nkernels[1]], "W_conv3"),
"hid1": weight_variable([nkernels[2], hidden]),
"hid2": weight_variable([hidden, noutputs]),
}
b = {
"conv1": bias_variable([nkernels[0]]),
"conv2": bias_variable([nkernels[1]]),
"conv3": bias_variable([nkernels[2]]),
"hid1": bias_variable([hidden]),
"hid2": bias_variable([noutputs]),
}
embeddings = tf.Variable(tf.random_uniform([5, embedding_size]))
embed = tf.nn.embedding_lookup(embeddings, x)
    # and an empty axis for the height
embedded_image = tf.expand_dims(embed, 2)
# add the convolutional layers
h_conv1 = tf.nn.dropout(tf.nn.relu(_batch_norm(tf.nn.conv2d(embedded_image, W["conv1"], strides=[1, dim_reduction, 1, 1], padding='SAME') + b["conv1"], is_training, decay_rate, "conv1-norm")), dropout_keep_prob)
h_conv2 = tf.nn.dropout(tf.nn.relu(_batch_norm(conv2d(h_conv1, W["conv2"]) + b["conv2"], is_training, decay_rate, "conv2-norm")), dropout_keep_prob)
h_conv3 = tf.nn.dropout(tf.nn.relu(_batch_norm(tf.nn.conv2d_transpose(h_conv2, W["conv3"], [batch_size, 25000, 1, nkernels[2]], [1, dim_reduction, 1, 1]) + b["conv3"], is_training, decay_rate, "conv3-norm")), dropout_keep_prob)
flattened = tf.reshape(tf.squeeze(h_conv3), [batch_size*25000, nkernels[2]])
hid1 = tf.nn.dropout(tf.nn.relu(tf.matmul(flattened, W["hid1"]) + b["hid1"]), dropout_keep_prob)
outputs = tf.matmul(hid1, W["hid2"]) + b["hid2"]
return outputs, W, b, embed, [h_conv3]
def convolution_1_layer(x, y_, dropout_keep_prob, batch_size, noutputs, dim_reduction=10, nkernels=[128, 240, 50], hidden=125):
# dim_reduction = 10
# nkernels = [128, 240, 50]
# hidden = 125
embedding_size = 4
W = {
"conv1": weight_variable([dim_reduction, 1, embedding_size, nkernels[0]], "W_conv1"),
"hid1": weight_variable([nkernels[0], hidden]),
"hid2": weight_variable([hidden, noutputs]),
}
b = {
"conv1": bias_variable([nkernels[0]]),
"hid1": bias_variable([hidden]),
"hid2": bias_variable([noutputs]),
}
embeddings = tf.Variable(tf.random_uniform([5, embedding_size]))
embed = tf.nn.embedding_lookup(embeddings, x)
    # and an empty axis for the height
embedded_image = tf.expand_dims(embed, 2)
# add the convolutional layers
h_conv1 = tf.nn.dropout(tf.nn.relu(conv2d(embedded_image, W["conv1"]) + b["conv1"]), dropout_keep_prob)
flattened = tf.reshape(tf.squeeze(h_conv1), [batch_size*25000, nkernels[0]])
hid1 = tf.nn.dropout(tf.nn.relu(tf.matmul(flattened, W["hid1"]) + b["hid1"]), dropout_keep_prob)
outputs = tf.matmul(hid1, W["hid2"]) + b["hid2"]
return outputs, W, b, embed, [h_conv1]
def convolution_3_layer(x, y_, dropout_keep_prob, batch_size, noutputs, dim_reduction=10, nkernels=[128, 240, 50], hidden=125):
# dim_reduction = 10
# nkernels = [128, 240, 50]
# hidden = 125
embedding_size = 4
W = {
"conv1": weight_variable([dim_reduction, 1, embedding_size, nkernels[0]], "W_conv1"),
"conv2": weight_variable([dim_reduction, 1, nkernels[0], nkernels[1]], "W_conv2"),
"conv3": weight_variable([dim_reduction, 1, nkernels[1], nkernels[2]], "W_conv3"),
"hid1": weight_variable([nkernels[2], hidden]),
"hid2": weight_variable([hidden, noutputs]),
}
b = {
"conv1": bias_variable([nkernels[0]]),
"conv2": bias_variable([nkernels[1]]),
"conv3": bias_variable([nkernels[2]]),
"hid1": bias_variable([hidden]),
"hid2": bias_variable([noutputs]),
}
embeddings = tf.Variable(tf.random_uniform([5, embedding_size]))
embed = tf.nn.embedding_lookup(embeddings, x)
    # and an empty axis for the height
embedded_image = tf.expand_dims(embed, 2)
# add the convolutional layers
h_conv1 = tf.nn.dropout(tf.nn.relu(conv2d(embedded_image, W["conv1"])+ b["conv1"]), dropout_keep_prob)
h_conv2 = tf.nn.dropout(tf.nn.relu(conv2d(h_conv1, W["conv2"]) + b["conv2"]), dropout_keep_prob)
h_conv3 = tf.nn.dropout(tf.nn.relu(conv2d(h_conv2, W["conv3"]) + b["conv3"]), dropout_keep_prob)
flattened = tf.reshape(tf.squeeze(h_conv3), [batch_size*25000, nkernels[2]])
hid1 = tf.nn.dropout(tf.nn.relu(tf.matmul(flattened, W["hid1"]) + b["hid1"]), dropout_keep_prob)
outputs = tf.matmul(hid1, W["hid2"]) + b["hid2"]
return outputs, W, b, embed, [h_conv3]
def conv_bi_lstm(x, y_, dropout_keep_prob, batch_size, noutputs, dim_reduction=100, nkernels=[128, 20, 50], hidden=125):
# dim_reduction = 100
# nkernels = [128, 240, 50]
# n_hidden = 20
# hidden = 125
n_hidden = nkernels[1]
embedding_size = 4
fw_cell = tf.contrib.rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
bw_cell = tf.contrib.rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
total_output_size = fw_cell.output_size + bw_cell.output_size
W = {
"conv1": weight_variable([dim_reduction, 1, embedding_size, nkernels[0]], "W_conv1"),
#"conv3": weight_variable([dim_reduction, 1, nkernels[2], nkernels[0]]),
"conv3": weight_variable([dim_reduction, 1, nkernels[2], total_output_size], "W_conv3"),
"hid1": weight_variable([nkernels[2], hidden]),
"hid2": weight_variable([hidden, noutputs]),
}
b = {
"conv1": bias_variable([nkernels[0]]),
"conv3": bias_variable([nkernels[2]]),
"hid1": bias_variable([hidden]),
"hid2": bias_variable([noutputs]),
}
embeddings = tf.Variable(tf.random_uniform([5, embedding_size]))
embed = tf.nn.embedding_lookup(embeddings, x)
    print(1, embed.get_shape())
    # and an empty axis for the height
    embedded_image = tf.expand_dims(embed, 2)
    print(2, embedded_image.get_shape())
    h_conv1 = tf.nn.relu(tf.nn.conv2d(embedded_image, W["conv1"], strides=[1, dim_reduction, 1, 1], padding='SAME') + b["conv1"])
    print(3, h_conv1.get_shape())
    h_conv1_sq = tf.squeeze(tf.nn.dropout(h_conv1, dropout_keep_prob), axis=[2])
    print(4, h_conv1_sq.get_shape())
#splitted = tf.unstack(h_conv1_sq, axis=1)
    seq_len = int(h_conv1_sq.get_shape()[1])
    print(seq_len, batch_size)
    seq_lens = tf.ones([batch_size], tf.int32) * seq_len
    print(seq_lens)
    outputs, _ = tf.nn.bidirectional_dynamic_rnn(fw_cell, bw_cell, h_conv1_sq, dtype=tf.float32, time_major=False, sequence_length=seq_lens)
    packed = tf.expand_dims(tf.concat(outputs, 2), 2)
    print(5, packed.get_shape())
#concatenated = tf.nn.dropout(tf.concat(2, outputs), dropout_keep_prob)
h_conv3 = tf.nn.dropout(tf.nn.relu(tf.nn.conv2d_transpose(packed, W["conv3"], [batch_size, 25000, 1, nkernels[2]], [1, dim_reduction, 1, 1]) + b["conv3"]), dropout_keep_prob)
    print(6, h_conv3.get_shape())
    # add the convolutional layers
    #h_conv1 = tf.nn.dropout(tf.nn.relu(conv2d(embedded_image, W["conv1"]) + b["conv1"]), .2)
    flattened = tf.reshape(tf.squeeze(h_conv3, axis=[2]), [batch_size*25000, nkernels[2]])
    print(7, flattened.get_shape())
    hid1 = tf.nn.dropout(tf.nn.relu(tf.matmul(flattened, W["hid1"]) + b["hid1"]), dropout_keep_prob)
    print(8, hid1.get_shape())
outputs = tf.matmul(hid1, W["hid2"]) + b["hid2"]
return outputs, W, b, embed, [h_conv3]
def ID_block_batchnorm(inp, W, b, dim_reduction, dropout_keep_prob, batch_size, nkernels, is_training, decay_rate, block_suffix):
h_conv1 = tf.nn.conv2d(inp, W["conv1"], strides=[1, 2*dim_reduction, 1, 1], padding='SAME') + b["conv1"]
h_conv1_norm = tf.nn.dropout(tf.nn.relu(_batch_norm(h_conv1, is_training, decay_rate, "conv1-norm" + block_suffix)), dropout_keep_prob)
h_conv2 = atrous_conv2d(h_conv1_norm, W["conv2"], 3) + b["conv2"]
h_conv2_norm = tf.nn.dropout(tf.nn.relu(_batch_norm(h_conv2, is_training, decay_rate, "conv2-norm" + block_suffix)), dropout_keep_prob)
h_conv3 = atrous_conv2d(h_conv2_norm, W["conv3"], 9) + b["conv3"]
h_conv3_norm = tf.nn.dropout(tf.nn.relu(_batch_norm(h_conv3, is_training, decay_rate, "conv3-norm" + block_suffix)), dropout_keep_prob)
h_conv4 = atrous_conv2d(h_conv3_norm, W["conv4"], 27) + b["conv4"]
h_conv4_norm = tf.nn.dropout(tf.nn.relu(_batch_norm(h_conv4, is_training, decay_rate, "conv4-norm" + block_suffix)), dropout_keep_prob)
h_conv5 = tf.nn.conv2d_transpose(h_conv4_norm, W["conv5"], [batch_size, 25000, 1, nkernels[0]], [1, 2*dim_reduction, 1, 1]) + b["conv5"]
h_conv5_norm = tf.nn.dropout(tf.nn.relu(_batch_norm(h_conv5, is_training, decay_rate, "conv5-norm" + block_suffix)), dropout_keep_prob)
return h_conv5_norm
def ID_block(inp, W, b, dim_reduction, dropout_keep_prob, batch_size, nkernels):
h_conv1 = tf.nn.dropout(tf.nn.relu(tf.nn.conv2d(inp, W["conv1"], strides=[1, 2*dim_reduction, 1, 1], padding='SAME') + b["conv1"]), dropout_keep_prob)
h_conv2 = tf.nn.dropout(tf.nn.relu(atrous_conv2d(h_conv1, W["conv2"], 3) + b["conv2"]), dropout_keep_prob)
h_conv3 = tf.nn.dropout(tf.nn.relu(atrous_conv2d(h_conv2, W["conv3"], 9) + b["conv3"]), dropout_keep_prob)
h_conv4 = tf.nn.dropout(tf.nn.relu(atrous_conv2d(h_conv3, W["conv4"], 27) + b["conv4"]), dropout_keep_prob)
h_conv5 = tf.nn.dropout(tf.nn.relu(tf.nn.conv2d_transpose(h_conv4, W["conv5"], [batch_size, 25000, 1, nkernels[0]], [1, 2*dim_reduction, 1, 1]) + b["conv5"]), dropout_keep_prob)
return h_conv5
def get_block_outputs(block_activations, batch_size, nkernels, dropout_keep_prob, W, b):
flattened = tf.reshape(tf.squeeze(block_activations), [batch_size*25000, nkernels[0]])
hid1 = tf.nn.dropout(tf.nn.relu(tf.matmul(flattened, W["hid1"]) + b["hid1"]), dropout_keep_prob)
outputs = tf.matmul(hid1, W["hid2"]) + b["hid2"]
return outputs
def ID_CNN_model(x, y_, dropout_keep_prob, batch_size, noutputs, is_training=None, decay_rate=None, use_batchnorm=False, dim_reduction=10, nkernels=[50, 240, 80], hidden=125):
# dim_reduction = 10
# nkernels = [50, 240, 80]
# hidden = 125
embedding_size = 4
W = {
"conv0": weight_variable([2*dim_reduction, 1, embedding_size, nkernels[0]], "W_conv0"),
"conv1": weight_variable([2*dim_reduction, 1, nkernels[0], nkernels[0]], "W_conv1"),
"conv2": weight_variable([dim_reduction, 1, nkernels[0], nkernels[1]], "W_conv2"),
"conv3": weight_variable([dim_reduction, 1, nkernels[1], nkernels[2]]),
"conv4": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]]),
"conv5": weight_variable([2*dim_reduction, 1, nkernels[0], nkernels[2]], "W_conv5"),
"hid1": weight_variable([nkernels[0], hidden]),
"hid2": weight_variable([hidden, noutputs]),
}
b = {
"conv0": bias_variable([nkernels[0]]),
"conv1": bias_variable([nkernels[0]]),
"conv2": bias_variable([nkernels[1]]),
"conv3": bias_variable([nkernels[2]]),
"conv4": bias_variable([nkernels[2]]),
"conv5": bias_variable([nkernels[0]]),
"hid1": bias_variable([hidden]),
"hid2": bias_variable([noutputs]),
}
embeddings = tf.Variable(tf.random_uniform([5, embedding_size]))
embed = tf.nn.embedding_lookup(embeddings, x)
    # and an empty axis for the height
embedded_image = tf.expand_dims(embed, 2)
h_conv0 = tf.nn.dropout(tf.nn.relu(conv2d(embedded_image, W["conv0"]) + b["conv0"]), dropout_keep_prob)
# add the convolutional layers
if use_batchnorm:
block_1 = ID_block_batchnorm(h_conv0, W, b, dim_reduction, dropout_keep_prob, batch_size, nkernels, is_training, decay_rate, "_block1")
block_2 = ID_block_batchnorm(block_1, W, b, dim_reduction, dropout_keep_prob, batch_size, nkernels, is_training, decay_rate, "_block2")
block_3 = ID_block_batchnorm(block_2, W, b, dim_reduction, dropout_keep_prob, batch_size, nkernels, is_training, decay_rate, "_block3")
else:
block_1 = ID_block(h_conv0, W, b, dim_reduction, dropout_keep_prob, batch_size, nkernels)
block_2 = ID_block(block_1, W, b, dim_reduction, dropout_keep_prob, batch_size, nkernels)
block_3 = ID_block(block_2, W, b, dim_reduction, dropout_keep_prob, batch_size, nkernels)
block_1_outputs = get_block_outputs(block_1, batch_size, nkernels, dropout_keep_prob, W, b)
block_2_outputs = get_block_outputs(block_2, batch_size, nkernels, dropout_keep_prob, W, b)
block_3_outputs = get_block_outputs(block_3, batch_size, nkernels, dropout_keep_prob, W, b)
# h_conv1 = tf.nn.dropout(tf.nn.relu(tf.nn.conv2d(embedded_image, W["conv1"], strides=[1, 2*dim_reduction, 1, 1], padding='SAME') + b["conv1"]), dropout_keep_prob)
# h_conv2 = tf.nn.dropout(tf.nn.relu(atrous_conv2d(h_conv1, W["conv2"], 3) + b["conv2"]), dropout_keep_prob)
# h_conv3 = tf.nn.dropout(tf.nn.relu(atrous_conv2d(h_conv2, W["conv3"], 9) + b["conv3"]), dropout_keep_prob)
# h_conv4 = tf.nn.dropout(tf.nn.relu(atrous_conv2d(h_conv3, W["conv4"], 27) + b["conv4"]), dropout_keep_prob)
# h_conv5 = tf.nn.dropout(tf.nn.relu(atrous_conv2d(h_conv4, W["conv5"], 81) + b["conv5"]), dropout_keep_prob)
# h_conv6 = tf.nn.dropout(tf.nn.relu(tf.nn.conv2d_transpose(h_conv5, W["conv6"], [batch_size, 25000, 1, nkernels[2]], [1, 2*dim_reduction, 1, 1]) + b["conv6"]), dropout_keep_prob)
# flattened = tf.reshape(tf.squeeze(block_3), [batch_size*25000, nkernels[0]])
# hid1 = tf.nn.dropout(tf.nn.relu(tf.matmul(flattened, W["hid1"]) + b["hid1"]), dropout_keep_prob)
# outputs = tf.matmul(hid1, W["hid2"]) + b["hid2"]
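    # Note: unlike the single-output models above, this returns the logits of all
    # three ID blocks, presumably so that a loss can be attached to each block.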
return [block_1_outputs, block_2_outputs, block_3_outputs], W, b, embed, None
def get_model(model, x, y_, dropout_keep_prob, batch_size, noutputs, is_training, decay_rate, kw, nkernels, hidden):
if model == "dilated":
return dilated_convolution_model(x, y_, dropout_keep_prob, batch_size, noutputs, kw, nkernels, hidden)
elif model == "dilated_normed":
return dilated_convolution_model_batchnorm(x, y_, dropout_keep_prob, batch_size, noutputs, is_training, decay_rate, kw, nkernels, hidden)
elif model == "dilated_pooling":
return dilated_convolution_with_pooling(x, y_, dropout_keep_prob, batch_size, noutputs, kw, nkernels, hidden)
elif model == "dilated_pooling_normed":
return dilated_convolution_with_pooling_batchnorm(x, y_, dropout_keep_prob, batch_size, noutputs, is_training, decay_rate, kw, nkernels, hidden)
elif model == "dilated_dil_pooling":
return dilated_convolution_with_dilated_pooling(x, y_, dropout_keep_prob, batch_size, noutputs, kw, nkernels, hidden)
elif model == "dilated_dil_pooling_normed":
return dilated_convolution_with_dilated_pooling_batchnorm(x, y_, dropout_keep_prob, batch_size, noutputs, is_training, decay_rate, kw, nkernels, hidden)
elif model == "conv1":
return convolution_1_layer(x, y_, dropout_keep_prob, batch_size, noutputs, kw, nkernels, hidden)
elif model == "conv3":
return convolution_3_layer(x, y_, dropout_keep_prob, batch_size, noutputs, kw, nkernels, hidden)
elif model == "conv3_resizing":
return convolution_3_layer_resizing(x, y_, dropout_keep_prob, batch_size, noutputs, kw, nkernels, hidden)
elif model == "conv3_resizing_normed":
return convolution_3_layer_resizing_batchnorm(x, y_, dropout_keep_prob, batch_size, noutputs, is_training, decay_rate, kw, nkernels, hidden)
elif model == "conv7_resizing":
return convolution_7_layer_resizing(x, y_, dropout_keep_prob, batch_size, noutputs, kw, nkernels, hidden)
elif model == "conv7_resizing_normed":
return convolution_7_layer_resizing_batchnorm(x, y_, dropout_keep_prob, batch_size, noutputs, is_training, decay_rate, kw, nkernels, hidden)
elif model == "conv_bi_lstm":
return conv_bi_lstm(x, y_, dropout_keep_prob, batch_size, noutputs, kw, nkernels, hidden)
elif model == "id_cnn":
return ID_CNN_model(x, y_, dropout_keep_prob, batch_size, noutputs, None, None, False, kw, nkernels, hidden)
elif model == "id_cnn_normed":
return ID_CNN_model(x, y_, dropout_keep_prob, batch_size, noutputs, is_training, decay_rate, True, kw, nkernels, hidden)
else:
print("Invalid model: " + model)
sys.exit()
return None, None, None, None, None
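# A minimal usage sketch (not part of the original module). It assumes the fixed
# sequence length of 25000 and the 5-symbol embedding vocabulary hard-coded above,
# plus TF 1.x placeholders; the placeholder names and hyper-parameters below are
# illustrative only.
if __name__ == "__main__":
    batch_size, seq_len, noutputs = 4, 25000, 2
    x = tf.placeholder(tf.int32, [batch_size, seq_len], name="x")
    y_ = tf.placeholder(tf.float32, [batch_size * seq_len, noutputs], name="y_")
    keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")
    is_training = tf.placeholder(tf.bool, name="is_training")
    # Build the plain dilated model; any key accepted by get_model() works here.
    outputs, W, b, embed, activations = get_model(
        "dilated", x, y_, keep_prob, batch_size, noutputs,
        is_training, decay_rate=0.99, kw=10, nkernels=[128, 240, 50], hidden=125)
    print(outputs)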
|
1664295
|
from collections import defaultdict
from datetime import timedelta
from time import time
import numpy as np
from scipy import sparse as sp
from scipy.sparse.linalg import spsolve
from menpo.visualize import bytes_str, print_progress
from .base import gradient_xy, camera_parameters_update
from .hessian import (initialize_hessian_and_JTe, insert_frame_to_H,
insert_frame_to_JTe)
from .jacobian import jacobians
def increment_parameters(images, mm, id_indices, exp_indices, template_camera,
p, qs, cs,
c_f=1, c_l=1, c_id=1, c_exp=1, c_sm=1,
lm_group=None, n_samples=1000,
compute_costs=True):
n_frames = len(images)
n_points = mm.shape_model.template_instance.n_points
n_p = len(id_indices)
n_q = len(exp_indices)
n_c = cs.shape[1] - 2 # sub one for quaternion, one for focal length
print('Precomputing....')
# Rescale shape components to have size:
# n_points x (n_components * n_dims)
# and to be scaled by the relevant standard deviation.
shape_pc = (
mm.shape_model.components.T *
np.sqrt(mm.shape_model.eigenvalues)
).reshape([n_points, -1])
# include std.dev in principal components
shape_pc_lms = shape_pc.reshape([n_points, 3, -1])[mm.model_landmarks_index]
print('Initializing Hessian/JTe for frame...')
H, JTe = initialize_hessian_and_JTe(c_id, c_exp, c_sm, n_p, n_q, n_c, p, qs,
n_frames)
print('H: {} ({})'.format(H.shape, bytes_str(H.nbytes)))
if compute_costs:
costs = defaultdict(list)
for (f, image), c, q in zip(enumerate(print_progress(
images, prefix='Incrementing H/JTe')), cs, qs):
# Form the overall shape parameter: [p, q]
s = np.zeros(mm.shape_model.n_active_components)
s[id_indices] = p
s[exp_indices] = q
# In our error we consider landmarks stored [x, y] - so flip here.
lms_points_xy = image.landmarks[lm_group].points[:, [1, 0]]
# Compute input image gradient
grad_x, grad_y = gradient_xy(image)
j = jacobians(s, c, image, lms_points_xy, mm, id_indices, exp_indices,
template_camera, grad_x, grad_y, shape_pc, shape_pc_lms,
n_samples, compute_costs=compute_costs)
insert_frame_to_H(H, j, f, n_p, n_q, n_c, c_f, c_l, n_frames)
insert_frame_to_JTe(JTe, j, f, n_p, n_q, n_c, c_f, c_l, n_frames)
if compute_costs:
for cost, val in j['costs'].items():
costs[cost].append(val)
print('Converting Hessian to sparse format')
H = sp.csr_matrix(H)
print("Sparsity (prop. 0's) of H: {:.2%}".format(
1 - (H.count_nonzero() / np.prod(np.array(H.shape)))))
print('Solving for parameter update')
    d = spsolve(H, JTe)
dp = d[:n_p]
dqs = d[n_p:(n_p + (n_frames * n_q))].reshape([n_frames, n_q])
dcs = d[-(n_frames * n_c):].reshape([n_frames, n_c])
# Add the focal length and degenerate quaternion parameters back on as
# null delta updates
dcs = np.hstack([np.tile(np.array([0, 1]), (n_frames, 1)), dcs])
new_p = p + dp
new_qs = qs + dqs
new_cs = np.array([camera_parameters_update(c, dc)
for c, dc in zip(cs, dcs)])
params = {
'p': new_p,
'qs': new_qs,
'cs': new_cs,
'dp': dp,
'dqs': dqs,
'dcs': dcs,
}
if compute_costs:
c = {k: np.array(v) for k, v in costs.items()}
err_s_id = (p ** 2).sum()
err_s_exp = (qs ** 2).sum()
err_sm = ((qs[:-2] - 2 * qs[1:-1] + qs[2:]) ** 2).sum()
err_f_tot = c['err_f'].sum() * c_f / (n_c * n_samples)
err_l_tot = c['err_l'].sum()
total_energy = (err_f_tot +
c_l * err_l_tot +
c_id * err_s_id +
c_exp * err_s_exp +
c_sm * err_sm)
c['total_energy'] = total_energy
c['err_s_id'] = (c_id, err_s_id)
c['err_s_exp'] = (c_exp, err_s_exp)
c['err_sm'] = (c_sm, err_sm)
c['err_f_tot'] = err_f_tot
c['err_l_tot'] = (c_l, err_l_tot)
print_cost_dict(c)
params['costs'] = c
return params
def fit_video(images, mm, id_indices, exp_indices, template_camera,
p, qs, cs, c_f=1, c_l=1, c_id=1, c_exp=1, c_sm=1, lm_group=None,
n_samples=1000, n_iters=10, compute_costs=True):
params = [
{
"p": p,
"qs": qs,
"cs": cs
}]
for i in range(1, n_iters + 1):
print('{} / {}'.format(i, n_iters))
# retrieve the last used parameters and pass them into the increment
        last = params[-1]
t1 = time()
incs = increment_parameters(images, mm, id_indices, exp_indices,
                                    template_camera, last['p'], last['qs'], last['cs'],
c_f=c_f, c_l=c_l, c_id=c_id, c_exp=c_exp,
c_sm=c_sm,
lm_group=lm_group, n_samples=n_samples,
compute_costs=compute_costs)
# update the parameter list
params.append(incs)
# And report the time taken for the iteration.
dt = int(time() - t1)
print('Iteration {} complete in {}\n'.format(i, timedelta(seconds=dt)))
return params
def fit_image(image, mm, id_indices, exp_indices, template_camera,
p, q, c, c_f=1, c_l=1, c_id=1, c_exp=1, lm_group=None,
n_samples=1000, n_iters=10, compute_costs=True):
# fit image is the same as fit_video, just for a single length video.
return fit_video(
[image], mm, id_indices, exp_indices, template_camera,
p, q[None, :], c[None, :], c_f=c_f, c_l=c_l,
c_id=c_id, c_exp=c_exp, c_sm=0, lm_group=lm_group,
n_samples=n_samples, n_iters=n_iters, compute_costs=compute_costs
)
def print_single_cost(k, c, tot):
if isinstance(c, tuple):
key = '{:03.0%} | {:>12}'.format((c[0] * c[1]) / tot, k)
val = '{:>12.2f} x {:>12.2f} = {:.2f}'.format(c[0], c[1], c[0] * c[1])
else:
key = '{:03.0%} | {:>12}'.format(c / tot, k)
val = '{:.2f}'.format(c)
print('{:>20}: {}'.format(key, val))
def print_cost_dict(d):
print('------------------------------------------------------------------')
print_single_cost('total_energy', d['total_energy'], d['total_energy'])
print('------------------------------------------------------------------')
for k in ['err_f_tot', 'err_l_tot', 'err_s_id',
'err_s_exp', 'err_sm']:
print_single_cost(k, d[k], d['total_energy'])
print('------------------------------------------------------------------')
for k in ['err_f', 'err_l']:
print('{} (median over frames): {:.2f}'.format(k, np.median(d[k])))
print('------------------------------------------------------------------')
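# For reference (not part of the original module): the update vector solved for in
# increment_parameters() stacks the shared identity update, the per-frame expression
# updates and the per-frame camera updates, and is unpacked as in this hypothetical
# numpy sketch (sizes are illustrative):
#
#     import numpy as np
#     n_p, n_q, n_c, n_frames = 3, 2, 4, 5
#     d = np.arange(n_p + n_frames * n_q + n_frames * n_c, dtype=float)
#     dp = d[:n_p]                                                     # identity, shared
#     dqs = d[n_p:(n_p + n_frames * n_q)].reshape([n_frames, n_q])     # expression, per frame
#     dcs = d[-(n_frames * n_c):].reshape([n_frames, n_c])             # camera, per frame
#     # the focal-length and degenerate quaternion entries come back as null updates:
#     dcs = np.hstack([np.tile(np.array([0, 1]), (n_frames, 1)), dcs])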
|
1664336
|
import sys
import scrapy
from scrapy.crawler import CrawlerProcess
class CachingHostnameResolverSpider(scrapy.Spider):
"""
Finishes in a finite amount of time (does not hang indefinitely in the DNS resolution)
"""
name = "caching_hostname_resolver_spider"
def start_requests(self):
yield scrapy.Request(self.url)
def parse(self, response):
for _ in range(10):
yield scrapy.Request(response.url, dont_filter=True, callback=self.ignore_response)
def ignore_response(self, response):
self.logger.info(repr(response.ip_address))
if __name__ == "__main__":
process = CrawlerProcess(settings={
"RETRY_ENABLED": False,
"DNS_RESOLVER": "scrapy.resolver.CachingHostnameResolver",
})
process.crawl(CachingHostnameResolverSpider, url=sys.argv[1])
process.start()
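# Example run (URL is illustrative): python <this_script>.py https://example.org
# The spider re-requests the start URL ten times and logs the resolved ip_address.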
|
1664432
|
from math import gcd  # fractions.gcd was deprecated and removed in Python 3.9
def lcm(a, b):
return (a * b) // gcd(a, b)
N = int(input())
for i in range(N):
number_of_wheels = int(input())
wheels = [int(x) for x in input().split()]
if number_of_wheels == 1:
print(wheels[0])
else:
solution = lcm(wheels[0], wheels[1])
for j in range(2,number_of_wheels):
solution = lcm(solution, wheels[j])
print(solution if solution <= 10**9 else "More than a billion.")
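# Worked example (illustrative): for a test case with wheels "4 6 10" the answer is
# lcm(lcm(4, 6), 10) = lcm(12, 10) = 60, which is printed because it is <= 10**9.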
|
1664487
|
import sys
sys.path.append('/opt')
from common.constant import StatusCode
from common.logger import get_logger
from common.utils import handle_exception_with_slack_notification, generate_lambda_response
from event_pubsub.config import SLACK_HOOK, NETWORK_ID
from event_pubsub.services.raw_events_service import RawEventsService
logger = get_logger(__name__)
@handle_exception_with_slack_notification(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger)
def get_raw_event_details(event, context):
logger.info(f"get_raw_event_details :: event :: {event}")
transaction_hash_list = event["transaction_hash_list"]
contract_name = event["contract_name"]
response = RawEventsService().get_raw_events(
transaction_hash_list=transaction_hash_list,
contract_name=contract_name
)
return generate_lambda_response(
StatusCode.OK,
{"status": "success", "data": response, "error": {}}, cors_enabled=True
)
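# Illustrative input event for get_raw_event_details (values are hypothetical):
# {
#     "transaction_hash_list": ["0xabc...", "0xdef..."],
#     "contract_name": "MPE"
# }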
|
1664532
|
from .. import surface, rect, video, pixels, render, rwops
from .color import convert_to_color
from .common import SDLError
from .compat import isiterable
from .ebs import System
from .image import load_image
from .renderer import Renderer
from .sprite import Sprite, SoftwareSprite, TextureSprite
from .window import Window
__all__ = [
"SpriteFactory", "SoftwareSpriteRenderSystem", "SpriteRenderSystem",
"TextureSpriteRenderSystem", "TEXTURE", "SOFTWARE"
]
TEXTURE = 0
SOFTWARE = 1
class SpriteFactory(object):
"""A factory class for creating Sprite components."""
def __init__(self, sprite_type=TEXTURE, **kwargs):
"""Creates a new SpriteFactory.
The SpriteFactory can create TextureSprite or SoftwareSprite
instances, depending on the sprite_type being passed to it,
which can be SOFTWARE or TEXTURE. The additional kwargs are used
as default arguments for creating sprites within the factory
methods.
"""
if sprite_type == TEXTURE:
if "renderer" not in kwargs:
raise ValueError("you have to provide a renderer=<arg> argument")
elif sprite_type != SOFTWARE:
raise ValueError("sprite_type must be TEXTURE or SOFTWARE")
self._spritetype = sprite_type
self.default_args = kwargs
@property
def sprite_type(self):
"""The sprite type created by the factory."""
return self._spritetype
def __repr__(self):
stype = "TEXTURE"
if self.sprite_type == SOFTWARE:
stype = "SOFTWARE"
return "SpriteFactory(sprite_type=%s, default_args=%s)" % \
(stype, self.default_args)
def create_sprite_render_system(self, *args, **kwargs):
"""Creates a new SpriteRenderSystem.
For TEXTURE mode, the passed args and kwargs are ignored and the
Renderer or SDL_Renderer passed to the SpriteFactory is used.
"""
if self.sprite_type == TEXTURE:
return TextureSpriteRenderSystem(self.default_args["renderer"])
else:
return SoftwareSpriteRenderSystem(*args, **kwargs)
def from_image(self, fname):
"""Creates a Sprite from the passed image file."""
return self.from_surface(load_image(fname), True)
def from_surface(self, tsurface, free=False):
"""Creates a Sprite from the passed SDL_Surface.
If free is set to True, the passed surface will be freed
automatically.
"""
if self.sprite_type == TEXTURE:
renderer = self.default_args["renderer"]
texture = render.SDL_CreateTextureFromSurface(renderer.sdlrenderer,
tsurface)
if not texture:
raise SDLError()
sprite = TextureSprite(texture.contents)
if free:
surface.SDL_FreeSurface(tsurface)
return sprite
elif self.sprite_type == SOFTWARE:
return SoftwareSprite(tsurface, free)
raise ValueError("sprite_type must be TEXTURE or SOFTWARE")
def from_object(self, obj):
"""Creates a Sprite from an arbitrary object."""
if self.sprite_type == TEXTURE:
rw = rwops.rw_from_object(obj)
# TODO: support arbitrary objects.
imgsurface = surface.SDL_LoadBMP_RW(rw, True)
if not imgsurface:
raise SDLError()
return self.from_surface(imgsurface.contents, True)
elif self.sprite_type == SOFTWARE:
rw = rwops.rw_from_object(obj)
imgsurface = surface.SDL_LoadBMP_RW(rw, True)
if not imgsurface:
raise SDLError()
return SoftwareSprite(imgsurface.contents, True)
raise ValueError("sprite_type must be TEXTURE or SOFTWARE")
def from_color(self, color, size, bpp=32, masks=None):
"""Creates a sprite with a certain color.
"""
color = convert_to_color(color)
if masks:
rmask, gmask, bmask, amask = masks
else:
rmask = gmask = bmask = amask = 0
sfc = surface.SDL_CreateRGBSurface(0, size[0], size[1], bpp, rmask,
gmask, bmask, amask)
if not sfc:
raise SDLError()
fmt = sfc.contents.format
if fmt.contents.Amask != 0:
# Target has an alpha mask
col = pixels.SDL_MapRGBA(fmt, color.r, color.g, color.b, color.a)
else:
col = pixels.SDL_MapRGB(fmt, color.r, color.g, color.b)
ret = surface.SDL_FillRect(sfc, None, col)
if ret == -1:
raise SDLError()
return self.from_surface(sfc.contents, True)
def from_text(self, text, **kwargs):
"""Creates a Sprite from a string of text."""
args = self.default_args.copy()
args.update(kwargs)
fontmanager = args['fontmanager']
sfc = fontmanager.render(text, **args)
return self.from_surface(sfc, free=True)
def create_sprite(self, **kwargs):
"""Creates an empty Sprite.
This will invoke create_software_sprite() or
create_texture_sprite() with the passed arguments and the set
default arguments.
"""
args = self.default_args.copy()
args.update(kwargs)
if self.sprite_type == TEXTURE:
return self.create_texture_sprite(**args)
else:
return self.create_software_sprite(**args)
def create_software_sprite(self, size, bpp=32, masks=None):
"""Creates a software sprite.
A size tuple containing the width and height of the sprite and a
bpp value, indicating the bits per pixel to be used, need to be
provided.
"""
if masks:
rmask, gmask, bmask, amask = masks
else:
rmask = gmask = bmask = amask = 0
imgsurface = surface.SDL_CreateRGBSurface(0, size[0], size[1], bpp,
rmask, gmask, bmask, amask)
if not imgsurface:
raise SDLError()
return SoftwareSprite(imgsurface.contents, True)
def create_texture_sprite(self, renderer, size,
pformat=pixels.SDL_PIXELFORMAT_RGBA8888,
access=render.SDL_TEXTUREACCESS_STATIC):
"""Creates a texture sprite.
A size tuple containing the width and height of the sprite needs
to be provided.
TextureSprite objects are assumed to be static by default,
        making it impossible to access their pixel buffer in favour of
faster copy operations. If you need to update the pixel data
frequently or want to use the texture as target for rendering
operations, access can be set to the relevant
SDL_TEXTUREACCESS_* flag.
"""
if isinstance(renderer, render.SDL_Renderer):
sdlrenderer = renderer
elif isinstance(renderer, Renderer):
sdlrenderer = renderer.sdlrenderer
else:
raise TypeError("renderer must be a Renderer or SDL_Renderer")
texture = render.SDL_CreateTexture(sdlrenderer, pformat, access,
size[0], size[1])
if not texture:
raise SDLError()
return TextureSprite(texture.contents)
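# Typical factory usage (a sketch, not part of the original module); `renderer` is
# assumed to be an existing Renderer bound to a Window:
#
#     factory = SpriteFactory(sprite_type=TEXTURE, renderer=renderer)
#     red_box = factory.from_color((255, 0, 0), size=(64, 64))
#     logo = factory.from_image("logo.bmp")
#
# A SpriteFactory(SOFTWARE) works the same way but produces SoftwareSprite objects.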
class SpriteRenderSystem(System):
"""A rendering system for Sprite components.
This is a base class for rendering systems capable of drawing and
displaying Sprite-based objects. Inheriting classes need to
implement the rendering capability by overriding the render()
method.
"""
def __init__(self):
super(SpriteRenderSystem, self).__init__()
self.componenttypes = (Sprite,)
self._sortfunc = lambda e: e.depth
def render(self, sprites, x=None, y=None):
"""Renders the passed sprites.
This is a no-op function and needs to be implemented by inheriting
classes.
"""
pass
def process(self, world, components):
"""Draws the passed SoftSprite objects on the Window's surface."""
self.render(sorted(components, key=self._sortfunc))
@property
def sortfunc(self):
"""Sort function for the component processing order.
The default sort order is based on the depth attribute of every
sprite. Lower depth values will cause sprites to be drawn below
sprites with higher depth values.
"""
return self._sortfunc
@sortfunc.setter
def sortfunc(self, value):
"""Sort function for the component processing order.
The default sort order is based on the depth attribute of every
sprite. Lower depth values will cause sprites to be drawn below
sprites with higher depth values.
"""
if not callable(value):
raise TypeError("sortfunc must be callable")
self._sortfunc = value
class SoftwareSpriteRenderSystem(SpriteRenderSystem):
"""A rendering system for SoftwareSprite components.
The SoftwareSpriteRenderSystem class uses a Window as drawing device to
display Sprite surfaces. It uses the Window's internal SDL surface as
drawing context, so that GL operations, such as texture handling or
    using SDL renderers, are not possible.
"""
def __init__(self, window):
"""Creates a new SoftwareSpriteRenderSystem for a specific Window."""
super(SoftwareSpriteRenderSystem, self).__init__()
if isinstance(window, Window):
self.window = window.window
elif isinstance(window, video.SDL_Window):
self.window = window
else:
raise TypeError("unsupported window type")
self.target = window
sfc = video.SDL_GetWindowSurface(self.window)
if not sfc:
raise SDLError()
self.surface = sfc.contents
self.componenttypes = (SoftwareSprite,)
def render(self, sprites, x=None, y=None):
"""Draws the passed sprites (or sprite) on the Window's surface.
x and y are optional arguments that can be used as relative drawing
location for sprites. If set to None, the location information of the
        sprites is used. If set and sprites is an iterable, such as a list of
SoftwareSprite objects, x and y are relative location values that will
be added to each individual sprite's position. If sprites is a single
SoftwareSprite, x and y denote the absolute position of the
SoftwareSprite, if set.
"""
r = rect.SDL_Rect(0, 0, 0, 0)
if isiterable(sprites):
blit_surface = surface.SDL_BlitSurface
imgsurface = self.surface
x = x or 0
y = y or 0
for sprite in sprites:
r.x = x + sprite.x
r.y = y + sprite.y
blit_surface(sprite.surface, None, imgsurface, r)
else:
r.x = sprites.x
r.y = sprites.y
if x is not None and y is not None:
r.x = x
r.y = y
surface.SDL_BlitSurface(sprites.surface, None, self.surface, r)
video.SDL_UpdateWindowSurface(self.window)
class TextureSpriteRenderSystem(SpriteRenderSystem):
"""A rendering system for TextureSprite components.
The TextureSpriteRenderSystem class uses a SDL_Renderer as drawing
device to display TextureSprite objects.
"""
def __init__(self, target):
"""Creates a new TextureSpriteRenderSystem.
target can be a Window, SDL_Window, Renderer or SDL_Renderer.
If it is a Window or SDL_Window instance, a Renderer will be
created to acquire the SDL_Renderer.
"""
super(TextureSpriteRenderSystem, self).__init__()
if isinstance(target, (Window, video.SDL_Window)):
# Create a Renderer for the window and use that one.
target = Renderer(target)
if isinstance(target, Renderer):
self._renderer = target # Used to prevent GC
sdlrenderer = target.sdlrenderer
elif isinstance(target, render.SDL_Renderer):
sdlrenderer = target
else:
raise TypeError("unsupported object type")
self.sdlrenderer = sdlrenderer
self.componenttypes = (TextureSprite,)
def __del__(self):
self.sdlrenderer = None
if hasattr(self, "_renderer"):
self._renderer = None
def render(self, sprites, x=None, y=None):
"""Draws the passed sprites (or sprite).
x and y are optional arguments that can be used as relative
drawing location for sprites. If set to None, the location
        information of the sprites is used. If set and sprites is an
iterable, such as a list of TextureSprite objects, x and y are
relative location values that will be added to each individual
sprite's position. If sprites is a single TextureSprite, x and y
denote the absolute position of the TextureSprite, if set.
"""
r = rect.SDL_Rect(0, 0, 0, 0)
rcopy = render.SDL_RenderCopyEx
if isiterable(sprites):
renderer = self.sdlrenderer
x = x or 0
y = y or 0
for sprite in sprites:
r.x = x + sprite.x
r.y = y + sprite.y
r.w, r.h = sprite.size
if rcopy(renderer, sprite.texture, None, r, sprite.angle,
sprite.center, sprite.flip) == -1:
raise SDLError()
else:
r.x = sprites.x
r.y = sprites.y
r.w, r.h = sprites.size
if x is not None and y is not None:
r.x = x
r.y = y
if rcopy(self.sdlrenderer, sprites.texture, None, r, sprites.angle,
sprites.center, sprites.flip) == -1:
raise SDLError()
render.SDL_RenderPresent(self.sdlrenderer)
|
1664537
|
import logging
import re
import time
from celery import chain
from django.core.cache import cache
from django.db import transaction
from django.core.exceptions import ValidationError
from cacheops import invalidate_model
import app.ciscoeox.api_crawler as cisco_eox_api_crawler
from app.ciscoeox.exception import CiscoApiCallFailed
from app.config.settings import AppSettings
from app.config.models import NotificationMessage
from app.config import utils
from app.productdb.models import Vendor, Product
from django_project.celery import app as app, TaskState
logger = logging.getLogger("productdb")
NOTIFICATION_MESSAGE_TITLE = "Synchronization with Cisco EoX API"
@app.task(name="ciscoeox.populate_product_lc_state_sync_field")
def cisco_eox_populate_product_lc_state_sync_field():
"""
    Periodic job to populate the lc_state_sync field of the Products, which indicates whether the product lifecycle
    data are automatically synchronized against the Cisco EoX API
:return:
"""
try:
cis_vendor = Vendor.objects.get(name__istartswith="Cisco")
    except Vendor.DoesNotExist:
# Vendor doesn't exist, no steps required
logger.fatal("Vendor \"Cisco Systems\" not found in database, please check your installation")
return {"error": "Vendor \"Cisco Systems\" not found in database"}
cisco_products = Product.objects.filter(vendor=cis_vendor)
if cisco_products.count() != 0:
app_config = AppSettings()
queries = app_config.get_cisco_eox_api_queries_as_list()
# escape the query strings
queries = [re.escape(e) for e in queries]
# convert the wildcard values
queries = [e.replace("\\*", ".*") for e in queries]
queries = ["^" + e + "$" for e in queries]
with transaction.atomic():
# reset all entries for the vendor
Product.objects.filter(vendor=cis_vendor).update(lc_state_sync=False)
# only set the state sync to true if the periodic synchronization is enabled
if app_config.is_periodic_sync_enabled():
for query in queries:
Product.objects.filter(product_id__regex=query, vendor=cis_vendor).update(lc_state_sync=True)
invalidate_model(Product)
return {"status": "Database updated"}
else:
return {"error": "No Products associated to \"Cisco Systems\" found in database"}
@app.task(
serializer="json",
name="ciscoeox.update_local_database_records"
)
def update_local_database_records(results, year, records):
for record in records:
cisco_eox_api_crawler.update_local_db_based_on_record(record, True)
results[str(year)] = "success"
return results
@app.task(
serializer="json",
name="ciscoeox.notify_initial_import_result"
)
def notify_initial_import_result(results):
msg = "The following years were successful imported: " + ",".join(results.keys())
NotificationMessage.objects.create(
title="Initial data import finished",
summary_message=msg,
detailed_message=msg,
type=NotificationMessage.MESSAGE_INFO
)
@app.task(
serializer="json",
name="ciscoeox.initial_sync_with_cisco_eox_api",
bind=True
)
def initial_sync_with_cisco_eox_api(self, years_list):
"""
    Synchronize all entries from the EoX API for a given set of years (today - n years); ignores the "create missing
    entries" option and the configurable blacklist.
    :param self:
    :param years_list: list of years to sync (e.g. [2018, 2017, 2016])
:return:
"""
if type(years_list) is not list:
raise AttributeError("years_list must be a list")
for val in years_list:
if type(val) is not int:
raise AttributeError("years_list must be a list of integers")
if len(years_list) == 0:
return {
"status_message": "No years provided, nothing to do."
}
app_config = AppSettings()
# test Cisco EoX API access
test_result = utils.check_cisco_eox_api_access(
app_config.get_cisco_api_client_id(),
app_config.get_cisco_api_client_secret(),
False
)
failed_years = []
successful_years = []
if test_result:
# perform synchronization
self.update_state(state=TaskState.PROCESSING, meta={
"status_message": "start initial synchronization with the Cisco EoX API..."
})
all_records = []
for year in years_list:
self.update_state(state=TaskState.PROCESSING, meta={
"status_message": "fetch all information for year %d..." % year
})
# wait some time between the query calls
time.sleep(int(app_config.get_cisco_eox_api_sync_wait_time()))
# fetch all API entries for a specific year
try:
records = cisco_eox_api_crawler.get_raw_api_data(year=year)
successful_years += [year]
all_records.append({
"year": year,
"records": records
})
except CiscoApiCallFailed as ex:
msg = "Cisco EoX API call failed (%s)" % str(ex)
logger.error("Query for year %s to Cisco EoX API failed (%s)" % (year, msg), exc_info=True)
failed_years += [year]
NotificationMessage.objects.create(
title="Initial data import failed",
summary_message="Unable to collect Cisco EoX data for year %d" % year,
detailed_message=msg,
type=NotificationMessage.MESSAGE_ERROR
)
except Exception as ex:
msg = "Unexpected Exception, cannot access the Cisco API. Please ensure that the server is " \
"connected to the internet and that the authentication settings are valid."
logger.error("Query for year %s to Cisco EoX API failed (%s)" % (year, msg), exc_info=True)
failed_years += [year]
NotificationMessage.objects.create(
title="Initial data import failed",
summary_message="Unable to collect Cisco EoX data for year %d" % year,
detailed_message=msg,
type=NotificationMessage.MESSAGE_ERROR
)
# update local database (asynchronous task)
if len(all_records) != 0:
tasks = [
update_local_database_records.s({}, all_records[0]["year"], all_records[0]["records"])
]
for r in all_records[1:]:
tasks.append(update_local_database_records.s(r["year"], r["records"]))
tasks.append(notify_initial_import_result.s())
chain(*tasks).apply_async()
time.sleep(10)
        # remove the in-progress flag from the cache
cache.delete("CISCO_EOX_INITIAL_SYN_IN_PROGRESS")
success_msg = ",".join([str(e) for e in successful_years])
if len(success_msg) == 0:
success_msg = "None"
failed_msg = ""
if len(failed_years) != 0:
failed_msg = " (for %s the synchronization failed)" % ",".join([str(e) for e in failed_years])
return {
"status_message": "The EoX data were successfully downloaded for the following years: %s%s" % (success_msg,
failed_msg)
}
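# Note on the task chain built in initial_sync_with_cisco_eox_api above: Celery
# prepends each task's return value to the next task's arguments, which is why
# only the first update_local_database_records signature is given the empty
# results dict explicitly; the later tasks and notify_initial_import_result
# receive the accumulated results implicitly.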
@app.task(
serializer="json",
name="ciscoeox.update_cisco_eox_records",
)
def update_cisco_eox_records(records):
"""
update given database records from the Cisco EoX v5 API
:param records:
:return:
"""
app_config = AppSettings()
blacklist_raw_string = app_config.get_product_blacklist_regex()
create_missing = app_config.is_auto_create_new_products()
# build blacklist from configuration
blacklist = []
for e in [e.split(";") for e in blacklist_raw_string.splitlines()]:
blacklist += e
blacklist = [e for e in blacklist if e != ""]
counter = 0
messages = {}
for record in records:
blacklisted = False
for regex in blacklist:
try:
if re.search(regex, record["EOLProductID"], re.I):
blacklisted = True
break
            except re.error:
logger.warning("invalid regular expression in blacklist: %s" % regex)
if not blacklisted:
try:
message = cisco_eox_api_crawler.update_local_db_based_on_record(record, create_missing)
if message:
messages[record["EOLProductID"]] = message
except ValidationError as ex:
logger.error("invalid data received from Cisco API, cannot save data object for "
"'%s' (%s)" % (record, str(ex)), exc_info=True)
else:
messages[record["EOLProductID"]] = " Product record ignored"
counter += 1
return {
"count": counter,
"messages": messages
}
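# Hypothetical sketch of the blacklist parsing used in update_cisco_eox_records
# above: the configured value is split per line and per semicolon and empty
# entries are dropped, so "WS-C2950.*;WS-C2960\nN5K-.*" yields
# ["WS-C2950.*", "WS-C2960", "N5K-.*"] (illustrative input).
def _example_parse_blacklist(raw_string):
    entries = []
    for parts in [line.split(";") for line in raw_string.splitlines()]:
        entries += parts
    return [e for e in entries if e != ""]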
@app.task(
serializer="json",
name="ciscoeox.synchronize_with_cisco_eox_api",
bind=True,
soft_time_limit=82800,
time_limit=86400
)
def execute_task_to_synchronize_cisco_eox_states(self, ignore_periodic_sync_flag=False):
"""
    This task synchronizes the local database with the Cisco EoX API. It executes all configured queries and stores the
results in the local database. There are two types of operation:
* cisco_eox_api_auto_sync_auto_create_elements is set to true - will create any element which is not part of the
blacklist and not in the database
* cisco_eox_api_auto_sync_auto_create_elements is set to false - will only update entries, which are already
included in the database
:return:
"""
app_config = AppSettings()
run_task = app_config.is_periodic_sync_enabled()
if not (run_task or ignore_periodic_sync_flag):
result = {
"status_message": "task not enabled"
}
else:
logger.info("start sync with Cisco EoX API...")
self.update_state(state=TaskState.PROCESSING, meta={
"status_message": "sync with Cisco EoX API..."
})
# read configuration for the Cisco EoX API synchronization
queries = app_config.get_cisco_eox_api_queries_as_list()
if len(queries) == 0:
result = {
"status_message": "No Cisco EoX API queries configured."
}
NotificationMessage.objects.create(
title=NOTIFICATION_MESSAGE_TITLE,
type=NotificationMessage.MESSAGE_WARNING,
summary_message="There are no Cisco EoX API queries configured. Nothing to do.",
detailed_message="There are no Cisco EoX API queries configured. Please configure at least on EoX API "
"query in the settings or disable the periodic synchronization."
)
# update the local database with the Cisco EoX API
else:
# test Cisco EoX API access
test_result = utils.check_cisco_eox_api_access(
app_config.get_cisco_api_client_id(),
app_config.get_cisco_api_client_secret(),
False
)
if not test_result:
msg = "Cannot contact Cisco EoX API, please verify your internet connection and access " \
"credentials."
if not ignore_periodic_sync_flag:
NotificationMessage.objects.create(
title=NOTIFICATION_MESSAGE_TITLE, type=NotificationMessage.MESSAGE_ERROR,
summary_message="The synchronization with the Cisco EoX API was not successful.",
detailed_message=msg
)
result = {
"error_message": msg
}
else:
# execute all queries from the configuration
query_eox_records = {}
failed_queries = []
failed_query_msgs = {}
successful_queries = []
counter = 1
for query in queries:
self.update_state(state=TaskState.PROCESSING, meta={
"status_message": "send query <code>%s</code> to the Cisco EoX API (<strong>%d of "
"%d</strong>)..." % (query, counter, len(queries))
})
# wait some time between the query calls
time.sleep(int(app_config.get_cisco_eox_api_sync_wait_time()))
try:
query_eox_records[query] = cisco_eox_api_crawler.get_raw_api_data(api_query=query)
successful_queries.append(query)
except CiscoApiCallFailed as ex:
msg = "Cisco EoX API call failed (%s)" % str(ex)
logger.error("Query %s to Cisco EoX API failed (%s)" % (query, msg), exc_info=True)
failed_queries.append(query)
failed_query_msgs[query] = str(ex)
except Exception as ex:
msg = "Unexpected Exception, cannot access the Cisco API. Please ensure that the server is " \
"connected to the internet and that the authentication settings are " \
"valid."
logger.error("Query %s to Cisco EoX API failed (%s)" % (query, msg), exc_info=True)
failed_queries.append(query)
failed_query_msgs[query] = str(ex)
counter += 1
for key in query_eox_records:
amount_of_records = len(query_eox_records[key])
self.update_state(state=TaskState.PROCESSING, meta={
"status_message": "update database (query <code>%s</code>, processed <b>0</b> of "
"<b>%d</b> results)..." % (key, amount_of_records)
})
# update database in a separate task
update_cisco_eox_records.apply_async(kwargs={
"records": query_eox_records[key]
})
                # list the executed queries (and any per-query messages) in the detailed message
detailed_message = "The following queries were executed:<br><ul style=\"text-align: left;\">"
for fq in failed_queries:
detailed_message += "<li class=\"text-danger\"><code>%s</code> " \
"(failed, %s)</li>" % (fq, failed_query_msgs.get(fq, "unknown"))
for sq in successful_queries:
detailed_message += "<li><code>%s</code> (<b>affects %d products</b>, " \
"success)</li>" % (sq, len(query_eox_records[sq]))
detailed_message += "</ul>"
# show the executed queries in the summary message
if len(failed_queries) == 0 and len(successful_queries) != 0:
summary_html = "The following queries were successful executed: %s" % ", ".join(
["<code>%s</code>" % query for query in successful_queries]
)
NotificationMessage.objects.create(
title=NOTIFICATION_MESSAGE_TITLE, type=NotificationMessage.MESSAGE_SUCCESS,
summary_message="The synchronization with the Cisco EoX API was successful. " + summary_html,
detailed_message=detailed_message
)
elif len(failed_queries) != 0 and len(successful_queries) == 0:
summary_html = "The following queries failed to execute: %s" % ", ".join(
["<code>%s</code>" % query for query in failed_queries]
)
NotificationMessage.objects.create(
title=NOTIFICATION_MESSAGE_TITLE, type=NotificationMessage.MESSAGE_ERROR,
summary_message="The synchronization with the Cisco EoX API was not successful. " + summary_html,
detailed_message=detailed_message
)
else:
summary_html = "The following queries were successful executed: %s\n<br>The following queries " \
"failed to execute: %s" % (
", ".join(["<code>%s</code>" % query for query in successful_queries]),
", ".join(["<code>%s</code>" % query for query in failed_queries])
)
NotificationMessage.objects.create(
title=NOTIFICATION_MESSAGE_TITLE, type=NotificationMessage.MESSAGE_WARNING,
summary_message="The synchronization with the Cisco EoX API was partially "
"successful. " + summary_html,
detailed_message=detailed_message
)
result = {"status_message": "<p style=\"text-align: left;\">" + detailed_message + "</p>"}
# if the task was executed eager, set state to SUCCESS (required for testing)
if self.request.is_eager:
self.update_state(state=TaskState.SUCCESS, meta={"status_message": summary_html})
        # remove the in-progress flag from the cache
cache.delete("CISCO_EOX_API_SYN_IN_PROGRESS")
return result
|
1664539
|
import datetime, json, os, uuid, urllib.request, urllib.error, re
import flask, werkzeug.exceptions, cairosvg
import mimetypes
from flask import jsonify, request
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.support.select import Select
from selenium.common.exceptions import TimeoutException
from functree import __version__, app, auth, constants, csrf, filters, forms, models, tree, analysis, cache
from functree.crckm.src import download as crckm
@app.route('/')
def route_index():
return flask.render_template('index.html')
@app.route('/api/mapping/', methods=['POST'])
@csrf.exempt
def mapping():
form = forms.MappingForm(csrf_enabled=False)
if 'private' not in request.form:
form.private.data = True
else:
if request.form['private'] == "0":
form.private.data = False
else:
form.private.data = True
if 'modulecoverage' not in request.form:
form.modulecoverage.data = True
else:
if request.form['modulecoverage'] == "0":
form.modulecoverage.data = False
else:
form.modulecoverage.data = True
if 'distribute' not in request.form:
form.distribute.data = True
else:
if request.form['distribute'] == "0":
form.distribute.data = False
else:
form.distribute.data = True
if form.validate_on_submit():
profile_id = analysis.basic_mapping.from_table(form)
return jsonify({'profile_id': profile_id})
else:
return jsonify({'errors': form.errors})
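# Hypothetical helper sketch equivalent to the checkbox handling above: a field
# that is missing from the form, or set to anything other than "0", is treated
# as True ("req" is an illustrative Flask request object).
def _example_form_flag(req, name):
    return req.form.get(name, "1") != "0"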
@app.route('/api/comparison/', methods=['POST'])
@csrf.exempt
def comparison():
form = forms.MappingForm(csrf_enabled=False)
if 'private' not in request.form:
form.private.data = True
else:
if request.form['private'] == "0":
form.private.data = False
else:
form.private.data = True
if form.validate_on_submit():
profile_id = analysis.comparison.from_table(form)
return jsonify({'profile_id': profile_id})
else:
return jsonify({'errors': form.errors})
@app.route('/api/display/', methods=['POST'])
@csrf.exempt
def api_display():
form = forms.DisplayForm(csrf_enabled=False)
if 'private' not in request.form:
form.private.data = True
else:
if request.form['private'] == "0":
form.private.data = False
else:
form.private.data = True
if form.validate_on_submit():
profile_id = profile_for_display(form)
return jsonify({'profile_id': profile_id})
else:
return jsonify({'errors': form.errors})
@app.route('/api/viewer/', methods=['GET'])
def api_viewer():
# process args
profile_id = request.args.get('profile_id', type=uuid.UUID)
series_value = request.args.get('series')
column_values = request.args.get("columns").split(",")
circle_column_value = request.args.get("circle-column")
is_stack = request.args.get("stack")
is_disable_normalization = request.args.get("disable-normalization")
    color_code = request.args.get("color-code")
    depth_value = request.args.get("depth")
# initialize a headless chrome
chrome_options = Options()
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--headless')
chrome_options.add_argument('--disable-gpu')
chrome_options.add_argument('--auto-open-devtools-for-tabs')
chrome_options.add_argument('--disable-extensions')
chrome_options.add_argument("window-size=1980,1200");
svg = None
driver = None
try:
driver = webdriver.Chrome(chrome_options=chrome_options)
# locate the page
        page = flask.url_for('route_viewer', _external=True) + '?profile_id={}'.format(profile_id)
        driver.get(page)
wait = WebDriverWait(driver, 60)
# Wait for page to load
wait.until(EC.invisibility_of_element_located((By.ID, 'loading')))
# open the options
        driver.find_element_by_id("options").click()
driver.implicitly_wait(0.5)
# select a series to visualize
series = Select(driver.find_element_by_id("series"))
series.select_by_visible_text(series_value)
driver.implicitly_wait(0.5)
# select samples
columns = Select(driver.find_element_by_id("columns"))
for column in column_values:
columns.select_by_visible_text(column)
if circle_column_value:
circle_map = Select(driver.find_element_by_id("circle-map"))
circle_map.select_by_visible_text(circle_column_value)
driver.implicitly_wait(0.5)
if is_stack == "":
driver.find_element_by_id("stacking").click()
if is_disable_normalization == "":
driver.find_element_by_id("normalize").click()
if color_code:
color_coding = Select(driver.find_element_by_id("color-coding"))
color_coding.select_by_visible_text(color_code)
if depth_value:
depth = Select(driver.find_element_by_id("depth"))
depth.select_by_visible_text(depth_value)
# update
driver.find_element_by_id("update").click()
        # Give a couple of seconds to let the SVG update
driver.implicitly_wait(2)
# locate the svg element
svgEl = driver.find_element_by_tag_name("svg")
# copy it
svg = svgEl.get_attribute('outerHTML')
finally:
        if driver is not None:
            driver.quit()
# return the raw SVG file
return str(svg)
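# Example request (illustrative values): GET /api/viewer/?profile_id=<uuid>
# &series=<series name>&columns=sample1,sample2&depth=3 returns the rendered
# SVG markup of the viewer page as plain text.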
@app.route('/analysis/<string:mode>/', methods=['GET', 'POST'])
def route_analysis(mode):
if mode == 'mapping':
form = forms.MappingForm()
if form.validate_on_submit():
profile_id = analysis.basic_mapping.from_table(form)
if profile_id == constants.NO_MATCHED_HIERARCHIES:
return flask.redirect(flask.url_for('route_nomapping'))
return flask.redirect(flask.url_for('route_viewer') + '?profile_id={}'.format(profile_id))
else:
return flask.render_template('mapping.html', form=form, mode=mode)
elif mode == 'comparison':
form = forms.ComparisonForm()
if form.validate_on_submit():
profile_id = analysis.comparison.from_table(form)
return flask.redirect(flask.url_for('route_viewer') + '?profile_id={}'.format(profile_id))
else:
return flask.render_template('comparison.html', form=form, mode=mode)
elif mode == 'display':
form = forms.DisplayForm()
if form.validate_on_submit():
profile_id = profile_for_display(form)
if not profile_id:
                # TODO: add an error message, or handle this at validation time with custom validators
return flask.render_template('display.html', form=form, mode=mode)
return flask.redirect(flask.url_for('route_viewer') + '?profile_id={}'.format(profile_id))
else:
return flask.render_template('display.html', form=form, mode=mode)
else:
flask.abort(404)
@app.route('/nomapping/')
def route_nomapping():
return flask.render_template('nomapping.html')
def profile_for_display(form):
profile_id = None
file_type = mimetypes.MimeTypes().guess_type(form.input_file.data.filename)[0]
if file_type == "application/json":
profile_id = analysis.display.from_json(form)
elif file_type == 'text/tab-separated-values':
profile_id = analysis.display.from_table(form)
return profile_id
@app.route('/list/')
def route_list():
only = ('profile_id', 'description', 'added_at', 'target', 'locked')
profiles = models.Profile.objects().filter(private=False).only(*only)
return flask.render_template('list.html', profiles=profiles)
@app.route('/data/')
def route_data():
only = ('source', 'description', 'added_at')
trees = models.Tree.objects().all().only(*only)
definitions = models.Definition.objects().all().only(*only)
return flask.render_template('data.html', trees=trees, definitions=definitions)
@app.route('/data/upload/', methods=['GET', 'POST'])
def route_data_upload():
'''
Handle upload of reference trees
'''
form = forms.UploadForm()
if form.validate_on_submit():
file_type = mimetypes.MimeTypes().guess_type(form.input_file.data.filename)[0]
if file_type == "application/json":
models.Tree(
tree=tree.from_json(form.input_file.data),
source=form.target.data,
description=form.description.data,
added_at=datetime.datetime.utcnow()).save()
elif file_type == 'text/tab-separated-values':
models.Tree(
tree=tree.from_tsv(form.input_file.data, form.target.data),
source=form.target.data,
description=form.description.data,
added_at=datetime.datetime.utcnow()).save()
cache.clear()
return flask.redirect(flask.url_for('route_data'))
else:
return flask.render_template('upload.html', form=form)
@app.context_processor
def utility_processor():
    def json_schema():
        with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'static/data/example/data-schema.json'), 'r') as f:
            return f.read()
    def json_example():
        with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'static/data/example/data-example.json'), 'r') as f:
            return f.read()
    def json_reference_example():
        with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'static/data/example/reference_tree.json'), 'r') as f:
            return f.read()
    def json_reference_schema():
        with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'static/data/example/reference-tree-schema.json'), 'r') as f:
            return f.read()
return dict(json_schema=json_schema, json_example=json_example, json_reference_example=json_reference_example, json_reference_schema=json_reference_schema)
@app.route('/docs/')
def route_docs():
return flask.render_template('help.html', constants=constants)
@app.route('/about/')
def route_about():
return flask.render_template('about.html', version=__version__)
@app.route('/contact/')
def route_contact():
return flask.render_template('contact.html')
@app.route('/viewer/')
def route_viewer():
profile_id = flask.request.args.get('profile_id', type=uuid.UUID)
mode = flask.request.args.get('mode', default='functree', type=str)
excludes = ('profile',)
profile = models.Profile.objects().exclude(*excludes).get_or_404(profile_id=profile_id)
if mode == 'functree':
root = flask.request.args.get('root', type=str)
return flask.render_template('functree.html', profile=profile, mode=mode, root=root)
elif mode == 'charts':
series = flask.request.args.get('series', default=0, type=int)
return flask.render_template('charts.html', profile=profile, mode=mode, series=series)
elif mode == 'pathways':
series = flask.request.args.get('series', default=0, type=int)
return flask.render_template('pathways.html', profile=profile, mode=mode, series=series)
elif mode == 'tables':
series = flask.request.args.get('series', default=0, type=int)
return flask.render_template('tables.html', profile=profile, mode=mode, series=series)
elif mode == 'summary':
return flask.render_template('summary.html', profile=profile, mode=mode)
else:
flask.abort(404)
@app.route('/admin/')
@auth.login_required
def route_admin():
counts = {
'profile': models.Profile.objects().count(),
'tree': models.Tree.objects().count(),
'definition': models.Definition.objects().count()
}
return flask.render_template('admin.html', counts=counts)
@app.route('/profile/<uuid:profile_id>', methods=['GET'])
@cache.cached()
def route_profile(profile_id):
excludes = ('id',)
profile = models.Profile.objects.exclude(*excludes).get_or_404(profile_id=profile_id)
return flask.jsonify([profile])
@app.route('/profile/<uuid:profile_id>', methods=['POST'])
def route_profile_delete(profile_id):
if flask.request.form.get('_method') == 'DELETE':
models.Profile.objects.get_or_404(profile_id=profile_id, locked=False).delete()
return flask.redirect(flask.url_for('route_list'))
else:
return flask.abort(405)
@app.route('/tree/<string:source>')
@cache.cached()
def route_tree(source):
excludes = ('id',)
tree = models.Tree.objects().exclude(*excludes).get_or_404(source=source)
return flask.jsonify([tree])
@app.route('/definition/<string:source>')
def route_definition(source):
excludes = ('id',)
definition = models.Definition.objects().exclude(*excludes).get_or_404(source=source)
return flask.jsonify([definition])
# HTTPS proxy for TogoWS
@app.route('/entry/')
@app.route('/entry/<string:entry>')
def route_get_entry(entry=''):
TOGOWS_GET_ENTRY_ENDPOINT = 'http://togows.org/entry/'
if re.match(r'^K\d{5}$', entry):
db = 'kegg-orthology'
elif re.match(r'^M\d{5}$', entry):
db = 'kegg-module'
elif re.match(r'^map\d{5}$', entry):
db = 'kegg-pathway'
else:
flask.abort(404)
try:
res = urllib.request.urlopen(TOGOWS_GET_ENTRY_ENDPOINT + db + '/' + entry)
return flask.Response(res.read(), content_type=res.headers['Content-Type'])
except urllib.error.HTTPError as e:
flask.abort(e.code)
@app.route('/action/save_image/', methods=['POST'])
def route_save_image():
image_format = flask.request.form['format']
svg = flask.request.form['svg']
if image_format == 'pdf':
data = cairosvg.svg2pdf(bytestring=svg)
mimetype = 'application/pdf'
elif image_format == 'png':
data = cairosvg.svg2png(bytestring=svg)
mimetype = 'image/png'
elif image_format == 'ps':
data = cairosvg.svg2ps(bytestring=svg)
mimetype = 'application/postscript'
elif image_format == 'svg':
data = cairosvg.svg2svg(bytestring=svg)
mimetype = 'image/svg+xml'
elif image_format == 'raw-svg':
data = svg
mimetype = 'image/svg+xml'
else:
flask.abort(400)
return flask.Response(data, mimetype=mimetype)
@app.route('/action/init_profiles/')
@auth.login_required
def route_init_profiles():
    with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'static/data/example/profile.json'), 'r') as f:
        input_data = json.load(f)[0]
models.Profile.objects.all().delete()
models.Profile(
profile_id=uuid.uuid4(),
profile=input_data['profile'],
series=input_data['series'],
columns=input_data['columns'],
target=input_data['target'],
description=input_data['description'],
added_at=datetime.datetime.utcnow(),
expire_at=datetime.datetime(2101, 1, 1),
private=False,
locked=True
).save()
return flask.redirect(flask.url_for('route_admin'))
@app.route('/action/update_trees/')
@auth.login_required
def route_update_trees():
models.Tree.objects.all().delete()
models.Tree(
tree=tree.get_tree(),
source='KEGG',
description='KEGG BRITE Functional Hierarchies',
added_at=datetime.datetime.utcnow()
).save()
cache.clear()
sources = models.Tree.objects.aggregate(
{'$group': {'_id': '$source'}}
)
models.Profile.target.choices = models.Definition.source.choices = [source['_id'] for source in sources]
return flask.redirect(flask.url_for('route_admin'))
@app.route('/action/update_definitions/')
@auth.login_required
def route_update_definitions():
models.Definition.objects.all().delete()
models.Definition(
definition=crckm.get_definition(),
source='KEGG',
description='KEGG Module definitions',
added_at=datetime.datetime.utcnow()
).save()
return flask.redirect(flask.url_for('route_admin'))
@app.route('/action/update_annotation_mapping/')
@auth.login_required
def route_update_annotation_mapping():
f = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'static/data/ortholog_mapping/external_annotation.map')
orto_mapping = {}
    with open(f, 'r') as mapping_file:
for line in mapping_file:
tokens = line.rstrip().split('\t', 1)
if tokens[0] not in orto_mapping:
orto_mapping[tokens[0]] = set()
orto_mapping[tokens[0]].add(tokens[1])
# drop current collection
models.AnnotationMapping.drop_collection()
# upload in batches
batch = []
target_size = 5000
for x in orto_mapping:
batch.append(models.AnnotationMapping(
annotation=x,
ko_map=orto_mapping[x]
))
if len(batch) == target_size:
models.AnnotationMapping.objects.insert(batch, load_bulk=False, signal_kwargs={'ordered': False} )
batch.clear()
    # insert the remaining annotations (if any)
    if batch:
        models.AnnotationMapping.objects.insert(batch, load_bulk=False, signal_kwargs={'ordered': False})
models.AnnotationMapping.create_index('annotation')
return flask.redirect(flask.url_for('route_admin'))
@auth.get_password
def auth_get_password(username):
if username == app.config['FUNCTREE_ADMIN_USERNAME']:
return app.config['FUNCTREE_ADMIN_PASSWORD']
return None
@auth.error_handler
def auth_error_handler():
return flask.render_template('error.html', error=werkzeug.exceptions.Unauthorized()), 401
@app.errorhandler(400)
@app.errorhandler(401)
@app.errorhandler(403)
@app.errorhandler(404)
@app.errorhandler(405)
@app.errorhandler(408)
@app.errorhandler(500)
def route_error(error):
return flask.render_template('error.html', error=error), error.code
|
1664572
|
import os
from setuptools import find_packages, setup
NAME = 'django-lightning'
os.chdir(os.path.dirname(os.path.abspath(__file__)))
VERSION = '1.1.0-rc.2'
def get_install_require_packages():
"""获取依赖的安装包"""
with open('requirements.in', 'r') as file:
return [line
for line in file.readlines() if not line.startswith('http')]
# May cause installation problems on Windows
# with open('README.zh-CN.md', 'r') as file:
# long_description = file.read()
def get_packages(app):
"""获取包"""
return [app] + [
"{}.{}".format(app, item) for item in find_packages(app)
]
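# For example (illustrative output), get_packages('lightning') returns the app
# itself plus every sub-package that find_packages discovers inside it, e.g.
# ['lightning', 'lightning.migrations', ...].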
all_packages = []
for pkg in ['api_basebone', 'bsm_config', 'lightning', 'shield', 'storage', 'puzzle']:
    all_packages.extend(get_packages(pkg))
import ssl
try:
_create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
# Legacy Python that doesn't verify HTTPS certificates by default
pass
else:
# Handle target environment that doesn't support HTTPS verification
ssl._create_default_https_context = _create_unverified_https_context
setup(
name=NAME,
version=VERSION,
url='https://github.com/git-men/lightning',
author='git<EMAIL>.com',
author_email='<EMAIL>',
description='A Django based no-code Admin and rapid development framework',
long_description='A Django based no-code Admin and rapid development framework',
# long_description_content_type='text/markdown',
license='MIT',
packages=all_packages,
include_package_data=True,
data_files={},
install_requires=get_install_require_packages(),
dependency_links = [
"git+https://github.com/jeffkit/wechatpy/archive/v.18.13-work.zip",
],
zip_safe=False,
classifiers=[
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
|
1664573
|
import torch
from torch import optim
from torch.utils import data
from torch import nn
from torch.nn.functional import soft_margin_loss
import numpy as np
from .train_helpers import normalize, get_loss_weights, load_losses, save_losses
from .models import Relative_Positioning
import os.path as op
import os
root = op.dirname(__file__)
saved_models_dir = op.join(root, 'saved_models')
def train_ssl(train_dataset, test_dataset, n_epochs=20, lr=1e-3, batch_size=256, load_last_saved_model=False, num_workers=8):
    C = train_dataset[0][0].shape[1]  # num channels
    T = train_dataset[0][0].shape[2]  # num timepoints
model = Relative_Positioning(C, T, k=50, m=13, dropout_prob=0.5, embedding_dim=100, n_spatial_filters=8)
if load_last_saved_model:
model.load_state_dict(torch.load(op.join(root, 'saved_models', 'supervised_baseline_model.pt')))
if torch.cuda.device_count() > 1:
model = nn.DataParallel(model)
model.cuda()
train_loader = data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
test_loader = data.DataLoader(test_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
new_train_losses, new_test_losses = _train_epochs(model, train_loader, test_loader,
dict(epochs=n_epochs, lr=lr))
if load_last_saved_model:
train_losses, test_losses = load_losses(saved_models_dir, 'supervised_baseline')
else:
train_losses = []
test_losses = []
train_losses.extend(new_train_losses)
test_losses.extend(new_test_losses)
save_losses(train_losses, test_losses, saved_models_dir, 'supervised_baseline')
return train_losses, test_losses, model
def _train_epochs(model, train_loader, test_loader, train_args):
epochs, lr = train_args['epochs'], train_args['lr']
optimizer = optim.Adam(model.parameters(), lr=lr)
if not os.path.exists(saved_models_dir):
os.makedirs(saved_models_dir)
train_losses = []
test_losses = [_eval_loss(model, test_loader)]
for epoch in range(1, epochs+1):
model.train()
train_losses.extend(_train(model, train_loader, optimizer, epoch))
test_loss = _eval_loss(model, test_loader)
test_losses.append(test_loss)
print(f'Epoch {epoch}, Test loss {test_loss:.4f}')
# save model every 10 epochs
if epoch % 10 == 0:
torch.save(model.state_dict(), op.join(root, 'saved_models', 'supervised_baseline_model_epoch{}.pt'.format(epoch)))
torch.save(model.state_dict(), op.join(root, 'saved_models', 'supervised_baseline_model.pt'))
return train_losses, test_losses
def rp_loss(model, x, y):
out = model(x)
return soft_margin_loss(out, y)
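# Note: soft_margin_loss expects targets in {-1, +1}, which matches the label
# convention of the relative-positioning pretext task (pairs of nearby windows
# labelled +1, distant windows labelled -1).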
def _train(model, train_loader, optimizer, epoch):
model.train()
train_losses = []
for pair in train_loader:
x, y = pair[0], pair[1]
x = x.cuda().float().contiguous()
y = y.cuda().float().contiguous()
loss = rp_loss(model, x, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
train_losses.append(loss.item())
return train_losses
def _eval_loss(model, data_loader):
model.eval()
total_loss = 0
with torch.no_grad():
for pair in data_loader:
x, y = pair[0], pair[1]
x = x.cuda().float().contiguous()
y = y.cuda().float().contiguous()
loss = rp_loss(model, x, y)
total_loss += loss * x.shape[0]
avg_loss = total_loss / len(data_loader.dataset)
return avg_loss.item()
|
1664599
|
from __future__ import absolute_import
import boto3
import botocore
import inspect
import os
import zipfile
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import BaseCommand
from django.utils.text import slugify
from zappa.zappa import Zappa
class ZappaCommand(BaseCommand):
# Management command
can_import_settings = True
requires_system_checks = False
# Zappa settings
zappa = None
zappa_settings = None
api_stage = None
project_name = None
lambda_name = None
s3_bucket_name = None
settings_file = None
zip_path = None
vpc_config = None
memory_size = None
timeout = None
help = '''Deploy this project to AWS with Zappa.'''
def add_arguments(self, parser):
parser.add_argument('environment', nargs='+', type=str)
def __init__(self, *args, **kwargs):
super(ZappaCommand, self).__init__(*args, **kwargs)
self.zappa = Zappa()
def require_settings(self, args, options):
"""
Load the ZAPPA_SETTINGS as we expect it.
"""
        if 'environment' not in options:
print(
"You must call deploy with an environment name. \n python manage.py deploy <environment>")
raise ImproperlyConfigured
from django.conf import settings
if not 'ZAPPA_SETTINGS' in dir(settings):
print(
"Please define your ZAPPA_SETTINGS in your settings file before deploying.")
raise ImproperlyConfigured
self.zappa_settings = settings.ZAPPA_SETTINGS
# Set your configuration
if type(options['environment']) == list:
self.api_stage = options['environment'][0]
else:
self.api_stage = options['environment']
if self.zappa_settings[self.api_stage].get('project_name'):
self.project_name = self.zappa_settings[self.api_stage]['project_name']
else:
self.project_name = os.path.abspath(settings.BASE_DIR).split(os.sep)[-1]
self.lambda_name = slugify(self.project_name + '-' + self.api_stage).replace("_","-")
if self.api_stage not in self.zappa_settings.keys():
print("Please make sure that the environment '" + self.api_stage +
"' is defined in your ZAPPA_SETTINGS in your settings file before deploying.")
raise ImproperlyConfigured
# Load environment-specific settings
self.s3_bucket_name = self.zappa_settings[self.api_stage]['s3_bucket']
self.vpc_config = self.zappa_settings[
self.api_stage].get('vpc_config', {})
self.memory_size = self.zappa_settings[
self.api_stage].get('memory_size', 512)
self.timeout = self.zappa_settings[
self.api_stage].get('timeout', 30)
custom_settings = [
'http_methods',
'parameter_depth',
'integration_response_codes',
'method_response_codes',
'role_name',
'aws_region'
]
for setting in custom_settings:
            if setting in self.zappa_settings[self.api_stage]:
setattr(self.zappa, setting, self.zappa_settings[
self.api_stage][setting])
def get_django_settings_file(self):
if not self.get_settings_location().startswith('s3://'):
self.settings_file = self.zappa_settings[
self.api_stage]['settings_file']
if '~' in self.settings_file:
self.settings_file = self.settings_file.replace(
'~', os.path.expanduser('~'))
self.check_settings_file()
else:
self.settings_file = self.download_from_s3(*self.parse_s3_url(self.get_settings_location()))
self.check_settings_file()
def check_settings_file(self):
"""
Checks whether the settings file specified is actually a file.
"""
if not os.path.isfile(self.settings_file):
print("Please make sure your settings_file is properly defined.")
raise ImproperlyConfigured
def get_settings_location(self):
"""
Returns the value of the settings file location as specified in the
json file.
:return:
"""
return self.zappa_settings[self.api_stage]['settings_file']
def download_from_s3(self,bucket_name,s3_key,
output_filename='temp_zappa_settings.py'):
"""
Download a file from S3
:param bucket_name: Name of the S3 bucket (string)
:param s3_key: Name of the file hosted on S3 (string)
:param output_filename: Name of the file the download operation
will create (string)
:return: False or the value of output_filename
"""
s3 = boto3.resource('s3')
bucket = s3.Bucket(bucket_name)
try:
s3.meta.client.head_object(Bucket=bucket_name,Key=s3_key)
except botocore.exceptions.ClientError:
return False
print(u'Downloading the settings file ({0}) from S3'.format(s3_key))
        bucket.download_file(s3_key, output_filename)
return output_filename
def parse_s3_url(self,s3_url):
"""
Parse the S3 url. Format: s3://mybucket:path/to/my/key
Example: s3://settings-bucket:/production_settings.py
:param s3_url: Path to the file hosted on S3
:return:
"""
return s3_url.replace('s3://','').split(':')
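    # For example (illustrative values), parse_s3_url('s3://settings-bucket:/production_settings.py')
    # returns ['settings-bucket', '/production_settings.py'], which the caller
    # unpacks into (bucket_name, s3_key).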
def load_credentials(self):
session = None
profile_name = self.zappa_settings[self.api_stage].get('profile_name')
region_name = self.zappa_settings[self.api_stage].get('aws_region')
if profile_name is not None:
session = boto3.Session(profile_name=profile_name, region_name=region_name)
self.zappa.load_credentials(session)
def create_package(self):
"""
Ensure that the package can be properly configured,
and then create it.
"""
# Create the Lambda zip package (includes project and virtualenvironment)
# Also define the path the handler file so it can be copied to the zip
# root for Lambda.
current_file = os.path.dirname(os.path.abspath(
inspect.getfile(inspect.currentframe())))
handler_file = os.sep.join(current_file.split(os.sep)[
0:-2]) + os.sep + 'handler.py'
exclude = self.zappa_settings[self.api_stage].get('exclude', []) + ['static', 'media']
self.zip_path = self.zappa.create_lambda_zip(
self.lambda_name,
handler_file=handler_file,
use_precompiled_packages=self.zappa_settings[self.api_stage].get('use_precompiled_packages', True),
exclude=exclude
)
# Add this environment's Django settings to that zipfile
with open(self.settings_file, 'r') as f:
contents = f.read()
all_contents = contents
            if 'domain' not in self.zappa_settings[self.api_stage]:
script_name = self.api_stage
else:
script_name = ''
all_contents = all_contents + \
'\n# Automatically added by Zappa:\nSCRIPT_NAME=\'/' + script_name + '\'\n'
with open('zappa_settings.py', 'w') as f:
f.write(all_contents)
with zipfile.ZipFile(self.zip_path, 'a') as lambda_zip:
lambda_zip.write('zappa_settings.py', 'zappa_settings.py')
os.unlink('zappa_settings.py')
def remove_s3_local_settings(self):
#Remove the settings file if downloaded from S3
if self.get_settings_location().startswith('s3://'):
os.remove(self.settings_file)
def remove_local_zip(self):
"""
Remove our local zip file.
"""
if self.zappa_settings[self.api_stage].get('delete_zip', True):
os.remove(self.zip_path)
def remove_uploaded_zip(self):
"""
Remove the local and S3 zip file after uploading and updating.
"""
# Remove the uploaded zip from S3, because it is now registered..
self.zappa.remove_from_s3(self.zip_path, self.s3_bucket_name)
# Finally, delete the local copy our zip package
self.remove_local_zip()
|
1664607
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class BottleneckV2(nn.Module):
def __init__(self, in_channels,channels,ks, stride=1,upsample=False,downsample=False):
super(BottleneckV2, self).__init__()
self.bn1 = nn.BatchNorm1d(in_channels)
self.conv1 = nn.Conv1d(in_channels, channels//4, 1, stride=1, bias=False)
self.bn2 = nn.BatchNorm1d(channels//4)
self.conv2 = nn.Conv1d(channels//4,channels//4, ks, stride=stride, padding=ks//2,bias=False)
self.bn3 = nn.BatchNorm1d(channels//4)
self.conv3 = nn.Conv1d(channels//4,channels, 1, stride=1, bias=False)
if downsample:self.downsample = nn.Conv1d(in_channels,channels, 1, stride, bias=False)
else:self.downsample = None
if upsample:self.upsample = nn.Conv1d(in_channels, channels, 1, stride, bias=False)
else:self.upsample = None
def forward(self,x):
residual = x
x = self.bn1(x)
x = F.relu(x)
if self.downsample:residual = self.downsample(x)
if self.upsample:residual = self.upsample(x)
x = self.conv1(x)
x = self.bn2(x)
x = F.relu(x)
x = self.conv2(x)
x = self.bn3(x)
x = F.relu(x)
x = self.conv3(x)
#print(x.shape,residual.shape)
return x + residual
class BasicBlockV2(nn.Module):
def __init__(self, in_channels,channels,ks, stride=1,upsample=False,downsample=False):
super(BasicBlockV2, self).__init__()
self.bn1 = nn.BatchNorm1d(in_channels)
self.conv1 = nn.Conv1d(in_channels,channels, ks, stride=stride, padding=ks//2,bias=False)
self.bn2 = nn.BatchNorm1d(channels)
self.conv2 = nn.Conv1d(channels,channels, ks, stride=stride, padding=ks//2,bias=False)
if downsample:self.downsample = nn.Conv1d(in_channels,channels, 1, stride, bias=False)
else:self.downsample = None
if upsample:self.upsample = nn.Conv1d(in_channels, channels, 1, stride, bias=False)
else:self.upsample = None
def forward(self, x):
residual = x
x = self.bn1(x)
x = F.relu(x)
if self.downsample:residual = self.downsample(x)
if self.upsample:residual = self.upsample(x)
x = self.conv1(x)
x = self.bn2(x)
x = F.relu(x)
x = self.conv2(x)
return x + residual
class SELayer(nn.Module):
def __init__(self, channel, reduction=16):
super(SELayer, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool1d(1)
self.fc = nn.Sequential(
nn.Linear(channel, channel // reduction),
nn.ReLU(inplace=True),
nn.Linear(channel // reduction, channel),
nn.Sigmoid()
)
def forward(self, x):
b, c, _ = x.size()
y = self.avg_pool(x).view(b, c)
y = self.fc(y).view(b, c, 1)
return x * y
class SEBasicBlock(nn.Module):
def __init__(self, in_channels,channels,ks, stride=1,upsample=False,downsample=False):
super(SEBasicBlock, self).__init__()
self.bn1 = nn.BatchNorm1d(in_channels)
self.conv1 = nn.Conv1d(in_channels,channels, ks, stride=stride, padding=ks//2,bias=False)
self.bn2 = nn.BatchNorm1d(channels)
self.conv2 = nn.Conv1d(channels,channels, ks, stride=stride, padding=ks//2,bias=False)
self.se = SELayer(channels, reduction=16)
if downsample:self.downsample = nn.Conv1d(in_channels,channels, 1, stride, bias=False)
else:self.downsample = None
if upsample:self.upsample = nn.Conv1d(in_channels, channels, 1, stride, bias=False)
else:self.upsample = None
def forward(self, x):
residual = x
x = self.bn1(x)
x = F.relu(x)
if self.downsample:residual = self.downsample(x)
if self.upsample:residual = self.upsample(x)
x = self.conv1(x)
x = self.bn2(x)
x = F.relu(x)
x = self.conv2(x)
x = self.se(x)
return x + residual
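# Hypothetical smoke-test sketch (illustrative shapes): with stride=1 and
# downsample=True an SEBasicBlock keeps the temporal length and changes the
# channel count, e.g. (4, 32, 128) -> (4, 64, 128). Note that for stride > 1
# both convolutions apply the stride as written, so the main path and the
# shortcut only line up for stride == 1.
def _example_se_block_shapes():
    block = SEBasicBlock(in_channels=32, channels=64, ks=7, stride=1, downsample=True)
    x = torch.randn(4, 32, 128)
    return block(x).shape  # torch.Size([4, 64, 128])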
|
1664631
|
import types
from pycsp3.tools.curser import queue_in
class Diagram:
_cnt = 0
_cache = {}
def __init__(self, transitions):
self.transitions = Diagram._add_transitions(transitions)
self.states = sorted({q for (q, _, _) in self.transitions} | {q for (_, _, q) in self.transitions})
self.num = Diagram._cnt
Diagram._cnt += 1
def __contains__(self, other):
queue_in.append((self, other))
return True
def __str__(self):
return "transitions=" + str(self.transitions)
MSG_STATE = "states must given under the form of strings"
@staticmethod
def _add_transitions(transitions):
assert len(transitions) > 0, "at least one transition must be present"
assert isinstance(transitions, (list, set))
t = []
for transition in transitions:
if isinstance(transition, list):
transition = tuple(transition)
            assert isinstance(transition, tuple), "A transition must be given in the form of a 3-tuple (or a list)"
assert len(transition) == 3, "Error: each transition must be composed of 3 elements"
state1, state2 = transition[0], transition[2]
assert isinstance(state1, str) and isinstance(state2, str), Diagram.MSG_STATE
values = transition[1] if isinstance(transition[1], (list, tuple, set, range)) else [transition[1]]
for value in values:
assert isinstance(value, int), "currently, the value of a transition is necessarily an integer"
t.append((state1, value, state2))
return t
def transitions_to_string(self):
if self.num not in Diagram._cache:
Diagram._cache[self.num] = "".join(["(" + q1 + "," + str(v) + "," + q2 + ")" for (q1, v, q2) in self.transitions])
return Diagram._cache[self.num]
class Automaton(Diagram):
@staticmethod
def q(i, j=None):
return "q" + str(i) + ("" if j is None else "x" + str(j))
def __init__(self, *, start, transitions, final):
super().__init__(transitions)
self.start = start
self.final = [final] if isinstance(final, str) else sorted(q for q in set(final) if q in self.states)
assert isinstance(self.start, str) and all(isinstance(f, str) for f in self.final), Diagram.MSG_STATE
class MDD(Diagram):
def __init__(self, transitions):
if isinstance(transitions, types.GeneratorType):
transitions = [t for t in transitions]
assert isinstance(transitions, list) # currently, a list is wanted for a MDD (and not a set); to be changed?
super().__init__(transitions)
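# Hypothetical usage sketch (illustrative transitions): a two-state automaton
# over the values {0, 1} that accepts sequences ending with a 1.
def _example_automaton():
    return Automaton(start="q0",
                     transitions=[("q0", 0, "q0"), ("q0", 1, "q1"),
                                  ("q1", 0, "q0"), ("q1", 1, "q1")],
                     final="q1")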
|
1664691
|
from __future__ import print_function
import numpy as np
import gluoncv as gcv
import mxnet as mx
from mxnet import autograd, gluon
from gluoncv.utils import download, viz
def test_voc07_metric_difficult():
url = 'https://apache-mxnet.s3-accelerate.amazonaws.com/gluon/dataset/pikachu/train.rec'
idx_url = 'https://apache-mxnet.s3-accelerate.amazonaws.com/gluon/dataset/pikachu/train.idx'
download(url, path='pikachu_train.rec', overwrite=False)
download(idx_url, path='pikachu_train.idx', overwrite=False)
classes = ['pikachu']
dataset = gcv.data.RecordFileDetection('pikachu_train.rec')
net = gcv.model_zoo.get_model('yolo3_darknet53_custom', classes=classes,
pretrained_base=False, transfer='voc')
def get_dataloader(val_dataset, data_shape, batch_size, num_workers):
from gluoncv.data.batchify import Tuple, Stack, Pad
from gluoncv.data.transforms.presets.yolo import YOLO3DefaultValTransform
val_batchify_fn = Tuple(Stack(), Pad(pad_val=-1))
val_loader = gluon.data.DataLoader(
val_dataset.transform(YOLO3DefaultValTransform(data_shape, data_shape)),
batch_size, False, batchify_fn=val_batchify_fn, last_batch='keep', num_workers=num_workers)
return val_loader
val_data = get_dataloader(dataset, 416, 16, 0)
    try:
        _ = mx.nd.zeros((1,), ctx=mx.gpu(0))  # probe whether a usable GPU is present
        ctx = [mx.gpu(0)]
    except mx.MXNetError:
        ctx = [mx.cpu()]
from gluoncv.utils.metrics.voc_detection import VOC07MApMetric
val_metric = VOC07MApMetric(iou_thresh=0.5, class_names=classes)
def validate(net, val_data, ctx, eval_metric):
"""Test on validation dataset."""
eval_metric.reset()
# set nms threshold and topk constraint
net.set_nms(nms_thresh=0.45, nms_topk=400)
mx.nd.waitall()
net.hybridize()
for batch in val_data:
data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0, even_split=False)
label = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0, even_split=False)
det_bboxes = []
det_ids = []
det_scores = []
gt_bboxes = []
gt_ids = []
gt_difficults = []
for x, y in zip(data, label):
# get prediction results
ids, scores, bboxes = net(x)
det_ids.append(ids)
det_scores.append(scores)
# clip to image size
det_bboxes.append(bboxes.clip(0, batch[0].shape[2]))
# split ground truths
gt_ids.append(y.slice_axis(axis=-1, begin=4, end=5))
gt_bboxes.append(y.slice_axis(axis=-1, begin=0, end=4))
gt_difficults.append(y.slice_axis(axis=-1, begin=5, end=6) if y.shape[-1] > 5 else None)
# update metric
eval_metric.update(det_bboxes, det_ids, det_scores, gt_bboxes, gt_ids, gt_difficults)
break
return eval_metric.get()
net.collect_params().reset_ctx(ctx)
validate(net, val_data, ctx, val_metric)
if __name__ == '__main__':
import nose
nose.runmodule()
|
1664714
|
import dns.message
import dns.query
import dns.rdatatype
import sys
def resolve(domain):
# Start at the root nameserver
nameserver = "192.168.3.11"
while True:
reply = query(domain, nameserver)
ip = get_answer(reply)
if ip:
# Best case: we get an answer to our query and we're done
return ip
nameserver_ip = get_glue(reply)
if nameserver_ip:
# Second best: we get a "glue record" with the *IP address* of another nameserver to query
nameserver = nameserver_ip
else:
# Otherwise: we get the *domain name* of another nameserver to query, which we can look up the IP for
nameserver_domain = get_nameserver(reply)
nameserver = resolve(nameserver_domain)
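# Illustrative trace: resolving "example.com" first queries the configured root
# nameserver, which refers us towards the ".com" servers (ideally with glue A
# records), which in turn refer us to example.com's authoritative nameservers,
# which finally return the A record printed at the bottom of this script.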
def query(name, nameserver):
query = dns.message.make_query(name, 'A')
return dns.query.udp(query, nameserver)
def get_answer(reply):
for record in reply.answer:
if record.rdtype == dns.rdatatype.A:
return record[0].address
def get_glue(reply):
for record in reply.additional:
if record.rdtype == dns.rdatatype.A:
return record[0].address
def get_nameserver(reply):
for record in reply.authority:
if record.rdtype == dns.rdatatype.NS:
return record[0].target
if __name__ == "__main__":
    print(resolve(sys.argv[1]))
|
1664750
|
import unittest
from email_scraper.scrape import extract_emails, deobfuscate_html, scrape_emails
class TestExtractor(unittest.TestCase):
def test_basic(self):
self.assertEqual(extract_emails('hello world'), [])
self.assertEqual(extract_emails('hello <EMAIL> world'), ['<EMAIL>'])
self.assertEqual(extract_emails('<EMAIL> <EMAIL>'), ['<EMAIL>', '<EMAIL>'])
self.assertEqual(extract_emails('<EMAIL> <EMAIL>'), ['<EMAIL>', '<EMAIL>'])
self.assertEqual(extract_emails('<EMAIL>,<EMAIL>'), ['<EMAIL>', '<EMAIL>'])
self.assertEqual(extract_emails('hello <EMAIL>. i have been waiting for you.'), ['<EMAIL>'])
def test_basic_html(self):
self.assertEqual(extract_emails('<a href="mailto:<EMAIL>">boo</a>'), ['<EMAIL>'])
self.assertEqual(extract_emails('<a href=\'mailto:<EMAIL>\'>boo</a>'), ['<EMAIL>'])
self.assertEqual(extract_emails('<a href="mailto:<EMAIL>?subject=meh">boo</a>'), ['<EMAIL>'])
def test_tlds(self):
self.assertEqual(extract_emails('<EMAIL>'), ['<EMAIL>'])
self.assertEqual(extract_emails('<EMAIL>'), ['<EMAIL>'])
self.assertEqual(extract_emails('<EMAIL>'), [])
class TestDeobfuscate(unittest.TestCase):
def test_entities(self):
self.assertEqual(deobfuscate_html('yourname@dom'
'ain.com'), '<EMAIL>')
def test_atob(self):
atob = 'atob(\'bWFpbHRvOmVtYWlsQGV4YW1wbGUuY29t\')'
self.assertEqual(deobfuscate_html(atob), 'mailto:<EMAIL>')
class TestHidden(unittest.TestCase):
def test_hidden(self):
self.assertEqual(
extract_emails("foo johnsmith (at) yahoo (dot) com bar"),
["<EMAIL>"]
)
class TestScraping(unittest.TestCase):
def test_basic(self):
html = """<html>
<body>
<a href="mailto:<EMAIL>"><EMAIL></a>
</body>
</html>"""
self.assertEqual(scrape_emails(html), {'<EMAIL>', '<EMAIL>'})
def test_atob(self):
atob = '<a href="javascript:window.location.href=atob(\'bWFpbHRvOmVtYWlsQGV4YW1wbGUuY29t\')">E-Mail</a>'
self.assertEqual(scrape_emails(atob), {'<EMAIL>'})
def test_entities(self):
html = """<p>For more information, send email to <A HREF="mailto:
yourname@domain.com">
yourname@domain.com
</A></p>"""
self.assertEqual(scrape_emails(html), {'<EMAIL>'})
|
1664767
|
import sqlite3
import datetime
def hash0x_to_bytes(hash0x):
return bytearray.fromhex(hash0x[2:])
def build_rows(responses):
for response in responses:
block_number = response['id']
block = response['result']
yield (
block_number,
int(block['timestamp'], 16),
hash0x_to_bytes(block['miner']),
hash0x_to_bytes(block['extraData']))
def decode_extra_data(e):
try:
return e.decode('utf-8')
    except UnicodeDecodeError:
return e.decode('latin-1')
class Block:
def __init__(self, block_number, timestamp, miner, extra_data):
self.block_number = block_number
self.timestamp = timestamp
self.miner = miner
self.extra_data = extra_data
def __repr__(self):
return str(self.block_number)
def extra_data_decoded(self):
return decode_extra_data(self.extra_data)
def get_datetime(self):
return datetime.datetime.fromtimestamp(self.timestamp)
class BlockIndex:
def __init__(self, db_file='cache/block_index.sqlite3', read_only=False):
flags = '?mode=ro' if read_only else ''
self.db = sqlite3.connect(f'file:{db_file}{flags}', uri=True)
# self.db.execute('PRAGMA journal_mode=wal')
cmd = 'CREATE TABLE IF NOT EXISTS extra_data \
(block_number INTEGER PRIMARY KEY, \
timestamp INTEGER, \
miner BLOB, \
extra_data BLOB)'
self.db.execute(cmd)
def __del__(self):
self.db.close()
def execute(self, query):
return self.db.cursor().execute(query)
def list_field(self, field, ordered=False):
query = f'SELECT {field} FROM extra_data'
for row in self.execute(query):
yield row[0]
def list_field_unique(self, field):
query = f'SELECT DISTINCT {field} FROM extra_data'
for row in self.execute(query):
yield row[0]
def insert_blocks(self, responses):
        query = 'INSERT OR REPLACE INTO extra_data VALUES (?, ?, ?, ?)'
self.db.cursor().executemany(query, build_rows(responses))
self.db.commit()
def latest_block(self):
query = 'SELECT MAX(block_number) FROM extra_data'
return self.execute(query).fetchone()[0]
def list_blocks(self, skip_genesis=False):
        query = 'SELECT * FROM extra_data ORDER BY block_number ASC'
if skip_genesis:
query += ' LIMIT -1 OFFSET 1'
for row in self.execute(query):
yield Block(*row)
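# Illustrative usage of BlockIndex (a sketch, not part of the original module;
# it assumes cache/block_index.sqlite3 already exists):
#
#   index = BlockIndex(read_only=True)
#   print('latest block:', index.latest_block())
#   for block in index.list_blocks(skip_genesis=True):
#       print(block.get_datetime(), block.extra_data_decoded())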
|
1664788
|
from typing import Dict, List
from geojson_pydantic.geometries import Point
from opennem.db.models.opennem import Station
from .schema import FacilityFeature, FacilityGeo
def stations_to_geojson(stations: List[Station]) -> FacilityGeo:
features = []
for station in stations:
if not station.location:
continue
if not station.facilities or len(station.facilities) < 1:
continue
feature_dict: Dict = dict(properties=dict())
feature_dict["properties"] = {
"station_id": station.id,
"station_code": station.code,
"facility_id": station.code,
"network": station.facilities[0].network.label,
"network_country": station.facilities[0].network.country,
"state": station.location.state,
"postcode": station.location.postcode,
"name": station.name,
"capacity_registered": station.capacity_registered,
# "capacity_aggregate": station.capacity_aggregate,
"duid_data": [],
}
if station.location.osm_way_id:
feature_dict["properties"]["osm_way_id"] = station.location.osm_way_id
for facility in station.facilities:
if not facility.fueltech:
continue
feature_dict["properties"]["duid_data"].append(
{
# "oid": facility.oid,
# "duid": facility.duid,
"fuel_tech": facility.fueltech.code,
"fuel_tech_label": facility.fueltech.label,
"fuel_tech_renewable": facility.fueltech.renewable,
"commissioned_date": facility.registered,
"decommissioned_date": facility.deregistered,
"status": facility.status.code,
"status_label": facility.status.label,
"unit_id": facility.unit_id,
"unit_number": facility.unit_number,
"unit_size": facility.unit_capacity,
"unit_alias": facility.unit_alias,
# capacities for the unit
"capacity_registered": facility.capacity_registered,
# "capacity_aggregate": facility.capacity_aggregate,
# network specific fields (DUID is one)
"network_region": facility.network_region,
}
)
feature = FacilityFeature(**feature_dict)
if station.location and station.location.geom:
geom = Point(coordinates=(station.location.lng, station.location.lat))
if not feature.geometry:
feature.geometry = geom
features.append(feature)
crs = {
"type": "name",
"properties": {"name": "urn:ogc:def:crs:OGC:1.3:CRS84"},
}
geo = FacilityGeo(features=features, crs=crs, name="opennem.facilities")
return geo
|
1664791
|
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))
import argparse
import math
import numpy as np
import random
from job import Job
from job_table import JobTable
import utils
def generate_interarrival_time(rng, lam):
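    # Inverse-CDF sampling of an exponential distribution: with u ~ Uniform(0, 1),
    # -ln(1 - u) * lam has mean lam, so lam acts here as the mean interarrival gap.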
return -math.log(1.0 - rng.random()) * lam
def generate_duration(durations, rng):
return 3600 * rng.choice(durations)
def generate_scale_factor(rng):
scale_factor = 1
r = rng.uniform(0, 1)
if 0.7 <= r <= 0.8:
scale_factor = 2
elif 0.8 <= r:
scale_factor = 4
return scale_factor
def main(args):
job_generator = random.Random()
job_generator.seed(args.seed)
interarrival_time_generator = random.Random()
interarrival_time_generator.seed(args.seed + 1)
duration_generator = random.Random()
duration_generator.seed(args.seed + 2)
scale_factor_generator = random.Random()
scale_factor_generator.seed(args.seed + 3)
throughputs = utils.read_all_throughputs_json_v2(args.throughputs_file)
durations = np.linspace(args.min_duration, args.max_duration,
args.num_durations)
duration_generator_func = lambda rng: generate_duration(durations, rng)
prev_arrival_time = None
with open(args.output_file, 'w') as f:
for i in range(args.num_jobs):
job = utils.generate_job(
throughputs=throughputs,
reference_worker_type='v100',
rng=job_generator,
job_id=None,
fixed_job_duration=None,
generate_multi_gpu_jobs=args.generate_multi_gpu_jobs,
generate_multi_priority_jobs=args.generate_multi_priority_jobs,
scale_factor_generator_func=generate_scale_factor,
duration_generator_func=duration_generator_func,
scale_factor_rng=scale_factor_generator,
duration_rng=duration_generator,
always_generate_scale_factor=False)
            if prev_arrival_time is None:
                arrival_time = 0
            elif args.lam > 0:
                interarrival_time = \
                    generate_interarrival_time(interarrival_time_generator,
                                               args.lam)
                arrival_time = prev_arrival_time + interarrival_time
            else:
                # With lam <= 0 every job arrives at the same time as the previous one.
                arrival_time = prev_arrival_time
            prev_arrival_time = arrival_time
f.write('%s\t%d\n' % (str(job), arrival_time))
if __name__=='__main__':
parser = argparse.ArgumentParser(description='Generate synthetic trace')
parser.add_argument('--num_jobs', type=int, required=True,
help='Number of jobs to generate')
parser.add_argument('-l', '--lam', type=float, default=0.0,
help='Lambda for Poisson arrival rate')
parser.add_argument('--seed', type=int, default=0,
help='Random seed')
parser.add_argument('--throughputs_file', type=str,
default=('simulation_throughputs.json'),
help='Oracle throughputs file')
parser.add_argument('-a', '--min_duration', type=float, default=1,
help='Minimum job duration in hours')
parser.add_argument('-b', '--max_duration', type=float, default=4,
help='Maximum job duration in hours')
parser.add_argument('-n', '--num_durations', type=int, default=4,
help='Number of possible job durations')
parser.add_argument('-m', '--generate-multi-gpu-jobs', action='store_true',
default=False,
help=('If set, generates multi-GPU jobs according to '
'a pre-defined distribution'))
parser.add_argument('--generate-multi-priority-jobs', action='store_true',
default=False,
help=('If set, generates some jobs with higher priority'))
parser.add_argument('--output_file', type=str, required=True,
help='Output file name')
args = parser.parse_args()
main(args)
|
1664812
|
from __future__ import annotations
from typing import Optional
from jsonclasses import jsonclass, types
@jsonclass
class SimpleRange:
i: Optional[int] = types.int.range(1, 10)
ic: Optional[int] = types.int.range(lambda: 1, lambda: 10)
it: Optional[int] = types.int.range(types.default(1), types.default(10))
|
1664820
|
from __future__ import unicode_literals
from ddf import DDFManager, DDF_HOME
dm = DDFManager('spark')
dm.sql('set hive.metastore.warehouse.dir=/tmp/hive/warehouse', False)
dm.sql('drop table if exists mtcars', False)
dm.sql("CREATE TABLE mtcars (mpg double, cyl int, disp double, hp int, drat double, wt double,"
" qesc double, vs int, am int, gear int, carb string) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' '", False)
dm.sql("LOAD DATA LOCAL INPATH '" + DDF_HOME + "/resources/test/mtcars' INTO TABLE mtcars", False)
ddf = dm.sql2ddf('select * from mtcars', False)
print('Columns: ' + ', '.join(ddf.colnames))
print('Number of columns: {}'.format(ddf.cols))
print('Number of rows: {}'.format(ddf.rows))
print(ddf.summary())
print(ddf.head(2))
print(ddf.aggregate(['sum(mpg)', 'min(hp)'], ['vs', 'am']))
print(ddf.five_nums())
print(ddf.sample(3))
dm.shutdown()
|
1664904
|
from django.conf import settings
ADMIN_VIEWS_SITE = getattr(settings, 'ADMIN_VIEWS_SITE', 'django.contrib.admin.site')
|
1664907
|
import os
class Config:
DB_SERVER_NAME = os.environ.get("DB_SERVER_NAME")
DB_USERNAME = os.environ.get("DB_USERNAME")
    DB_PASSWORD = os.environ.get("DB_PASSWORD")
|
1664930
|
from fastapi import APIRouter
from . import groups
groups_router = APIRouter()
groups_router.include_router(groups.router)
|
1664948
|
import os
import re
from pathlib import Path
from typing import Tuple
import music_tag
from telegram import ReplyKeyboardMarkup
from telegram.ext import CallbackContext
from models.admin import Admin
from models.user import User
from utils.lang import keys
def translate_key_to(key: str, destination_lang: str) -> str:
"""Find the specified key in the `keys` dictionary and returns the corresponding
value for the given language
**Keyword arguments:**
- file_path (str) -- The file path of the file to delete
**Returns:**
- The value of the requested key in the dictionary
"""
if key not in keys:
raise KeyError("Specified key doesn't exist")
return keys[key][destination_lang]
def delete_file(file_path: str) -> None:
"""Deletes a file from the filesystem. Simply ignores the files that don't exist.
**Keyword arguments:**
- file_path (str) -- The file path of the file to delete
"""
if os.path.exists(file_path):
os.remove(file_path)
def generate_music_info(tag_editor_context: dict) -> str:
"""Generate the details of the music based on the values in `tag_editor_context`
dictionary
**Keyword arguments:**
- tag_editor_context (dict) -- The context object of the user
**Returns:**
`str`
"""
ctx = tag_editor_context
return (
f"*🗣 Artist:* {ctx['artist'] if ctx['artist'] else '-'}\n"
f"*🎵 Title:* {ctx['title'] if ctx['title'] else '-'}\n"
f"*🎼 Album:* {ctx['album'] if ctx['album'] else '-'}\n"
f"*🎹 Genre:* {ctx['genre'] if ctx['genre'] else '-'}\n"
f"*📅 Year:* {ctx['year'] if ctx['year'] else '-'}\n"
f"*💿 Disk Number:* {ctx['disknumber'] if ctx['disknumber'] else '-'}\n"
f"*▶️ Track Number:* {ctx['tracknumber'] if ctx['tracknumber'] else '-'}\n"
"{}\n"
)
def increment_usage_counter_for_user(user_id: int) -> int:
"""Increment the `number_of_files_sent` column of user with the specified `user_id`.
**Keyword arguments:**
- user_id (int) -- The user id of the user
**Returns:**
The new value for `user.number_of_files_sent`
"""
user = User.where('user_id', '=', user_id).first()
if user:
user.number_of_files_sent = user.number_of_files_sent + 1
user.push()
return user.number_of_files_sent
raise LookupError(f'User with id {user_id} not found.')
def is_user_admin(user_id: int) -> bool:
"""Check if the user with `user_id` is admin or not.
**Keyword arguments:**
- user_id (int) -- The user id of the user
**Returns:**
`bool`
"""
admin = Admin.where('admin_user_id', '=', user_id).first()
return bool(admin)
def is_user_owner(user_id: int) -> bool:
"""Check if the user with `user_id` is owner or not.
**Keyword arguments:**
- user_id (int) -- The user id of the user
**Returns:**
`bool`
"""
owner = Admin.where('admin_user_id', '=', user_id).where('is_owner', '=', True).first()
return owner.is_owner if owner else False
def reset_user_data_context(context: CallbackContext) -> None:
user_data = context.user_data
language = user_data['language'] if ('language' in user_data) else 'en'
if 'music_path' in user_data:
delete_file(user_data['music_path'])
if 'art_path' in user_data:
delete_file(user_data['art_path'])
if 'new_art_path' in user_data:
delete_file(user_data['new_art_path'])
new_user_data = {
'tag_editor': {},
'music_path': '',
'music_duration': 0,
'art_path': '',
'new_art_path': '',
'current_active_module': '',
'music_message_id': 0,
'language': language,
}
context.user_data.update(new_user_data)
def save_text_into_tag(
value: str,
current_tag: str,
context: CallbackContext,
is_number: bool = False
) -> None:
"""Store a value of the given tag in the corresponding context.
**Keyword arguments:**
- value (str) -- The value to be stored as the value of `current_tag`
- current_tag (str) -- The key to store the value into
- context (CallbackContext) -- The context of a user to store the key:value pair into
"""
if is_number:
if isinstance(int(value), int):
context.user_data['tag_editor'][current_tag] = value
else:
context.user_data['tag_editor'][current_tag] = 0
else:
context.user_data['tag_editor'][current_tag] = value
def create_user_directory(user_id: int) -> str:
"""Create a directory for a user with a given id.
**Keyword arguments:**
- user_id (int) -- The user id of the user
**Returns:**
The path of the created directory
"""
user_download_dir = f"downloads/{user_id}"
try:
Path(user_download_dir).mkdir(parents=True, exist_ok=True)
    except OSError as error:
raise Exception(f"Can't create directory for user_id: {user_id}") from error
return user_download_dir
def convert_seconds_to_human_readable_form(seconds: int) -> str:
"""Convert seconds to human readable time format, e.g. 02:30
**Keyword arguments:**
- seconds (int) -- Seconds to convert
**Returns:**
Formatted string
"""
if seconds <= 0:
return "00:00"
minutes = int(seconds / 60)
remainder = seconds % 60
minutes_formatted = str(minutes) if minutes >= 10 else "0" + str(minutes)
seconds_formatted = str(remainder) if remainder >= 10 else "0" + str(remainder)
return f"{minutes_formatted}:{seconds_formatted}"
def download_file(user_id: int, file_to_download, file_type: str, context: CallbackContext) -> str:
"""Download a file using convenience methods of "python-telegram-bot"
**Keyword arguments:**
- user_id (int) -- The user's id
- file_to_download (*) -- The file object to download
- file_type (str) -- The type of the file, either 'photo' or 'audio'
- context (CallbackContext) -- The context object of the user
**Returns:**
The path of the downloaded file
"""
user_download_dir = f"downloads/{user_id}"
file_id = ''
file_extension = ''
if file_type == 'audio':
file_id = context.bot.get_file(file_to_download.file_id)
file_name = file_to_download.file_name
file_extension = file_name.split(".")[-1]
elif file_type == 'photo':
file_id = context.bot.get_file(file_to_download.file_id)
file_extension = 'jpg'
file_download_path = f"{user_download_dir}/{file_id.file_id}.{file_extension}"
try:
file_id.download(f"{user_download_dir}/{file_id.file_id}.{file_extension}")
except ValueError as error:
raise Exception(f"Couldn't download the file with file_id: {file_id}") from error
return file_download_path
def generate_back_button_keyboard(language: str) -> ReplyKeyboardMarkup:
"""Create an return an instance of `back_button_keyboard`
**Keyword arguments:**
- language (str) -- The desired language to generate labels
**Returns:**
ReplyKeyboardMarkup instance
"""
return (
ReplyKeyboardMarkup(
[
[translate_key_to('BTN_BACK', language)],
],
resize_keyboard=True,
one_time_keyboard=True,
)
)
def generate_start_over_keyboard(language: str) -> ReplyKeyboardMarkup:
"""Create an return an instance of `start_over_keyboard`
**Keyword arguments:**
- language (str) -- The desired language to generate labels
**Returns:**
ReplyKeyboardMarkup instance
"""
return (
ReplyKeyboardMarkup(
[
[translate_key_to('BTN_NEW_FILE', language)],
],
resize_keyboard=True,
one_time_keyboard=True,
)
)
def generate_module_selector_keyboard(language: str) -> ReplyKeyboardMarkup:
"""Create an return an instance of `module_selector_keyboard`
**Keyword arguments:**
- language (str) -- The desired language to generate labels
**Returns:**
ReplyKeyboardMarkup instance
"""
return (
ReplyKeyboardMarkup(
[
[
translate_key_to('BTN_TAG_EDITOR', language),
translate_key_to('BTN_MUSIC_TO_VOICE_CONVERTER', language)
],
[
translate_key_to('BTN_MUSIC_CUTTER', language),
translate_key_to('BTN_BITRATE_CHANGER', language)
]
],
resize_keyboard=True,
one_time_keyboard=True,
)
)
def generate_tag_editor_keyboard(language: str) -> ReplyKeyboardMarkup:
"""Create an return an instance of `tag_editor_keyboard`
**Keyword arguments:**
- language (str) -- The desired language to generate labels
**Returns:**
ReplyKeyboardMarkup instance
"""
return (
ReplyKeyboardMarkup(
[
[
translate_key_to('BTN_ARTIST', language),
translate_key_to('BTN_TITLE', language),
translate_key_to('BTN_ALBUM', language)
],
[
translate_key_to('BTN_GENRE', language),
translate_key_to('BTN_YEAR', language),
translate_key_to('BTN_ALBUM_ART', language)
],
[
translate_key_to('BTN_DISK_NUMBER', language),
translate_key_to('BTN_TRACK_NUMBER', language)
],
[
translate_key_to('BTN_BACK', language)
]
],
resize_keyboard=True,
)
)
def save_tags_to_file(file: str, tags: dict, new_art_path: str) -> str:
"""Create an return an instance of `tag_editor_keyboard`
**Keyword arguments:**
- file (str) -- The path of the file
- tags (str) -- The dictionary containing the tags and their values
- new_art_path (str) -- The new album art to set
**Returns:**
The path of the file
"""
music = music_tag.load_file(file)
try:
if new_art_path:
with open(new_art_path, 'rb') as art:
music['artwork'] = art.read()
except OSError as error:
raise Exception("Couldn't set hashtags") from error
music['artist'] = tags['artist'] if tags['artist'] else ''
music['title'] = tags['title'] if tags['title'] else ''
music['album'] = tags['album'] if tags['album'] else ''
music['genre'] = tags['genre'] if tags['genre'] else ''
music['year'] = int(tags['year']) if tags['year'] else 0
music['disknumber'] = int(tags['disknumber']) if tags['disknumber'] else 0
music['tracknumber'] = int(tags['tracknumber']) if tags['tracknumber'] else 0
music.save()
return file
def parse_cutting_range(text: str) -> Tuple[int, int]:
text = re.sub(' ', '', text)
beginning, _, ending = text.partition('-')
if '-' not in text:
raise ValueError('Malformed music range')
if ':' in text:
beginning_sec = int(beginning.partition(':')[0].lstrip('0') if
beginning.partition(':')[0].lstrip('0') else 0) * 60 \
+ int(beginning.partition(':')[2].lstrip('0') if
beginning.partition(':')[2].lstrip('0') else 0)
ending_sec = int(ending.partition(':')[0].lstrip('0') if
ending.partition(':')[0].lstrip('0') else 0) * 60 \
+ int(ending.partition(':')[2].lstrip('0') if
ending.partition(':')[2].lstrip('0') else 0)
else:
beginning_sec = int(beginning)
ending_sec = int(ending)
return beginning_sec, ending_sec
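# Illustrative examples: parse_cutting_range("1:30 - 2:45") returns (90, 165),
# and plain seconds also work: parse_cutting_range("10-35") returns (10, 35).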
def pretty_print_size(number_of_bytes: float) -> str:
"""Pretty print file sizes
**Keyword arguments:**
- number_of_bytes (float) -- Number of bytes to convert
**Returns:**
A human-readable file size
"""
units = [
(1 << 50, ' PB'),
(1 << 40, ' TB'),
(1 << 30, ' GB'),
(1 << 20, ' MB'),
(1 << 10, ' KB'),
(1, (' byte', ' bytes')),
]
for factor, suffix in units:
if number_of_bytes >= factor:
break
amount = int(number_of_bytes / factor)
if isinstance(suffix, tuple):
singular, multiple = suffix
if amount == 1:
suffix = singular
else:
suffix = multiple
return str(amount) + suffix
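# Examples: pretty_print_size(2048) returns "2 KB"; pretty_print_size(1) returns "1 byte".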
def get_dir_size_in_bytes(dir_path: str) -> float:
"""Return the size of a directory and its sub-directories in bytes
**Keyword arguments:**
- dir_path (str) -- The path of the directory
**Returns:**
Size of the directory
"""
root_directory = Path(dir_path)
return sum(f.stat().st_size for f in root_directory.glob('**/*') if f.is_file())
|
1664956
|
from __future__ import absolute_import
import numpy as np
from pyti import catch_errors
from pyti.function_helper import fill_for_noncomputable_vals
from six.moves import range
def vertical_horizontal_filter(data, period):
"""
Vertical Horizontal Filter.
Formula:
ABS(pHIGH - pLOW) / SUM(ABS(Pi - Pi-1))
"""
catch_errors.check_for_period_error(data, period)
vhf = [abs(np.max(data[idx+1-period:idx+1]) -
np.min(data[idx+1-period:idx+1])) /
sum([abs(data[idx+1-period:idx+1][i] - data[idx+1-period:idx+1][i-1]) for i in range(0, len(data[idx+1-period:idx+1]))]) for idx in range(period - 1, len(data))]
vhf = fill_for_noncomputable_vals(data, vhf)
return vhf
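# Illustrative usage (a sketch; `close` is an assumed sequence of closing prices):
#
#   vhf_values = vertical_horizontal_filter(close, period=28)
#
# The leading period - 1 slots are filled in by fill_for_noncomputable_vals.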
|
1664961
|
import numpy as np
import pandas as pd
import scipy.cluster.hierarchy as sch
from .metrics import get_corr_dist
from .quasi import get_quasi_diag
def get_rec_bipart(cov, sort_idx):
"""Compute portfolio weight by recursive bisection
Params
------
cov: pd.DataFrame
sort_idx: pd.Series
Sorted index by quasi diagonalization
Returns
-------
pd.Series
"""
weight = pd.Series(1, index=sort_idx)
# Initialize all in one cluster
cl_items = [sort_idx]
while len(cl_items) > 0:
cl_items_ = []
for cl in cl_items:
# Split into half for each cluter
if len(cl) >= 2:
cl_items_.append(cl[0:len(cl) // 2])
cl_items_.append(cl[len(cl) // 2:len(cl)])
# Update cluster
cl_items = cl_items_
for i in range(0, len(cl_items), 2):
cl0 = cl_items[i]
cl1 = cl_items[i + 1]
var0 = get_cluster_var(cov, cl0)
var1 = get_cluster_var(cov, cl1)
alpha = var1 / (var0 + var1)
weight[cl0] *= alpha
weight[cl1] *= 1 - alpha
return weight
def get_ivp(cov):
"""Compute inverse variance portfolio
Params
------
cov: pd.DataFrame
Returns
-------
np.array
"""
ivp = 1. / np.diag(cov)
ivp /= ivp.sum()
return ivp
def get_cluster_var(cov, cl_items):
"""Compute variance per cluster
Params
------
cov: pd.DataFrame
cl_items: pd.Series
Returns
-------
float
"""
cov_cl = cov.loc[cl_items, cl_items]
w = get_ivp(cov_cl).reshape(-1, 1)
cl_var = np.dot(np.dot(w.T, cov_cl), w)[0, 0]
return cl_var
def get_hrp(cov, corr):
"""Construct a hierarchical portfolio
Params
------
cov: pd.DataFrame
corr: pd.DataFrame
Returns
-------
pd.Series
"""
dist = get_corr_dist(corr)
link = sch.linkage(dist, 'single')
sort_idx = get_quasi_diag(link)
# Recover label
sort_idx = corr.index[sort_idx].tolist()
hrp = get_rec_bipart(cov, sort_idx)
return hrp.sort_index()
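# Illustrative usage (a sketch; `returns` is an assumed DataFrame of asset returns):
#
#   cov, corr = returns.cov(), returns.corr()
#   weights = get_hrp(cov, corr)  # pd.Series of HRP weights indexed by asset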
|
1664979
|
from django.contrib.postgres.fields import JSONField
from django.db import models
from supportal.app.models.base_model_mixin import BaseModelMixin
class EmailSend(BaseModelMixin):
INVITE_EMAIL = "switchboard_invite_email"
EXPIRING_PROSPECTS = "expiring_contacts_email"
INACTIVE_USER_EMAIL = "switchboard_inactive_user_email"
BLAST_EMAIL = "switchboard_blast_send"
VERIFIED_EMAIL = "switchboard_verified_email"
EMAIL_CHOICES = [
(INVITE_EMAIL, "Invite Email"),
(EXPIRING_PROSPECTS, "Expiring Prospects"),
(INACTIVE_USER_EMAIL, "Invite Inactive Users"),
(BLAST_EMAIL, "Blast Email"),
(VERIFIED_EMAIL, "User Verified"),
]
user = models.ForeignKey(
"app.User", on_delete=models.CASCADE, related_name="email_sends"
)
template_name = models.CharField(
choices=EMAIL_CHOICES, db_index=True, max_length=250
)
payload = JSONField(null=True)
|
1664988
|
from django.utils.http import urlencode
#django <2 compat
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
from .exceptions import InvalidActionError
from .exceptions import InvalidControllerError
class ApplicationHelper(object):
"""ApplicationHelpers can contain functions useful in a controller. Each controller is assigned a helper.
Either the global ApplicationHelper, or a class with the same name as the controller such as foo_helper.py,
and being a subclass of ApplicationHelper."""
def __init__(self, controller):
self.controller = controller
def url_for(self, controller = None, action = None, named_url = None, url_params = None, url_args=None, url_kwargs=None):
"""
:param controller: a controller name in snake case, without prefix, if you are not redirecting inside the current controller
:param action: an action name - if a controller is not specified, the action will be in the current controller
:param named_url: A named URL in django
:param url_params: The query string params
:param url_args: the list arguments for this URL, this will be used to build the URL within django's urlresolvers
:param url_kwargs: the dict arguments for this URL, this will be used to build the URL within django's urlresolvers
:return:
"""
if not named_url:
from .controller import get_actions, get_controller_name
if controller:
controllerClassOrInstance = self.controller._site.controllers.get(controller, None)
if controllerClassOrInstance is None:
raise InvalidControllerError(controller)
controller_name = controller
else:
controllerClassOrInstance = self.controller
controller_name = self.controller._controller_name
if action:
try:
action = action.strip('"\'')
action_func = get_actions(controllerClassOrInstance,with_prefix=False)[action]
except KeyError:
raise InvalidActionError(action)
named_url = getattr(action_func,'named_url',None)
if named_url is None:
controller_name = get_controller_name(controllerClassOrInstance, with_prefix=False)
named_url = '%s_%s' % (controller_name, action)
else:
named_url = "%s_index" % controller_name
url = reverse(named_url, args=url_args, kwargs=url_kwargs)
if url_params is not None:
return '%s?%s' % (url, urlencode(url_params))
return url
|
1665013
|
import bz2
import json
import re
from decimal import Decimal
from servicelayer.cache import make_key
# from pprint import pprint
CAMEL_RE = re.compile(r"([A-Z]+)")
STEPS_TYPE_1 = {
"step_2": "relatives",
"step_3": "real_estate",
"step_4": "real_estate_construction",
"step_5": "movable_property",
"step_6": "vehicles",
"step_7": "securities",
"step_8": "corporate_rights",
"step_9": "legal_entities",
"step_10": "intangible_assets",
"step_11": "revenues",
"step_12": "cash_assets",
"step_13": "financial_obligations",
"step_14": "expenses",
"step_15": "other_jobs",
"step_16": "memberships",
}
STEPS_TYPE_2 = {
"step_2": "revenues",
"step_3": "real_estate",
"step_4": "vehicles",
"step_5": "securities",
"step_6": "corporate_rights",
"step_7": "movable_property",
"step_10": "intangible_assets",
}
def audit(context, item):
"Check if item contains: any nested objects or decimals; and print them."
out = {}
for key, value in item.items():
if isinstance(value, dict):
out[key] = value
if isinstance(value, list):
item[key] = " ".join(str(value)).replace("\n", " ")
if isinstance(value, Decimal):
item[key] = str(value)
if isinstance(value, bool):
item[key] = str(value.real)
if len(out):
context.log.warning("Item %s contains nested objects." % item)
def konvert(obj):
"""Convert to snakecase"""
if isinstance(obj, dict):
converted = {}
for key, value in obj.items():
key = CAMEL_RE.sub(r"_\1", key).lower()
key = key.strip("_")
key = key.replace("-", "_")
converted[key] = konvert(value)
return converted
elif isinstance(obj, list):
return [konvert(o) for o in obj]
return obj
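# Example: konvert({"firstName": "A", "some-key": [{"innerValue": 1}]})
# returns {"first_name": "A", "some_key": [{"inner_value": 1}]}.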
def parse_declaration(context, decl):
decl = konvert(decl)
infocard = decl.pop("infocard", "")
declaration_id = infocard.get("id", "")
full_name = " ".join(
[
infocard.get("last_name", ""),
infocard.get("first_name", ""),
infocard.get("patronymic", ""),
]
)
person_id = infocard.get("id", make_key(full_name, decl.get("position", "")))
year = infocard.get("declaration_year", "")
url = infocard.get("url", "")
unified_source = decl.get("unified_source", {})
if unified_source.get("step_0", {}).get("declaration_type", ""):
STEPS = STEPS_TYPE_1
else:
STEPS = STEPS_TYPE_2
for num, step in STEPS.items():
data = unified_source.get(num, {})
if num == "step_16":
organizations = data.get("org", {})
bodies = data.get("part_org", {})
if isinstance(organizations, dict):
for idn, org in organizations.items():
org["declaration_id"] = declaration_id
org["id"] = idn
org["url"] = url
context.emit(rule="memberships", data=org)
if isinstance(bodies, dict):
for idn, body in bodies.items():
body["declaration_id"] = declaration_id
body["id"] = idn
body["url"] = url
context.emit(rule="memberships", data=body)
continue
for idn, smth in data.items():
smth["declaration_id"] = declaration_id
smth["id"] = idn
smth["url"] = url
rights = smth.pop("rights", {})
guarantors = smth.pop("guarantor", {})
guarantor_realty = smth.pop("guarantor_realty", {})
if isinstance(rights, dict):
for r_id, right in rights.items():
right["object_id"] = idn
right["id"] = r_id
right["declaration_id"] = declaration_id
audit(context, right)
context.emit(rule=STEPS.get(num) + "_rights", data=right)
if isinstance(guarantors, dict):
for g_id, guarantor in guarantors.items():
guarantor["object_id"] = idn
guarantor["id"] = g_id
guarantor["declaration_id"] = declaration_id
audit(context, guarantor)
context.emit(rule=STEPS.get(num) + "_guarantors", data=guarantor)
if isinstance(guarantor_realty, dict):
for g_id, grealty in guarantor_realty.items():
grealty["object_id"] = idn
grealty["id"] = g_id
grealty["declaration_id"] = declaration_id
audit(context, grealty)
context.emit(
rule=STEPS.get(num) + "_guarantor_realty", data=grealty
)
audit(context, smth)
context.emit(rule=STEPS.get(num), data=smth)
audit(context, infocard)
context.emit(rule="person", data=infocard)
context.log.info("Loaded person: [%s] %s" % (person_id, full_name))
def parse(context, data):
with context.load_file(data["content_hash"]) as fh:
with bz2.open(fh, mode="rt", encoding="utf-8") as bz2_file:
for line in bz2_file:
parse_declaration(context, json.loads(str(line)))
|
1665014
|
import json
from jupyter_server.base.handlers import APIHandler
from jupyter_server.utils import url_path_join
from escapism import escape
from copy import deepcopy
import string
import tornado
def safe_id(id):
"""
Make sure meeting-ids are safe
We try to keep meeting IDs to a safe subset of characters.
Not sure if Jitsi requires this, but I think it goes on some
URLs so easier to be safe.
"""
return escape(id, safe=string.ascii_letters + string.digits + '-')
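# Example (illustrative): letters, digits and "-" pass through unchanged, while other
# characters such as spaces are hex-escaped by escapism (its default "_XX" form).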
class BaseHandler(APIHandler):
@property
def videochat(self):
return self.settings['videochat']
@property
def room_prefix(self):
prefix = self.videochat.room_prefix
if not prefix:
prefix = f'jp-VideoChat-{self.request.host}-'
return prefix
class ConfigHandler(BaseHandler):
@tornado.web.authenticated
def get(self):
# Use camelcase for keys, since that's what typescript likes
# FIXME: room_prefix from hostname is generated twice, let's try fix that
self.finish(json.dumps({
"roomPrefix": self.room_prefix,
"jitsiServer": self.videochat.jitsi_server
}))
class GenerateRoomHandler(BaseHandler):
@tornado.web.authenticated
def post(self):
params = json.loads(self.request.body.decode())
display_name = params['displayName']
self.finish(json.dumps({
'id': safe_id(f"{self.room_prefix}{display_name}"),
'displayName': display_name
}))
class RoomsListHandler(BaseHandler):
"""
Return list of rooms available for this user to join.
"""
@property
def videochat(self):
return self.settings['videochat']
@tornado.web.authenticated
def get(self):
# FIXME: Do this prefixing only once
rooms = deepcopy(self.videochat.rooms)
for room in rooms:
room['id'] = safe_id(f"{self.room_prefix}{room['id']}")
self.finish(json.dumps(rooms))
def setup_handlers(web_app):
host_pattern = ".*$"
base_url = web_app.settings["base_url"]
def make_url_pattern(endpoint):
return url_path_join(base_url, 'videochat', endpoint)
handlers = [
(make_url_pattern('rooms'), RoomsListHandler),
(make_url_pattern('config'), ConfigHandler),
(make_url_pattern('generate-room'), GenerateRoomHandler)
]
web_app.add_handlers(host_pattern, handlers)
|
1665043
|
import serial
serLidar = serial.Serial("/dev/ttyACM0","115200")
serMotor = serial.Serial("/dev/ttyUSB0","9600")
distArr = []
while True:
dist = serLidar.readline()
try:
dist = dist[:3]
dist = int(dist)
distArr.append(dist)
if len(distArr) == 100:
del distArr[0]
except ValueError:
print("can not convert '{}' to int stopping".format(dist))
serMotor.write(b'5')
exit()
    distArrMore = [dist > 50 for dist in distArr]
    if sum(distArrMore) > 5:
serMotor.write(b'1')
print(dist)
#print("moving forward")
else:
serMotor.write(b'1')
print(dist)
|
1665060
|
import tensorflow as tf
import numpy as np
import os, sys, cv2
from VGGnet_train import *
vgg = VGGnet_train()
|
1665081
|
import math
from mathutils import Vector, Matrix, Color
import bpy
from .createPlane import *
def primLamp( _obj, _dirname ):
if _obj.data.type == "POINT":
return {
"type" : [ "L" ],
"params" : {
"L" : {
"type" : "point",
"point" : {
"Le" : [ 1, 1, 1 ],
"position" : v2a( _obj.location )
}
}
}
}
elif _obj.data.type == "SUN":
return {
"type" : [ "L" ],
"params" : {
"L" : {
"type" : "directional",
"directional" : {
"Le" : [ 1, 1, 1 ],
"direction" : v2a( ( _obj.matrix_world * Vector( ( 0, 0, -1, 0 ) ) ).xyz )
}
}
}
}
elif _obj.data.type == "AREA":
createPlane( _obj, _dirname )
return {
"type" : [ "L" ],
"mesh" : {
"path" : _obj.name + ".obj",
"postprocess" : {
"generate_normals" : True,
"generate_smooth_normals" : False
}
},
"params" : {
"L" : {
"type" : "area",
"area" : {
"Le" : [ 1, 1, 1 ]
}
}
}
}
def v2a( _vector ):
return [ _vector.x, _vector.z, -_vector.y ]
|
1665083
|
def binary_search(array, y):
low_index = 0
high_index = len(array) - 1
middle = 0
while low_index <= high_index:
middle = (high_index + low_index) // 2
if array[middle] < y:
low_index = middle + 1
elif array[middle] > y:
high_index = middle - 1
else:
return middle
return -1
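# Example: binary_search([1, 3, 5, 7], 5) returns 2; a value not in the sorted list returns -1.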
arrSize=int(input("Enter Array Size"))
array=[]
print("Enter Array Elements")
for i in range(arrSize):
array.append(int(input()))
y = int(input("Enter Number you want to find =:-"))
result = binary_search(array, y)
if result != -1:
print("Element is present at index", str(result))
else:
print("Element is not present in array")
|
1665120
|
from pyspark import SparkContext, SparkConf
import sys
appName="salesApp"
master="local"
conf = SparkConf().setAppName(appName)#.setMaster(master)
sc = SparkContext(conf=conf)
base_path="../../data/sales/"
output_path=sys.argv[1]#"/home/curri/projects/spark/out/"
# load sales
sales=sc.textFile(base_path+"sales_*.txt").map(lambda x:x.split('\t')) # Day | Store | Product | Qty
#load stores and products
stores=sc.textFile(base_path+"stores.txt").map(lambda x:x.split('\t')) # id | name
products=sc.textFile(base_path+"products.txt").map(lambda x:x.split('\t')) # id | name | category
# calculate sales by day
sales_by_day=sales.map(lambda x : (x[0],int(x[3])) ).reduceByKey(lambda x,y:x+y)
#save sales by day
sales_by_day.map(lambda l: "{0}\t{1}".format(l[0],l[1])).saveAsTextFile(output_path+"sales_by_day")
#calculate sales by store
sales_by_store=sales.map(lambda x : (x[1],int(x[3])) ).reduceByKey(lambda x,y:x+y)
#now join with stores to get store names
sales_by_store_joined=sales_by_store.join(stores) # output is: store_id | <qty | name>
#reshape
sales_by_store_with_name=sales_by_store_joined.map(lambda x: (x[1][1], x[1][0]))
# and save
sales_by_store_with_name.map(lambda l: "{0}\t{1}".format(l[0],l[1])).saveAsTextFile(output_path+"sales_by_store")
|
1665131
|
from pathlib import Path
root_path = Path(__file__).parent.parent
# define the model
import torch
from torch import nn
from torch.nn import functional as F
class TorchModel(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 6, 5, 1)
self.conv2 = nn.Conv2d(6, 16, 5, 1)
self.fc1 = nn.Linear(16 * 4 * 4, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
self.relu1 = nn.ReLU()
self.relu2 = nn.ReLU()
self.relu3 = nn.ReLU()
self.relu4 = nn.ReLU()
self.pool1 = nn.MaxPool2d((2, 2))
self.pool2 = nn.MaxPool2d((2, 2))
def forward(self, x):
x = self.pool1(self.relu1(self.conv1(x)))
x = self.pool2(self.relu2(self.conv2(x)))
x = torch.flatten(x, 1)
x = self.relu3(self.fc1(x))
x = self.relu4(self.fc2(x))
x = self.fc3(x)
return F.log_softmax(x, dim=1)
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
# load data
from torchvision import datasets, transforms
train_loader = torch.utils.data.DataLoader(
datasets.MNIST(root_path / 'data', train=True, download=True, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])), batch_size=128, shuffle=True)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST(root_path / 'data', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])), batch_size=1000, shuffle=True)
# define the trainer and evaluator
def trainer(model, optimizer, criterion):
# training the model
model.train()
for data, target in train_loader:
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = criterion(output, target)
loss.backward()
optimizer.step()
def evaluator(model):
# evaluating the model accuracy and average test loss
model.eval()
test_loss = 0
correct = 0
test_dataset_length = len(test_loader.dataset)
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
# sum up batch loss
test_loss += F.nll_loss(output, target, reduction='sum').item()
# get the index of the max log-probability
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= test_dataset_length
accuracy = 100. * correct / test_dataset_length
print('Average test loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)'.format(test_loss, correct, test_dataset_length, accuracy))
def test_trt(engine):
test_loss = 0
correct = 0
time_elasped = 0
for data, target in test_loader:
output, time = engine.inference(data)
test_loss += F.nll_loss(output, target, reduction='sum').item()
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
time_elasped += time
test_loss /= len(test_loader.dataset)
print('Loss: {} Accuracy: {}%'.format(
test_loss, 100 * correct / len(test_loader.dataset)))
print("Inference elapsed_time (whole dataset): {}s".format(time_elasped))
|
1665144
|
from typing import List
class Solution:
def maxSatisfaction(self, satisfaction: List[int]) -> int:
satisfaction.sort(reverse=True)
ans = cur_sum = 0
for ele in satisfaction:
cur_sum += ele
if cur_sum >= 0:
ans += cur_sum
        return ans
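# Worked example (LeetCode 1402 sample): satisfaction = [-1, -8, 0, 5, -9].
# Sorted descending -> [5, 0, -1, -8, -9]; the running sums 5, 5 and 4 stay
# non-negative and are accumulated (5 + 5 + 4 = 14); later negative running
# sums are skipped, so maxSatisfaction returns 14.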
|
1665173
|
import pandas as pd
import pytest
from etna.analysis import StatisticsRelevanceTable
from etna.transforms.feature_selection import MRMRFeatureSelectionTransform
@pytest.mark.parametrize(
"features_to_use, expected_features",
(
("all", ["regressor_1", "regressor_2", "exog"]),
(["regressor_1"], ["regressor_1"]),
(["regressor_1", "unknown_column"], ["regressor_1"]),
),
)
def test_get_features_to_use(ts_with_exog: pd.DataFrame, features_to_use, expected_features):
base_selector = MRMRFeatureSelectionTransform(
relevance_table=StatisticsRelevanceTable(), top_k=3, features_to_use=features_to_use
)
features = base_selector._get_features_to_use(ts_with_exog.df)
assert sorted(features) == sorted(expected_features)
def test_get_features_to_use_raise_warning(ts_with_exog: pd.DataFrame):
base_selector = MRMRFeatureSelectionTransform(
relevance_table=StatisticsRelevanceTable(), top_k=3, features_to_use=["regressor_1", "unknown_column"]
)
with pytest.warns(
UserWarning, match="Columns from feature_to_use which are out of dataframe columns will be dropped!"
):
_ = base_selector._get_features_to_use(ts_with_exog.df)
@pytest.mark.parametrize(
"features_to_use, selected_features, expected_columns",
(
("all", ["regressor_1"], ["regressor_1", "target"]),
(["regressor_1", "regressor_2"], ["regressor_1"], ["regressor_1", "exog", "target"]),
),
)
def test_transform(ts_with_exog: pd.DataFrame, features_to_use, selected_features, expected_columns):
base_selector = MRMRFeatureSelectionTransform(
relevance_table=StatisticsRelevanceTable(), top_k=3, features_to_use=features_to_use
)
base_selector.selected_features = selected_features
transformed_df_with_exog = base_selector.transform(ts_with_exog.df)
columns = set(transformed_df_with_exog.columns.get_level_values("feature"))
assert sorted(columns) == sorted(expected_columns)
|
1665179
|
import os.path
from datetime import datetime
from hashlib import md5
DATE_FORMAT = '%Y-%m-%d %H:%M'
class FileStatistic(object):
def __init__(self, root):
self.root = root
def get_filename(self, name):
        return os.path.join(self.root, md5((name + 'v1').encode('utf-8')).hexdigest())
def log(self, name, cpm, accuracy, dt=None):
dt = dt or datetime.now()
fname = self.get_filename(name)
existed_before = os.path.exists(fname)
        with open(fname, 'a') as f:
            if not existed_before:
                f.write(name + '\n')
            f.write("{0} {1} {2}\n".format(dt.strftime(DATE_FORMAT), cpm, accuracy))
def get(self, name, accuracy):
result = {}
with open(self.get_filename(name)) as f:
skip_first = True
for l in f:
if skip_first:
skip_first = False
continue
try:
dt, cpm, acc = l.strip().rsplit(' ', 2)
except ValueError:
continue
acc = float(acc)
if acc < accuracy:
continue
cpm = float(cpm)
dt = datetime.strptime(dt, DATE_FORMAT).date()
try:
avg, cnt = result[dt]
except KeyError:
avg, cnt = 0.0, 0
result[dt] = ( avg * cnt + cpm ) / (cnt + 1.0), cnt + 1
return result
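# Illustrative usage (a sketch; the "stats" directory name is an assumption and
# must already exist):
#
#   stats = FileStatistic('stats')
#   stats.log('lesson-1', cpm=250, accuracy=0.97)
#   daily = stats.get('lesson-1', accuracy=0.9)  # {date: (average_cpm, sample_count)}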
|
1665183
|
from Vertex import Vertex
from Connect import Connect
class TriSolve():
#get a triangle and line in 3d space
#converts triangle to 2d space and gets the point of intersection on that plane
#returns a boolean from triPointIntersect
def triPointLineIntersect(self):
return 1
#called by triPointLineIntersect
#returns a boolean
def triPointIntersect(self):
return 1
#sorts the height of a list of Connects and returns an ordered array
#element zero is the highest Connect
#Connects are in 2d space
    def sortHeightLines(self, connectL):
        self.precondition(type(connectL) == type([]))
        heightL = [connectL[0]]
        for i in range(1, len(connectL)):
            heightL = self.heightInsert(heightL, connectL[i])
        return heightL
#insert element, highest element at index 0
    #takes parameters of a list, connect object
def heightInsert(self, l, c):
#why can't I change the data in l?
return self.helperHeightInsert(l , c)
def helperHeightInsert(self, l, c):
#print(type(c))
#self.precondition(type(c) == 'Connect')
h = c.getHeight()
if(l == []):
return [c]
elif(l[0].getHeight() < h):
return [c] + l
else:
return [l[0]] + self.heightInsert(l[1:], c)
#helpers
class PreconditionException(Exception):
pass
class PostconditionException(Exception):
pass
    def precondition(self, value_of_precondition):
        if (value_of_precondition != True):
            raise self.PreconditionException
    def postcondition(self, value_of_postcondition):
        if (value_of_postcondition != True):
            raise self.PostconditionException
|
1665232
|
import logging
import multiprocessing as mp
from datetime import datetime
LOGGER = logging.getLogger(__name__)
class Runner:
"""Runner.
"""
def __init__(self, config, is_debug, is_cold):
"""Constructor.
"""
self._config = config
self._is_debug = is_debug
self._is_cold = is_cold
self._exchanges = {}
self._handlers = {}
def load(self):
"""Load.
"""
LOGGER.info('Loading runner')
handlers_configuration = self._config.handlers
handlers = self.create_handlers(
handlers_configuration=handlers_configuration,
is_debug=self._is_debug,
is_cold=self._is_cold)
self._handlers = handlers
exchanges_configuration = self._config.subscriptions
exchanges = self.create_exchanges(
exchanges_configuration=exchanges_configuration,
handlers=handlers,
is_debug=self._is_debug,
is_cold=self._is_cold)
self._exchanges = exchanges
def run(self):
"""Run.
"""
LOGGER.info('Start running the feed handler')
processes = []
for name, handler in self._handlers.items():
LOGGER.info('Running handler %s', name)
process = mp.Process(target=handler.run)
process.start()
processes.append(process)
for name, exchange in self._exchanges.items():
LOGGER.info('Running exchange %s', name)
if len(self._exchanges) > 1:
process = mp.Process(target=exchange.run)
process.start()
processes.append(process)
else:
exchange.run()
LOGGER.info('Joining all the processes')
for process in processes:
process.join()
def archive(self, date):
"""Archive.
"""
date = datetime.strptime(date, '%Y-%m-%d')
LOGGER.info('Archiving the tables with date %s', date)
processes = []
for name, handler in self._handlers.items():
LOGGER.info('Running handler %s', name)
process = mp.Process(target=handler.run)
process.start()
processes.append(process)
for exchange in self._exchanges.values():
for name, instrument in exchange.instruments.items():
for handler in exchange.handlers.values():
handler.rotate_table(
table=instrument,
last_datetime=date,
allow_fail=True)
LOGGER.info('Closing the handlers')
for handler in self._handlers.values():
handler.prepare_close()
LOGGER.info('Joining all the processes')
for process in processes:
process.join()
LOGGER.info('Archived the tables with date %s', date)
@staticmethod
def create_exchange(
exchange_name, subscription, handlers, is_debug, is_cold):
"""Create exchange.
"""
try:
from befh.exchange.websocket_exchange import WebsocketExchange
exchange = WebsocketExchange(
name=exchange_name,
config=subscription,
is_debug=is_debug,
is_cold=is_cold)
exchange.load(handlers=handlers)
except ImportError as error:
LOGGER.info(
'Cannot load websocket exchange %s and fall into '
'REST api exchange', exchange_name)
from befh.exchange.rest_api_exchange import RestApiExchange
exchange = RestApiExchange(
name=exchange_name,
config=subscription,
is_debug=is_debug,
is_cold=is_cold)
exchange.load(handlers=handlers)
return exchange
@staticmethod
def create_exchanges(
exchanges_configuration, handlers, is_debug, is_cold):
"""Create exchanges.
"""
exchanges = {}
for exchange_name, subscription in exchanges_configuration.items():
exchange = Runner.create_exchange(
exchange_name=exchange_name,
subscription=subscription,
handlers=handlers,
is_debug=is_debug,
is_cold=is_cold)
exchanges[exchange_name] = exchange
return exchanges
@staticmethod
def create_handler(handler_name, handler_parameters, is_debug, is_cold):
"""Create handler.
"""
LOGGER.info('Creating handler %s', handler_name)
handler_name = handler_name.lower()
if handler_name == "sql":
from befh.handler import SqlHandler
handler = SqlHandler(
is_debug=is_debug,
is_cold=is_cold,
**handler_parameters)
elif handler_name == "zmq":
from befh.handler import ZmqHandler
handler = ZmqHandler(
is_debug=is_debug,
is_cold=is_cold,
**handler_parameters)
else:
raise NotImplementedError(
'Handler %s is not implemented' % handler_name)
handler.load(queue=mp.Queue())
return handler
@staticmethod
def create_handlers(handlers_configuration, is_debug, is_cold):
"""Create handlers.
"""
handlers = {}
for handler_name, handler_para in handlers_configuration.items():
handlers[handler_name] = Runner.create_handler(
handler_name=handler_name,
handler_parameters=handler_para,
is_debug=is_debug,
is_cold=is_cold)
return handlers
|
1665331
|
from __future__ import print_function
import sys
from pacolib import *
if len(sys.argv) < 2:
sys.stderr.write('\nUsage: '+sys.argv[0]+' relsize\n\n')
sys.exit(1)
relsize = int(sys.argv[1])
print ('From Paco Require Import hpattern.')
print ('From Paco Require Export paconotation_internal paconotation.')
print ('Set Implicit Arguments.')
print ()
print ('(** * Tactic support for [paco] library')
print ()
print (' This file defines tactics for converting the conclusion to the right form so')
print (' that the accumulation lemmas can be usefully applied. These tactics are used')
print (' in both internal and external approaches.')
print ()
print (' Our main tactic, [pcofix], is defined at the end of the file and')
print (' works for predicates of arity up to 14.')
print ('*)')
print ()
print ('(** ** Internal tactics *)')
print ()
print ('Inductive _paco_mark := _paco_mark_cons.')
print ()
print ('Inductive _paco_foo := _paco_foo_cons.')
print ()
print ('Definition _paco_id {A} (a : A) : A := a.')
print ()
print ('Ltac paco_generalize_hyp mark :=')
print (' let y := fresh "_paco_rel_" in')
print (' match goal with')
print (' | [x: ?A |- _] =>')
print (' match A with')
print (' | mark => clear x')
print (' | _ => intro y;')
print (' match type of y with')
print (' | context[x] => revert x y;')
print (' match goal with [|-forall x, @?f x -> _] =>')
print (' intros x y; generalize (ex_intro f x y)')
print (' end')
print (' | _ => generalize (conj (ex_intro _ x _paco_foo_cons) y)')
print (' end; clear x y; paco_generalize_hyp mark')
print (' end')
print (' end.')
print ()
print ('Ltac paco_destruct_hyp mark :=')
print (' match goal with')
print (' | [x: ?A |- _] =>')
print (' match A with')
print (' | mark => idtac')
print (' | _paco_foo => clear x; paco_destruct_hyp mark')
print (" | exists n, ?p => let n' := fresh n in destruct x as (n', x); paco_destruct_hyp mark")
print (" | ?p /\ ?q => let x' := fresh x in destruct x as (x,x'); paco_destruct_hyp mark")
print (' end')
print (' end.')
print ()
print ('Ltac paco_revert_hyp mark :=')
print (' match goal with [x: ?A |- _] =>')
print (' match A with')
print (' | mark => clear x')
print (' | _ => revert x; paco_revert_hyp mark')
print (' end end.')
print ()
print ('Ltac paco_post_var INC pr cr := let TMP := fresh "_paco_tmp_" in')
print (' repeat (')
print (' match goal with [H: context f [pr] |-_] =>')
print (' let cih := context f [cr] in rename H into TMP;')
print (' assert(H : cih) by (repeat intro; eapply INC, TMP; eassumption); clear TMP')
print (' end);')
print (' clear INC pr.')
print ()
print ('Ltac paco_rename_last :=')
print (' let x := fresh "_paco_xxx_" in match goal with [H: _|-_] => rename H into x end.')
print ()
print ('Ltac paco_simp_hyp CIH :=')
print (' let EP := fresh "_paco_EP_" in')
print (' let FP := fresh "_paco_FF_" in')
print (' let TP := fresh "_paco_TP_" in')
print (' let XP := fresh "_paco_XP_" in')
print (' let PP := type of CIH in')
print (' evar (EP: Prop);')
print (' assert (TP: False -> PP) by (')
print (' intros FP; generalize _paco_mark_cons;')
print (' repeat intro; paco_rename_last; paco_destruct_hyp _paco_mark;')
print (' paco_revert_hyp _paco_mark;')
print (' let con := get_concl in set (TP:=con); revert EP; instantiate (1:= con); destruct FP);')
print (' clear TP;')
print (' assert (XP: EP) by (unfold EP; clear -CIH; repeat intro; apply CIH;')
print (' first [')
print (' (repeat match goal with | [ |- @ex _ _ ] => eexists | [ |- _ /\ _ ] => split end;')
print (' try (reflexivity);')
print (' first [eassumption|apply _paco_foo_cons]); fail')
print (' | (repeat match goal with | [ |- @ex _ _ ] => eexists | [ |- _ /\ _ ] => split end;')
print (' (try unfold _paco_id); eauto using _paco_foo_cons)]);')
print (' unfold EP in *; clear EP CIH; rename XP into CIH.')
print ()
print ('Ltac paco_post_simp CIH :=')
print (' let CIH := fresh CIH in')
print (' intro CIH; paco_simp_hyp CIH;')
print (' first [try(match goal with [ |- context[_paco_id] ] => fail 2 | [ |- context[_paco_foo] ] => fail 2 end) |')
print (' let TMP := fresh "_paco_TMP_" in')
print (' generalize _paco_mark_cons; intro TMP;')
print (' repeat intro; paco_rename_last; paco_destruct_hyp _paco_mark;')
print (' paco_revert_hyp _paco_mark')
print (' ].')
print ()
print ('Ltac paco_ren_r nr cr :=')
print (' first [rename cr into nr | let nr := fresh nr in rename cr into nr].')
print ()
print ('Ltac paco_ren_pr pr cr := rename cr into pr.')
print ()
print ('Ltac paco_revert :=')
print (' match goal with [H: _ |- _] => revert H end.')
print ('')
for n in range (0,relsize+1):
print ("Section SIG"+str(n)+".")
print ("")
for i in range(n):
print ("Variable T"+str(i)+" : "+ifpstr(i,"forall"),end="")
for j in range(i):
print (" (x"+str(j)+": @T"+str(j)+itrstr(" x",j)+")",end="")
print (ifpstr(i,", ")+"Type.")
print ("")
print ("(** ** Signatures *)")
print ("")
print ("Record sig"+str(n)+"T :=")
print (" exist"+str(n)+"T {")
for i in range(n):
print (" proj"+str(n)+"T"+str(i)+": @T"+str(i)+itrstr(" proj"+str(n)+"T", i)+";")
print (" }.")
print ("Definition uncurry"+str(n)+" (R: rel"+str(n)+""+itrstr(" T", n)+"): rel1 sig"+str(n)+"T :=")
print (" fun x => R"+itrstr(" (proj"+str(n)+"T", n, " x)")+".")
print ("Definition curry"+str(n)+" (R: rel1 sig"+str(n)+"T): rel"+str(n)+""+itrstr(" T", n)+" :=")
print (" "+ifpstr(n, "fun"+itrstr(" x", n)+" =>")+" R (@exist"+str(n)+"T"+itrstr(" x", n)+").")
print ("")
print ("Lemma uncurry_map"+str(n)+" r0 r1 (LE : r0 <"+str(n)+"== r1) : uncurry"+str(n)+" r0 <1== uncurry"+str(n)+" r1.")
print ("Proof. intros [] H. apply LE. apply H. Qed.")
print ("")
print ("Lemma uncurry_map_rev"+str(n)+" r0 r1 (LE: uncurry"+str(n)+" r0 <1== uncurry"+str(n)+" r1) : r0 <"+str(n)+"== r1.")
print ("Proof.")
print (" red; intros. apply (LE (@exist"+str(n)+"T"+itrstr(" x", n)+") PR).")
print ("Qed.")
print ("")
print ("Lemma curry_map"+str(n)+" r0 r1 (LE: r0 <1== r1) : curry"+str(n)+" r0 <"+str(n)+"== curry"+str(n)+" r1.")
print ("Proof. ")
print (" red; intros. apply (LE (@exist"+str(n)+"T"+itrstr(" x", n)+") PR).")
print ("Qed.")
print ("")
print ("Lemma curry_map_rev"+str(n)+" r0 r1 (LE: curry"+str(n)+" r0 <"+str(n)+"== curry"+str(n)+" r1) : r0 <1== r1.")
print ("Proof. ")
print (" intros [] H. apply LE. apply H.")
print ("Qed.")
print ("")
print ("Lemma uncurry_bij1_"+str(n)+" r : curry"+str(n)+" (uncurry"+str(n)+" r) <"+str(n)+"== r.")
print ("Proof. unfold le"+str(n)+". intros. apply PR. Qed.")
print ("")
print ("Lemma uncurry_bij2_"+str(n)+" r : r <"+str(n)+"== curry"+str(n)+" (uncurry"+str(n)+" r).")
print ("Proof. unfold le"+str(n)+". intros. apply PR. Qed.")
print ("")
print ("Lemma curry_bij1_"+str(n)+" r : uncurry"+str(n)+" (curry"+str(n)+" r) <1== r.")
print ("Proof. intros [] H. apply H. Qed.")
print ("")
print ("Lemma curry_bij2_"+str(n)+" r : r <1== uncurry"+str(n)+" (curry"+str(n)+" r).")
print ("Proof. intros [] H. apply H. Qed.")
print ("")
print ("Lemma uncurry_adjoint1_"+str(n)+" r0 r1 (LE: uncurry"+str(n)+" r0 <1== r1) : r0 <"+str(n)+"== curry"+str(n)+" r1.")
print ("Proof.")
print (" apply uncurry_map_rev"+str(n)+". eapply le1_trans; [apply LE|]. apply curry_bij2_"+str(n)+".")
print ("Qed.")
print ("")
print ("Lemma uncurry_adjoint2_"+str(n)+" r0 r1 (LE: r0 <"+str(n)+"== curry"+str(n)+" r1) : uncurry"+str(n)+" r0 <1== r1.")
print ("Proof.")
print (" apply curry_map_rev"+str(n)+". eapply le"+str(n)+"_trans; [|apply LE]. apply uncurry_bij2_"+str(n)+".")
print ("Qed.")
print ("")
print ("Lemma curry_adjoint1_"+str(n)+" r0 r1 (LE: curry"+str(n)+" r0 <"+str(n)+"== r1) : r0 <1== uncurry"+str(n)+" r1.")
print ("Proof.")
print (" apply curry_map_rev"+str(n)+". eapply le"+str(n)+"_trans; [apply LE|]. apply uncurry_bij2_"+str(n)+".")
print ("Qed.")
print ("")
print ("Lemma curry_adjoint2_"+str(n)+" r0 r1 (LE: r0 <1== uncurry"+str(n)+" r1) : curry"+str(n)+" r0 <"+str(n)+"== r1.")
print ("Proof.")
print (" apply uncurry_map_rev"+str(n)+". eapply le1_trans; [|apply LE]. apply curry_bij1_"+str(n)+".")
print ("Qed.")
print ("")
print ("End SIG"+str(n)+".")
print ('(** *** Arity 0')
print ('*)')
print ()
print ('Ltac paco_cont0 :=')
print ('generalize _paco_foo_cons; paco_generalize_hyp _paco_mark.')
print ()
print ('Ltac paco_pre0 :=')
print ('generalize _paco_mark_cons; repeat intro; paco_cont0.')
print ()
print ('Ltac paco_post_match0 INC tac1 tac2 :=')
print ('let cr := fresh "_paco_cr_" in intros cr INC; repeat (red in INC);')
print ('match goal with [H: ?x |- _] => match x with')
print ('| bot0 -> _ => clear H; tac1 cr')
print ('| ?pr -> _ => paco_post_var INC pr cr; tac2 pr cr')
print ('| _ => tac1 cr')
print ('end end.')
print ()
print ('Tactic Notation "paco_post0" ident(CIH) "with" ident(nr) :=')
print ('let INC := fresh "_paco_inc_" in')
print ('paco_post_match0 INC ltac:(paco_ren_r nr) paco_ren_pr; paco_post_simp CIH;')
print ("let CIH' := fresh CIH in try rename INC into CIH'.")
print ()
for n in range (1,relsize+1):
print ("(** *** Arity "+str(n))
print ("*)")
print ()
print ('Lemma _paco_convert'+str(n)+': forall'+itrstr(' T',n))
print ('(paco'+str(n)+': forall')
for i in range(n):
print ('(y'+str(i)+': @T'+str(i)+itrstr(' y', i)+')')
print (', Prop)')
print (itrstr(' y', n))
print ('(CONVERT: forall')
for i in range(n):
print ('(x'+str(i)+': @T'+str(i)+itrstr(' x', i)+')')
print ('(EQ: _paco_id (@exist'+str(n)+'T'+itrstr(' T', n)+itrstr(' x', n)+' = @exist'+str(n)+'T'+itrstr(' T', n)+itrstr(' y', n)+'))')
print (', @paco'+str(n)+itrstr(' x', n)+'),')
print ('@paco'+str(n)+itrstr(' y', n)+'.')
print ('Proof. intros. apply CONVERT; reflexivity. Qed.')
print ()
print ('Lemma _paco_convert_rev'+str(n)+': forall'+itrstr(' T',n))
print ('(paco'+str(n)+': forall')
for i in range(n):
print ('(y'+str(i)+': @T'+str(i)+itrstr(' y', i)+')')
print (', Prop)')
print (itrstr(' y', n))
print (itrstr(' x', n))
print ('(EQ: _paco_id (@exist'+str(n)+'T'+itrstr(' T', n)+itrstr(' x', n)+' = @exist'+str(n)+'T'+itrstr(' T', n)+itrstr(' y', n)+'))')
print ('(PACO: @paco'+str(n)+itrstr(' y', n)+'),')
print ('@paco'+str(n)+itrstr(' x', n)+'.')
print ('Proof. intros.')
print ('apply (@f_equal (@sig'+str(n)+'T'+itrstr(' T', n)+') _ (fun x => @paco'+str(n))
for i in range(n):
print (' x.(proj'+str(n)+'T'+str(i)+')')
print (')) in EQ. simpl in EQ. rewrite EQ. apply PACO.')
print ('Qed.')
print ()
print ('Ltac paco_convert_rev'+str(n)+' := match goal with')
print ('| [H: _paco_id (@exist'+str(n)+'T'+(' _' * n)+itrstr(' ?x', n)+' = @exist'+str(n)+'T'+(' _' * n)+itrstr(' ?y', n)+') |- _] =>')
print ('eapply _paco_convert_rev'+str(n)+'; [eapply H; clear H|..]; clear'+itrstr(' x', n)+' H')
print ('end.')
print ()
print ("Ltac paco_cont"+str(n)+itrstr(" e",n)+" :=")
for i in range(n):
print ('let x'+str(i)+' := fresh "_paco_v_" in')
print ('apply _paco_convert'+str(n)+';')
print ('intros'+itrstr(' x', n)+';')
print (itrstr('move x',n-1,' at top; ')+'move x'+str(n-1)+' at top;')
print ('paco_generalize_hyp _paco_mark; revert'+itrstr(' x',n)+'.')
print ()
print ('Lemma _paco_pre'+str(n)+': forall'+itrstr(' T',n)+' (gf: rel'+str(n)+itrstr(' T',n)+')'+itrstr(' x',n))
print ("(X: let gf' := gf in gf'"+itrstr(" x",n)+"), gf"+itrstr(" x",n)+".")
print ('Proof. intros; apply X. Defined.')
print ()
print ('Ltac paco_pre'+str(n)+' := let X := fresh "_paco_X_" in')
print ('generalize _paco_mark_cons; repeat intro;')
print ('apply _paco_pre'+str(n)+';')
print ('match goal with')
print ('| [|- let _ : _'+itrstr(' ?T',n)+' := _ in _'+itrstr(' ?e',n)+'] => intro X; unfold X; clear X;')
print ('paco_cont'+str(n))
for i in range(n):
print (' (e'+str(i)+': T'+str(i)+itrstr(' e',i)+')')
print ('end.')
print ()
print ('Ltac paco_post_match'+str(n)+' INC tac1 tac2 :=')
print ('let cr := fresh "_paco_cr_" in intros cr INC; repeat (red in INC);')
print ('match goal with [H: ?x |- _] => match x with')
print ('| forall'+n*' _'+', bot'+str(n)+n*' _'+' -> _ => clear H; tac1 cr')
print ('| forall'+n*' _'+', ?pr'+n*' _'+' -> _ => paco_post_var INC pr cr; tac2 pr cr')
print ('| _ => tac1 cr')
print ('end end.')
print ()
print ('Ltac paco_simp_hyp'+str(n)+' CIH :=')
print (' let EP := fresh "_paco_EP_" in')
print (' let FP := fresh "_paco_FF_" in')
print (' let TP := fresh "_paco_TP_" in')
print (' let XP := fresh "_paco_XP_" in')
print (' let PP := type of CIH in')
print (' evar (EP: Prop);')
print (' assert (TP: False -> PP) by (')
print (' intros FP; generalize _paco_mark_cons;')
print (' repeat intro; paco_rename_last; paco_destruct_hyp _paco_mark;')
print (' paco_convert_rev'+str(n)+'; paco_revert_hyp _paco_mark;')
print (' let con := get_concl in set (TP:=con); revert EP; instantiate (1:= con); destruct FP);')
print (' clear TP;')
print (' assert (XP: EP) by (unfold EP; clear -CIH; repeat intro; apply CIH;')
print (' first [')
print (' (repeat match goal with | [ |- @ex _ _ ] => eexists | [ |- _ /\ _ ] => split end;')
print (' [..|match goal with [|-_paco_id (?a = ?b)] => unfold _paco_id; reflexivity end];')
print (' first [eassumption|apply _paco_foo_cons]); fail')
print (' | (repeat match goal with | [ |- @ex _ _ ] => eexists | [ |- _ /\ _ ] => split end;')
print (' (try unfold _paco_id); eauto using _paco_foo_cons)]);')
print (' unfold EP in *; clear EP CIH; rename XP into CIH.')
print ()
print ('Ltac paco_post_simp'+str(n)+' CIH :=')
print (' let CIH := fresh CIH in')
print (' intro CIH; paco_simp_hyp'+str(n)+' CIH;')
print (' first [try(match goal with [ |- context[_paco_id] ] => fail 2 | [ |- context[_paco_foo] ] => fail 2 end) |')
print (' let TMP := fresh "_paco_TMP_" in')
print (' generalize _paco_mark_cons; intro TMP;')
print (' repeat intro; paco_rename_last; paco_destruct_hyp _paco_mark;')
print (' paco_convert_rev'+str(n)+'; paco_revert_hyp _paco_mark')
print (' ].')
print ()
print ('Tactic Notation "paco_post'+str(n)+'" ident(CIH) "with" ident(nr) :=')
print ('let INC := fresh "_paco_inc_" in')
print ('paco_post_match'+str(n)+' INC ltac:(paco_ren_r nr) paco_ren_pr; paco_post_simp'+str(n)+' CIH;')
print ("let CIH' := fresh CIH in try rename INC into CIH'.")
print ()
print ("(** ** External interface *)")
print ()
print ("(** We provide our main tactics:")
print ()
print (" - [pcofix{n} ident using lemma with ident']")
print ()
print ()
print (" where [ident] is the identifier used to name the generated coinduction hypothesis,")
print (" [lemma] is an expression denoting which accumulation lemma is to be used, and")
print (" [ident'] is the identifier used to name the accumulation variable.")
print ("*)")
print ()
for n in range(relsize+1):
print ('Tactic Notation "pcofix'+str(n)+'" ident(CIH) "using" constr(lem) "with" ident(r) :=')
print ('paco_pre'+str(n)+'; eapply lem; [..|paco_post'+str(n)+' CIH with r].')
print ()
print ('(** [pcofix] automatically figures out the appropriate index [n] from')
print (' the type of the accumulation lemma [lem] and applies [pcofix{n}].')
print ('*)')
print ()
print ('Tactic Notation "pcofix" ident(CIH) "using" constr(lem) "with" ident(nr) :=')
print (' let N := fresh "_paco_N_" in let TMP := fresh "_paco_TMP_" in')
print (' evar (N : nat);')
print (' let P := type of lem in')
print (' assert (TMP: False -> P) by')
print (' (intro TMP; repeat intro; match goal with [H : _ |- _] => revert H end;')
print (' match goal with')
for n in reversed(range(relsize+1)):
print (' | [|- _'+n*' _'+' -> _] => revert N; instantiate (1 := '+str(n)+')')
print (' end; destruct TMP);')
print (' clear TMP;')
print (' revert N;')
print (' match goal with')
for n in range(relsize+1):
print (' | [|- let _ := '+str(n)+' in _] => intros _; pcofix'+str(n)+' CIH using lem with nr')
print (' end.')
print ()
print ('Tactic Notation "pcofix" ident(CIH) "using" constr(lem) :=')
print (' pcofix CIH using lem with r.')
print ()
print ("""
(** ** Type Class for acc, mult, fold and unfold
*)
Class paco_class (A : Prop) :=
{ pacoacctyp: Type
; pacoacc : pacoacctyp
; pacomulttyp: Type
; pacomult : pacomulttyp
; pacofoldtyp: Type
; pacofold : pacofoldtyp
; pacounfoldtyp: Type
; pacounfold : pacounfoldtyp
}.
Create HintDb paco.
""")
|
1665336
|
import ChromaPy32 as Chroma # Import the Chroma Module
from time import sleep
Keyboard = Chroma.Keyboard() # Initialize a new Keyboard Instance
RED = (255, 0, 0) # Initialize a new color by RGB (RED,GREEN,BLUE)
GREEN = (0, 255, 0)
for y in range(0, Keyboard.MaxRow): # Use Keyboard.MaxRow as an iteration border in a for-loop
    Keyboard.setbyGrid(3, y, RED) # sets the whole fourth column to red
for x in range(0, Keyboard.MaxColumn): # Use Keyboard.MaxColumn as iteration border in a for-loop
Keyboard.setbyGrid(x, 2, GREEN) # sets the whole third row to green
Keyboard.applyGrid() # applies the Keyboard-Grid to the connected Keyboard
sleep(5)
|
1665341
|
from collections import Counter, defaultdict
import networkx
from indra.ontology.bio import bio_ontology
def plot_problem(problem):
import matplotlib.pyplot as plt
plt.ion()
plt.figure()
G = bio_ontology.subgraph(problem)
pos = networkx.spring_layout(G)
networkx.draw_networkx(G, pos, node_color='pink')
edge_labels = networkx.get_edge_attributes(G, 'source')
networkx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels)
plt.show()
if __name__ == '__main__':
bio_ontology.initialize()
xrefs = [(e[0], e[1]) for e in bio_ontology.edges(data=True) if
e[2]['type'] == 'xref']
xrefg = bio_ontology.edge_subgraph(xrefs)
comps = networkx.algorithms.strongly_connected_components(xrefg)
problems = []
for comp in comps:
namespaces = [bio_ontology.get_ns(node) for node in comp]
cnt = Counter(namespaces)
if any(v > 1 for k, v in cnt.items()):
problems.append(comp)
print('Found %d problems in total' % len(problems))
problems_by_ns = defaultdict(list)
for problem in problems:
nscnt = Counter([bio_ontology.get_ns(n) for n in problem])
namespaces = [ns for ns, cnt in nscnt.items() if cnt > 1]
for ns in namespaces:
problems_by_ns[ns].append(problem)
for ns, problems_ns in problems_by_ns.items():
print(ns, len(problems_ns))
|
1665343
|
def TransitionPoint(arr, n):
    # Binary search for the first index holding a 1 in a sorted 0/1 array.
    l = 0
    u = n - 1
    while l <= u:
        mid = (l + u) // 2
        if arr[mid] == 0:
            l = mid + 1
        elif arr[mid] == 1:
            if mid == 0 or arr[mid - 1] == 0:
                return mid
            u = mid - 1
    return -1
n=int(input("Enter length of array:"))
arr=[]
for i in range(n):
elem=int(input())
arr.append(elem)
p = TransitionPoint(arr, n)
if(p>=0):
print("OUTPUT:",p)
else:
print("No Transition Point")
|
1665395
|
import numpy as np
from sklearn.ensemble import *
import xgboost as xgb
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed; model_selection is the current module
X = np.random.uniform(size=(100,10))
Y = np.random.uniform(size=(100))
# split our dataset for validation
train_X, test_X, train_Y, test_Y = train_test_split(X, Y, test_size = 0.2)
# our base models
ada = AdaBoostRegressor(n_estimators=500, learning_rate=0.1)
bagging = BaggingRegressor(n_estimators=500)
et = ExtraTreesRegressor(n_estimators=500)
gb = GradientBoostingRegressor(n_estimators=500, learning_rate=0.1, loss='lad',criterion='mse')
rf = RandomForestRegressor(n_estimators=500)
# fit our base models
ada.fit(train_X, train_Y)
bagging.fit(train_X, train_Y)
et.fit(train_X, train_Y)
gb.fit(train_X, train_Y)
rf.fit(train_X, train_Y)
# predict our train set
ada_out_train = ada.predict(train_X)
bagging_out_train = bagging.predict(train_X)
et_out_train = et.predict(train_X)
gb_out_train = gb.predict(train_X)
rf_out_train = rf.predict(train_X)
# predict our test set
ada_out_test = ada.predict(test_X)
bagging_out_test = bagging.predict(test_X)
et_out_test = et.predict(test_X)
gb_out_test = gb.predict(test_X)
rf_out_test = rf.predict(test_X)
# concat column-wise for train
stack_predict_train = np.vstack([ada_out_train,bagging_out_train,et_out_train,gb_out_train,rf_out_train]).T
# concat column-wise for test
stack_predict_test = np.vstack([ada_out_test,bagging_out_test,et_out_test,gb_out_test,rf_out_test]).T
params_xgd = {
'max_depth': 7,
'objective': 'reg:linear',
'learning_rate': 0.033,
'n_estimators': 1000
}
clf = xgb.XGBRegressor(**params_xgd)
clf.fit(stack_predict_train, train_Y, eval_set=[(stack_predict_test, test_Y)],
eval_metric='rmse', early_stopping_rounds=20, verbose=True)
# print mean square error
print(np.mean(np.square(test_Y - clf.predict(stack_predict_test))))
|
1665397
|
from abc import abstractmethod, ABC
from typing import Dict, Optional
class AbstractJSQLService(ABC):
@abstractmethod
def call_jsql(self, sql: str) -> Optional[Dict]:
raise NotImplementedError()
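# Illustrative sketch (not part of the original module): a minimal concrete
# subclass showing how the call_jsql contract can be satisfied. The name
# EchoJSQLService and its canned response are hypothetical.
class EchoJSQLService(AbstractJSQLService):
    def call_jsql(self, sql: str) -> Optional[Dict]:
        # Echo the statement back instead of contacting a real JSQL backend.
        return {"sql": sql, "rows": []}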
|
1665423
|
import numpy as np
def GetUserDataFunc(news_title,train_user_id_sample,train_user,train_sess,train_label,train_user_id):
def _get_user_data(uid):
click = []
sample = []
label = []
for sid in train_user_id_sample[uid]:
click.append(train_user['click'][train_user_id[sid]])
sample.append(train_sess[sid])
label.append(train_label[sid])
click = np.array(click)
sample = np.array(sample)
label = np.array(label)
click = news_title[click]
sample = news_title[sample]
return click,sample,label
return _get_user_data
def add_noise(weights,lambd):
for i in range(len(weights)):
weights[i] += np.random.laplace(scale = lambd,size=weights[i].shape)
return weights
def fed_single_update(model,doc_encoder,user_encoder,num,lambd,get_user_data,train_uid_table):
random_index = np.random.permutation(len(train_uid_table))[:num]
all_news_weights = []
all_user_weights = []
old_news_weight = doc_encoder.get_weights()
old_user_weight = user_encoder.get_weights()
sample_nums = []
loss = []
for uinx in random_index:
doc_encoder.set_weights(old_news_weight)
user_encoder.set_weights(old_user_weight)
uid = train_uid_table[uinx]
click,sample,label = get_user_data(uid)
#print(label)
g = model.fit([sample,click],label,batch_size = label.shape[0],verbose=False)
loss.append(g.history['loss'][0])
news_weight = doc_encoder.get_weights()
user_weight = user_encoder.get_weights()
if lambd>0:
news_weight = add_noise(news_weight,lambd)
user_weight = add_noise(user_weight,lambd)
#noise =
#weight += noise
all_news_weights.append(news_weight)
all_user_weights.append(user_weight)
sample_nums.append(label.shape[0])
sample_nums = np.array(sample_nums)
sample_nums = sample_nums/sample_nums.sum()
doc_weights = [np.average(weights, axis=0,weights=sample_nums) for weights in zip(*all_news_weights)]
user_weights = [np.average(weights, axis=0,weights=sample_nums) for weights in zip(*all_user_weights)]
doc_encoder.set_weights(doc_weights)
user_encoder.set_weights(user_weights)
loss = np.array(loss).mean()
#print('average loss',loss)
return loss
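# Hedged usage sketch (not in the original file): exercise add_noise on dummy
# weight arrays to show that shapes are preserved while values are perturbed
# with Laplace noise. The shapes and the lambda value are illustrative only.
if __name__ == '__main__':
    dummy_weights = [np.zeros((3, 3)), np.zeros(3)]
    noisy = add_noise([w.copy() for w in dummy_weights], lambd=0.1)
    print([w.shape for w in noisy])  # shapes unchanged, values now nonzero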
|
1665427
|
import random
from maro.simulator import Env
from maro.simulator.scenarios.vm_scheduling import AllocateAction, DecisionPayload
from rule_based_algorithm import RuleBasedAlgorithm
class RandomPick(RuleBasedAlgorithm):
def __init__(self, **kwargs):
super().__init__()
def allocate_vm(self, decision_event: DecisionPayload, env: Env) -> AllocateAction:
valid_pm_num: int = len(decision_event.valid_pms)
# Random choose a valid PM.
chosen_idx: int = random.randint(0, valid_pm_num - 1)
# Take action to allocate on the chosen PM.
action: AllocateAction = AllocateAction(
vm_id=decision_event.vm_id,
pm_id=decision_event.valid_pms[chosen_idx]
)
return action
|
1665472
|
from trie.constants import (
BLANK_NODE,
BLANK_HASH,
BINARY_TRIE_NODE_TYPES
)
from trie.exceptions import (
ValidationError,
)
def validate_is_bytes(value):
if not isinstance(value, bytes):
raise ValidationError("Value is not of type `bytes`: got '{0}'".format(type(value)))
def validate_length(value, length):
if len(value) != length:
raise ValidationError("Value is of length {0}. Must be {1}".format(len(value), length))
def validate_is_node(node):
if node == BLANK_NODE:
return
elif len(node) == 2:
key, value = node
validate_is_bytes(key)
if isinstance(value, list):
validate_is_node(value)
else:
validate_is_bytes(value)
elif len(node) == 17:
validate_is_bytes(node[16])
for sub_node in node[:16]:
if sub_node == BLANK_NODE:
continue
elif isinstance(sub_node, list):
validate_is_node(sub_node)
else:
validate_is_bytes(sub_node)
validate_length(sub_node, 32)
else:
raise ValidationError("Invalid Node: {0}".format(node))
def validate_is_bin_node(node):
if node == BLANK_HASH or node[0] in BINARY_TRIE_NODE_TYPES:
return
else:
raise ValidationError("Invalid Node: {0}".format(node))
|
1665478
|
from vm.lua_state import LuaState
def main():
ls = LuaState()
ls.push_boolean(True)
ls.print_stack()
ls.push_integer(10)
ls.print_stack()
ls.push_nil()
ls.print_stack()
ls.push_string('hello')
ls.print_stack()
ls.push_value(-4)
ls.print_stack()
ls.replace(3)
ls.print_stack()
ls.set_top(6)
ls.print_stack()
ls.remove(-3)
ls.print_stack()
ls.set_top(-5)
ls.print_stack()
if __name__ == '__main__':
main()
|
1665479
|
from hashlib import sha3_256
import json
def get_hash(content, hex=False):
if isinstance(content, str):
content = content.encode()
if not isinstance(content, bytes):
raise TypeError(type(content))
hash = sha3_256(content)
result = hash.digest()
if hex:
result = result.hex()
return result
def get_dict_hash(d, hex=False):
"""This function is compatible with the checksum of a "plain" cell"""
content = json.dumps(d, sort_keys=True, indent=2) + "\n"
return get_hash(content,hex=hex)
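# Hedged usage sketch (illustration only, not in the original module): hash a
# string and a dict; key order does not affect the dict checksum because the
# JSON dump is produced with sorted keys.
if __name__ == '__main__':
    print(get_hash('hello', hex=True))
    assert get_dict_hash({'b': 2, 'a': 1}, hex=True) == get_dict_hash({'a': 1, 'b': 2}, hex=True)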
|
1665486
|
import copier
from .helpers import build_file_tree
def test_empty_suffix(tmp_path_factory):
root = tmp_path_factory.mktemp("demo_empty_suffix")
build_file_tree(
{
root
/ "copier.yaml": """
_templates_suffix: ""
name:
type: str
default: pingu
""",
root / "render_me": "Hello {{name}}!",
root / "{{name}}.txt": "Hello {{name}}!",
root / "{{name}}" / "render_me.txt": "Hello {{name}}!",
}
)
dest = tmp_path_factory.mktemp("dst")
copier.copy(str(root), dest, defaults=True, overwrite=True)
assert not (dest / "copier.yaml").exists()
assert (dest / "render_me").exists()
assert (dest / "pingu.txt").exists()
assert (dest / "pingu" / "render_me.txt").exists()
expected = "Hello pingu!"
assert (dest / "render_me").read_text() == expected
assert (dest / "pingu.txt").read_text() == expected
assert (dest / "pingu" / "render_me.txt").read_text() == expected
def test_binary_file_fallback_to_copy(tmp_path_factory):
root = tmp_path_factory.mktemp("demo_empty_suffix_binary_file")
build_file_tree(
{
root
/ "copier.yaml": """
_templates_suffix: ""
name:
type: str
default: pingu
""",
root
/ "logo.png": b"\x89PNG\r\n\x1a\n\x00\rIHDR\x00\xec\n{{name}}\n\x00\xec",
}
)
dest = tmp_path_factory.mktemp("dst")
copier.copy(str(root), dest, defaults=True, overwrite=True)
logo = dest / "logo.png"
assert logo.exists()
logo_bytes = logo.read_bytes()
assert b"{{name}}" in logo_bytes
assert b"pingu" not in logo_bytes
|
1665499
|
from tt.dataaccess.utils import get_data_store
from tt.actions.utils.utils import ensure_working
from tt.actions.utils.utils import ensure_end_after_start
from tt.dateutils.dateutils import formatted_str_for_isotime_str
def action_stop(colorizer, time):
data = get_data_store().load()
ensure_working(data)
current_entry = data['work'][-1]
ensure_end_after_start(current_entry, time)
current_entry['end'] = time
get_data_store().dump(data)
print('You stopped working on ' + colorizer.red(current_entry['name']) + ' at ' +
colorizer.yellow(formatted_str_for_isotime_str(time, '%H:%M')) + '.')
|
1665502
|
from torch.distributed._sharding_spec import (
ChunkShardingSpec,
)
def generate_chunk_sharding_specs_for_test(sharding_dim):
return [
ChunkShardingSpec(
dim=sharding_dim,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
),
# Test different ordering. (Case 1)
ChunkShardingSpec(
dim=sharding_dim,
placements=[
"rank:2/cuda:2",
"rank:3/cuda:3",
"rank:0/cuda:0",
"rank:1/cuda:1",
],
),
# Test different ordering. (Case 2)
ChunkShardingSpec(
dim=sharding_dim,
placements=[
"rank:3/cuda:3",
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
],
)
]
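# Hedged usage sketch (not part of the original module): the helper simply
# returns three ChunkShardingSpec variants for a given sharding dimension.
if __name__ == '__main__':
    specs = generate_chunk_sharding_specs_for_test(0)
    print(len(specs), [spec.dim for spec in specs])  # 3 [0, 0, 0]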
|
1665652
|
from __future__ import absolute_import, division
import numpy as np
import cv2
from . import Tracker
from ..utils import dict2tuple
from ..utils.complex import real, fft2, ifft2, complex_add, complex_mul, complex_div, fftshift
from ..descriptors.fhog import fast_hog
class TrackerKCF(Tracker):
def __init__(self, **kargs):
super(TrackerKCF, self).__init__('KCF')
self.parse_args(**kargs)
self._correlation = self.setup_kernel(self.cfg.kernel_type)
def parse_args(self, **kargs):
self.cfg = {
'lambda_': 1e-4,
'padding': 1.5,
'output_sigma_factor': 0.125,
'interp_factor': 0.012,
'sigma': 0.6,
'poly_a': 1,
'poly_b': 7,
'cell_size': 4,
'kernel_type': 'gaussian'}
for key, val in kargs.items():
self.cfg.update({key: val})
self.cfg = dict2tuple(self.cfg)
def setup_kernel(self, kernel_type):
assert kernel_type in ['linear', 'polynomial', 'gaussian']
if kernel_type == 'linear':
return lambda x1, x2: self._linear_correlation(x1, x2)
elif kernel_type == 'polynomial':
return lambda x1, x2: self._polynomial_correlation(
x1, x2, self.cfg.poly_a, self.cfg.poly_b)
elif kernel_type == 'gaussian':
return lambda x1, x2: self._gaussian_correlation(
x1, x2, self.cfg.sigma)
def init(self, image, init_rect):
# initialize parameters
self.resize_image = False
if np.sqrt(init_rect[2:].prod()) > 100:
self.resize_image = True
init_rect = init_rect / 2
self.t_center = init_rect[:2] + init_rect[2:] / 2
self.t_sz = init_rect[2:]
mod = self.cfg.cell_size * 2
self.padded_sz = self.t_sz * (1 + self.cfg.padding)
self.padded_sz = self.padded_sz.astype(int) // mod * mod + mod
# get feature size and initialize hanning window
if image.ndim == 2:
image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
if self.resize_image:
size = (int(image.shape[1] / 2), int(image.shape[0] / 2))
image = cv2.resize(image, size)
self.z = self._crop(image, self.t_center, self.padded_sz)
self.z = fast_hog(np.float32(self.z), self.cfg.cell_size)
self.feat_sz = self.z.shape
self.hann_window = np.outer(
np.hanning(self.feat_sz[0]),
np.hanning(self.feat_sz[1])).astype(np.float32)
self.hann_window = self.hann_window[:, :, np.newaxis]
self.z *= self.hann_window
# create gaussian labels
output_sigma = self.cfg.output_sigma_factor * \
np.sqrt(np.prod(self.feat_sz[:2])) / (1 + self.cfg.padding)
rs, cs = np.ogrid[:self.feat_sz[0], :self.feat_sz[1]]
rs, cs = rs - self.feat_sz[0] // 2, cs - self.feat_sz[1] // 2
y = np.exp(-0.5 / output_sigma ** 2 * (rs ** 2 + cs ** 2))
self.yf = fft2(y)
# train classifier
k = self._correlation(self.z, self.z)
self.alphaf = complex_div(self.yf, complex_add(fft2(k), self.cfg.lambda_))
def update(self, image):
if image.ndim == 2:
image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
if self.resize_image:
size = (int(image.shape[1] / 2), int(image.shape[0] / 2))
image = cv2.resize(image, size)
# locate target
x = self._crop(image, self.t_center, self.padded_sz)
x = self.hann_window * fast_hog(np.float32(x), self.cfg.cell_size)
k = self._correlation(x, self.z)
score = real(ifft2(complex_mul(self.alphaf, fft2(k))))
offset = self._locate_target(score)
self.t_center += offset * self.cfg.cell_size
# limit the estimated bounding box to be overlapped with the image
self.t_center = np.clip(
self.t_center, -self.t_sz / 2 + 2,
image.shape[1::-1] + self.t_sz / 2 - 1)
# update model
new_z = self._crop(image, self.t_center, self.padded_sz)
new_z = self.hann_window * fast_hog(np.float32(new_z), self.cfg.cell_size)
k = self._correlation(new_z, new_z)
new_alphaf = complex_div(self.yf, complex_add(fft2(k), self.cfg.lambda_))
self.alphaf = (1 - self.cfg.interp_factor) * self.alphaf + \
self.cfg.interp_factor * new_alphaf
self.z = (1 - self.cfg.interp_factor) * self.z + \
self.cfg.interp_factor * new_z
bndbox = np.concatenate([
self.t_center - self.t_sz / 2, self.t_sz])
if self.resize_image:
bndbox = bndbox * 2
return bndbox
def _crop(self, image, center, size):
corners = np.zeros(4, dtype=int)
corners[:2] = np.floor(center - size / 2).astype(int)
corners[2:] = corners[:2] + size
pads = np.concatenate(
(-corners[:2], corners[2:] - image.shape[1::-1]))
pads = np.maximum(0, pads)
if np.any(pads > 0):
corners = np.concatenate((
corners[:2] + pads[:2],
corners[2:] - pads[2:])).astype(int)
patch = image[corners[1]:corners[3], corners[0]:corners[2]]
if np.any(pads > 0):
patch = cv2.copyMakeBorder(
patch, pads[1], pads[3], pads[0], pads[2],
borderType=cv2.BORDER_REPLICATE)
return patch
def _linear_correlation(self, x1, x2):
xcorr = np.zeros((self.feat_sz[0], self.feat_sz[1]), np.float32)
for i in range(self.feat_sz[2]):
xcorr_ = cv2.mulSpectrums(
fft2(x1[:, :, i]), fft2(x2[:, :, i]), 0, conjB=True)
xcorr_ = real(ifft2(xcorr_))
xcorr += xcorr_
xcorr = fftshift(xcorr)
return xcorr / x1.size
def _polynomial_correlation(self, x1, x2, a, b):
xcorr = np.zeros((self.feat_sz[0], self.feat_sz[1]), np.float32)
for i in range(self.feat_sz[2]):
xcorr_ = cv2.mulSpectrums(
fft2(x1[:, :, i]), fft2(x2[:, :, i]), 0, conjB=True)
xcorr_ = real(ifft2(xcorr_))
xcorr += xcorr_
xcorr = fftshift(xcorr)
out = (xcorr / x1.size + a) ** b
return out
def _gaussian_correlation(self, x1, x2, sigma):
xcorr = np.zeros((self.feat_sz[0], self.feat_sz[1]), np.float32)
for i in range(self.feat_sz[2]):
xcorr_ = cv2.mulSpectrums(
fft2(x1[:, :, i]), fft2(x2[:, :, i]), 0, conjB=True)
xcorr_ = real(ifft2(xcorr_))
xcorr += xcorr_
xcorr = fftshift(xcorr)
out = (np.sum(x1 * x1) + np.sum(x2 * x2) - 2.0 * xcorr) / x1.size
out[out < 0] = 0
out = np.exp(-out / self.cfg.sigma ** 2)
return out
def _locate_target(self, score):
def subpixel_peak(left, center, right):
divisor = 2 * center - left - right
if abs(divisor) < 1e-3:
return 0
return 0.5 * (right - left) / divisor
_, _, _, max_loc = cv2.minMaxLoc(score)
loc = np.float32(max_loc)
if max_loc[0] in range(1, score.shape[1] - 1):
loc[0] += subpixel_peak(
score[max_loc[1], max_loc[0] - 1],
score[max_loc[1], max_loc[0]],
score[max_loc[1], max_loc[0] + 1])
if max_loc[1] in range(1, score.shape[0] - 1):
loc[1] += subpixel_peak(
score[max_loc[1] - 1, max_loc[0]],
score[max_loc[1], max_loc[0]],
score[max_loc[1] + 1, max_loc[0]])
offset = loc - np.float32(score.shape[1::-1]) / 2
return offset
class TrackerDCF(TrackerKCF):
def __init__(self, **kargs):
kargs.update({'kernel_type': 'linear'})
super(TrackerDCF, self).__init__(**kargs)
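# Hedged usage sketch (not part of the original module): the typical init/update
# loop for the tracker, run here on random frames. The frame size and the
# initial bounding box [x, y, w, h] are placeholders; a real application would
# feed consecutive video frames instead.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    frame = rng.randint(0, 255, (240, 320, 3)).astype(np.uint8)
    tracker = TrackerKCF()
    tracker.init(frame, np.array([100.0, 80.0, 40.0, 40.0]))
    print(tracker.update(frame))  # estimated [x, y, w, h] for the next frame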
|
1665702
|
from .base_enums import AvatarPart
class AvatarStyle(AvatarPart):
"""Avatar styles"""
__install__ = True
__enum_path__ = 'avatar_styles.py'
__path__ = 'avatar_parts/styles'
TRANSPARENT = 'transparent'
CIRCLE = 'circle'
|
1665707
|
from kratos import Interface, Generator, always_ff, posedge, verilog
import tempfile
import os
class ConfigInterface(Interface):
def __init__(self):
Interface.__init__(self, "Config")
width = 8
# local variables
read = self.var("read_data", width)
write = self.var("write_data", width)
r_en = self.var("r_en", 1)
w_en = self.var("w_en", 1)
# common ports
clk = self.clock("clk")
# define master -> slave ports
# and slave -> master ports
m_to_s = [write, r_en, w_en]
s_to_m = [read]
# define master and slave
self.master = self.modport("Master")
self.slave = self.modport("Slave")
for port in m_to_s:
self.master.set_output(port)
self.slave.set_input(port)
for port in s_to_m:
self.master.set_input(port)
self.slave.set_output(port)
# both of them need clock
self.master.set_input(clk)
self.slave.set_input(clk)
def test_modport_io(check_gold):
config = ConfigInterface()
class Master(Generator):
def __init__(self):
Generator.__init__(self, "Master")
# instantiate the interface
self.bus = self.interface(config.master, "bus", is_port=True)
# some logic to loop the read and write
# cycle
self.counter = self.var("counter", 8)
# we wire counter value to the write data
self.wire(self.bus.write_data, self.counter)
# always_ff on the posedge of clock from the interface
@always_ff((posedge, self.bus.clk))
def logic():
if self.counter % 4 == 0:
self.bus.r_en = 1
self.bus.w_en = 0
elif self.counter % 4 == 1:
self.bus.r_en = 0
self.bus.w_en = 1
else:
self.bus.r_en = 0
self.bus.w_en = 0
@always_ff((posedge, self.bus.clk))
def update():
self.counter = self.counter + 1
self.add_always(logic)
self.add_always(update)
class Slave(Generator):
def __init__(self):
Generator.__init__(self, "Slave")
# instantiate the interface
self.bus = self.interface(config.slave, "bus", is_port=True)
self.value = self.var("value", 8)
# just read and write out
@always_ff((posedge, self.bus.clk))
def logic():
if self.bus.r_en:
self.value = self.bus.write_data
elif self.bus.w_en:
self.bus.read_data = self.value
self.add_always(logic)
class Top(Generator):
def __init__(self):
Generator.__init__(self, "Top")
# instantiate master and slave
self.master = Master()
self.slave = Slave()
self.add_child("master", self.master)
self.add_child("slave", self.slave)
# clock will be from outside
clk = self.clock("clk")
# instantiate the interface bus
# notice that we're using config, not the modport
# version such as config.master
self.bus = self.interface(config, "bus_top")
# just need to wire things up
self.wire(self.bus.clk, clk)
self.wire(self.master.bus, self.bus)
self.wire(self.slave.bus, self.bus)
# the following also works
# self.wire(self.master.bus, bus.Master)
# self.wire(self.slave.bus, bus.Slave)
top = Top()
check_gold(top, "test_modport_io")
assert str(top.bus.read_data) == "bus_top.read_data"
def test_port_interface():
mod = Generator("mod")
mod.interface(ConfigInterface(), "port_interface", is_port=True)
with tempfile.TemporaryDirectory() as temp:
filename = os.path.join(temp, "mod.sv")
verilog(mod, filename=filename)
with open(filename) as f:
content = f.read()
assert "endinterface" in content
if __name__ == "__main__":
from conftest import check_gold_fn
test_modport_io(check_gold_fn)
|
1665740
|
import unittest
from katas.kyu_6.the_book_of_mormon import mormons
class MormonsTestCase(unittest.TestCase):
def test_equal_1(self):
self.assertEqual(mormons(10, 3, 9), 0)
def test_equal_2(self):
self.assertEqual(mormons(99, 2, 99), 0)
def test_equal_3(self):
self.assertEqual(mormons(40, 2, 120), 1)
def test_equal_4(self):
self.assertEqual(mormons(40, 2, 121), 2)
def test_equal_5(self):
self.assertEqual(mormons(20000, 2, 7000000000), 12)
|
1665749
|
import re
current_template = (
'<details>\n'
'<summary>View Terraform Plan</summary>\n\n'
'```terraform\n'
'Output is limited to 1000 lines and may be truncated. See CircleCI for full details.\n'
'{plan}\n'
'```\n'
'</details>\n'
)
previous_templates = [
"```hcl\n{plan}\n```",
]
def re_comment_match(comment_id, comment_body):
"""Returns a Match object, or None if no match was found"""
def _build_regex(template):
regex = re.escape(template.replace('{plan}', '___plan___')) \
.replace('___plan___', '(.*)')
return f'{re.escape(comment_id)}\n{regex}(.*)'
for tmpl in [current_template, *previous_templates]:
m = re.match(_build_regex(tmpl), comment_body, re.DOTALL)
if m is not None:
return m
return None
def comment_for_pr(comment_id, plan):
"""Returns a formatted string containing comment_id and plan"""
return f'{comment_id}\n{current_template.format(plan=plan)}'
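# Hedged usage sketch (not part of the original module): build a PR comment and
# confirm that re_comment_match recognises it again. The comment id and plan
# text are made up for illustration.
if __name__ == '__main__':
    body = comment_for_pr('<!-- terraform-plan -->', 'Plan: 1 to add, 0 to change, 0 to destroy.')
    print(re_comment_match('<!-- terraform-plan -->', body) is not None)  # True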
|
1665751
|
from ennemi import estimate_entropy, estimate_mi, pairwise_mi
import numpy as np
from matplotlib import rcParams
import matplotlib.pyplot as plt
import pandas as pd
rcParams["lines.markersize"] = 12
N = 200
week = np.arange(N)
rng = np.random.default_rng(1234)
# The weather is completely determined by temperature, air pressure and wind
# NOTE: This is not a realistic weather model! :)
actual_temp = 15 + 5*np.sin(week / 8) + rng.normal(0, 3, N)
actual_press = 1000 + 30*np.sin(week / 3) + rng.normal(0, 4, N)
wind_dir = rng.choice(["N", "E", "S", "W"], N, p=[0.15, 0.15, 0.4, 0.3])
weather = np.full(N, "cloudy")
weather[(actual_press > 1015) & (actual_temp > 18)] = "clear"
weather[(weather=="cloudy") & (wind_dir=="W")] = "rainy"
weather[(weather=="cloudy") & (wind_dir=="S") & rng.choice([0,1], N)] = "rainy"
# The measurements for these are not accurate either
temp = np.round(actual_temp + rng.normal(0, 1, N))
press = np.round(actual_press + rng.normal(0, 1, N))
# Create a pandas data frame out of the measurements
data = pd.DataFrame({"Weather": weather, "Temp": temp, "Press": press, "Wind": wind_dir})
print("Sample of the data:")
print(data)
# Plot the weather for one "year"
for (forecast, marker, color) in [("cloudy", "$\u2601$", "gray"),
("clear", "$\u2600$", "orange"),
("rainy", "$\u2602$", "blue")]:
plt.scatter(week[weather==forecast], temp[weather==forecast],
marker=marker, color=color)
plt.title("Weather for Entropyville")
plt.xlabel("Week")
plt.ylabel("Temperature")
plt.xlim((0, 50))
plt.savefig("discrete_temp_weather.png", transparent=True)
#
# Fix-up step for pandas DataFrames
#
print("\nFix up data")
# Not the most optimal code, but sufficient in small example
data2 = data.drop(columns=["Weather", "Wind"])
data2["Wind"] = 0
data2.loc[data["Wind"] == "E", "Wind"] = 1
data2.loc[data["Wind"] == "S", "Wind"] = 2
data2.loc[data["Wind"] == "W", "Wind"] = 3
data2["Weather"] = 0
data2.loc[data["Weather"] == "cloudy", "Weather"] = 1
data2.loc[data["Weather"] == "clear", "Weather"] = 2
print(data2)
print(data2.dtypes)
#
# Correlation between continuous variables and weather
#
print("\MI between continuous variables and weather")
print(estimate_mi(data2["Weather"], data2[["Temp", "Press"]], discrete_y=True))
print("Entropy of Weather")
print(estimate_entropy(data2["Weather"], discrete=True))
#
# Conditioning on temperature
#
print("\nConditioned on temperature")
print(estimate_mi(data2["Weather"], data2["Press"], cond=data2["Temp"], discrete_y=True))
#
# Wind
#
print("\nMI between wind and weather")
print(estimate_mi(data2["Weather"], data2["Wind"], discrete_y=True, discrete_x=True))
print("MI between wind and continuous variables")
print(estimate_mi(data2["Wind"], data2[["Temp", "Press"]], discrete_y=True))
# Uncomment to get a warning
#print("\nConditioned on temperature and pressure")
#print(estimate_mi(data2["Weather"], data2["Wind"],
# cond=data2[["Temp","Press"]], discrete_y=True, discrete_x=True))
#
# Pairwise MI
#
print("\nPairwise MI")
print(pairwise_mi(data2, discrete=[False, False, True, True]))
|
1665810
|
import torch
import torch.nn as nn
import torch_tensorrt.fx.tracer.acc_tracer.acc_ops as acc_ops
from parameterized import parameterized
from torch.testing._internal.common_utils import run_tests
from torch_tensorrt.fx.tools.common_fx2trt import AccTestCase, InputTensorSpec
class TestChunkConverter(AccTestCase):
@parameterized.expand(
[
("chunk", 3, 1),
("chunk", 2000, 2),
("chunk", 3, -2),
]
)
def test_chunk(self, _, chunk, dim):
class Chunk(nn.Module):
def forward(self, x):
return x.chunk(chunk, dim)[0]
inputs = [torch.randn(3, 10, 20)]
self.run_test(
Chunk(),
inputs,
expected_ops={acc_ops.chunk},
)
@parameterized.expand(
[
("chunk", 3, 1),
("chunk", 2000, 1),
("chunk", 3, -2),
]
)
def test_chunk_with_dynamic_shape(self, _, chunk, dim):
class Chunk(nn.Module):
def forward(self, x):
return x.chunk(chunk, dim)[0]
input_specs = [
InputTensorSpec(
shape=(-1, 10, -1),
dtype=torch.float32,
shape_ranges=[((1, 10, 20), (5, 10, 20), (10, 10, 20))],
),
]
self.run_test_with_dynamic_shape(
Chunk(), input_specs, expected_ops={acc_ops.chunk}
)
if __name__ == "__main__":
run_tests()
|
1665816
|
import unittest
from os.path import dirname, join
from pathlib import Path
import numpy as np
from jmetal.core.quality_indicator import GenerationalDistance, InvertedGenerationalDistance, EpsilonIndicator, \
HyperVolume
class GenerationalDistanceTestCases(unittest.TestCase):
""" Class including unit tests for class GenerationalDistance
"""
def test_should_constructor_create_a_non_null_object(self) -> None:
indicator = GenerationalDistance([])
self.assertIsNotNone(indicator)
def test_get_name_return_the_right_value(self):
self.assertEqual("Generational Distance", GenerationalDistance([]).get_name())
def test_get_short_name_return_the_right_value(self):
self.assertEqual("GD", GenerationalDistance([]).get_short_name())
def test_case1(self):
"""
Case 1. Reference front: [[1.0, 1.0]], front: [[1.0, 1.0]]
Expected result: the distance to the nearest point of the reference front is 0.0
:return:
"""
indicator = GenerationalDistance(np.array([[1.0, 1.0]]))
front = np.array([[1.0, 1.0]])
result = indicator.compute(front)
self.assertEqual(0.0, result)
def test_case2(self):
"""
        Case 2. Reference front: [[1.0, 1.0], [2.0, 2.0]], front: [[1.0, 1.0]]
Expected result: the distance to the nearest point of the reference front is 0.0
:return:
"""
indicator = GenerationalDistance(np.array([[1.0, 1.0], [2.0, 2.0]]))
front = np.array([[1.0, 1.0]])
result = indicator.compute(front)
self.assertEqual(0.0, result)
def test_case3(self):
"""
Case 3. Reference front: [[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]], front: [[1.0, 1.0, 1.0]]
Expected result: the distance to the nearest point of the reference front is 0.0. Example with three objectives
:return:
"""
indicator = GenerationalDistance(np.array([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]]))
front = np.array([[1.0, 1.0, 1.0]])
result = indicator.compute(front)
self.assertEqual(0.0, result)
def test_case4(self):
"""
Case 4. reference front: [[1.0, 1.0], [2.0, 2.0]], front: [[1.5, 1.5]]
Expected result: the distance to the nearest point of the reference front is the euclidean distance to any of the
points of the reference front
:return:
"""
indicator = GenerationalDistance(np.array([[1.0, 1.0], [2.0, 2.0]]))
front = np.array([[1.5, 1.5]])
result = indicator.compute(front)
self.assertEqual(np.sqrt(pow(1.0 - 1.5, 2) + pow(1.0 - 1.5, 2)), result)
self.assertEqual(np.sqrt(pow(2.0 - 1.5, 2) + pow(2.0 - 1.5, 2)), result)
def test_case5(self):
"""
Case 5. reference front: [[1.0, 1.0], [2.1, 2.1]], front: [[1.5, 1.5]]
Expected result: the distance to the nearest point of the reference front is the euclidean distance
to the nearest point of the reference front ([1.0, 1.0])
:return:
"""
indicator = GenerationalDistance(np.array([[1.0, 1.0], [2.1, 2.1]]))
front = np.array([[1.5, 1.5]])
result = indicator.compute(front)
self.assertEqual(np.sqrt(pow(1.0 - 1.5, 2) + pow(1.0 - 1.5, 2)), result)
self.assertEqual(np.sqrt(pow(2.0 - 1.5, 2) + pow(2.0 - 1.5, 2)), result)
def test_case6(self):
"""
Case 6. reference front: [[1.0, 1.0], [2.1, 2.1]], front: [[1.5, 1.5], [2.2, 2.2]]
Expected result: the distance to the nearest point of the reference front is the average of the sum of each point
of the front to the nearest point of the reference front
:return:
"""
indicator = GenerationalDistance(np.array([[1.0, 1.0], [2.1, 2.1]]))
front = np.array([[1.5, 1.5], [2.2, 2.2]])
result = indicator.compute(front)
distance_of_first_point = np.sqrt(pow(1.0 - 1.5, 2) + pow(1.0 - 1.5, 2))
distance_of_second_point = np.sqrt(pow(2.1 - 2.2, 2) + pow(2.1 - 2.2, 2))
self.assertEqual((distance_of_first_point + distance_of_second_point) / 2.0, result)
def test_case7(self):
"""
Case 7. reference front: [[1.0, 1.0], [2.1, 2.1]], front: [[1.5, 1.5], [2.2, 2.2], [1.9, 1.9]]
        Expected result: the average of the distances from each point of the front to the
        nearest point of the reference front
:return:
"""
indicator = GenerationalDistance(np.array([[1.0, 1.0], [2.1, 2.1]]))
front = np.array([[1.5, 1.5], [2.2, 2.2], [1.9, 1.9]])
result = indicator.compute(front)
distance_of_first_point = np.sqrt(pow(1.0 - 1.5, 2) + pow(1.0 - 1.5, 2))
distance_of_second_point = np.sqrt(pow(2.1 - 2.2, 2) + pow(2.1 - 2.2, 2))
distance_of_third_point = np.sqrt(pow(2.1 - 1.9, 2) + pow(2.1 - 1.9, 2))
self.assertEqual((distance_of_first_point + distance_of_second_point + distance_of_third_point) / 3.0, result)
class InvertedGenerationalDistanceTestCases(unittest.TestCase):
""" Class including unit tests for class InvertedGenerationalDistance
"""
def test_should_constructor_create_a_non_null_object(self) -> None:
indicator = InvertedGenerationalDistance([])
self.assertIsNotNone(indicator)
def test_get_name_return_the_right_value(self):
self.assertEqual("Inverted Generational Distance", InvertedGenerationalDistance([]).get_name())
def test_get_short_name_return_the_right_value(self):
self.assertEqual("IGD", InvertedGenerationalDistance([]).get_short_name())
def test_case1(self):
"""
Case 1. Reference front: [[1.0, 1.0]], front: [[1.0, 1.0]]
Expected result = 0.0
Comment: simplest case
:return:
"""
indicator = InvertedGenerationalDistance(np.array([[1.0, 1.0]]))
front = np.array([[1.0, 1.0]])
result = indicator.compute(front)
self.assertEqual(0.0, result)
def test_case2(self):
"""
        Case 2. Reference front: [[1.0, 1.0], [2.0, 2.0]], front: [[1.0, 1.0]]
Expected result: average of the sum of the distances of the points of the reference front to the front
:return:
"""
indicator = InvertedGenerationalDistance(np.array([[1.0, 1.0], [2.0, 2.0]]))
front = np.array([[1.0, 1.0]])
result = indicator.compute(front)
distance_of_first_point = np.sqrt(pow(1.0 - 1.0, 2) + pow(1.0 - 1.0, 2))
distance_of_second_point = np.sqrt(pow(2.0 - 1.0, 2) + pow(2.0 - 1.0, 2))
self.assertEqual((distance_of_first_point + distance_of_second_point) / 2.0, result)
def test_case3(self):
"""
Case 3. Reference front: [[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]], front: [[1.0, 1.0, 1.0]]
Expected result: average of the sum of the distances of the points of the reference front to the front.
Example with three objectives
:return:
"""
indicator = InvertedGenerationalDistance(np.array([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]]))
front = np.array([[1.0, 1.0, 1.0]])
result = indicator.compute(front)
distance_of_first_point = np.sqrt(pow(1.0 - 1.0, 2) + pow(1.0 - 1.0, 2) + pow(1.0 - 1.0, 2))
distance_of_second_point = np.sqrt(pow(2.0 - 1.0, 2) + pow(2.0 - 1.0, 2) + pow(2.0 - 1.0, 2))
self.assertEqual((distance_of_first_point + distance_of_second_point) / 2.0, result)
def test_case4(self):
"""
Case 4. reference front: [[1.0, 1.0], [2.1, 2.1]], front: [[1.5, 1.5], [2.2, 2.2]]
Expected result: average of the sum of the distances of the points of the reference front to the front.
        Example with two objectives
:return:
"""
indicator = InvertedGenerationalDistance(np.array([[1.0, 1.0], [2.1, 2.1]]))
front = np.array([[1.5, 1.5], [2.2, 2.2]])
result = indicator.compute(front)
distance_of_first_point = np.sqrt(pow(1.0 - 1.5, 2) + pow(1.0 - 1.5, 2))
distance_of_second_point = np.sqrt(pow(2.1 - 2.2, 2) + pow(2.1 - 2.2, 2))
self.assertEqual((distance_of_first_point + distance_of_second_point) / 2.0, result)
def test_case5(self):
"""
        Case 5. reference front: [[1.0, 1.0], [2.0, 2.0]], front: [[1.5, 1.5], [2.2, 2.2], [1.9, 1.9]]
Expected result: average of the sum of the distances of the points of the reference front to the front.
        Example with two objectives and three points in the front
:return:
"""
indicator = InvertedGenerationalDistance(np.array([[1.0, 1.0], [2.0, 2.0]]))
front = np.array([[1.5, 1.5], [2.2, 2.2], [1.9, 1.9]])
result = indicator.compute(front)
distance_of_first_point = np.sqrt(pow(1.0 - 1.5, 2) + pow(1.0 - 1.5, 2))
distance_of_second_point = np.sqrt(pow(2.0 - 1.9, 2) + pow(2.0 - 1.9, 2))
self.assertEqual((distance_of_first_point + distance_of_second_point) / 2.0, result)
class EpsilonIndicatorTestCases(unittest.TestCase):
""" Class including unit tests for class EpsilonIndicator
"""
def test_should_constructor_create_a_non_null_object(self) -> None:
indicator = EpsilonIndicator(np.array([[1.0, 1.0], [2.0, 2.0]]))
self.assertIsNotNone(indicator)
class HyperVolumeTestCases(unittest.TestCase):
def setUp(self):
self.file_path = dirname(join(dirname(__file__)))
def test_should_hypervolume_return_5_0(self):
reference_point = [2, 2, 2]
front = np.array([[1, 0, 1], [0, 1, 0]])
hv = HyperVolume(reference_point)
value = hv.compute(front)
self.assertEqual(5.0, value)
def test_should_hypervolume_return_the_correct_value_when_applied_to_the_ZDT1_reference_front(self):
filename = 'jmetal/core/test/ZDT1.pf'
front = []
if Path(filename).is_file():
with open(filename) as file:
for line in file:
vector = [float(x) for x in line.split()]
front.append(vector)
else:
print("error")
reference_point = [1, 1]
hv = HyperVolume(reference_point)
value = hv.compute(np.array(front))
self.assertAlmostEqual(0.666, value, delta=0.001)
if __name__ == '__main__':
unittest.main()
|
1665834
|
from __future__ import absolute_import
import kodiak.colbuilders as builders
def is_number(s):
try:
complex(str(s))
except ValueError:
return False
return True
class ComposerTransform(object):
def __init__(self, transforms):
"""
Arguments:
transforms: a list of transforms
"""
self.transforms = transforms
def transform(self, match):
for t in self.transforms:
match = t.transform(match)
return match
class PropertyTransform(object):
def transform(self, match):
"""Adds to the `Match` object `payload` the `default_colbuilder`: `colbuilders.as_attribute`
Args:
match (Match): The `Match` object that is going to be enriched.
Returns:
Match: The enriched `Match` object with a `default_colbuilder` key in the `payload`
Raises:
ValueError: in case the `Match` value attribute is ambiguous.
"""
if match.value is None:
return match
if is_number(match.value):
return match
if match.value.startswith(".") and is_number(match.value[1:]):
raise ValueError(
"`%s` is ambiguous because: `%s` can't be interpreted as property"
% (match.value, match.value[1:]))
# XXX: This doesn't catch the error ".2" Decide interpretation of ".2"
if match.value.startswith(".") and match.value.endswith("!"):
raise ValueError(
"`%s` is ambiguous, name cannot start with `.` and end with `!`"
% match.value)
if match.value.startswith("."):
match.value = match.value[1:]
match.payload["default_colbuilder"] = builders.as_attribute
return match
class MethodTransform(object):
def transform(self, match):
"""Adds to the `Match` object `payload` the `default_colbuilder`: `colbuilders.as_method`
Args:
match (Match): The `Match` object that is going to be enriched.
Returns:
Match: The enriched `Match` object with a `default_colbuilder` key in the `payload`
Raises:
ValueError: in case the `Match` value attribute is ambiguous.
"""
if match.value is None:
return match
if is_number(match.value):
return match
if match.value.endswith("!") and is_number(match.value[:-1]):
raise ValueError(
"`%s` is ambiguous because: `%s` can't be interpreted as method"
% (match.value, match.value[:-1]))
if match.value.startswith(".") and match.value.endswith("!"):
raise ValueError(
"`%s` is ambiguous, name cannot start with `.` and end with `!`"
% match.value)
if match.value.endswith("!"):
match.value = match.value[:-1]
# Maybe here we could catch methods whose arity is > 0
match.payload["default_colbuilder"] = builders.as_method
return match
class IntTransform(object):
def transform(self, match):
pass
default_transform = ComposerTransform([PropertyTransform(), MethodTransform()])
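# Hedged demonstration (not part of the original module): run the default
# transform chain on a minimal stand-in for the Match objects it expects.
# _FakeMatch is hypothetical; the real Match type lives elsewhere in kodiak.
if __name__ == '__main__':
    class _FakeMatch(object):
        def __init__(self, value):
            self.value = value
            self.payload = {}
    m = default_transform.transform(_FakeMatch('.name'))
    print(m.value, m.payload['default_colbuilder'] is builders.as_attribute)  # name True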
|
1665844
|
import numpy as np
import matplotlib.pyplot as plt
import astropy.io.fits as fits
import os
import poppy
from .main import GeminiPrimary
# Classes for dealing with AO Telemetry sets
class GPI_Globals(object):
""" Container for same constants as gpilib's gpi_globals,
with same variable names to ease porting of code. Plus some
other variables as needed."""
gpi_tweet_n = 48
gpi_woof_n = 9
gpi_numacross=43.2
gpi_tweet_spacing = GeminiPrimary.primary_diameter/gpi_numacross
gpi_woof_spacing = GeminiPrimary.primary_diameter/gpi_numacross*5.5
# below ones are not in gpi_globals
pupil_center_subap = 23
pupil_center_tweeter=23.5
pupil_center_woofer=4
class DeformableMirror(poppy.AnalyticOpticalElement):
""" Generic deformable mirror, of the continuous face sheet variety"""
def __init__(self, shape=(10,10)):
poppy.OpticalElement.__init__(self, planetype=poppy.poppy_core._PUPIL)
self._shape = shape # number of actuators
self._surface = np.zeros(shape) # array for the DM surface WFE
self.numacross = shape[0] # number of actuators across diameter of
# the optic's cleared aperture (may be
# less than full diameter of array)
self.actuator_spacing = 1.0/self.numacross # distance between actuators,
# projected onto the primary
self.pupil_center = (shape[0]-1.)/2 # center of clear aperture in actuator units
# (may be offset from center of DM)
@property
def shape(self):
return self._shape
@property
def surface(self):
""" The surface shape of the deformable mirror, in
**meters** """
return self._surface
def set_surface(self, new_surface, units='nm'):
""" Set the entire surface shape of the DM.
Parameters
-------------
new_surface : 2d ndarray
Desired DM surface shape
(note that wavefront error will be 2x this)
units : string
Right now this *must* be 'nm' for nanometers,
which is the default. Other units may be added later
if needed.
"""
assert new_surface.shape == self.shape
if units!='nm':
raise NotImplementedError("Units other than nanometers not yet implemented.")
self._surface[:] = np.asarray(new_surface, dtype=float)*1e-9
def set_actuator(self, actx, acty, new_value, units='nm'):
""" Set the entire surface shape of the DM.
Parameters
-------------
actx, acty : integers
Coordinates of the actuator you wish to control
new_value : float
Desired surface height for that actuator
(note that wavefront error will be 2x this)
units : string
Right now this *must* be 'nm' for nanometers,
which is the default. Other units may be added later
if needed.
Example
-----------
dm.set_actuator(12,22, 123.4)
"""
# FIXME do something more comprehensive with units
assert units=='nm'
if actx < 0 or actx > self.shape[1]-1:
raise ValueError("X axis coordinate is out of range")
if acty < 0 or acty > self.shape[0]-1:
raise ValueError("Y axis coordinate is out of range")
self._surface[acty, actx] = new_value*1e-9
def get_coordinates(self, one_d=False):
""" Y and X coordinates for the actuators
Parameters
------------
one_d : bool
Return 1-dimensional arrays of coordinates per axis?
Default is to return 2D arrays with same shape as full array.
"""
y_act = (np.arange(self.shape[0])-self.pupil_center)*self.actuator_spacing
x_act = (np.arange(self.shape[1])-self.pupil_center)*self.actuator_spacing
if not one_d: # convert to 2D
y_act.shape = (self.shape[0],1)
y_act = y_act * np.ones( (1, self.shape[1]))
x_act.shape = (1, self.shape[1])
x_act = x_act * np.ones( (self.shape[0], 1))
return y_act, x_act
def get_opd(self,wave):
""" Return the surface optical path delay for the optic.
Interpolates from the current optic surface state onto the
desired coordinates for the wave.
CAUTION: This right now uses a fairly simple representation
of the actuator influence function, which should not be
taken too seriously just yet.
"""
# the following could be replaced with a higher fidelity model if needed
interpolated_surface = self._get_surface_via_gaussian_influence_functions(wave)
return interpolated_surface
#phasor = np.exp(1.j * 2 * np.pi * interpolated_surface/wave.wavelength)
#return phasor
def _get_surface_via_gaussian_influence_functions(self, wave):
""" Infer a finely-sampled surface from simple Gaussian influence functions centered on
each actuator.
Work in progress, oversimplified, not a great representation of the true influence function
"""
y, x = wave.coordinates()
y_act, x_act = self.get_coordinates(one_d=True)
interpolated_surface = np.zeros(wave.shape)
        crosstalk = 0.15  # amount of crosstalk on adjacent actuators
sigma = self.actuator_spacing/np.sqrt((-np.log(crosstalk)))
pixelscale = x[0,1]-x[0,0] # scale of x,y
boxsize = (3*sigma)/pixelscale # half size for subarray
for yi, yc in enumerate(y_act):
for xi, xc in enumerate(x_act):
if self._surface[yi,xi] == 0: continue
# 2d Gaussian
r = ((x - xc)**2 + (y-yc)**2)/sigma**2
interpolated_surface += self._surface[yi,xi] * np.exp(-r)
return interpolated_surface
def display(self, annotate=False, grid=False, what='opd', crosshairs=False, *args, **kwargs):
"""Display an Analytic optic by first computing it onto a grid.
Parameters
----------
wavelength : float
Wavelength to evaluate this optic's properties at
npix : int
Number of pixels to use when sampling the optical element.
what : str
What to display: 'intensity', 'surface' or 'phase', or 'both'
ax : matplotlib.Axes instance
Axes to display into
nrows, row : integers
# of rows and row index for subplot display
crosshairs : bool
Display crosshairs indicating the center?
colorbar : bool
Show colorbar?
colorbar_orientation : bool
Desired orientation, horizontal or vertical?
Default is horizontal if only 1 row of plots, else vertical
opd_vmax : float
Max value for OPD image display, in meters.
title : string
Plot label
"""
if what=='both': raise NotImplementedError('still need to implement display both mode for display_actuators')
kwargs['crosshairs']= crosshairs
kwargs['what'] = what
returnvalue = poppy.AnalyticOpticalElement.display(self, *args, **kwargs)
if annotate: self.annotate()
if grid: self.annotate_grid()
return returnvalue
def display_actuators(self, annotate=False, grid=True, what='surface', crosshairs=False, *args, **kwargs):
""" Display the optical surface, viewed as discrete actuators
Parameters
------------
annotate : bool
Annotate coordinates and types of actuators on the display? Default false.
grid : bool
Annotate grid of actuators on the display? Default true.
what : string
What to display: 'intensity' transmission, 'surface' or 'phase', or 'both'
"""
# display in DM coordinates
# temporarily set attributes appropriately as if this were a regular OpticalElement
self.amplitude = np.ones_like(self.surface)
self.opd = self.surface
self.pixelscale = self.actuator_spacing
# back compatibility for older poppy syntax (which is confusing)
if what=='surface': what='phase'
#then call parent class display
returnvalue = poppy.OpticalElement.display(self, what=what, crosshairs=crosshairs, **kwargs)
# now un-set all the temporary attributes, since this is analytic and
# these are unneeded
del self.pixelscale
del self.opd
del self.amplitude
if annotate: self.annotate()
if grid: self.annotate_grid()
return returnvalue
def annotate(self, marker='o', **kwargs):
""" Overplot actuator coordinates on some already-existing pupil display
"""
yc, xc = self.get_coordinates()
ax = plt.gca()
# jump through some hoops to avoid autoscaling the X,Y coords
# of the prior plot here, but retain the autoscale state
autoscale_state = (ax._autoscaleXon, ax._autoscaleYon)
ax.autoscale(False)
plt.scatter(xc, yc, marker=marker, **kwargs)
ax._autoscaleXon, ax._autoscaleYon = autoscale_state
def annotate_grid(self, linestyle=":", color="black", **kwargs):
y_act, x_act = self.get_coordinates(one_d=True)
ax = plt.gca()
for x in x_act:
plt.axvline(x+ (self.actuator_spacing/2), linestyle=linestyle, color=color)
for y in y_act:
plt.axhline(y+ (self.actuator_spacing/2), linestyle=linestyle, color=color)
class GPITweeter(DeformableMirror):
def __init__(self, mems_print_through=True):
DeformableMirror.__init__(self, shape=(GPI_Globals.gpi_tweet_n, GPI_Globals.gpi_tweet_n))
self.name = "<NAME>"
self.numacross = GPI_Globals.gpi_numacross
self.actuator_spacing = GPI_Globals.gpi_tweet_spacing
self.pupil_center = GPI_Globals.pupil_center_tweeter
self.pupil_diam = GPI_Globals.gpi_tweet_n*GPI_Globals.gpi_tweet_spacing # for display, projected full area around 48x48 subaps
self.mems_print_through = mems_print_through
self._mems_print_through_amplitude = 15e-9
# 15 nm, estimated from http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.513.4649&rep=rep1&type=pdf
my_path=os.path.abspath(os.path.dirname(__file__))
self._actuator_type_info = fits.open(os.path.join(my_path, 'data','GPI_tweeter_actuators.fits'))
@property
def bad_actuators(self):
"""Returns a list of coordinate indices for the actuators which are
nonoperable """
act_map = self._actuator_type_info
wflagged = np.where( ( act_map[0].data == act_map[0].header['DEAD']) |
( act_map[0].data == act_map[0].header['WEAK']) )
output = []
for i in range(len(wflagged[0])):
yc,xc = wflagged[0][i], wflagged[1][i]
label = 'DEAD' if (act_map[0].data[yc,xc] == act_map[0].header['DEAD'] ) else 'WEAK'
output.append([xc,yc,label])
return output
def get_opd(self, wave):
opd = DeformableMirror.get_opd(self,wave)
if self.mems_print_through:
mems_print_through_opd = self._get_opd_MEMS_print_through(wave)
opd += mems_print_through_opd
return opd
def _get_opd_MEMS_print_through(self,wave):
""" DM surface print through """
# GPI tweeter actuators are reimaged to 18 cm subapertures
# Boston DM print through info in:
# http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.513.4649&rep=rep1&type=pdf
# ao4elt.lesia.obspm.fr/sites/ao4elt/IMG/ppt/Bifano.ppt
# in horizontal direction, the print through is about 35/190 pixels = 18% of the width
# in the vertical direction, closer to 31%, but it's more like 2 narrow bars each 10% wide
# and there's a 10% wide dot in the middle of it too
#printthrough properties:
pt_col_width = 0.18
# 15 nm, estimated from http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.513.4649&rep=rep1&type=pdf
pt_col_value = self._mems_print_through_amplitude
pt_row_width = 0.10
pt_row_value = -1 * self._mems_print_through_amplitude
if not isinstance(wave, poppy.Wavefront): # pragma: no cover
raise ValueError("getPhasor must be called with a Wavefront to define the spacing")
assert (wave.planetype == poppy.poppy_core._PUPIL)
opd = np.zeros(wave.shape)
y, x = wave.coordinates()
pixscale = x[0,1] - x[0,0]
opd[np.mod(x,self.actuator_spacing) <= (self.actuator_spacing*pt_col_width)] += pt_row_value
opd[np.mod(y,self.actuator_spacing) <= (self.actuator_spacing*pt_col_width)] += pt_col_value
return opd
#phasor = np.exp(1.j * 2 * np.pi * opd/wave.wavelength)
#return phasor
def annotate(self, markbad=True, badmarker='o', marker='+', **kwargs):
# first plot all the normal ones
DeformableMirror.annotate(self, marker=marker, **kwargs)
if markbad:
# now the less-than-good ones
yc, xc = self.get_coordinates()
ax = plt.gca()
autoscale_state = (ax._autoscaleXon, ax._autoscaleYon)
ax.autoscale(False)
act_map = self._actuator_type_info
for act_type, color in zip(['DEAD', 'COUPLED', 'WEAK','VARIABLE'],
['red', 'orange', 'brown', 'magenta']):
wflagged = np.where(act_map[0].data == act_map[0].header[act_type])
plt.scatter(xc[wflagged], yc[wflagged], marker=badmarker, color=color)
ax._autoscaleXon, ax._autoscaleYon = autoscale_state
class GPIWoofer(DeformableMirror):
def __init__(self):
DeformableMirror.__init__(self, shape=(GPI_Globals.gpi_woof_n, GPI_Globals.gpi_woof_n))
self.name = "<NAME>"
self.pupil_diam = 8.6 # for display, projected full area around 48x48 subaps of tweeter
self.numacross = GPI_Globals.gpi_numacross
self.actuator_spacing = GPI_Globals.gpi_woof_spacing
self.pupil_center = GPI_Globals.pupil_center_woofer
def annotate(self, marker='s', color='teal', s=50, alpha=0.4, **kwargs):
""" Annotate the DM actuator coordinates.
Applies some cosmetic defaults to distinguish Woofer from Tweeter actuators
"""
DeformableMirror.annotate(self, marker=marker, color=color, s=s, alpha=alpha, **kwargs)
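# Hedged usage sketch (not part of the original module, illustrative only):
# poke one tweeter actuator and evaluate the interpolated surface on a poppy
# wavefront. Requires the packaged GPI_tweeter_actuators.fits data file; the
# actuator index, poke amplitude, and sampling below are placeholders.
if __name__ == '__main__':
    dm = GPITweeter(mems_print_through=False)
    dm.set_actuator(24, 24, 100.0)                 # 100 nm poke near pupil center
    wave = poppy.Wavefront(npix=256, diam=dm.pupil_diam)
    print(dm.get_opd(wave).max())                  # peak surface OPD, in meters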
|
1665867
|
from __future__ import annotations
import contextlib
import sys
import traceback
from typing import Generator
from typing import NoReturn
class MyCustomError(RuntimeError):
pass
def g() -> NoReturn:
raise AssertionError('hi')
def f() -> NoReturn:
g()
@contextlib.contextmanager
def simulate_running() -> Generator[None, None, None]:
try:
yield
except Exception:
traceback.print_exc()
def test1():
try:
f()
except AssertionError as e:
raise MyCustomError(e)
def test2():
try:
f()
except AssertionError as e:
exc_info = sys.exc_info()
raise MyCustomError(e).with_traceback(exc_info[2])
def main():
print('*' * 79)
with simulate_running():
test1()
print('*' * 79)
print('Notice how the stacktrace does not contain f() or g() at all')
print('You can fix that however')
print('*' * 79)
with simulate_running():
test2()
print('*' * 79)
if __name__ == '__main__':
raise SystemExit(main())
OUTPUT = """\
*******************************************************************************
Traceback (most recent call last):
File "python/best_practices_reraising_exceptions.py", line 31, in test1
f()
File "python/best_practices_reraising_exceptions.py", line 18, in f
g()
File "python/best_practices_reraising_exceptions.py", line 14, in g
raise AssertionError('hi')
AssertionError: hi
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "python/best_practices_reraising_exceptions.py", line 24, in simulate_running
yield
File "python/best_practices_reraising_exceptions.py", line 47, in main
test1()
File "python/best_practices_reraising_exceptions.py", line 33, in test1
raise MyCustomError(e)
MyCustomError: hi
*******************************************************************************
Notice how the stacktrace does not contain f() or g() at all
You can fix that however
*******************************************************************************
Traceback (most recent call last):
File "python/best_practices_reraising_exceptions.py", line 38, in test2
f()
File "python/best_practices_reraising_exceptions.py", line 18, in f
g()
File "python/best_practices_reraising_exceptions.py", line 14, in g
raise AssertionError('hi')
AssertionError: hi
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "python/best_practices_reraising_exceptions.py", line 24, in simulate_running
yield
File "python/best_practices_reraising_exceptions.py", line 53, in main
test2()
File "python/best_practices_reraising_exceptions.py", line 41, in test2
raise MyCustomError(e).with_traceback(exc_info[2])
File "python/best_practices_reraising_exceptions.py", line 38, in test2
f()
File "python/best_practices_reraising_exceptions.py", line 18, in f
g()
File "python/best_practices_reraising_exceptions.py", line 14, in g
raise AssertionError('hi')
MyCustomError: hi
*******************************************************************************
""" # noqa: E501
|
1665906
|
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import yfinance as yf
from pandas_datareader import data as pdr
import xlsxwriter
import requests
from yahoo_fin import stock_info as si
import pickle
import bs4 as bs
# You need to change this to a convenient spot on your own hard drive.
my_path = '/Users/shashank/Downloads/Code/Finance'
threshold = 0.80
# The list of S&P 500 components is scraped from Wikipedia below; historical prices are
# then saved to CSV files with column headers that include "Symbol", "Date" and "Close".
def save_spx_tickers():
resp = requests.get('https://en.wikipedia.org/wiki/List_of_S%26P_500_companies')
soup = bs.BeautifulSoup(resp.text, 'lxml')
table = soup.find('table', {'class':'wikitable sortable'})
tickers = []
for row in table.findAll('tr')[1:]:
ticker = row.find_all('td')[0].text.strip()
tickers.append(ticker)
with open('spxTickers.pickle', 'wb') as f:
pickle.dump(tickers, f)
return tickers
sp500_tickers = save_spx_tickers()
# Make the ticker symbols readable by Yahoo Finance
sp500_tickers = [item.replace(".", "-") for item in sp500_tickers]
# Build a dataframe of the S&P 500 component tickers.
mylist= []
mylist2 = []
df_sp500_tickers = pd.DataFrame(list(zip(sp500_tickers)), columns =['Symbol'])
# This module loops through the S&P 500 tickers, downloads the data from Yahoo and creates a separate CSV
# file of historical data for each ticker (e.g. AAPL.csv).
# Skip this routine if you already have the CSV files available.
'''
for index, ticker in df_sp500_tickers.iterrows():
global df
my_ticker = ticker['Symbol']
yf_ticker = yf.Ticker(my_ticker)
data = yf_ticker.history(period="max")
df = pd.DataFrame(data)
df.reset_index(level=0, inplace=True)
df['Symbol'] = my_ticker
df = df[['Symbol','Date','Close']]
df.drop_duplicates(subset ="Date", keep = 'first', inplace = True) #Yahoo has a tendency to duplicate the last row.
df.to_csv(path_or_buf = my_path + "/data/" + my_ticker +".csv", index=False)
'''
# Creates the dataframe container for the stats data.
df_tradelist = pd.DataFrame(index=[], columns=['my_ticker', 'hold_per', 'pct_uprows', 'max_up_return', 'min_up_return', 'avg_up_return', 'avg_down_return', 'exp_return', 'stdev_returns', 'pct_downside', 'worst_return', 'least_pain_pt', 'total_years', 'max_consec_beat', 'best_buy_date', 'best_sell_date', 'analyzed_years'])
df_tradelist.head()
# Convert prices to holding period returns based on 20 trading days per month.
def convert_prices_to_periods():
global dperiods
global dfr
dfr = df.pct_change(periods = dperiods)
dfr.reset_index(level=0, inplace=True)
dfr.rename(columns={'Close':'Returns'}, inplace=True)
dfr = dfr.round(4)
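# A hedged illustration (not part of the original pipeline): pct_change(periods=n)
# computes Close[t] / Close[t-n] - 1 row by row, so with dperiods = 20 every value
# becomes the trailing one-month (20 trading day) return, e.g.
#   pd.Series([100, 110, 121]).pct_change(periods=1)  ->  [NaN, 0.10, 0.10]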
# Separate out the date column into separate month, year and day values.
def separate_date_column():
global dfr
dfr['Month'] = pd.DatetimeIndex(dfr['Date']).month
dfr['Day'] = pd.DatetimeIndex(dfr['Date']).day
dfr['Year'] = pd.DatetimeIndex(dfr['Date']).year
dfr['M-D'] = dfr['Month'].astype(str)+'-'+dfr['Day'].astype(str)
pd.set_option('display.max_rows', len(dfr))
# Pivot the table to show years across the top and Month-Day values in the first column on the left.
def pivot_the_table():
global dfr_pivot
dfr_pivot = dfr.pivot(index='M-D', columns='Year', values='Returns')
dfr_pivot.reset_index(level=0, inplace=True)
dfr_pivot = pd.DataFrame(dfr_pivot)
dfr_pivot.columns.name="Index"
# The pivot operation created empty cells for weekends and holidays, so I filled them with EOD values from
# the previous trading day.
dfr_pivot.fillna(method='ffill', inplace=True)
# As of this date, 1/22/2020, we are only evaluating results through 12/31/2019, so we will drop the
# 2020 year column.
if 2020 in dfr_pivot.columns:
dfr_pivot.drop(2020, axis=1, inplace=True)
# Add additional calculated columns to facilitate statistic calculations for each stock.
def add_calculated_items():
global dfr_pivot
global lookback
global start
# The lookback figure is the number (must be an integer) of years back from last year (2019) that you want to include in
# analysis, i.e. the calculations below. It's probably a good idea to keep it at 20 years or less
# to reflect more recent market conditions.
lookback = 20
start = 1
if lookback > len(dfr_pivot.columns) - 1:
start = 1
else:
start = len(dfr_pivot.columns) - lookback
dfr_pivot['YearCount'] = dfr_pivot.count(axis=1, numeric_only=True)
dfr_pivot['Lookback'] = lookback
dfr_pivot['UpCount'] = dfr_pivot[dfr_pivot.iloc[:,start:len(dfr_pivot.columns)-2] > 0].count(axis=1)
dfr_pivot['DownCount'] = dfr_pivot[dfr_pivot.iloc[:,start:len(dfr_pivot.columns)] < 0].count(axis=1)
dfr_pivot['PctUp'] = dfr_pivot['UpCount']/dfr_pivot['Lookback']
dfr_pivot['PctDown'] = dfr_pivot['DownCount']/dfr_pivot['Lookback']
dfr_pivot['AvgReturn'] = dfr_pivot.iloc[:,start:len(dfr_pivot.columns)-6].mean(axis=1)
dfr_pivot['StDevReturns'] = dfr_pivot.iloc[:,start:len(dfr_pivot.columns)-7].std(axis=1)
dfr_pivot['67PctDownside'] = dfr_pivot['AvgReturn']-dfr_pivot['StDevReturns']
dfr_pivot['MaxReturn'] = dfr_pivot.iloc[:,start:len(dfr_pivot.columns)-9].max(axis=1)
dfr_pivot['MinReturn'] = dfr_pivot.iloc[:,start:len(dfr_pivot.columns)-10].min(axis=1)
# Add a fictional date column in Python date/time format so the table can be sorted by date. Then sort by Date.
# Reset the index and round the float values to 4 decimals.
def sortbydate_resetindex_export():
global dfr_pivot
dfr_pivot['Date'] = '2000-' + dfr_pivot['M-D'].astype(str)
dfr_pivot['Date'] = pd.to_datetime(dfr_pivot['Date'], infer_datetime_format=True)
dfr_pivot.sort_values(by='Date',ascending=True, inplace=True)
dfr_pivot.reset_index(inplace=True)
dfr_pivot = dfr_pivot.round(4)
# Calculate the trading statistics for the rolling holding periods for the stock.
def calc_trading_stats():
global interval
global dfr_pivot
global pct_uprows
global max_up_return
global min_up_return
global avg_up_return
global avg_down_return
global exp_return
global stdev_returns
global pct_downside
global worst_return
global least_pain_pt
global total_years
global n_consec
global max_n_consec
global max_consec_beat
global best_sell_date
global best_buy_date
global analyzed_years
global lookback
pct_uprows = (dfr_pivot.loc[dfr_pivot['PctUp'] > threshold, 'PctUp'].count() / dfr_pivot.loc[:, 'PctUp'].count()).astype(float).round(4)
max_up_return = dfr_pivot.loc[dfr_pivot['PctUp'] > threshold, 'MaxReturn'].max()
min_up_return = dfr_pivot.loc[dfr_pivot['PctUp'] > threshold, 'MinReturn'].min()
avg_up_return = dfr_pivot.loc[dfr_pivot['PctUp'] > 0.5, 'AvgReturn'].mean()
avg_up_return = np.float64(avg_up_return).round(4)
avg_down_return = dfr_pivot.loc[dfr_pivot['PctDown'] > 0.5, 'AvgReturn'].mean()
avg_down_return = np.float64(avg_down_return).round(4)
exp_return = round(dfr_pivot['AvgReturn'].mean(), 4)
stdev_returns = dfr_pivot['StDevReturns'].mean()
stdev_returns = np.float64(stdev_returns).round(4)
worst_return = dfr_pivot['MinReturn'].min()
pct_downside = exp_return - stdev_returns
pct_downside = np.float64(pct_downside).round(4)
least_pain_pt = dfr_pivot.loc[dfr_pivot['PctUp'] > threshold, '67PctDownside'].max()
total_years = dfr_pivot['YearCount'].max()
analyzed_years = lookback
n_consec = 0
max_n_consec = 0
for x in dfr_pivot['PctUp']:
    if (x > threshold):
        n_consec += 1
    else:  # the streak is broken: check for a new max, then reset the counter
        max_n_consec = max(n_consec, max_n_consec)
        n_consec = 0
# account for a streak that runs through the last row
max_consec_beat = max(n_consec, max_n_consec)
try:
best_sell_date = dfr_pivot.loc[dfr_pivot['67PctDownside'] == least_pain_pt, 'M-D'].iloc[0]
except:
best_sell_date = "nan"
try:
row = dfr_pivot.loc[dfr_pivot['M-D'] == best_sell_date, 'M-D'].index[0] - interval
col = dfr_pivot.columns.get_loc('M-D')
best_buy_date = dfr_pivot.iloc[row,col]
except:
best_buy_date = "nan"
# If the pct_uprows and history conditions are met, then create the array of stat values and append
# it to the recommended trade list.
def filter_and_append_stats():
global statsdata
global df_statsdata
global df_tradelist
# Save the stats data separately to export to Excel for further research on each ticker if desired.
statsdata = np.array([my_ticker, hold_per, pct_uprows, max_up_return, min_up_return, avg_up_return, avg_down_return, exp_return, stdev_returns, pct_downside, worst_return, least_pain_pt, total_years, max_consec_beat, best_buy_date, best_sell_date, analyzed_years])
df_statsdata = pd.DataFrame(statsdata.reshape(-1, len(statsdata)), columns=['my_ticker', 'hold_per', 'pct_uprows', 'max_up_return', 'min_up_return', 'avg_up_return', 'avg_down_return', 'exp_return', 'stdev_returns', 'pct_downside', 'worst_return', 'least_pain_pt', 'total_years', 'max_consec_beat', 'best_buy_date', 'best_sell_date', 'analyzed_years'])
if pct_uprows > 0.1:
if total_years > 9:
df_tradelist = df_tradelist.append(dict(zip(df_tradelist.columns, statsdata)), ignore_index=True)
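# Note (an assumption about newer environments, not part of the original script):
# DataFrame.append() was deprecated and removed in pandas 2.0; on those versions the
# same row can be added with pd.concat, e.g.
#   new_row = pd.DataFrame([dict(zip(df_tradelist.columns, statsdata))])
#   df_tradelist = pd.concat([df_tradelist, new_row], ignore_index=True)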
# This module grabs each ticker file, transforms it and calculates the statistics needed for a 90 day holding period.
def calc_3month_returns():
global dfr
global dfr_pivot
global df_tradelist
global dfr_3mo
global df_statsdata_3mo
global threshold
global hold_per
global dperiods
global interval
dperiods = 60
hold_per = "3 Mos"
interval = 90
convert_prices_to_periods()
separate_date_column()
pivot_the_table()
add_calculated_items()
sortbydate_resetindex_export()
# Export the pivot table to CSV for further research if desired.
#dfr_pivot.to_csv(path_or_buf = my_path + "/data/" + my_ticker + "_dfr_pivot_3mo.csv", index=False)
# Save dfr_pivot to separate dataframe for exporting to Excel
dfr_3mo = pd.DataFrame(dfr_pivot)
calc_trading_stats()
filter_and_append_stats()
# Save statsdata to separate dataframe for exporting to Excel
df_statsdata_3mo = df_statsdata.copy()
# This module grabs each ticker file, transforms it and calculates the statistics needed for a 60 day holding period.
def calc_2month_returns():
global dfr
global dfr_pivot
global df_tradelist
global dfr_2mo
global df_statsdata_2mo
global threshold
global hold_per
global dperiods
global interval
dperiods = 40
hold_per = "2 Mos"
interval = 60
convert_prices_to_periods()
separate_date_column()
pivot_the_table()
add_calculated_items()
sortbydate_resetindex_export()
# Export the pivot table to CSV for further research if desired.
#dfr_pivot.to_csv(path_or_buf = my_path + "/data/" + my_ticker + "_dfr_pivot_2mo.csv", index=False)
# Save dfr_pivot to separate dataframe for exporting to Excel
dfr_2mo = pd.DataFrame(dfr_pivot)
calc_trading_stats()
filter_and_append_stats()
# Save statsdata to separate dataframe for exporting to Excel
df_statsdata_2mo = df_statsdata.copy()
# This module grabs each ticker file, transforms it and calculates the statistics needed for a 30 day holding period.
def calc_1month_returns():
global dfr
global dfr_pivot
global df_tradelist
global dfr_1mo
global df_statsdata_1mo
global threshold
global hold_per
global dperiods
global interval
dperiods = 20
hold_per = "1 Mo"
interval = 30
convert_prices_to_periods()
separate_date_column()
pivot_the_table()
add_calculated_items()
sortbydate_resetindex_export()
# Export the pivot table to CSV for further research if desired.
#dfr_pivot.to_csv(path_or_buf = my_path + "/data/" + my_ticker + "_dfr_pivot_1mo.csv", index=False)
# Save dfr_pivot to separate dataframe for exporting to Excel
dfr_1mo = pd.DataFrame(dfr_pivot)
calc_trading_stats()
filter_and_append_stats()
# Save statsdata to separate dataframe for exporting to Excel
df_statsdata_1mo = df_statsdata.copy()
# Build and export an Excel file for each ticker using XlsxWriter
def export_to_excel():
excel_file_path = my_path + "/data/" + my_ticker + ".xlsx"
# Create a Pandas Excel writer using XlsxWriter as the engine.
writer = pd.ExcelWriter(excel_file_path, engine='xlsxwriter')
# Convert the dataframe to an XlsxWriter Excel object.
df_statsdata_1mo.to_excel(writer, sheet_name='Stats', index=False)
df_statsdata_2mo.to_excel(writer, sheet_name='Stats', startrow=2, header=False, index=False)
df_statsdata_3mo.to_excel(writer, sheet_name='Stats', startrow=3, header=False, index=False)
dfr_1mo.to_excel(writer, sheet_name='1 Mo Returns', index=False)
dfr_2mo.to_excel(writer, sheet_name='2 Mo Returns', index=False)
dfr_3mo.to_excel(writer, sheet_name='3 Mo Returns', index=False)
# Get the xlsxwriter objects from the dataframe writer object.
workbook = writer.book
worksheet1 = writer.sheets['Stats']
worksheet2 = writer.sheets['1 Mo Returns']
worksheet3 = writer.sheets['2 Mo Returns']
worksheet4 = writer.sheets['3 Mo Returns']
# Add conditional formatting to highlight positive returns in green
end_column = dfr_1mo.columns.get_loc("YearCount")
grn_format = workbook.add_format({'bg_color': '#C6EFCE','font_color': '#006100'})
worksheet2.conditional_format(1, 2, 365, end_column - 1,{'type':'cell','criteria':'>','value':0,'format':grn_format})
worksheet3.conditional_format(1, 2, 365, end_column - 1,{'type':'cell','criteria':'>','value':0,'format':grn_format})
worksheet4.conditional_format(1, 2, 365, end_column - 1,{'type':'cell','criteria':'>','value':0,'format':grn_format})
# Freeze panes for scrolling
worksheet2.freeze_panes(1, 2)
worksheet3.freeze_panes(1, 2)
worksheet4.freeze_panes(1, 2)
# Save the file
writer.save()
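# Note (an assumption about newer environments): ExcelWriter.save() was deprecated in
# pandas 1.5 and removed in 2.0; on those versions close the writer instead:
#   writer.close()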
# Read CSV files by ticker, transform and extract stats from each one.
for index, ticker in df_sp500_tickers.iterrows():
global dfr
my_ticker = ticker['Symbol']
df = pd.read_csv (my_path + "/data/" + my_ticker + ".csv")
df.set_index('Date', inplace=True)
df = df['Close']
df = pd.DataFrame(df, columns=['Close'])
calc_1month_returns()
calc_2month_returns()
calc_3month_returns()
export_to_excel()
# Make a copy and convert the trade list to a Pandas dataframe.
df_tradelist_copy = df_tradelist.copy()
df_tradelist = pd.DataFrame(df_tradelist)
#df_tradelist.to_csv(path_or_buf = my_path + "/df_tradelist.csv", index=False)
#df_tradelist_copy.to_csv(path_or_buf = my_path + "/df_tradelist_copy.csv", index=False)
# Clean it up by removing rows with NaN's and infinity values and dropping duplicates.
df_tradelist.replace("inf", np.nan, inplace=True)
df_tradelist.dropna(inplace=True)
df_tradelist = df_tradelist[~df_tradelist.max_up_return.str.contains("nan")]
df_tradelist = df_tradelist[~df_tradelist.avg_down_return.str.contains("nan")]
df_tradelist.sort_values(by=['pct_uprows'], ascending=False, inplace=True)
df_tradelist.drop_duplicates(subset ="my_ticker", keep = 'first', inplace = True)
df_tradelist.tail(10)
df_tradelist.head()
#df_tradelist.shape
# Export the trade list to CSV files for execution and/or further research if desired.
df_tradelist.to_csv(path_or_buf = my_path + "/df_tradelist.csv", index=False)
|
1665978
|
import os
import math
from functools import singledispatch
from typing import overload, Union
import numba
import numpy as np
from .util import (
TempFileHolder,
glue_csv,
glue_hdf,
glue_parquet,
parse_csv,
parse_hdf,
parse_parquet,
_parallel_argsort,
)
try:
import pandas as pd
pandas_import = True
except ModuleNotFoundError:
pandas_import = False
if pandas_import:
# fmt: off
# function overloading for the correct return type depending on the input
@overload
def quantile_normalize(data: pd.DataFrame,
axis: int = 1,
target: Union[None, np.ndarray] = None,
ncpus: int = 1,
) -> pd.DataFrame: ...
@overload
def quantile_normalize(data: np.ndarray,
axis: int = 1,
target: Union[None, np.ndarray] = None,
ncpus: int = 1,
) -> np.ndarray: ...
# fmt: on
@singledispatch
def quantile_normalize(
data: Union[pd.DataFrame, np.ndarray],
axis: int = 1,
target: Union[None, np.ndarray] = None,
ncpus: int = 1,
) -> Union[pd.DataFrame, np.ndarray]:
"""
Quantile normalize your array/dataframe.
It does quantile normalization in the "correct" way in the sense that
it takes the mean of duplicate values instead of ignoring them.
Args:
data: numpy.ndarray or pandas.DataFrame to be normalized
axis: axis along to normalize. Axis=1 (default) normalizes each
column/sample which gives them identical distributions.
Axis=0 normalizes each row/feature giving them all identical
distributions.
target: distribution to normalize onto
ncpus: number of cpus to use for normalization
Returns: a quantile normalized copy of the input.
"""
raise NotImplementedError(
f"quantile_normalize not implemented for type {type(data)}"
)
@quantile_normalize.register(pd.DataFrame)
def quantile_normalize_pd(
data: pd.DataFrame,
axis: int = 1,
target: Union[None, np.ndarray] = None,
ncpus: int = 1,
) -> pd.DataFrame:
qn_data = data.copy()
# axis handling (including the transpose for axis=0) happens inside
# quantile_normalize_np, so the DataFrame wrapper simply forwards the call
qn_data[:] = quantile_normalize_np(
    qn_data.values.astype(float), axis, target, ncpus
)
return qn_data
def incremental_quantile_normalize(
infile: str,
outfile: str,
rowchunksize: int = 100_000,
colchunksize: int = 8,
ncpus: int = 1,
) -> None:
"""
Memory-efficient quantile normalization implementation by splitting
the task into sequential subtasks, and writing the intermediate results
to disk instead of keeping them in memory. This makes the memory
footprint independent of the size of the input table, although it is also slower.
Args:
infile: path to input table. The table can be either a csv-like file
of which the delimiter is auto detected. Or the infile can be a
hdf file, which requires to be stored with format=table.
outfile: path to the output table. Has the same layout and delimiter
as the input file. If the input is csv-like, the output is csv-
like. If the input is hdf, then the output is hdf.
rowchunksize: how many rows to read/write at the same time when
combining intermediate results. More is faster, but also uses
more memory.
colchunksize: how many columns to use at the same time when
calculating the mean and normalizing. More is faster, but also
uses more memory.
ncpus: The number of cpus to use. Scales diminishingly, and more
than four is generally not useful.
"""
if infile.endswith((".hdf", ".h5")):
dataformat = "hdf"
columns, index = parse_hdf(infile)
elif infile.endswith((".csv", ".tsv", ".txt")):
dataformat = "csv"
columns, index, delimiter = parse_csv(infile)
elif infile.endswith((".parquet")):
dataformat = "parquet"
columns, index, index_used, schema = parse_parquet(infile)
else:
raise NotImplementedError(
"Only HDF ('.hdf', '.h5'), "
"text ('.csv', '.tsv', '.txt'), "
"and parquet ('.parquet') formats are supported."
)
# now scan the table for which columns and indices it contains
nr_cols = len(columns)
nr_rows = len(index)
# store intermediate tables
tmp_vals = []
tmp_sorted_vals = []
tmp_idxs = []
# calculate the target (rank means)
target = np.zeros(nr_rows)
with TempFileHolder() as tfh:
# loop over our column chunks and keep updating our target
for i in range(math.ceil(nr_cols / colchunksize)):
col_start, col_end = (
i * colchunksize,
np.clip((i + 1) * colchunksize, 0, nr_cols),
)
# read relevant columns
if dataformat == "hdf":
with pd.HDFStore(infile) as hdf:
assert len(hdf.keys()) == 1
key = hdf.keys()[0]
cols = [
hdf.select_column(key, columns[i])
for i in range(col_start, col_end)
]
df = pd.concat(cols, axis=1).astype("float32")
elif dataformat == "csv":
df = pd.read_csv(
infile,
sep=delimiter,
comment="#",
index_col=0,
usecols=[0, *list(range(col_start + 1, col_end + 1))],
).astype("float32")
elif dataformat == "parquet":
df = pd.read_parquet(
infile, columns=columns[col_start:col_end]
)
# get the rank means
data, sorted_idx = _parallel_argsort(
df.values, ncpus, df.values.dtype
)
del df
sorted_vals = np.take_along_axis(
data,
sorted_idx,
axis=0,
)
rankmeans = np.mean(sorted_vals, axis=1)
# update the target
target += (rankmeans - target) * (
(col_end - col_start) / (col_end)
)
# save all our intermediate stuff
tmp_vals.append(
tfh.get_filename(prefix="qnorm_", suffix=".npy")
)
tmp_sorted_vals.append(
tfh.get_filename(prefix="qnorm_", suffix=".npy")
)
tmp_idxs.append(
tfh.get_filename(prefix="qnorm_", suffix=".npy")
)
np.save(tmp_vals[-1], data)
np.save(tmp_sorted_vals[-1], sorted_vals)
np.save(tmp_idxs[-1], sorted_idx)
del data, sorted_idx, sorted_vals
# now that we have our target we can start normalizing in chunks
qnorm_tmp = []
# store intermediate results
# and start with our index and store it
index_tmpfiles = []
for chunk in np.array_split(
index, math.ceil(len(index) / rowchunksize)
):
index_tmpfiles.append(
tfh.get_filename(prefix="qnorm_", suffix=".p")
)
pd.DataFrame(chunk).to_pickle(
index_tmpfiles[-1], compression=None
)
qnorm_tmp.append(index_tmpfiles)
del index
# for each column chunk quantile normalize it onto our distribution
for i in range(math.ceil(nr_cols / colchunksize)):
# read the relevant columns in
data = np.load(tmp_vals[i], allow_pickle=True)
sorted_idx = np.load(tmp_idxs[i], allow_pickle=True)
sorted_vals = np.load(tmp_sorted_vals[i], allow_pickle=True)
# quantile normalize
qnormed = _numba_accel_qnorm(
data, sorted_idx, sorted_vals, target
)
del data, sorted_idx, sorted_vals
# store it in tempfile
col_tmpfiles = []
for j, chunk in enumerate(
np.array_split(
qnormed, math.ceil(qnormed.shape[0] / rowchunksize)
)
):
tmpfile = tfh.get_filename(
prefix=f"qnorm_{i}_{j}_", suffix=".npy"
)
col_tmpfiles.append(tmpfile)
np.save(tmpfile, chunk)
del qnormed, chunk
qnorm_tmp.append(col_tmpfiles)
if os.path.exists(outfile):
os.remove(outfile)
# glue the separate files together and save them
if dataformat == "hdf":
glue_hdf(outfile, columns, qnorm_tmp)
elif dataformat == "csv":
glue_csv(outfile, columns, qnorm_tmp, delimiter)
elif dataformat == "parquet":
glue_parquet(outfile, columns, qnorm_tmp, index_used, schema)
else:
@singledispatch
def quantile_normalize(
data: np.ndarray,
axis: int = 1,
target: Union[None, np.ndarray] = None,
ncpus: int = 1,
) -> np.ndarray:
"""
Quantile normalize your array.
It does quantile normalization in the "correct" way in the sense that
it takes the mean of duplicate values instead of ignoring them.
Args:
data: numpy.ndarray or pandas.DataFrame to be normalized
axis: axis along to normalize. Axis=1 (default) normalizes each
column/sample which gives them identical distributions.
Axis=0 normalizes each row/feature giving them all identical
distributions.
target: distribution to normalize onto
ncpus: number of cpus to use for normalization
Returns: a quantile normalized copy of the input.
"""
raise NotImplementedError(
f"quantile_normalize not implemented for type {type(data)}"
)
@quantile_normalize.register(np.ndarray)
def quantile_normalize_np(
_data: np.ndarray,
axis: int = 1,
target: Union[None, np.ndarray] = None,
ncpus: int = 1,
) -> np.ndarray:
# check for supported dtypes
if not np.issubdtype(_data.dtype, np.number):
raise ValueError(
f"The type of your data ({_data.dtype}) is is not "
f"supported, and might lead to undefined behaviour. "
f"Please use numeric data only."
)
# numba does not (yet) support smaller dtypes, so use float32 as the minimum
elif any(
np.issubdtype(_data.dtype, dtype) for dtype in [np.int32, np.float32]
):
dtype = np.float32
else:
dtype = np.float64
# take a transposed view of our data if axis is zero
if axis == 0:
_data = np.transpose(_data)
elif axis == 1:
pass
else:
raise ValueError(
f"qnorm only supports 2 dimensional data, so the axis"
f"has to be either 0 or 1, but you set axis to "
f"{axis}."
)
# sort the array, single process or multiprocessing
if ncpus == 1:
# single process sorting
data = _data.astype(dtype=dtype)
# we do the sorting outside of numba because the numpy implementation
# is faster, and numba does not support the axis argument.
sorted_idx = np.argsort(data, axis=0)
elif ncpus > 1:
# multiproces sorting
data, sorted_idx = _parallel_argsort(_data, ncpus, dtype)
else:
raise ValueError("The number of cpus needs to be a positive integer.")
sorted_val = np.take_along_axis(data, sorted_idx, axis=0)
if target is None:
# if no target supplied get the (sorted) rowmeans
target = np.mean(sorted_val, axis=1)
else:
# otherwise make sure target is correct data type and shape
if not isinstance(target, np.ndarray):
try:
target = np.array(target)
except Exception:
raise ValueError(
"The target could not be converted to a " "numpy.ndarray."
)
if target.ndim != 1:
raise ValueError(
f"The target array should be a 1-dimensionsal vector, however "
f"you supplied a vector with {target.ndim} dimensions"
)
if target.shape[0] != data.shape[0]:
raise ValueError(
f"The target array does not contain the same amount of values "
f"({target.shape[0]}) as the data contains rows "
f"({data.shape[0]})"
)
if not np.issubdtype(target.dtype, np.number):
raise ValueError(
f"The type of your target ({data.dtype}) is is not "
f"supported, and might lead to undefined behaviour. "
f"Please use numeric data only."
)
target = np.sort(target.astype(dtype=dtype))
final_res = _numba_accel_qnorm(data, sorted_idx, sorted_val, target)
if axis == 0:
final_res = final_res.T
return final_res
@numba.jit(nopython=True, fastmath=True, cache=True)
def _numba_accel_qnorm(
qnorm: np.ndarray,
sorted_idx: np.ndarray,
sorted_val: np.ndarray,
target: np.ndarray,
) -> np.ndarray:
"""
numba accelerated "actual" qnorm normalization.
"""
# get the shape of the input
n_rows = qnorm.shape[0]
n_cols = qnorm.shape[1]
for col_i in range(n_cols):
i = 0
# we fill out a column not from lowest index to highest index,
# but we fill out qnorm from lowest value to highest value
while i < n_rows:
n = 0
val = 0.0
# since there might be duplicate numbers in a column, we search for
# all the indices that have these duplicate numbers. Then we take
# the mean of their rowmeans.
while (
i + n < n_rows
and sorted_val[i, col_i] == sorted_val[i + n, col_i]
):
val += target[i + n]
n += 1
# fill out qnorm with our new value
if n > 0:
val /= n
for j in range(n):
idx = sorted_idx[i + j, col_i]
qnorm[idx, col_i] = val
i += n
return qnorm
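# A minimal usage sketch (not part of the original module; the import path assumes
# this file lives in a package named ``qnorm``). With two columns, the per-column
# sorted values are averaged rank by rank, so every column ends up with the same
# distribution:
#   >>> import numpy as np
#   >>> from qnorm import quantile_normalize
#   >>> quantile_normalize(np.array([[1.0, 2.0], [3.0, 4.0]]))
#   array([[1.5, 1.5],
#          [3.5, 3.5]])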
|
1666011
|
from datetime import datetime, timedelta
from typing import Dict
from django.utils import timezone
from django.contrib.auth.decorators import login_required
from django.views.generic.edit import DeleteView
from django.urls import reverse_lazy
from django.db.models import Q
from django.http import HttpRequest, HttpResponse, HttpResponseRedirect, Http404, HttpResponseServerError
from django.shortcuts import get_object_or_404, render
from django.db.models import Max, Min, ProtectedError
from django.db import IntegrityError
from django.contrib.auth.decorators import user_passes_test
from django.contrib.auth.mixins import UserPassesTestMixin
from django.contrib import messages
from django.contrib.auth.forms import AdminPasswordChangeForm
from django.http import JsonResponse
from django.conf import settings
from rse.models import *
from rse.forms import *
from rse.views.helper import *
############
### RSEs ###
############
@login_required
def rse(request: HttpRequest, rse_username: str) -> HttpResponse:
# Get the user
user = get_object_or_404(User, username=rse_username)
# Dict for view
view_dict = {} # type: Dict[str, object]
# Get RSE if exists
rse = get_object_or_404(RSE, user=user)
view_dict['rse'] = rse
# Construct q query and check the project filter form
q = Q()
from_date = None
until_date = None
if request.method == 'GET':
form = FilterProjectForm(request.GET)
if form.is_valid():
filter_range = form.cleaned_data["filter_range"]
from_date = filter_range[0]
q &= Q(end__gte=from_date)
until_date = filter_range[1]
q &= Q(start__lte=until_date)
# apply status type query
status = form.cleaned_data["status"]
if status in 'PRFX':
q &= Q(project__status=status)
elif status == 'L':
q &= Q(project__status='F')|Q(project__status='R')
elif status == 'U':
q &= Q(project__status='F')|Q(project__status='R')|Q(project__status='P')
else:
form = FilterProjectForm()
# Get RSE allocations grouped by RSE based off Q filter and save the form
q &= Q(rse=rse)
allocations = RSEAllocation.objects.filter(q)
view_dict['allocations'] = allocations
view_dict['form'] = form
# RSE in dictionary with allocations
rses = {}
rses[rse] = allocations
view_dict['rses'] = rses
# Get the commitment summary (date, effort, RSEAllocation)
if allocations:
view_dict['commitment_data'] = [(rse, RSEAllocation.commitment_summary(allocations, from_date, until_date))]
return render(request, 'rse.html', view_dict)
@login_required
def rseid(request: HttpRequest, rse_id: int) -> HttpResponse:
r = get_object_or_404(RSE, id=rse_id)
return rse(request, r.user.username)
@login_required
def rses(request: HttpRequest) -> HttpResponse:
"""
Filters to be handled client side with DataTables
"""
rses = RSE.objects.all()
# calculate grade point (only displayed for superusers)
for rse in rses:
try:
rse.grade = rse.futureSalaryBand(date=timezone.now().date()).short_str
except Exception:
rse.grade = "No Data"
return render(request, 'rses.html', { "rses": rses })
def ajax_salary_band_by_year(request):
"""
Simple responsive AJAX query to return options (for html options drop down) displaying salary bands per year.
Does not require the user to be logged in (publicly available information).
"""
year = request.GET.get('year')
selected = request.GET.get('selected')
sbs = SalaryBand.objects.filter(year=year).order_by('year')
view_dict = {}
view_dict['sbs'] = sbs
if selected is not None and selected.isnumeric():
view_dict['selected'] = int(selected)
return render(request, 'includes/salaryband_options.html', view_dict)
@user_passes_test(lambda u: u.is_superuser)
def rse_salary(request: HttpRequest, rse_username: str) -> HttpResponse:
# Get the user
user = get_object_or_404(User, username=rse_username)
# Dict for view
view_dict = {} # type: Dict[str, object]
# Get RSE if exists
rse = get_object_or_404(RSE, user=user)
view_dict['rse'] = rse
# get salary grade changes
sgcs = SalaryGradeChange.objects.filter(rse=rse)
view_dict['sgcs'] = sgcs
# salary grade change form
if request.method == 'POST':
form = SalaryGradeChangeForm(request.POST, rse=rse)
if form.is_valid():
sgc = form.save()
messages.add_message(request, messages.SUCCESS, f'Salary Grade Change {sgc} successfully added.')
else:
form = SalaryGradeChangeForm(rse=rse)
view_dict['form'] = form
return render(request, 'rse_salary.html', view_dict)
class rse_salarygradechange_delete(UserPassesTestMixin, DeleteView):
""" POST only special delete view which redirects to project allocation view """
model = SalaryGradeChange
success_message = "Salary grade change deleted successfully."
def test_func(self):
""" Only for super users """
return self.request.user.is_superuser
def get(self, request, *args, **kwargs):
""" disable this view when arriving by get (i.e. only allow post) """
raise Http404("Page does not exist")
def get_success_url(self):
return reverse_lazy('rse_salary', kwargs={'rse_username': self.get_object().rse.user.username})
def delete(self, request, *args, **kwargs):
messages.success(self.request, self.success_message)
return super(rse_salarygradechange_delete, self).delete(request, *args, **kwargs)
@login_required
def commitment(request: HttpRequest) -> HttpResponse:
# Dict for view
view_dict = {} # type: Dict[str, object]
# Construct q query and check the project filter form
q = Q()
from_date = None
until_date = None
if request.method == 'GET':
form = FilterProjectForm(request.GET)
if form.is_valid():
filter_range = form.cleaned_data["filter_range"]
from_date = filter_range[0]
q &= Q(end__gte=from_date)
until_date = filter_range[1]
q &= Q(start__lte=until_date)
# apply status type query
status = form.cleaned_data["status"]
if status in 'PRFX':
q &= Q(project__status=status)
elif status == 'L':
q &= Q(project__status='F')|Q(project__status='R')
elif status == 'U':
q &= Q(project__status='F')|Q(project__status='R')|Q(project__status='P')
else:
form = FilterProjectForm()
# Get RSE allocations grouped by RSE based off Q filter and save the form
allocations = RSEAllocation.objects.filter(q)
view_dict['form'] = form
# Get unique RSE ids allocated to project and build list of (RSE, [RSEAllocation]) objects for commitment graph
allocation_unique_rses = allocations.values('rse').distinct()
commitment_data = []
rse_allocations = {}
for a in allocation_unique_rses:
r_a = allocations.filter(rse__id=a['rse'])
rse = RSE.objects.get(id=a['rse'])
rse_allocations[rse] = r_a
commitment_data.append((rse, RSEAllocation.commitment_summary(r_a, from_date, until_date)))
view_dict['commitment_data'] = commitment_data
view_dict['rse_allocations'] = rse_allocations
return render(request, 'commitments.html', view_dict)
|
1666042
|
import re
from typing import Pattern, Union
def escape(string: str) -> str:
return re.escape(string)
def is_match(string: str, pattern: str) -> bool:
return re.match(pattern, string) is not None
def replace(reg: Union[str, Pattern], input: str, replacement: str) -> str:
# re.sub expects (pattern, repl, string); Pattern.sub expects (repl, string)
if isinstance(reg, str):
    return re.sub(reg, replacement, input)
else:
    return reg.sub(replacement, input)
def create(pattern: str, options: int = 0) -> Pattern:
return re.compile(pattern, options)
__all__ = ["escape", "is_match", "create", "replace"]
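# Hedged usage sketch (not part of the original module), assuming the corrected
# argument order above (pattern, input, replacement):
#   is_match("2021-01-01", r"\d{4}-\d{2}-\d{2}")   # True
#   replace(r"\d+", "a1b2", "#")                   # "a#b#"
#   replace(create(r"\s+"), "a  b", " ")           # "a b"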
|
1666045
|
import numpy as np
import pdb
import ad3.factor_graph as fg
import time
def test_random_instance(n):
costs = np.random.rand(n)
budget = np.sum(costs) * np.random.rand()
scores = np.random.randn(n)
tic = time.perf_counter()
x_gold = solve_lp_knapsack_lpsolve(scores, costs, budget)
toc = time.perf_counter()
print('lpsolve:', toc - tic)
tic = time.perf_counter()
x = solve_lp_knapsack_ad3(scores, costs, budget)
toc = time.perf_counter()
print('ad3:', toc - tic)
res = x - x_gold
#print x
#print x_gold
if res.dot(res) > 1e-6:
pdb.set_trace()
def solve_lp_knapsack_ad3(scores, costs, budget):
factor_graph = fg.PFactorGraph()
binary_variables = []
for i in range(len(scores)):
binary_variable = factor_graph.create_binary_variable()
binary_variable.set_log_potential(scores[i])
binary_variables.append(binary_variable)
negated = [False] * len(binary_variables)
factor_graph.create_factor_knapsack(binary_variables, negated, costs, budget)
#pdb.set_trace()
# Run AD3.
factor_graph.set_verbosity(1)
factor_graph.set_eta_ad3(.1)
factor_graph.adapt_eta_ad3(True)
factor_graph.set_max_iterations_ad3(1000)
value, posteriors, additional_posteriors, status = factor_graph.solve_lp_map_ad3()
return posteriors
def solve_lp_knapsack_gurobi(scores, costs, budget):
from gurobipy import *
n = len(scores)
# Create a new model.
m = Model("lp_knapsack")
# Create variables.
for i in range(n):
m.addVar(lb=0.0, ub=1.0)
m.update()
vars = m.getVars()
# Set objective.
obj = LinExpr()
for i in range(n):
obj += scores[i]*vars[i]
m.setObjective(obj, GRB.MAXIMIZE)
# Add constraint.
expr = LinExpr()
for i in range(n):
expr += costs[i]*vars[i]
m.addConstr(expr, GRB.LESS_EQUAL, budget)
#pdb.set_trace()
# Optimize.
m.optimize()
assert m.status == GRB.OPTIMAL
x = np.zeros(n)
for i in range(n):
x[i] = vars[i].x
return x
def solve_lp_knapsack_lpsolve(scores, costs, budget):
import lpsolve55 as lps
relax = True
n = len(scores)
lp = lps.lpsolve('make_lp', 0, n)
# Set verbosity level. 3 = only warnings and errors.
lps.lpsolve('set_verbose', lp, 3)
lps.lpsolve('set_obj_fn', lp, -scores)
lps.lpsolve('add_constraint', lp, costs, lps.LE, budget)
lps.lpsolve('set_lowbo', lp, np.zeros(n))
lps.lpsolve('set_upbo', lp, np.ones(n))
if not relax:
lps.lpsolve('set_int', lp, [True] * n)
else:
lps.lpsolve('set_int', lp, [False] * n)
# Solve the ILP, and call the debugger if something went wrong.
ret = lps.lpsolve('solve', lp)
assert ret == 0, pdb.set_trace()
# Retrieve solution and return
[x, _] = lps.lpsolve('get_variables', lp)
x = np.array(x)
return x
if __name__ == "__main__":
n_tests = 100
n = 100
for i in range(n_tests):
test_random_instance(n)
|
1666056
|
import networkx as nx
from gremlin_python.process.graph_traversal import in_, coalesce, constant, select
from gremlin_python.process.traversal import T, P, Column
from nepytune import drawing
def query_website_node(g, website_id):
return g.V(website_id).valueMap(True).toList()[0]
def query_transient_nodes_for_website(g, website_id, limit=10000):
return (g.V(website_id)
.in_("visited")
.limit(limit)
.project("uid", "pid")
.by("uid")
.by(in_("has_identity").values("pid").fold())
.group()
.by(coalesce(select("pid").unfold(), constant("transient-nodes-connected-to-website")))
.by(select("uid").dedup().limit(100).fold())
.unfold()
.project("persistent-node-id", "transient-nodes")
.by(select(Column.keys))
.by(select(Column.values))
.where(select("transient-nodes").unfold().count().is_(P.gt(1)))
).toList()
def create_graph_for_website_and_transient_nodes(website_node, transient_nodes_for_website):
website_id = website_node[T.id]
graph = nx.Graph()
graph.add_node(
website_id,
**{
"id": website_id,
"label": website_node[T.label],
"title": website_node["title"][0],
"url": website_node["url"][0]
}
)
transient_nodes = []
persistent_nodes = []
for node in transient_nodes_for_website:
if node["persistent-node-id"] != "transient-nodes-connected-to-website":
pnode = node["persistent-node-id"]
persistent_nodes.append(pnode)
graph.add_node(
pnode,
id=pnode,
label="persistentId"
)
for tnode in node["transient-nodes"]:
graph.add_edge(
pnode,
tnode,
label="has_identity"
)
for tnode in node["transient-nodes"]:
graph.add_node(
tnode,
id=tnode,
label="transientId"
)
graph.add_edge(
website_id,
tnode,
label="visited"
)
transient_nodes.append(tnode)
return graph
def show(g, website_id):
"""Show users that visited website on more than one device."""
transient_nodes_for_website = query_transient_nodes_for_website(g, website_id)
website_node = query_website_node(g, website_id)
raw_graph = create_graph_for_website_and_transient_nodes(website_node, transient_nodes_for_website)
graph = drawing.spring_layout(raw_graph)
drawing.draw(
title="",
scatters=[drawing.edges_scatter(graph)] + list(drawing.scatters_by_label(graph, attrs_to_skip=["pos"])),
)
|
1666128
|
class AutoRepr:
"""Simple, generic __repr__ for all class properties, sans those that start with underscores"""
def __repr__(self):
items = ("%s = %r" % (k, v) for k, v in self.__dict__.items() if not k.startswith('_'))
return "<%s: {%s}>" % (self.__class__.__name__, ', '.join(items))
|
1666173
|
from PIL import Image
import collections
import glob
import sys
#n = 1
#for i in glob.glob('p/*.jpg'):
#img = Image.open(i)
def jo(filename):
# load the glyph template library
fonts = []
for i in glob.glob('pp/*.png'):
fonts.append((i.replace('pp/','').replace('.png',''),Image.open(i).load()))
#print(fonts[0][1])
img = Image.open(filename)
#img = img.convert('RGB')
img = img.convert('RGB')
pixdata = img.load()
#print(pixdata[0,0])
#l = [pixdata[x,y] for x in range(img.size[0]) for y in range(img.size[1])]
#l.sort()
#print(l[1])
# border
for x in range(img.size[0]):
pixdata[x,0] = (255, 255, 255)
for y in range(img.size[1]):
pixdata[0,y] = (255, 255, 255)
for x in range(img.size[0]):
pixdata[x,img.size[1]-1] = (255, 255, 255)
for y in range(img.size[1]):
pixdata[img.size[0]-1,y] = (255, 255, 255)
# pure white (force light pixels to white)
for y in range(img.size[1]):
for x in range(img.size[0]):
#if pixdata[x,y][0] >= 200 and pixdata[x,y][1] >= 200 and pixdata[x,y][2] >= 200 or pixdata[x,y][0] <= 110 and pixdata[x,y][1] <= 110 and pixdata[x,y][2] <= 110:
if pixdata[x,y][0] >= 175 and pixdata[x,y][1] >= 175 and pixdata[x,y][2] >= 175:
pixdata[x,y] = (255, 255, 255)
# denoise
for i in range(2):
for y in range(img.size[1]):
for x in range(img.size[0]):
try:
if pixdata[x,y] != (255,255,255) and (pixdata[x-1,y] == pixdata[x+1,y] == (255,255,255) or pixdata[x,y-1] == pixdata[x,y+1] == (255,255,255)):
pixdata[x,y] = (255, 255, 255)
elif pixdata[x,y] != (255,255,255) and pixdata[x+1,y] != (255,255,255) and pixdata[x,y+1] != (255,255,255) and pixdata[x+1,y+1] != (255,255,255) and pixdata[x-1,y] == pixdata[x-1,y+1] == pixdata[x,y-1] == pixdata[x+1,y-1] == pixdata[x+2,y] == pixdata[x+2,y+1] == pixdata[x,y+2] == pixdata[x+1,y+2] == (255,255,255):
pixdata[x,y] = pixdata[x+1,y] = pixdata[x,y+1] = pixdata[x+1,y+1]= (255, 255, 255)
except IndexError:
continue
#img = img.convert('P')
#print(pixdata[20,17])
#img.save('c.png')
#img.save('b.png')
#img.show()
# segmentation
# content blocks
db = []
for x in range(img.size[0]):
for y in range(img.size[1]):
if pixdata[x,y] != (255,255,255):
db.append(x)
break
j = len(db)-1
k = 0
for i in reversed(db):
if i == db[j-1]+1:
k += 1
else:
k = 0
if k >= 2:
del db[j]
j -= 1
def tooshort(l):
for v in l[::2]:
i = l.index(v)
if db[i+1] - db [i] <= 2:
return True
if len(db) > 8 or tooshort(db):
db = []
for x in range(img.size[0]):
nowhite = 0
for y in range(img.size[1]):
if pixdata[x,y] != (255,255,255):
nowhite += 1
if nowhite == 3:
db.append(x)
break
db1 = []
for i,v in enumerate(db):
if i == 0 or i == len(db)-1:
continue
if db[i-1]+1 == v == db[i+1]-1 or db[i-1]+1 != v != db[i+1]-1:
db1.append(i)
for i in db1[::-1]:
del db[i]
while len(db) > 8:
for v in db[1:-1:2]:
i = db.index(v)
if v - db[i-1] <=10:
del db[i+1],db[i]
break
#while len(db) < 8:
# for v in db[::2]:
# i = db.index(v)
# if db[i+1] - v > 40:
# db.insert(i+1,v+24)
#print(db)
#exit()
# crop and compare
answer = []
n = 1
def eye(v,v1,n):
img0 = img.crop((v,0,v1,img.size[1]))
pixdata = img0.load()
dby = []
for y in range(img0.size[1]):
nowhite = 0
for x in range(img0.size[0]):
if pixdata[x,y] != (255,255,255):
nowhite += 1
if nowhite == 3:
dby.append(y)
break
j = len(dby)-1
k = 0
for i in reversed(dby):
if i == dby[j-1]+1:
k += 1
else:
k = 0
if k >= 2:
del dby[j]
j -= 1
#img0.crop((0,dby[0],img0.size[0],dby[-1])).save('{}.png'.format(n))
#n += 1
#print(dby)
if dby[-1] - dby[0] > 25:
while len(dby) > 2:
if len(dby) == 3:
del dby[-1]
for v in dby[1::2]:
i = dby.index(v)
if v - dby[i-1] <= 4:
del dby[i],dby[i-1]
break
#print(dby)
#for x in list(range(img0.size[0]))[:4]:
dbx = 0
#print(123123,n)
#img0.crop((0,dby[0],img0.size[0],dby[-1])).save('{}.png'.format(n))
#n += 1
img0 = img0.crop((0,dby[0],img0.size[0],dby[-1]))
pixdata = img0.load()
for x in range(5):
#white = img0.size[1]
nowhite = 0
#for y in list(range(img0.size[1]))[3:]:
for y in range(img0.size[1]):
if y <= 3 and pixdata[x,y] != (255,255,255):
break
if y > 3 and pixdata[x,y] != (255,255,255):
nowhite += 1
if 2 <= nowhite <= 3:
#for y in range(img0.size[1])[3:]:
#pixdata[x,y] = (255,255,255)
dbx += 1
else:
break
#print(nowhite)
#break
#img0.crop((dbx,dby[0],img0.size[0],dby[-1])).save('{}.png'.format(n))
#img0.crop((dbx,0,img0.size[0],img0.size[1])).save('{}.png'.format(n))
#print(dbx,dby)
#imgl.append(img0.crop((0,db1[0],img0.size[0],db1[-1])))
img0 = img0.crop((dbx,0,img0.size[0],dby[-1]))
pixdata = img0.load()
diffl = []
for i in fonts:
diff = 0
for y in range(img0.size[1]):
for x in range(img0.size[0]):
try:
if pixdata[x,y] != (255,255,255) and i[1][x,y] == (255,255,255) or pixdata[x,y] == (255,255,255) and i[1][x,y] != (255,255,255):
diff += 1
except IndexError:
continue
diffl.append((i[0],diff))
min = ['',360]
for i in diffl:
if min[1] >= i[1]:
min = i
#print(sorted(diffl))
#print(min[0].split('_')[0])
return min[0].split('_')[0]
#imgl = []
#for i,v in enumerate(db[::2]):
#print(db)
for v in db[::2]:
i = db.index(v)
#print(i,v)
if db[i+1] - v > 40:
font = eye(v,v+29,n)
answer.append(font)
n += 1
if font == 'b':
font = eye(v+25,db[i+1],n)
#img.crop((v+25,0,db[i+1],img.size[1])).save('t.png')
#print(v+19)
elif font == 'm':
font = eye(v+28,db[i+1],n)
elif font == 'h' or font == 'q':
font = eye(v+26,db[i+1],n)
else:
font = eye(v+31,db[i+1],n)
else:
font = eye(v,db[i+1],n)
answer.append(font)
n += 1
#if db[i+1] - v > 40:
# #db.insert(i+1,v+29)
# #db.insert(i+2,v+30)
# print(db)
# font = eye(v+31,db[i+1],n)
# answer.append(font)
# n += 1
#answer.append(min[0].split('_')[0])
#print(min)
return ''.join(answer)
if __name__ == '__main__':
for i in sys.argv[1:]:
print(jo(i),end=' ')
print()
#print(jo('p/35.jpg'))
#print(tesser('a.jpg'))
|
1666203
|
from mypy.plugin import MethodContext
from mypy.types import TupleType, UninhabitedType
from classes._registry import INVALID_ARGUMENTS_MSG # noqa: WPS436
from classes.contrib.mypy.typeops.instance_context import InstanceContext
def check_type(
instance_context: InstanceContext,
) -> bool:
"""
Checks that args to ``.instance`` method are correct.
We cannot use ``@overload`` on ``.instance`` because ``mypy``
does not correctly handle ``ctx.api.fail`` on ``@overload`` items:
it then tries the other overloads, which produces incorrect results.
So, that's why we need this custom checker.
"""
return all([
_check_all_args(instance_context.passed_args, instance_context.ctx),
])
def _check_all_args(
passed_args: TupleType,
ctx: MethodContext,
) -> bool:
fake_args = [
passed_arg
for passed_arg in passed_args.items[1:]
if isinstance(passed_arg, UninhabitedType)
]
if not fake_args:
ctx.api.fail(INVALID_ARGUMENTS_MSG, ctx.context)
return False
return True
|
1666208
|
class Handler:
"""Base Handler class"""
def deploy(self, sourceUrl=None, entrypoint=None):
raise NotImplementedError("deploy")
def destroy(self):
raise NotImplementedError("destroy")
def __call__(self, request, context=None):
raise NotImplementedError("__call__")
def __add__(self):
raise NotImplementedError("__add__")
|
1666247
|
from panorama import Panaroma
import imutils
import cv2
#Take picture from folder like: Hill1 & Hill2, scene1 & scene2, my1 & my2, taj1 & taj2, lotus1 & lotus2, beach1 & beach2, room1 & room2
print("Enter the number of images you want to concantenate:")
no_of_images = int(input())
print("Enter the image name in order of left to right in way of concantenation:")
#like taj1.jpg, taj2.jpg, taj3.jpg .... tajn.jpg
filename = []
for i in range(no_of_images):
print("Enter the %d image:" %(i+1))
filename.append(input())
images = []
for i in range(no_of_images):
images.append(cv2.imread(filename[i]))
# We resize the images while keeping the aspect ratio, using the imutils helper
for i in range(no_of_images):
images[i] = imutils.resize(images[i], width=400)
for i in range(no_of_images):
images[i] = imutils.resize(images[i], height=400)
panaroma = Panaroma()
if no_of_images==2:
(result, matched_points) = panaroma.image_stitch([images[0], images[1]], match_status=True)
else:
(result, matched_points) = panaroma.image_stitch([images[no_of_images-2], images[no_of_images-1]], match_status=True)
for i in range(no_of_images - 2):
(result, matched_points) = panaroma.image_stitch([images[no_of_images-i-3],result], match_status=True)
# show the resulting panorama image and the matched keypoints
for i in range(no_of_images):
cv2.imshow("Image {k}".format(k=i+1), images[i])
cv2.imshow("Keypoint Matches", matched_points)
cv2.imshow("Panorama", result)
#to write the images
cv2.imwrite("Matched_points.jpg",matched_points)
cv2.imwrite("Panorama_image.jpg",result)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
1666380
|
import numpy as np
from scipy.optimize import check_grad
from value_iter import value_iter
from utils import norm_distr, laplace_distr, printoptions
def compute_g(mdp, policy, p_0, T, d_last_step_list, expected_features_list):
nS, nA, nF = mdp.nS, mdp.nA, mdp.num_features
# base case
G = np.zeros((nS, nF))
# recursive case
for t in range(T-1):
# G(s') = \sum_{s, a} p(a | s) p(s' | s, a) [ p(s) g(s, a) + G_prev[s] ]
# p(s) is given by d_last_step_list[t]
# g(s, a) = f(s) - F(s) + \sum_{s'} p(s' | s, a) F(s')
# Distribute the addition to get three different terms:
# First term: p(s) [f(s') - F(s')]
# Second term: p(s) \sum_{s2} p(s2 | s, a) F(s2)
# Third term: G_prev[s]
g_first = mdp.f_matrix - expected_features_list[t]
g_second = mdp.T_matrix.dot(expected_features_list[t+1])
g_second = g_second.reshape((nS, nA, nF))
g_total = np.expand_dims(g_first, axis=1) + g_second
prob_s_a = np.expand_dims(d_last_step_list[t].reshape(nS), axis=1) * policy[t]
G_value = np.expand_dims(prob_s_a, axis=2) * g_total
G_value = mdp.T_matrix_transpose.dot(G_value.reshape((nS * nA, nF)))
G_recurse = np.expand_dims(policy[t], axis=-1) * np.expand_dims(G, axis=1)
G_recurse = mdp.T_matrix_transpose.dot(G_recurse.reshape((nS * nA, nF)))
G = G_value + G_recurse
return G
def compute_d_last_step(mdp, policy, p_0, T, gamma=1, verbose=False, return_all=False):
"""Computes the last-step occupancy measure"""
D, d_last_step_list = p_0, [p_0]
for t in range(T-1):
# D(s') = \sum_{s, a} D_prev(s) * p(a | s) * p(s' | s, a)
state_action_prob = np.expand_dims(D, axis=1) * policy[t]
D = mdp.T_matrix_transpose.dot(state_action_prob.flatten())
if verbose is True: print(D)
if return_all: d_last_step_list.append(D)
return (D, d_last_step_list) if return_all else D
def compute_feature_expectations(mdp, policy, p_0, T):
nS, nA, nF = mdp.nS, mdp.nA, mdp.num_features
expected_features = mdp.f_matrix
expected_feature_list = [expected_features]
for t in range(T-2, -1, -1):
# F(s) = f(s) + \sum_{a, s'} p(a | s) * p(s' | s, a) * F(s')
future_features = mdp.T_matrix.dot(expected_features).reshape((nS, nA, nF))
future_features = future_features * np.expand_dims(policy[t], axis=2)
expected_features = mdp.f_matrix + np.sum(future_features, axis=1)
expected_feature_list.append(expected_features)
return expected_features, expected_feature_list[::-1]
def rlsp(mdp, s_current, p_0, horizon, temp=1, epochs=1, learning_rate=0.2,
r_prior=None, r_vec=None, threshold=1e-3, check_grad_flag=False):
"""The RLSP algorithm"""
def compute_grad(r_vec):
# Compute the Boltzmann rational policy \pi_{s,a} = \exp(Q_{s,a} - V_s)
policy = value_iter(mdp, 1, mdp.f_matrix @ r_vec, horizon, temp)
d_last_step, d_last_step_list = compute_d_last_step(
mdp, policy, p_0, horizon, return_all=True)
if d_last_step[s_current] == 0:
print('Error in om_method: No feasible trajectories!')
return r_vec
expected_features, expected_features_list = compute_feature_expectations(
mdp, policy, p_0, horizon)
G = compute_g(mdp, policy, p_0, horizon, d_last_step_list, expected_features_list)
# Compute the gradient
dL_dr_vec = G[s_current] / d_last_step[s_current]
# Gradient of the prior
if r_prior is not None: dL_dr_vec += r_prior.logdistr_grad(r_vec)
return dL_dr_vec
def compute_log_likelihood(r_vec):
policy = value_iter(mdp, 1, mdp.f_matrix @ r_vec, horizon, temp)
d_last_step = compute_d_last_step(mdp, policy, p_0, horizon)
log_likelihood = np.log(d_last_step[s_current])
if r_prior is not None: log_likelihood += np.sum(r_prior.logpdf(r_vec))
return log_likelihood
def get_grad(_):
"""dummy function for use with check_grad()"""
return dL_dr_vec
if r_vec is None:
r_vec = 0.01*np.random.randn(mdp.f_matrix.shape[1])
print('Initial reward vector: {}'.format(r_vec))
if check_grad_flag: grad_error_list=[]
for i in range(epochs):
dL_dr_vec = compute_grad(r_vec)
if check_grad_flag:
grad_error_list.append(check_grad(compute_log_likelihood, get_grad, r_vec))
# Gradient ascent
r_vec = r_vec + learning_rate * dL_dr_vec
# with printoptions(precision=4, suppress=True):
# print('Epoch {}; Reward vector: {}'.format(i, r_vec))
# if check_grad_flag: print('grad error: {}'.format(grad_error_list[-1]))
if np.linalg.norm(dL_dr_vec) < threshold:
if check_grad_flag:
print()
print('Max grad error: {}'.format(np.amax(np.asarray(grad_error_list))))
print('Median grad error: {}'.format(np.median(np.asarray(grad_error_list))))
break
return r_vec
|
1666399
|
from .common import StaticTestBase
class FStringTests(StaticTestBase):
def test_format_spec(self):
codestr = """
def f(x: float) -> str:
return f"{x:.2f}"
"""
with self.in_module(codestr) as mod:
f = mod.f
self.assertEqual(f(2.134), "2.13")
|
1666454
|
import os
import warnings
import numpy as np
import cv2
import create_patch
import caffe
import pdb
def get_patch(bottom_data, c, output_size):
h, w = bottom_data.shape[2:]
# c = [class, batch_index, row_center, col_center, patch_size]; cast to int so the
# values can be used as array sizes and slice indices
c = [int(v) for v in c]
patch_img = np.zeros((3, c[4], c[4]))
im_r0 = max(0, c[2] - c[4] // 2)
im_c0 = max(0, c[3] - c[4] // 2)
im_r1 = min(h, c[2] + c[4] // 2)
im_c1 = min(w, c[3] + c[4] // 2)
p_r0 = max(0, c[4] // 2 - c[2])
p_c0 = max(0, c[4] // 2 - c[3])
p_r1 = min(c[4], h + c[4] // 2 - c[2])
p_c1 = min(c[4], w + c[4] // 2 - c[3])
patch_img[:, p_r0:p_r1, p_c0:p_c1] = bottom_data[c[1], :, im_r0:im_r1, im_c0:im_c1].copy()
patch_img = patch_img.transpose((1,2,0))
return cv2.resize(patch_img, (output_size, output_size)).transpose((2,0,1)).astype(np.float)
class RandomSamplingLayer(caffe.Layer):
def setup(self, bottom, top):
warnings.filterwarnings("ignore")
params = eval(self.param_str)
self.output_size = params['output_size']
self.num = params['num']
self.by_ovlp = params['by_ovlp']
self.minsz = params['minsz']
self.maxsz = params['maxsz']
if self.num % bottom[0].data.shape[0] != 0:
raise Exception("num should be divided by batch size.")
self.num_cand = self.num // bottom[0].data.shape[0]
if len(top) != 2:
raise Exception("Need exact two tops.")
if len(bottom) != 2:
raise Exception("Need exact two bottoms.")
def reshape(self, bottom, top):
top[0].reshape(self.num, bottom[0].data.shape[1], self.output_size, self.output_size)
top[1].reshape(self.num, 1)
def forward(self, bottom, top):
idx = 0
for i in range(bottom[0].data.shape[0]):
img = bottom[0].data[i,...].transpose((1,2,0)).copy()
seg = bottom[1].data[i,...].transpose((1,2,0)).copy()
patch, cls = create_patch.createRandomPatchImg(img, seg, self.num_cand, [self.minsz, self.maxsz], 0.1, self.output_size, by_ovlp=self.by_ovlp, show=False)
if patch.shape[0] != self.num_cand:
raise Exception("Number of patches not consistent: %d vs. %d" % (patch.shape[0], self.num_cand))
for k in range(patch.shape[0]):
top[0].data[idx,...] = get_patch(bottom[0].data, np.hstack((np.array([cls[k], i]), patch[k,:])), self.output_size)
top[1].data[idx,:] = cls[k]
idx += 1
# check
if False:
if not os.path.isdir('output/class/'):
os.makedirs('output/class/')
show_data = top[0].data.copy()
show_data[:,0,...] += 104
show_data[:,1,...] += 117
show_data[:,2,...] += 123
num = np.zeros((21,), dtype=np.int)
for i in range(show_data.shape[0]):
cv2.imwrite('output/class/' + str(top[1].data[i,:].astype(np.int)) + '_' + str(num[top[1].data[i,:].astype(np.int)])+ '.jpg', show_data[i,...].transpose((1,2,0)).astype(np.uint8))
num[top[1].data[i,:].astype(np.int)] += 1
pdb.set_trace()
def backward(self, top, propagate_down, bottom):
pass
class GraphToTripletLayer(caffe.Layer):
def setup(self, bottom, top):
warnings.filterwarnings("ignore")
self.N = bottom[0].data.shape[0]
if len(top) != 3:
raise Exception("Need exact three tops")
if len(bottom) != 2:
raise Exception("Need exact two bottoms")
def reshape(self, bottom, top):
top[0].reshape(*bottom[0].data.shape)
top[1].reshape(*bottom[0].data.shape)
top[2].reshape(*bottom[0].data.shape)
def forward(self, bottom, top):
in_graph = []
self.triplet_idx = -1 * np.ones((self.N, 3), dtype=np.int)
labels = bottom[1].data[...]
for i in np.random.permutation(self.N):
self.triplet_idx[i,0] = i
label = labels[i]
pos_cand = [idx for idx in in_graph if labels[idx] == label]
neg_cand = [idx for idx in in_graph if labels[idx] != label]
if len(pos_cand) != 0:
self.triplet_idx[i,1] = np.random.choice(pos_cand)
if len(neg_cand) != 0:
self.triplet_idx[i,2] = np.random.choice(neg_cand)
in_graph.append(i)
for i in range(self.N):
if self.triplet_idx[i,1] == -1:
label = labels[i]
pos_cand = [idx for idx in in_graph if labels[idx] == label and idx != i]
if len(pos_cand) != 0:
self.triplet_idx[i,1] = np.random.choice(pos_cand)
else:
self.triplet_idx[i,1] = i
if self.triplet_idx[i,2] == -1:
label = labels[i]
neg_cand = [idx for idx in in_graph if labels[idx] != label]
if len(neg_cand) != 0:
self.triplet_idx[i,2] = np.random.choice(neg_cand)
for i in range(self.N):
top[0].data[i,...] = bottom[0].data[self.triplet_idx[i,0],...]
top[1].data[i,...] = bottom[0].data[self.triplet_idx[i,1],...]
if self.triplet_idx[i,2] != -1:
top[2].data[i,...] = bottom[0].data[self.triplet_idx[i,2],...]
else:
top[2].data[i,...] = 0.
def backward(self, top, propagate_down, bottom):
bottom[0].diff[...] = 0.
for i in range(self.N):
bottom[0].diff[self.triplet_idx[i,0],...] += top[0].diff[i,...]
bottom[0].diff[self.triplet_idx[i,1],...] += top[1].diff[i,...]
if self.triplet_idx[i,2] != -1:
bottom[0].diff[self.triplet_idx[i,2],...] += top[2].diff[i,...]
|
1666472
|
import torch
import torch.nn as nn
import torch.optim as optim
from functools import partial
from .fastai_optim import OptimWrapper
from .learning_schedules_fastai import OneCycle, CosineWarmupLR
def build_optimizer(model, optim_cfg):
if optim_cfg.type == 'adam':
optimizer = optim.Adam(model.parameters(), lr=optim_cfg.lr, weight_decay=optim_cfg.weight_decay)
elif optim_cfg.type == 'sgd':
optimizer = optim.SGD(
model.parameters(), lr=optim_cfg.lr, weight_decay=optim_cfg.weight_decay,
momentum=optim_cfg.momentum
)
elif optim_cfg.type == 'adam_onecycle':
def children(m: nn.Module):
return list(m.children())
def num_children(m: nn.Module) -> int:
return len(children(m))
flatten_model = lambda m: sum(map(flatten_model, m.children()), []) if num_children(m) else [m]
get_layer_groups = lambda m: [nn.Sequential(*flatten_model(m))]
optimizer_func = partial(optim.Adam, betas=(0.9, 0.99))
optimizer = OptimWrapper.create(
optimizer_func, optim_cfg.lr, get_layer_groups(model), wd=optim_cfg.weight_decay, true_wd=True, bn_wd=True
)
else:
raise NotImplementedError
return optimizer
def build_scheduler(optimizer, total_iters_each_epoch, total_epochs, last_epoch, optim_cfg, lr_cfg):
lr_warmup_scheduler = None
total_steps = total_iters_each_epoch * total_epochs
if lr_cfg.policy == 'onecycle':
lr_scheduler = OneCycle(
optimizer, total_steps, optim_cfg.lr, list(lr_cfg.moms), lr_cfg.div_factor, lr_cfg.pct_start
)
elif lr_cfg.policy == 'cosine':
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, total_steps, last_epoch=last_epoch)
elif lr_cfg.policy == 'step':
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, lr_cfg.step, last_epoch=last_epoch)
else:
raise NotImplementedError
if 'warmup' in lr_cfg:
lr_warmup_scheduler = CosineWarmupLR(
optimizer, T_max=lr_cfg.warmup_iters,
eta_min=optim_cfg.lr * lr_cfg.warmup_ratio
)
return lr_scheduler, lr_warmup_scheduler
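# Usage sketch (illustrative, not part of the original file): both helpers expect a
# config object supporting attribute access and `in` (e.g. easydict.EasyDict, assumed
# here). All specific values below are assumptions for demonstration only.
#
#   from easydict import EasyDict
#   model = nn.Linear(8, 2)
#   optim_cfg = EasyDict(type='sgd', lr=0.01, weight_decay=1e-4, momentum=0.9)
#   lr_cfg = EasyDict(policy='step', step=[30, 60])
#   optimizer = build_optimizer(model, optim_cfg)
#   scheduler, warmup = build_scheduler(optimizer, total_iters_each_epoch=100,
#                                       total_epochs=80, last_epoch=-1,
#                                       optim_cfg=optim_cfg, lr_cfg=lr_cfg)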
|
1666499
|
from django.shortcuts import render
def home(request):
return render(request, 'testapp/index.html', {})
|
1666558
|
import unittest
import os
from decimal import Decimal
from unittest import mock
from moto import mock_dynamodb2
from iwanttoreadmore.models.vote_history import VoteHistory
from tests.data.data_test_vote_history import (
create_vote_history_table,
create_test_vote_history_data,
)
from tests.helpers import remove_table
@mock_dynamodb2
class VoteHistoryTestCase(unittest.TestCase):
def setUp(self):
"""
Create a vote history table and populate it with example data
"""
os.environ["VOTES_HISTORY_TABLE"] = "iwanttoreadmore-votes-history-test"
self.vote_history_table = create_vote_history_table(
os.environ["VOTES_HISTORY_TABLE"]
)
create_test_vote_history_data(self.vote_history_table)
def tearDown(self):
remove_table(os.environ["VOTES_HISTORY_TABLE"])
def test_get_vote_history(self):
vote_history = VoteHistory()
# Positive cases
self.assertEqual(
[1111, 2222], vote_history.get_vote_history("user_1", "project_a/topic_aaa")
)
self.assertEqual(
[3333], vote_history.get_vote_history("user_1", "project_a/topic_bbb")
)
# Negative cases
self.assertEqual(
[], vote_history.get_vote_history("user_2", "project_a/topic_aaa")
)
self.assertEqual(
[], vote_history.get_vote_history("user_1", "project_a/topic_ccc")
)
@mock.patch("time.time", return_value=9999)
def test_add_vote_history(self, _):
vote_history = VoteHistory()
# Invalid vote
vote_history.add_vote_history("user_1", "project_a", "topic_aaa", "192.168.0.1")
self.assertEqual(
[Decimal(1111), Decimal(2222)],
vote_history.get_vote_history("user_1", "project_a/topic_aaa"),
)
# Valid vote
vote_history.add_vote_history("user_1", "project_a", "topic_aaa", "192.168.0.3")
self.assertEqual(
[Decimal(1111), Decimal(2222), Decimal(9999)],
vote_history.get_vote_history("user_1", "project_a/topic_aaa"),
)
# New vote
vote_history.add_vote_history("user_1", "project_a", "topic_ccc", "192.168.0.3")
self.assertEqual(
[Decimal(9999)],
vote_history.get_vote_history("user_1", "project_a/topic_ccc"),
)
def test_check_ip_voted(self):
vote_history = VoteHistory()
# Valid vote
self.assertFalse(
vote_history.check_ip_voted("user_1", "project_a/topic_aaa", "192.168.0.3")
)
# Invalid vote
self.assertTrue(
vote_history.check_ip_voted("user_1", "project_a/topic_aaa", "192.168.0.1")
)
# New vote
self.assertFalse(
vote_history.check_ip_voted("user_1", "project_a/topic_ccc", "192.168.0.1")
)
def test_check_ip_voted_project(self):
vote_history = VoteHistory()
# Valid vote
self.assertFalse(
vote_history.check_ip_voted_project("user_1", "project_a", "192.168.0.3")
)
# Invalid vote
self.assertTrue(
vote_history.check_ip_voted_project("user_1", "project_a", "192.168.0.1")
)
# New vote
self.assertFalse(
vote_history.check_ip_voted_project("user_1", "project_b", "192.168.0.1")
)
if __name__ == "__main__":
unittest.main()
|
1666598
|
from .iplot_location import iplot_location
from .iplot_timeseries import iplot_timeseries
from .iplot import iplot
from .iplot_scatter import iplot_scatter
from .iplot_line import iplot_line
from .iplot_data_intervals import iplot_data_intervals
from .iplot_scatter_mapbox import iplot_scatter_mapbox
from .iplot_bar_polar import iplot_bar_polar
from .iplot_histogram import iplot_histogram
from .iplot_candlestick import iplot_candlestick
from .iplot_box import iplot_box
from .iplot_correlation_headmap import iplot_correlation_headmap
|
1666605
|
from flask import Flask, make_response, request, g, jsonify
import json
import schedule
import medrxiv.get_medrxiv_data as med
def do_medrxiv_data():
print("run!")
with open("./key.json", 'r') as f:
keyword = json.loads(f.read())
print(keyword)
data = med.get_list(keyword)
with open("./result.json", 'w') as res:
json.dump(data, res)
# Scheduled crawler job
def tasklist():
    # Clear any previously scheduled jobs
    schedule.clear()
    # Schedule the crawl to run every hour
    schedule.every(1).hours.do(do_medrxiv_data)
schedule.run_all()
# Web server
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def get_paper():
with open("./result.json", 'r') as f:
temp = json.loads(f.read())
return jsonify(data=temp)
if __name__ == '__main__':
do_medrxiv_data()
app.run(debug=True)
|
1666623
|
import os
import random
import cv2
import numpy as np
import torch
import torchvision.datasets as datasets
import tqdm
from dg_util.python_utils import drawing
from dg_util.python_utils import pytorch_util as pt_util
from dg_util.python_utils.persistent_dataloader import PersistentDataLoader
from sklearn.decomposition import PCA
from torch.utils.data.dataloader import DataLoader
import arg_parser
from datasets.r2v2_dataset import R2V2Dataset
from models.vince_model import VinceModel
from utils.transforms import RepeatedImagenetTransform
from utils.transforms import StandardVideoTransform
from utils.util_functions import to_uint8
"""
Example run command
python visualizations/view_nearest_neighbors.py \
--title sample_mosaic \
--description none \
--checkpoint-dir logs/moco/MocoImagenetModel/checkpoints_r18-b-256-q-65536-fsize-64-vid-ibc-4-no-self/ \
--data-path /home/xkcd/datasets/r2v2_large_with_ids/ \
--num-workers 80 --backbone ResNet18 --pytorch-gpu-ids 0 --feature-extractor-gpu-ids 0 \
-b 512 \
"""
NUM_QUERIES = 100
NUM_NEIGHBORS = 10
NUM_TO_COMPARE = 50000
data_subset = "val"
args = arg_parser.parse_args()
def get_data_item(data):
if isinstance(data, dict):
data = data["data"]
data = data.squeeze(1)
elif isinstance(data, list) or isinstance(data, tuple):
data, label = data
data = data.squeeze(1)
else:
raise NotImplementedError
return data
def dataset_nn(model, data_loader):
with torch.no_grad():
num_to_compare = min(int(NUM_TO_COMPARE / args.batch_size + 1) * args.batch_size, len(data_loader.dataset))
# Get features
image_array = np.zeros((num_to_compare, args.input_height, args.input_width, 3), dtype=np.uint8)
features_array = None
data_ind = 0
pbar = tqdm.tqdm(total=num_to_compare)
for data in data_loader:
data = get_data_item(data)
data_size = data.shape[0]
data = data.to(model.device)
output = model.get_embeddings({"data": data, "batch_type": ("images", len(data))})
features = output["extracted_features"]
if features_array is None:
feature_size = features.shape[1]
features_array = torch.zeros((num_to_compare, feature_size), dtype=torch.float32, device=model.device)
features_array[data_ind: data_ind + data_size] = features
data = to_uint8(data)
image_array[data_ind: min(num_to_compare, data_ind + data_size)] = data
data_ind += data_size
pbar.update(data_size)
if data_ind >= num_to_compare:
break
pbar.close()
if features_array.shape[1] != 64:
features_array_new = pt_util.to_numpy(features_array)
pca = PCA(n_components=64)
features_array_new = pca.fit_transform(features_array_new)
features_array_new = pt_util.from_numpy(features_array_new).to(features_array.device)
features_array = features_array_new
features_array = torch.nn.functional.normalize(features_array, dim=-1)
return features_array, image_array
def draw_nns(source_features, source_images, source_name, target_features=None, target_images=None, target_name=None):
skip_first = False
if target_features is None:
target_features = source_features
target_images = source_images
target_name = source_name
skip_first = True
num_to_compare = target_features.shape[0]
torch.manual_seed(0)
random.seed(0)
np.random.seed(0)
rand_selection = np.sort(np.random.choice(source_features.shape[0], NUM_QUERIES, replace=False))
query_features = source_features[rand_selection]
dists = torch.mm(query_features, target_features.T)
val, neighbors = torch.topk(dists, k=(NUM_NEIGHBORS + int(skip_first)), dim=1, sorted=True, largest=True)
if skip_first:
neighbors = neighbors[:, 1:]
neighbors = target_images[pt_util.to_numpy(neighbors)]
os.makedirs(
os.path.join(args.checkpoint_dir, "neighbors_from_%s_to_%s" % (source_name, target_name)), exist_ok=True
)
# Get images
for ii in tqdm.tqdm(range(neighbors.shape[0])):
images = []
image = source_images[rand_selection[ii]].copy()
image = np.pad(image, ((10, 10), (10, 10), (0, 0)), "constant")
images.append(image)
for jj in range(neighbors.shape[1]):
image = neighbors[ii, jj].copy()
images.append(image)
subplot = drawing.subplot(images, 1, neighbors.shape[1] + 1, args.input_width, args.input_height, border=5)
cv2.imwrite(
os.path.join(
args.checkpoint_dir,
"neighbors_from_%s_to_%s" % (source_name, target_name),
"bsize_%06d_%03d.jpg" % (num_to_compare, ii),
),
subplot[:, :, ::-1],
)
def main():
with torch.no_grad():
torch_devices = args.pytorch_gpu_ids
device = "cuda:" + str(torch_devices[0])
model = VinceModel(args)
model.restore()
model.eval()
model.to(device)
yt_dataset = R2V2Dataset(
args, "val", transform=StandardVideoTransform(args.input_size, "val"), num_images_to_return=1
)
torch.manual_seed(0)
random.seed(0)
np.random.seed(0)
data_loader = PersistentDataLoader(
yt_dataset,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.num_workers,
pin_memory=True,
collate_fn=R2V2Dataset.collate_fn,
worker_init_fn=R2V2Dataset.worker_init_fn,
)
yt_features, yt_images = dataset_nn(model, data_loader)
del data_loader
draw_nns(yt_features, yt_images, "youtube")
torch.manual_seed(0)
random.seed(0)
np.random.seed(0)
valdir = os.path.join(args.imagenet_data_path, data_subset)
transform = RepeatedImagenetTransform(args.input_height, data_subset="val", repeats=1)
imagenet_dataset = datasets.ImageFolder(valdir, transform)
data_loader = DataLoader(
imagenet_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, pin_memory=True
)
imagenet_features, imagenet_images = dataset_nn(model, data_loader)
del data_loader
draw_nns(imagenet_features, imagenet_images, "imagenet")
draw_nns(imagenet_features, imagenet_images, "imagenet", yt_features, yt_images, "youtube")
draw_nns(yt_features, yt_images, "youtube", imagenet_features, imagenet_images, "imagenet")
if __name__ == "__main__":
main()
|
1666643
|
from collections import Counter, OrderedDict
from itertools import chain
from functools import partial
class OrderedCounter(Counter, OrderedDict):
pass
class OutOfVocabularyError(LookupError):
pass
def _add_special_tokens(tokens, bos=None, eos=None):
tokens = list(tokens)
if bos is not None:
tokens.insert(0, bos)
if eos is not None:
tokens.append(eos)
return tokens
class Vocab(object):
""" Represents a Vocabulary to map objects to ids
Example:
>>> v1 = Vocab({'the': 5, 'of': 1, 'and': 3})
>>> v1
{'<pad>': 0, '<unk>': 1, 'the': 2, 'and': 3, 'of': 4}
>>> v1['and']
3
>>> v1['xyz']
1
>>> v1.freq['and']
3
>>> v2 = Vocab(['the', 'quick', 'brown', 'fox', 'jumps', 'over', 'the', 'other', 'fox'])
>>> v2
{'<pad>': 0, '<unk>': 1, 'the': 2, 'fox': 3, 'quick': 4, 'brown': 5, 'jumps': 6, 'over': 7, 'other': 8}
"""
ORDER_BY_OPTIONS = ('frequency', 'insertion', 'alpha')
INVALID_ORDER_BY_ARGUMENT_MESSAGE = ("Invalid argument for order_by. "
"Use {options}, or a function. "
"Got: {value}")
@classmethod
def build_from_texts(cls, texts, bos=None, eos=None, pad='<pad>', unk=None, max_size=None, min_count=1):
""" Builds a Vocabulary from a list of sentences
Arguments:
texts (list[list[str]]): Corpus used to build the vocabulary
            bos (str): Optional special token for the beginning of each sentence.
            eos (str): Optional special token for the end of each sentence.
            pad (str): Special token for string padding.
            unk (str): Special token for Out of Vocabulary terms.
            max_size (int): Maximum size of the Vocabulary in number of
                elements. The discarded elements are selected from the list of less frequent terms.
                If None is passed the size of the vocabulary will have no growth limit.
            min_count (int): Minimum frequency a term needs to be added to the vocabulary.
"""
examples = chain.from_iterable(map(partial(_add_special_tokens, eos=eos, bos=bos), texts))
vocab = cls(examples, eos=eos, bos=bos, pad=pad, unk=unk, max_size=max_size, min_count=min_count)
return vocab
def __init__(self, vocab={}, bos=None, eos=None, pad='<pad>', unk=None, max_size=None, order_by='frequency', min_count=1):
""" Constructor
Arguments:
vocab (iterable or mapping): If a mapping is passed it will be used
                as a dictionary of term counts. If an iterable is passed the
elements are counted from the iterable. Default: {}
eos (str): Optional special token for the end of each sentence.
            pad (str): Special token for string padding.
            unk (str): Special token for Out of Vocabulary terms.
            max_size (int): Maximum size of the Vocabulary in number of
                elements. The discarded elements are selected from the list of less frequent terms.
                If None is passed the size of the vocabulary will have no growth limit.
            order_by (str or callable): Ordering used to assign indexes to new terms;
                one of 'frequency', 'insertion', 'alpha', or a key function. Default: 'frequency'
            min_count (int): Minimum frequency a term needs to be added to the vocabulary.
"""
self.pad = pad
self.eos = eos
self.bos = bos
self.unk = unk
self.max_size = max_size
self.min_count = min_count
self.freq = Counter()
self.idx2word = list()
self.word2idx = {}
self.start_index = 0
if order_by is None:
raise ValueError(self.INVALID_ORDER_BY_ARGUMENT_MESSAGE.format(
options=self.ORDER_BY_OPTIONS, value=order_by))
self.default_order = order_by
        if self.pad is not None:  # Padding always goes to index 0
self.add([self.pad])
if unk is not None:
self.add([self.unk])
if bos is not None:
self.add([self.bos])
if eos is not None:
self.add([self.eos])
self.add(vocab, order_by=order_by, min_count=self.min_count)
def add(self, vocab, order_by=None, min_count=None):
""" Merge vocabularies.
Note:
*) The order for numbering new words uses the `order_by` parameter.
By default it orders from the most frequent to the least frequent.
            *) The number of newly added terms depends on the vocabulary max size
                set at vocabulary construction time.
*) This method preserves the index numbers for the old words.
Arguments:
vocab (iterable or mapping): If a mapping is passed it will be used
                as a dictionary of term counts. If an iterable is passed the
elements are counted from the iterable. Default: {}
            order_by (None or str or function of tuples): If None is passed,
                the order chosen at construction is used. If 'insertion'
                is passed the vocab iteration order is used, if 'frequency'
                is passed the frequency order is used, if 'alpha'
                is passed the order of the keys is used. If a function
                is passed the elements are sorted by applying it
                to each tuple (word, frequency). Default: None
"""
if (isinstance(order_by, str) and order_by not in self.ORDER_BY_OPTIONS) or \
(order_by is not None and
not isinstance(order_by, str) and
not callable(order_by)):
raise ValueError(self.INVALID_ORDER_BY_ARGUMENT_MESSAGE.format(
options=self.ORDER_BY_OPTIONS,
value=order_by))
vocab = OrderedCounter(vocab)
        if order_by is None:  # Fall back to the order chosen at construction
order_by = self.default_order
if isinstance(order_by, str):
if order_by == 'frequency': # Use frequency ordering
it = vocab.most_common()
elif order_by == 'insertion': # Use insertion Ordering
it = vocab.items()
else: # Alpha. Use alphabetical order
it = sorted(vocab.items(), key=lambda x: x[0])
else: # Use function order
it = sorted(vocab.items(), key=order_by)
for word, freq in it:
if word in self.word2idx: # The word is already in the vocabulary
self.freq[word] += freq
elif (self.max_size is None or len(self) < self.max_size) and (min_count is None or freq >= min_count):
self.freq[word] = freq
self.word2idx[word] = len(self)
self.idx2word.append(word)
def __len__(self):
""" Returns the number of terms in the vocabulary
"""
return len(self.word2idx)
def __iter__(self):
""" Returns an iterator to word in the vocabulary
"""
return iter(self.idx2word)
def __contains__(self, word):
""" vocab.__contains__(word) <==> word in vocab
"""
return word in self.word2idx
def words(self):
return iter(self)
def indexes(self):
yield from range(self.start_index, len(self)+self.start_index)
def items(self):
""" Returns an iterator to
"""
yield from ((word, self.start_index + i) for i, word in enumerate(self.idx2word))
def __getitem__(self, word):
""" Returns the index of the given term
"""
index = self.word2idx.get(word, self.word2idx[self.unk] if self.unk else None)
if index is None:
raise OutOfVocabularyError("{} is not in the Vocabulary".format(word))
else:
return index + self.start_index
def __repr__(self):
opt_params = {'bos': self.bos,
'eos': self.eos,
'pad': self.pad,
'unk': self.unk,
'max_size': self.max_size,
'order_by': self.default_order,
'min_count': self.min_count}
format_params = ',\n\t'.join(
'{}={}'.format(name, repr(param))
for name, param in opt_params.items()
if param is not None
)
return '{}({})'.format(self.__class__.__name__,
format_params)
def __call__(self, tokens):
""" Converts list of tokens to list of indexes
"""
ids_seq = []
if self.bos is not None:
ids_seq.append(self[self.bos])
if self.unk is None:
ids_seq.extend([self[token] for token in tokens if token in self.word2idx])
else:
ids_seq.extend([self[token] for token in tokens])
if self.eos is not None:
ids_seq.append(self[self.eos])
return ids_seq
def __getstate__(self):
return {'words': self.idx2word,
'freqs': [self.freq[word] for word in self.idx2word],
'pad': self.pad,
'eos': self.eos,
'bos': self.bos,
'unk': self.unk,
'max_size': self.max_size,
'start_index': self.start_index,
'min_count': self.min_count,
'default_order': self.default_order
}
def __setstate__(self, d):
self.start_index = d['start_index']
self.idx2word = d['words']
self.word2idx = {word: i for i, word in enumerate(d['words'])}
self.freq = Counter(d['freqs'])
self.pad = d['pad']
self.eos = d['eos']
self.bos = d['bos']
self.unk = d['unk']
self.max_size = d['max_size']
self.min_count = d['min_count']
self.default_order = d['default_order']
def fit(self, texts):
""" Updates the Vocabulary from a list of tokenized sentences
Arguments:
texts (list[list[str]]): Corpus (already tokenized) used to build the vocabulary
"""
examples = chain.from_iterable(map(partial(_add_special_tokens, eos=self.eos, bos=self.bos), texts))
self.add(examples)
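# Usage sketch (illustrative, not part of the original module): build a vocabulary
# from tokenized sentences and map a new sentence to indexes. The example data and
# special tokens below are assumptions for demonstration only.
if __name__ == '__main__':
    corpus = [['the', 'quick', 'brown', 'fox'], ['the', 'lazy', 'dog']]
    v = Vocab.build_from_texts(corpus, bos='<s>', eos='</s>', unk='<unk>')
    # Unknown tokens such as 'cat' fall back to the <unk> index.
    print(v(['the', 'quick', 'cat']))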
|
1666651
|
class Template(object):
"""
    A rudimentary replacement for Jinja2, currently in place
    due to a bug with the typing module.
"""
def __init__(self, template_string):
self._template_string = template_string
def render(self, **parameters):
chunks = []
iterator = iter(self._template_string)
while True:
try:
c = next(iterator)
if c == "{":
next_c = next(iterator)
if next_c == "{":
chunks.append(_capture_variable(iterator, parameters))
else:
chunks.append(c)
chunks.append(next_c)
continue
chunks.append(c)
except StopIteration:
break
return "".join(chunks)
def _capture_variable(iterator, parameters):
"""
    Return the replacement string.
    This assumes the preceding {{ has already been
popped off.
"""
key = ""
next_c = next(iterator)
while next_c != "}":
key += next_c
next_c = next(iterator)
# remove the final "}"
next(iterator)
return parameters[key]
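# Usage sketch (illustrative, not part of the original module):
if __name__ == '__main__':
    template = Template("hello {{name}}!")
    # Prints "hello world!"; text outside {{...}} is passed through unchanged.
    print(template.render(name='world'))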
|
1666667
|
import numpy as np
from PySide2.QtCore import Qt
from hexrd.ui.hexrd_config import HexrdConfig
from utils import select_files_when_asked
def test_load_data(qtbot, main_window, default_config_path, default_data_path):
# Prove this gets changed
assert 'GE' not in HexrdConfig().detectors
# Load config file
with select_files_when_asked(default_config_path):
main_window.ui.action_open_config_file.triggered.emit()
# Should have loaded the instrument config
detectors = HexrdConfig().detectors
assert len(detectors) == 1
assert 'GE' in detectors
def is_dummy_data():
for ims in HexrdConfig().imageseries_dict.values():
if len(ims) != 1 or not np.all(ims[0] == 1):
return False
return True
# There should only be dummy data currently
assert is_dummy_data()
load_panel = main_window.simple_image_series_dialog
# Press the "Select Image Files" button
with select_files_when_asked(default_data_path):
qtbot.mouseClick(load_panel.ui.image_files, Qt.LeftButton)
qtbot.mouseClick(load_panel.ui.read, Qt.LeftButton)
assert not is_dummy_data()
ims = HexrdConfig().imageseries_dict['GE']
assert len(ims) == 480
|
1666693
|
import numpy as np
import random
from numba import jit
def indicator(S, n):
x = np.zeros(n)
x[list(S)] = 1
return x
def sample_live_icm(g, num_graphs):
'''
Returns num_graphs live edge graphs sampled from the ICM on g. Assumes that
each edge has a propagation probability accessible via g[u][v]['p'].
'''
import networkx as nx
live_edge_graphs = []
for _ in range(num_graphs):
h = nx.Graph()
h.add_nodes_from(g.nodes())
for u,v in g.edges():
if random.random() < g[u][v]['p']:
h.add_edge(u,v)
live_edge_graphs.append(h)
return live_edge_graphs
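# Usage sketch (illustrative): sample live-edge graphs from a tiny ICM instance.
# Edge attribute 'p' is the propagation probability; the graph below is an
# assumption for demonstration only.
#
#   import networkx as nx
#   g = nx.Graph()
#   g.add_edge(0, 1, p=0.5)
#   g.add_edge(1, 2, p=0.2)
#   live_graphs = sample_live_icm(g, num_graphs=10)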
def f_all_influmax_multlinear(x, Gs, Ps, ws):
'''
Objective function for the multilinear extension of a live-edge
influence maximization problem.
x: continuous decision variables
Gs/Ps/ws: representation of the influence maximization problem as an
expectation over a sampled set of probabilistic coverage functions (see below)
'''
n = len(Gs)
sample_weights = 1./n * np.ones(n)
return objective_live_edge(x, Gs, Ps, ws, sample_weights)
def make_multilinear_objective_samples(live_graphs, target_nodes, selectable_nodes, p_attend):
'''
Given a set of sampled live edge graphs, returns an function evaluating the
multilinear extension for the corresponding influence maximization problem.
live_graphs: list of networkx graphs containing sampled live edges
target_nodes: nodes that should be counted towards the objective
selectable_nodes: nodes that are eligible to be chosen as seeds
p_attend: probability that each node will be influenced if it is chosen as
a seed.
'''
Gs, Ps, ws = live_edge_to_adjlist(live_graphs, target_nodes, p_attend)
def f_all(x):
x_expand = np.zeros(len(live_graphs[0]))
x_expand[selectable_nodes] = x
return f_all_influmax_multlinear(x_expand, Gs, Ps, ws)
return f_all
def make_multilinear_gradient_samples(live_graphs, target_nodes, selectable_nodes, p_attend):
'''
Given a set of sampled live edge graphs, returns an stochastic gradient
oracle for the multilinear extension of the corresponding influence
maximization problem.
live_graphs: list of networkx graphs containing sampled live edges
target_nodes: nodes that should be counted towards the objective
selectable_nodes: nodes that are eligible to be chosen as seeds
p_attend: probability that each node will be influenced if it is chosen as
a seed.
'''
import random
Gs, Ps, ws = live_edge_to_adjlist(live_graphs, target_nodes, p_attend)
def gradient(x, batch_size):
x_expand = np.zeros(len(live_graphs[0]))
x_expand[selectable_nodes] = x
samples = random.sample(range(len(Gs)), batch_size)
grad = gradient_live_edge(x_expand, [Gs[i] for i in samples], [Ps[i] for i in samples], [ws[i] for i in samples], 1./batch_size * np.ones(len(Gs)))
return grad[selectable_nodes]
return gradient
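# Usage sketch (illustrative): build the multilinear objective and a stochastic
# gradient oracle from sampled live-edge graphs. Node ids are assumed to be
# 0..n-1 and every node is both a target and selectable; p_attend below is an
# assumption for demonstration only.
#
#   live_graphs = sample_live_icm(g, num_graphs=100)
#   nodes = list(range(len(g)))
#   p_attend = np.ones(len(g))
#   f = make_multilinear_objective_samples(live_graphs, nodes, nodes, p_attend)
#   grad_oracle = make_multilinear_gradient_samples(live_graphs, nodes, nodes, p_attend)
#   x = 0.1 * np.ones(len(nodes))
#   value, grad = f(x), grad_oracle(x, batch_size=10)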
def live_edge_to_adjlist(live_edge_graphs, target_nodes, p_attend):
'''
Takes a list of live edge graphs and converts them to the format used by the functions below.
For each live edge graph g, the corresponding entry of Gs is the adjacency list of a bipartite graph,
with each row representing a connected component of g and the entries of that row giving the nodes in
that connected component. Each row is terminated with -1s.
Each entry of Ps is an array of 1s of the same size as the corresponding entry of Gs.
Each entry of ws is an array, with each entry giving the size of the corresponding connected
component.
'''
import networkx as nx
Gs = []
Ps = []
ws = []
target_nodes = set(target_nodes)
for g in live_edge_graphs:
cc = list(nx.connected_components(g))
n = len(cc)
max_degree = max([len(c) for c in cc])
G_array = np.zeros((n, max_degree), dtype=np.int)
P = np.zeros((n, max_degree))
G_array[:] = -1
for i in range(n):
for j, v in enumerate(cc[i]):
G_array[i, j] = v
P[i, j] = p_attend[v]
Gs.append(G_array)
Ps.append(P)
w = np.zeros((n))
for i in range(n):
w[i] = len(target_nodes.intersection(cc[i]))
ws.append(w)
return Gs, Ps, ws
@jit
def gradient_live_edge(x, Gs, Ps, ws, weights):
'''
Gradient wrt x of the live edge influence maximization model.
x: current probability of seeding each node
Gs/Ps/ws represent the input graphs, as defined in live_edge_to_adjlist
'''
grad = np.zeros((len(x)))
for i in range(len(Gs)):
grad += weights[i]*gradient_coverage(x, Gs[i], Ps[i], ws[i])
grad /= len(x)
return grad
@jit
def objective_live_edge(x, Gs, Ps, ws, weights):
'''
Objective in the live edge influence maximization model, where nodes are
seeded with probability in the corresponding entry of x.
Gs/Ps/ws represent the input graphs, as defined in live_edge_to_adjlist
weights: probability of each graph occurring
'''
total = 0
for i in range(len(Gs)):
total += weights[i] * objective_coverage(x, Gs[i], Ps[i], ws[i])
return total
'''
The following functions compute gradients/objective values for the multilinear relaxation
of a (probabilistic) coverage function. The function is represented by the arrays G and P.
Each row of G is a set to be covered, with the entries of the row giving the items that will
cover it (terminated with -1s). The corresponding entry of P gives the probability that
the item will cover that set (independently of all others).
Corresponding to each row of G is an entry in the vector w, which gives the contribution
to the objective from covering that set.
'''
@jit
def gradient_coverage(x, G, P, w):
'''
Calculates gradient of the objective at fractional point x.
x: fractional point as a vector. Should be reshapable into a matrix giving
probability of choosing copy i of node u.
G: graph (adjacency list)
P: probability on each edge.
w: weights for nodes in R
'''
grad = np.zeros((x.shape[0]))
#process gradient entries one node at a time
for v in range(G.shape[0]):
p_all_fail = 1
for j in range(G.shape[1]):
if G[v, j] == -1:
break
p_all_fail *= 1 - x[G[v, j]]*P[v, j]
for j in range(G.shape[1]):
u = G[v, j]
if u == -1:
break
#0/0 should be 0 here
if p_all_fail == 0:
p_others_fail = 0
else:
p_others_fail = p_all_fail/(1 - x[u]*P[v, j])
grad[u] += w[v]*P[v, j]*p_others_fail
return grad
@jit
def marginal_coverage(x, G, P, w):
'''
Returns marginal probability that each RHS vertex is reached.
'''
probs = np.ones((G.shape[0]))
for v in range(G.shape[0]):
for j in range(G.shape[1]):
if G[v, j] == -1:
break
u = G[v, j]
probs[v] *= 1 - x[u]*P[v, j]
probs = 1 - probs
return probs
@jit
def objective_coverage(x, G, P, w):
'''
Weighted objective value: the expected weight of the RHS nodes that are reached.
'''
return np.dot(w, marginal_coverage(x, G, P, w))
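# Worked example (illustrative, not part of the original module): a single set
# covered by two items, each selected with probability 0.5 and covering with
# probability 1, so its marginal coverage probability is 1 - 0.5 * 0.5 = 0.75
# and the expected weighted coverage is 2.0 * 0.75 = 1.5.
if __name__ == '__main__':
    x = np.array([0.5, 0.5])
    G = np.array([[0, 1]])
    P = np.array([[1.0, 1.0]])
    w = np.array([2.0])
    print(objective_coverage(x, G, P, w))  # ~1.5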
|
1666698
|
import glob
import logging
import os
import re
import shutil
import time
import torch
import torch.distributed as dist
from apex import amp
from apex.parallel import DistributedDataParallel, convert_syncbn_model
from tqdm import tqdm
from asr import lr_schedulers, metrics, optimizers
from asr.utils.checks import check_loss
logger = logging.getLogger(__name__)
class Trainer:
def __init__(self,
serialization_dir,
params,
model,
loss,
alphabet,
local_rank=0,
world_size=1,
sync_bn=False,
opt_level='O0',
keep_batchnorm_fp32=None,
loss_scale=1.0):
self.alphabet = alphabet
self.clip_grad_norm = params.get('clip_grad_norm', None)
self.clip_grad_value = params.get('clip_grad_value', None)
self.warmup_epochs = params.get('warmup_epochs', 0)
self.label_smoothing = params.get('label_smoothing', 0.0)
self.local_rank = local_rank
self.loss = loss.cuda()
self.model = model
self.best_monitor = float('inf')
self.monitor = params.get('monitor', 'loss')
self.serialization_dir = serialization_dir
self.distributed = world_size > 1
self.world_size = world_size
self.epoch = 0
self.num_epochs = 0
self.start_epoch = 0
self.start_iteration = 0
self.start_time = 0
self.iterations_per_epoch = None
self.time_since_last = time.time()
self.save_every = params.get('save_every', 60 * 10) # 10 minutes
if sync_bn:
logger.info('Using Apex `sync_bn`')
self.model = convert_syncbn_model(self.model)
self.model = model.cuda()
# Setup optimizer
parameters = [(n, p) for n, p in self.model.named_parameters()
if p.requires_grad]
self.optimizer = optimizers.from_params(params.pop("optimizer"),
parameters,
world_size=self.world_size)
self.model, self.optimizer = amp.initialize(
self.model,
self.optimizer,
opt_level=opt_level,
keep_batchnorm_fp32=keep_batchnorm_fp32,
loss_scale=loss_scale)
# Setup lr scheduler
scheduler_params = params.pop('lr_scheduler', None)
self.lr_scheduler = None
if scheduler_params:
self.lr_scheduler = lr_schedulers.from_params(
scheduler_params, self.optimizer)
self.base_lrs = list(
map(lambda group: group['initial_lr'],
self.optimizer.param_groups))
# Setup metrics
metrics_params = params.pop('metrics', [])
if 'loss' not in metrics_params:
metrics_params = ['loss'] + metrics_params
# Initializing history
self.metrics = {}
for phase in ['train', 'val']:
self.metrics[phase] = metrics.from_params(metrics_params,
alphabet=alphabet)
if self.distributed:
self.model = DistributedDataParallel(self.model,
delay_allreduce=True)
def score(self, metrics):
sign = -1. if self.monitor.startswith('-') else 1.
name = self.monitor.replace('-', '')
return sign * metrics[name].avg
def restore(self):
self.load_checkpoint()
self.lr_scheduler.step(max(self.start_epoch - self.warmup_epochs, 0))
if self.start_epoch or self.start_iteration:
logger.info(
f'Restoring from epoch {self.start_epoch + 1} and it. {self.start_iteration}.'
)
def preprocess_batch(
self,
batch,
):
sample, target, sample_lengths, target_lengths = batch
return sample.cuda(), target.cuda(), sample_lengths.cuda(
), target_lengths.cuda()
def run(self, train_loader, val_loader=None, num_epochs=1):
# Trying to restore from a last checkpoint
self.restore()
if self.iterations_per_epoch is None:
self.iterations_per_epoch = len(train_loader)
else:
assert len(train_loader) == self.iterations_per_epoch, (
'len(train_loader) != iterations_per_epoch '
'found in checkpoint.')
logger.info(self.model)
self.start_time = time.time()
self.time_since_last = time.time(
) # reset counter for model checkpoint
self.num_epochs = num_epochs
for self.epoch in range(self.start_epoch, self.num_epochs):
start = time.time()
train_data_time = self._run_epoch(train_loader,
metrics=self.metrics['train'],
is_train=True)
train_lap = time.time()
if val_loader:
val_data_time = self._run_epoch(val_loader,
metrics=self.metrics['val'],
is_train=False)
val_lap = time.time()
tmp = (
f'Epoch [{self.epoch + 1}/{num_epochs}] - '
'{phase:>5}: {metrics}. '
'Data Time: {data_time:.2f}s. Time: {time:.2f}s ({rate:.2f} samples/s)'
)
train_time = train_lap - start
train_string = tmp.format(
phase='Train',
metrics=self.metrics['train'].to_str(val=False),
data_time=train_data_time,
time=train_time,
rate=self.metrics['train'].count / train_time)
logger.info(train_string)
if val_loader:
val_time = val_lap - train_lap
val_string = tmp.format(
phase='Val',
metrics=self.metrics['val'].to_str(val=False),
data_time=val_data_time,
time=val_time,
rate=self.metrics['val'].count / val_time)
logger.info(val_string)
self.save_checkpoint()
if self.lr_scheduler:
if not (self.warmup_epochs
and self.epoch + 1 < self.warmup_epochs):
self.lr_scheduler.step(
max(self.epoch + 1 - self.warmup_epochs, 0))
logger.info(
f"Learning rate set to {self.optimizer.param_groups[0]['lr']:.6f}"
)
def _run_epoch(self, loader, metrics, is_train=False):
self.model.train(is_train)
if is_train:
if loader.sampler and hasattr(loader.sampler, 'set_epoch'):
loader.sampler.set_epoch(self.epoch)
if loader.batch_sampler and hasattr(loader.batch_sampler,
'set_epoch'):
loader.batch_sampler.set_epoch(self.epoch)
if self.start_iteration == 0:
metrics.reset()
last_seen = 0
with tqdm(desc=f'Epoch [{self.epoch + 1}/{self.num_epochs}] - '
f'{"Train" if is_train else "Val"}',
total=len(loader.dataset),
leave=False,
miniters=int(0.05 * len(loader.dataset)),
maxinterval=3600,
unit='samples',
disable=(self.local_rank !=
0)) as pbar, torch.set_grad_enabled(is_train):
data_time = 0
lap = time.time()
for current_iteration, batch in enumerate(loader):
                # If this is the first epoch and the start iteration differs from the
                # current iteration, skip the batches that were already processed
if is_train and self.epoch == self.start_epoch and self.start_iteration:
if current_iteration < self.start_iteration:
continue
# Last one, update pbar
pbar.set_postfix(metrics.as_dict(), refresh=False)
pbar.update(metrics.count - last_seen)
last_seen = metrics.count
if self.local_rank == 0:
pbar.unpause()
lap = time.time()
self.start_iteration = 0
continue
batch = self.preprocess_batch(batch)
sample, target, sample_lengths, target_lengths = batch
data_time += time.time() - lap
output, output_lengths = self.model(sample, sample_lengths)
# Ensure float32 loss
output = output.float()
loss = self.loss(output, target, output_lengths,
target_lengths)
if self.label_smoothing:
alpha = self.label_smoothing
loss = (1 -
alpha) * loss + alpha * torch.nn.functional.kl_div(
torch.log_softmax(output, dim=-1),
torch.empty_like(output).fill_(
1 / output.shape[-1]),
reduction='batchmean')
loss_value = loss.detach()
if self.distributed:
dist.all_reduce(loss_value)
loss_value /= self.world_size
is_valid_loss = check_loss(loss_value)
if not is_valid_loss:
loss_value.zero_()
metrics.update(
(loss_value.repeat(sample.shape[0]), output.detach(),
target.detach(), output_lengths.detach(),
target_lengths.detach()))
# Updating progress bar
pbar.set_postfix(metrics.as_dict(), refresh=False)
pbar.update((metrics.count - last_seen))
last_seen = metrics.count
if not is_train:
lap = time.time()
continue
if not is_valid_loss:
lap = time.time()
if self.local_rank == 0:
logger.warning(
'Skipping grad update due to invalid loss.')
continue
# LR warmup: for distributed training
if self.warmup_epochs and self.epoch < self.warmup_epochs:
for param_group, lr in zip(self.optimizer.param_groups,
self.base_lrs):
param_group['lr'] = (lr - lr / self.world_size) * float(
1 + current_iteration +
self.epoch * self.iterations_per_epoch) / (
self.warmup_epochs * self.iterations_per_epoch
) + lr / self.world_size
else:
param_group = self.optimizer.param_groups[-1]
pbar.set_postfix_str(
(pbar.postfix + f", lr={param_group['lr']:.6f}"),
refresh=True)
# compute gradient
self.optimizer.zero_grad()
with amp.scale_loss(loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
# Clipping the norm, avoiding gradient explosion
if self.clip_grad_norm:
torch.nn.utils.clip_grad_norm_(
amp.master_params(self.optimizer), self.clip_grad_norm)
if self.clip_grad_value:
torch.nn.utils.clip_grad_value_(
amp.master_params(self.optimizer),
self.clip_grad_value)
# optimizer step
self.optimizer.step()
self.save_checkpoint(current_iteration, is_train)
lap = time.time()
return data_time
def save_checkpoint(self, iteration=0, is_train=False):
if self.local_rank != 0:
return
epoch = self.epoch
# Actually save_checkpoint is called after the iteration ends, so we are really
# in the next iteration
current_iteration = iteration + 1 if is_train else self.iterations_per_epoch
if current_iteration == self.iterations_per_epoch:
epoch += 1
current_iteration = 0
total_iterations = epoch * self.iterations_per_epoch + current_iteration
if is_train and time.time() - self.time_since_last < self.save_every:
return
self.time_since_last = time.time()
models_dir = os.path.join(self.serialization_dir, 'models')
os.makedirs(models_dir, exist_ok=True)
ckpt_path = os.path.join(models_dir, f'model-{total_iterations}.pth')
        best_ckpt_path = os.path.join(models_dir, 'best-model.pth')
model = self.model
klasses = (torch.nn.DataParallel,
torch.nn.parallel.DistributedDataParallel,
DistributedDataParallel)
if isinstance(self.model, klasses):
model = self.model.module
monitor = self.score(self.metrics['val'])
chkpt_dict = {
'model':
model.state_dict(),
'epoch':
epoch,
'epoch_iterations':
current_iteration,
'iterations_per_epoch':
self.iterations_per_epoch,
'best_monitor':
monitor if not is_train and monitor < self.best_monitor else
self.best_monitor,
'metrics': {k: m.state_dict()
for k, m in self.metrics.items()},
'optimizer':
self.optimizer.state_dict(),
}
torch.save(chkpt_dict, ckpt_path)
if not is_train and monitor < self.best_monitor:
logger.info(f'Best {self.monitor} found ({monitor:.4f} < '
f'{self.best_monitor}). Saving...')
self.best_monitor = monitor
chkpt_dict['best_monitor'] = monitor
shutil.copyfile(ckpt_path, best_ckpt_path)
def load_checkpoint(self):
if not os.path.exists(os.path.join(self.serialization_dir, 'models')):
return
have_checkpoint = (self.serialization_dir is not None and any(
"model-" in x for x in os.listdir(
os.path.join(self.serialization_dir, 'models'))))
if not have_checkpoint:
return
serialization_files = glob.glob(
os.path.join(self.serialization_dir, 'models', '*'))
model_checkpoints = sorted(
[f for f in serialization_files if 'model-' in f],
key=lambda x: int(re.findall(r'-([0-9]+)\.pth', x)[0]))
total_iterations = int(
re.search(r"model-([0-9]+)\.pth", model_checkpoints[-1]).group(1))
ckpt_path = os.path.join(self.serialization_dir, 'models',
f'model-{total_iterations}.pth')
logger.info(f'Last model checkpoint found in {ckpt_path}. Loading...')
try:
ckpt_dict = torch.load(ckpt_path,
map_location=lambda storage, loc: storage)
except RuntimeError:
logger.error(
f'Problem reading file {ckpt_path}. Trying older checkpoints...'
)
os.remove(ckpt_path)
self.load_checkpoint()
return
model = self.model
klasses = (torch.nn.DataParallel,
torch.nn.parallel.DistributedDataParallel,
DistributedDataParallel)
if isinstance(self.model, klasses):
model = model.module
model.load_state_dict(ckpt_dict['model'])
self.optimizer.load_state_dict(ckpt_dict['optimizer'])
self.best_monitor = ckpt_dict['best_monitor']
for k, state_dict in ckpt_dict['metrics'].items():
self.metrics[k].load_state_dict(state_dict)
self.start_epoch = ckpt_dict['epoch']
self.start_iteration = ckpt_dict['epoch_iterations']
self.iterations_per_epoch = ckpt_dict['iterations_per_epoch']
|
1666701
|
from setuptools import find_packages, setup
with open("readme.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setup(
name="diemlib", # Replace with your own username
version="1.1.4",
author="<NAME>",
author_email="<EMAIL>",
description="Python Utilities for DIEM",
license="MIT",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/IBM/diem/tree/main/packages/diemlib",
packages=find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires=">=3.7",
)
|
1666711
|
def check_goldbach_for_num(n, primes_set):
    '''Gets an even integer n and a set of primes primes_set. Returns whether there exist two primes in the set whose sum is n.'''
    relevant_primes_set = {p for p in primes_set if p < n}
    for prime in primes_set:
if (n-prime) in relevant_primes_set:
return True
return False
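# Usage sketch (illustrative, not part of the original file):
if __name__ == '__main__':
    primes = {2, 3, 5, 7, 11, 13}
    print(check_goldbach_for_num(10, primes))  # True, e.g. 3 + 7
    print(check_goldbach_for_num(11, primes))  # False (no such pair in this set)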
|
1666712
|
import os
import infra.basetest
class TestS6Rc(infra.basetest.BRTest):
config = infra.basetest.BASIC_TOOLCHAIN_CONFIG + \
"""
BR2_PACKAGE_S6_RC=y
BR2_TARGET_ROOTFS_CPIO=y
# BR2_TARGET_ROOTFS_TAR is not set
"""
def test_run(self):
img = os.path.join(self.builddir, "images", "rootfs.cpio")
self.emulator.boot(arch="armv5",
kernel="builtin",
options=["-initrd", img])
self.emulator.login()
_, exit_code = self.emulator.run("s6-svscan -h")
self.assertEqual(exit_code, 100)
# Set up two service directories with a dependency
self.emulator.run("mkdir -p source/testsv1")
self.emulator.run("mkdir -p source/testsv2")
self.emulator.run("echo oneshot > source/testsv1/type")
self.emulator.run("echo oneshot > source/testsv2/type")
self.emulator.run("echo 'echo foo' > source/testsv1/up")
self.emulator.run("echo 'echo bar' > source/testsv2/up")
self.emulator.run("echo testsv1 > source/testsv2/dependencies")
self.emulator.run("chmod +x source/testsv1/up")
self.emulator.run("chmod +x source/testsv2/up")
# Compile the service database
_, exit_code = self.emulator.run("s6-rc-compile compiled source")
self.assertEqual(exit_code, 0)
# Inspect dependencies
cmd = "s6-rc-db -c compiled -d dependencies testsv1"
output, exit_code = self.emulator.run(cmd)
self.assertEqual(exit_code, 0)
self.assertEqual(output[0], "testsv2")
cmd = "s6-rc-db -c compiled dependencies testsv2"
output, exit_code = self.emulator.run(cmd)
self.assertEqual(exit_code, 0)
self.assertEqual(output[0], "testsv1")
|
1666734
|
from CSIKit.csi.csidata import CSIData
from CSIKit.csi.csiframe import CSIFrame
from CSIKit.csi.frames.iwl import IWLCSIFrame
from CSIKit.csi.csimetadata import CSIMetadata
|
1666739
|
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import NoSuchElementException
import json
import sys
import time
import schedule
import os
import boto3
import urllib.request
from requests import get
import csv
bucket = os.environ['BUCKET_NAME']
folder = os.environ['FOLDER_PATH']
client = boto3.client(
's3',
aws_access_key_id=os.environ['AWS_ACCESS_KEY_ID'],
aws_secret_access_key=os.environ['AWS_SECRET_ACCESS_KEY'],
region_name=os.environ['AWS_REGION']
)
def check_exists_by_id(id, driver):
'''
Checks if an element with input id exists on a webpage
'''
try:
driver.find_element_by_id(id)
except NoSuchElementException:
return False
return True
def check_exists_by_xpath(xpath, driver):
'''
Checks if an element with input xpath exists on a webpage
'''
try:
driver.find_element_by_xpath(xpath)
except NoSuchElementException:
return False
return True
def find_next_page_button_index(elements):
'''
Finds the button to go to the next page of search results
'''
for i in range(len(elements)):
if elements[i].text == ' > ':
return i
return -1
def write_data(out, outfile):
'''
Outputs object to a file
'''
with open(outfile, 'w') as f:
f.write(json.dumps(out))
print("Results written")
return out
def upload_json_to_s3(output_json, bucket, folder, filename):
'''
Uploads scraped data to S3
'''
write_data(output_json, 'nj_campfin_scraped.json')
client.upload_file('nj_campfin_scraped.json', bucket, folder + filename)
def get_previously_scraped_from_s3(output_filename):
'''
Gets previously scraped data from S3 file
'''
url = os.environ['AWS_URL_PREFIX'] + '/' + os.environ['BUCKET_NAME'] + '/' + os.environ['FOLDER_PATH'] + output_filename
data = []
try:
with urllib.request.urlopen(url) as json_file:
data = json.loads(json_file.read().decode())
except urllib.error.HTTPError:
return []
return data
def create_filing_name_from_json(filing_json):
'''
Creates file name for pdf from a scraped filing
'''
name = ''.join(x for x in filing_json['name'] if x.isalnum())
date = ''.join(x for x in filing_json['date'] if x.isalnum())
form = ''.join(x for x in filing_json['form'] if x.isalnum())
period = ''.join(x for x in filing_json['period'] if x.isalnum())
amend = ''.join(x for x in filing_json['amend'] if x.isalnum())
out = name + date + form + period + amend + '.pdf'
return out
def get_filing_and_upload_to_s3(url, referer_url, bucket, folder, filename):
'''
Downloads a filing pdf and then uploads it to S3
'''
reply = get(url, stream=True, headers={'Referer': referer_url})
with open('temp_filing.pdf', 'wb') as file:
for chunk in reply.iter_content(chunk_size=1024):
if chunk:
                file.write(chunk)
client.upload_file('temp_filing.pdf', bucket, folder+filename)
file_url = os.environ['AWS_URL_PREFIX'] + '/' + os.environ['BUCKET_NAME'] + '/' + os.environ['FOLDER_PATH'] + filename
return file_url
def advance_to_page(browser, page_controls_xpath, page_num):
'''
Goes to a certain page of filing results
'''
while True:
if check_exists_by_xpath(page_controls_xpath, browser):
page_controls = browser.find_elements_by_xpath(page_controls_xpath)
else:
page_controls = None
advance_several = -1
for i in range(len(page_controls)):
if page_controls[i].text.strip() == str(page_num):
page_controls[i].click()
return
elif page_controls[i].text == '>>':
advance_several = i
if advance_several == -1:
raise ValueError("page_num out of range")
else:
page_controls[advance_several].click()
def convert_csv_row_to_result(csv_row):
'''
Takes a row from the logged results and converts it to a scraped object
'''
if len(csv_row) > 7:
return {
'name': csv_row[0],
'summary_link': csv_row[1],
'location': csv_row[2],
'party': csv_row[3],
'office_or_type': csv_row[4],
'election_type': csv_row[5],
'year': csv_row[6],
'date': csv_row[7],
'form': csv_row[8],
'period': csv_row[9],
'amendment': csv_row[10],
'url': csv_row[11]
}
else:
return {
'name': csv_row[0],
'summary_link': csv_row[1],
'location': csv_row[2],
'party': csv_row[3],
'office_or_type': csv_row[4],
'election_type': csv_row[5],
'year': csv_row[6],
}
def get_filing_list(first_name, last_name, year, office, outfile, get_filings, upload_pdfs, logfile):
'''
Scrapes filings with inputted search parameters and outputs them with given output parameters
TODO: separate input and output portions into different functions?
'''
print("Began scraping with params {} {} {} {} {}".format(first_name, last_name, year, office, outfile))
results = [] # stores scraping results
# read in scraped results if logfile is inputted
if logfile is not None and logfile != '':
reader = csv.reader(logfile)
for row in reader:
results.append(convert_csv_row_to_result(row))
print(len(results))
    # read in results from previous scraping passes if a previously scraped file is available
previously_scraped = []
if outfile is not None and outfile != '':
previously_scraped = get_previously_scraped_from_s3(outfile.split('/')[-1])
previously_scraped_no_urls = []
for old_details in previously_scraped:
copy = dict(old_details)
if 'url' in copy:
del copy['url']
previously_scraped_no_urls.append(copy)
# set up webdriver
#path_to_chromedriver = '/usr/local/bin/chromedriver' #TODO -> env var
path_to_chromedriver = os.environ['PATH_TO_CHROMEDRIVER']
browser = webdriver.Chrome(executable_path = path_to_chromedriver)
# open filings search page
url = 'http://www.elec.state.nj.us/ELECReport/searchcandidate.aspx' #TODO -> env var, maybe?
browser.get(url)
# "type" search parameters into form
browser.find_element_by_id('txtFirstName').send_keys(first_name)
browser.find_element_by_id('txtLastName').send_keys(last_name)
if (year is not None and year != ''):
browser.find_element_by_xpath("//select[@name='ctl00$ContentPlaceHolder1$usrCandidate1$ddlYear']/option[text()='{}']".format(year)).click()
if (office is not None and office != ''):
browser.find_element_by_xpath("//select[@name='ctl00$ContentPlaceHolder1$usrCandidate1$ddlOffice']/option[text()='{}']".format(office)).click()
browser.find_element_by_id('btnSearch').click() # search
# xpaths to find things on the search results page
docs_table_xpath = "//div[@id='VisibleReportContentctl00_ContentPlaceHolder1_BITSReportViewer1_reportViewer1_ctl09']//table[@cols='6']"
docs_table_or_norecords_xpath = "//div[@id='VisibleReportContentctl00_ContentPlaceHolder1_BITSReportViewer1_reportViewer1_ctl09'][.//table[@cols='6'] or .//div/text()='No Records Found']"
names_table_xpath = "//table[@id='ContentPlaceHolder1_usrCommonGrid1_gvwData']"
names_page_links_template = "ContentPlaceHolder1_usrCommonGrid1_rptPaging_LinkButton1_{}"
page_controls_xpath = "//a[@class='bodytext']"
date_sort_controls_xpath = "//td/a[@tabindex]"
# get the page controls on the page
if check_exists_by_xpath(page_controls_xpath, browser):
page_controls = browser.find_elements_by_xpath(page_controls_xpath)
else:
page_controls = None
# advance to results that haven't been scraped if there are logged scraped results
if len(results) > 25:
advance_to_page(browser, page_controls_xpath, (len(results) // 25) + 1)
if logfile is not None and logfile != '':
writer = csv.writer(logfile)
# scraping loop
# scrapes each page of search results, then advances to next page
# breaks if no more pages
while True:
# wait for search results to show up
wait = WebDriverWait(browser, int(os.environ['WAIT_TIME']))
wait.until(
EC.presence_of_element_located((By.XPATH, docs_table_or_norecords_xpath))
)
# get the list of candidates on the results page
names_table = browser.find_element_by_xpath(names_table_xpath)
names_rows = names_table.find_elements_by_xpath("./tbody/tr")
# for each candidate...
for i in range(1, len(names_rows)):
# get the list of candidates on the results page
names_table = browser.find_element_by_xpath(names_table_xpath)
names_rows = names_table.find_elements_by_xpath("./tbody/tr")
name_row = names_rows[i]
# get the name and information in the name row element
name = name_row.find_element_by_xpath("./td/a").text
name_items = name_row.find_elements_by_xpath("./td")
location = name_items[1].text
party = name_items[2].text
office_or_type = name_items[3].text
election_type = name_items[4].text
year = name_items[5].text
# click on the candidate name
try:
name_row.click()
except WebDriverException:
print(" ".join([name, location, party, office_or_type, election_type, year]) + " FAILED")
continue
# wait for results to show up
try:
wait = WebDriverWait(browser, int(os.environ['WAIT_TIME']))
wait.until(
EC.presence_of_element_located((By.XPATH, docs_table_or_norecords_xpath))
)
except TimeoutException:
print("TIMEOUT EXCEPTION: " + name)
# if there are no filings, continue
if (check_exists_by_xpath("//div[@id='VisibleReportContentctl00_ContentPlaceHolder1_BITSReportViewer1_reportViewer1_ctl09']//div[text()='No Records Found']", browser)):
print("No records, continuing")
if get_filings:
continue
# get the candidate contributors url
if check_exists_by_xpath("//a[text()='Summary Data']", browser):
summary_data_link = browser.find_element_by_xpath("//a[text()='Summary Data']").get_attribute("href")
else:
summary_data_link = ''
# if you need to scrape the filings (and not just the candidate info)...
if get_filings:
browser.find_element_by_xpath(date_sort_controls_xpath).click() # sort the filings by date
# wait for sort to complete and loading screen to go away
wait = WebDriverWait(browser, int(os.environ['WAIT_TIME']))
wait.until(EC.presence_of_element_located((By.XPATH, "//img[@src='/ELECReport/Reserved.ReportViewerWebControl.axd?OpType=Resource&Version=12.0.2402.20&Name=Microsoft.ReportingServices.Rendering.HtmlRenderer.RendererResources.sortAsc.gif']")))
wait = WebDriverWait(browser, int(os.environ['WAIT_TIME']))
time.sleep(0.5)
wait.until(
EC.invisibility_of_element_located((By.ID, "ctl00_ContentPlaceHolder1_BITSReportViewer1_reportViewer1_AsyncWait"))
#EC.element_to_be_clickable((By.XPATH, date_sort_controls_xpath))
)
browser.find_element_by_xpath(date_sort_controls_xpath).click() # click sort again to sort the right way
# wait...
wait = WebDriverWait(browser, int(os.environ['WAIT_TIME']))
wait.until(
EC.presence_of_element_located((By.XPATH, "//img[@src='/ELECReport/Reserved.ReportViewerWebControl.axd?OpType=Resource&Version=12.0.2402.20&Name=Microsoft.ReportingServices.Rendering.HtmlRenderer.RendererResources.sortDesc.gif']"))
)
time.sleep(0.5)
wait = WebDriverWait(browser, int(os.environ['WAIT_TIME']))
wait.until(
EC.invisibility_of_element_located((By.ID, "ctl00_ContentPlaceHolder1_BITSReportViewer1_reportViewer1_AsyncWait"))
)
docs_table = browser.find_element_by_xpath(docs_table_xpath) #get the filings table
filing_rows = docs_table.find_elements_by_xpath("./tbody/tr") #get rows in the filings table
#for each row in the filing table...
for filing_row in filing_rows[2:]:
# scrape the info in the row
filing_items = filing_row.find_elements_by_xpath(".//div")
file_element = filing_row.find_element_by_xpath(".//a")
details = {
'name': name.strip(),
'summary_link': summary_data_link,
'location': location,
'party': party,
'office_or_type': office_or_type,
'election_type': election_type,
'year': year,
'date': filing_items[0].text.strip(),
'form': filing_items[1].text.strip(),
'period': filing_items[2].text.strip(),
'amendment' : filing_items[3].text.strip(),
}
# get url for filing and download it if input options say you should
if outfile is not None and outfile != '' and details not in previously_scraped:
if details in previously_scraped_no_urls:
prev_details_index = previously_scraped_no_urls.index(details)
details['url'] = previously_scraped[prev_details_index]['url']
else:
details['url'] = ''
if details not in previously_scraped and upload_pdfs:
print("Downloading...")
file_url = get_filing_and_upload_to_s3(file_element.get_attribute("href"), url, bucket, folder, create_filing_name_from_json(details))
print(file_url)
details['url'] = file_url
else:
details['url'] = ''
# write scraped data to log if logfile is given
if details not in results and logfile is not None and logfile != '':
writer.writerow([
details['name'],
details['summary_link'],
details['location'],
details['party'],
details['office_or_type'],
details['election_type'],
details['year'],
details['date'],
details['form'],
details['period'],
details['amendment'],
details['url']
])
logfile.flush()
if details not in results:
results.append(details)
else: # abbreviated writeout for just candidate info
details = {
'name': name.strip(),
'summary_link': summary_data_link,
'location': location,
'party': party,
'office_or_type': office_or_type,
'election_type': election_type,
'year': year,
}
if details not in results and logfile is not None and logfile != '':
writer.writerow([
details['name'],
details['summary_link'],
details['location'],
details['party'],
details['office_or_type'],
details['election_type'],
details['year'],
])
logfile.flush()
if details not in results:
results.append(details)
# advance to next filings page, if it exists, else break
if check_exists_by_xpath(page_controls_xpath, browser):
page_controls = browser.find_elements_by_xpath(page_controls_xpath)
else:
page_controls = None
        if page_controls is None or find_next_page_button_index(page_controls) == -1:
break
else:
page_controls[find_next_page_button_index(page_controls)].click()
# write out results to outfile or stdout
if outfile is not None and outfile != '':
write_data(results, outfile)
upload_json_to_s3(results, bucket, folder, outfile.split('/')[-1])
else:
sys.stdout.write(json.dumps(results))
def main():
'''with open('log.csv', 'r+') as logfile:
get_filing_list(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5], sys.argv[6].lower() == 'true', sys.argv[7].lower() == 'true', None)'''
# run the scraping job every 6 hours
# TODO: move scraping interval to environment var
    schedule.every(6).hours.do(get_filing_list, sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5], sys.argv[6].lower() == 'true', sys.argv[7].lower() == 'true', None)
while True:
schedule.run_pending()
time.sleep(1)
if __name__=='__main__':
main()
|
1666758
|
import argparse
import asyncio
import csv
import logging
import os
import random
import time
import statistics
import numpy as np
from nepytune.benchmarks.query_runner import get_query_runner
from nepytune.benchmarks.connection_pool import NeptuneConnectionPool
QUERY_NAMES = [
'get_sibling_attrs', 'undecided_user_check', 'undecided_user_audience',
'brand_interaction_audience', 'get_all_transient_ids_in_household',
'early_website_adopters'
]
parser = argparse.ArgumentParser(description="Run query benchmarks")
parser.add_argument("--users", type=int, default=10)
parser.add_argument("--samples", type=int, default=1000)
parser.add_argument("--queries", default=['all'], type=str,
nargs='+', choices=QUERY_NAMES + ['all'])
parser.add_argument("--verbose", action='store_true')
parser.add_argument("--csv", action="store_true")
parser.add_argument("--output", type=str, default="results")
args = parser.parse_args()
if args.queries == ['all']:
args.queries = QUERY_NAMES
if (args.verbose):
level = logging.DEBUG
else:
level = logging.INFO
logging.basicConfig(level=level, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
sem = asyncio.Semaphore(args.users)
def custom_exception_handler(loop, context):
"""Stop event loop if exception occurs."""
loop.default_exception_handler(context)
exception = context.get('exception')
if isinstance(exception, Exception):
print(context)
loop.stop()
async def run_query(query_runner, sample, semaphore, pool):
"""Run query with limit on concurrent connections."""
async with semaphore:
return await query_runner.run(sample, pool)
async def run(query, samples, pool):
"""Run query benchmark tasks."""
query_runner = get_query_runner(query, samples)
logger.info("Initializing query data.")
    await query_runner.initialize()
queries = []
logger.info("Running benchmark.")
for i in range(samples):
queries.append(asyncio.create_task(run_query(query_runner, i, sem, pool)))
results = await asyncio.gather(*queries)
logger.info(f"Successful queries: {query_runner.succeded}")
logger.info(f"Failed queries: {query_runner.failed}")
benchmark_results = [result for result in results if result]
return benchmark_results, query_runner.succeded, query_runner.failed
def stats(results):
"""Print statistics for benchmark results."""
print(f"Samples: {args.samples}")
print(f"Mean: {statistics.mean(results)}s")
print(f"Median: {statistics.median(results)}s")
a = np.array(results)
for percentile in [50, 90, 99, 99.9, 99.99]:
result = np.percentile(a, percentile)
print(f"{percentile} percentile: {result}s")
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.set_exception_handler(custom_exception_handler)
pool = NeptuneConnectionPool(args.users)
try:
loop.run_until_complete(pool.create())
for query in args.queries:
logger.info(f"Benchmarking query: {query}")
logger.info(f"Concurrent users: {args.users}")
results, succeded, failed = loop.run_until_complete(run(query, args.samples, pool))
stats([measure[2] for measure in results])
if args.csv:
dst = f"{args.output}/{query}-{args.samples}-{args.users}.csv"
with open(dst, "w") as f:
writer = csv.writer(f)
for measure in results:
writer.writerow(measure)
query_stats = f"{args.output}/{query}-{args.samples}-{args.users}-stats.csv"
with open(query_stats, "w") as f:
writer = csv.writer(f)
writer.writerow([succeded, failed])
finally:
loop.run_until_complete(pool.destroy())
loop.run_until_complete(loop.shutdown_asyncgens())
loop.close()
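# Hedged usage sketch (not part of the original script; the filename below is assumed).
# Flag names are taken from the argparse definitions above:
#   python run_benchmarks.py --users 20 --samples 500 \
#       --queries undecided_user_check brand_interaction_audience --csv --output results
# With --csv, per-sample measurements and a <query>-<samples>-<users>-stats.csv file
# (successful/failed counts) are written into the --output directory, which must exist.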
|
1666775
|
import torch
import os
import argparse
import numpy as np
import hparams as hp
from data.utils import parse_path_file
from model.generator.melgan import MelGANGenerator
from .synthesize import Synthesizer
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def load_data(audio_index_path, mel_index_path, index_list):
audio_index = parse_path_file(audio_index_path)
mel_index = parse_path_file(mel_index_path)
audio_list = []
mel_list = []
for index in index_list:
audio_list.append(np.load(audio_index[index]))
mel_list.append(torch.from_numpy(np.load(mel_index[index])))
return audio_list, mel_list
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', type=str)
parser.add_argument('--audio_index_path', type=str, default=os.path.join("dataset", "audio", "eval"))
parser.add_argument('--mel_index_path', type=str, default=os.path.join("dataset", "mel", "eval"))
args = parser.parse_args()
synthesizer = Synthesizer(args.checkpoint_path)
audio_list, mel_list = load_data(args.audio_index_path, args.mel_index_path, [0, 1, 2, 3, 4, 5])
|
1666781
|
from .core import Command, Resolvable, EntityRef, SimpleResolve, WorldPos
from .nbt import NBTStorable
from .scoreboard import ScoreRef
from .selector import ScoreRange
class Execute(Command):
def __init__(self, chain):
self.chain = SimpleResolve(*chain._components)
def resolve(self, scope):
return 'execute %s' % self.chain.resolve(scope)
def ensure_selector(sel_arg):
assert isinstance(sel_arg, EntityRef), sel_arg
return sel_arg
class ExecuteChain:
def __init__(self):
self._components = []
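        # Flipped to True only after a condition ('if'/'unless' subcommand) is added;
        # finish() asserts it, so an unconditional chain must end with run() instead.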
self.can_terminate = False
def add(self, *args):
for arg in args:
if type(arg) in [str, int, float]:
self._components.append(str(arg))
elif isinstance(arg, Resolvable):
self._components.append(arg)
else:
assert False, type(arg)
return self
def run(self, cmd):
self.add('run', cmd)
return Execute(self)
def finish(self):
assert self.can_terminate
return Execute(self)
def as_entity(self, select_arg):
self.can_terminate = False
return self.add('as', ensure_selector(select_arg))
def at(self, select_arg):
self.can_terminate = False
return self.add('at', ensure_selector(select_arg))
def at_pos(self, pos):
self.can_terminate = False
return self.add('positioned', pos)
def at_entity_pos(self, select_arg):
self.can_terminate = False
return self.add('positioned', 'as', ensure_selector(select_arg))
def align(self, axes):
self.can_terminate = False
assert ''.join(axis for axis in axes if axis in 'xyz') == axes
return self.add('align', axes)
def facing(self, pos):
self.can_terminate = False
return self.add('facing', pos)
def facing_entity(self, select_arg, feature):
self.can_terminate = False
assert feature == 'eyes' or feature == 'feet'
return self.add('facing', 'entity', ensure_selector(select_arg), \
feature)
def rotated(self, y, x):
self.can_terminate = False
return self.add('rotated', y, x)
def rotated_as_entity(self, select_arg):
self.can_terminate = False
return self.add('rotated', 'as', ensure_selector(select_arg))
def anchored(self, anchor):
self.can_terminate = False
assert anchor == 'feet' or anchor == 'eyes'
return self.add('anchored', anchor)
def cond(self, cond_type):
self.can_terminate = False
assert cond_type == 'if' or cond_type == 'unless'
return ExecuteChain.Cond(self, cond_type)
class Cond:
def add(self, *args):
self.parent.can_terminate = True
return self.parent.add(*((self.cond_type,) + args))
def __init__(self, parent, cond_type):
self.parent = parent
self.cond_type = cond_type
def entity(self, entityref):
return self.add('entity', ensure_selector(entityref))
def score(self, targetref, operator, sourceref):
assert isinstance(targetref, ScoreRef)
assert isinstance(sourceref, ScoreRef)
assert operator in ['<', '<=', '=', '>=', '>']
return self.add('score', targetref, operator, sourceref)
def score_range(self, scoreref, range):
assert isinstance(scoreref, ScoreRef)
assert isinstance(range, ScoreRange)
return self.add('score', scoreref, 'matches', range)
def block(self, pos, block):
assert isinstance(pos, WorldPos) and pos.block_pos
return self.add('block', pos, block)
def blocks_match(self, begin, end, dest, type):
assert type in ['all', 'masked']
return self.add('blocks', begin, end, dest, type)
def data(self, dataref, path):
assert isinstance(dataref, NBTStorable)
return self.add('data', dataref, path)
def store(self, store_type):
assert store_type in ['result', 'success']
self.can_terminate = False
return ExecuteChain.Store(self, store_type)
class Store:
def add(self, *args):
return self.parent.add(*(('store', self.store_type) + args))
def __init__(self, parent, store_type):
self.parent = parent
self.store_type = store_type
def score(self, scoreref):
assert isinstance(scoreref, ScoreRef)
return self.add('score', scoreref)
def nbt(self, storeref, path, data_type, scale=1):
assert isinstance(storeref, NBTStorable)
return self.add(storeref, path, data_type, scale)
def bossbar(self, bar, attr):
assert attr in ['value', 'max']
return self.add('bossbar', bar, attr)
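# Hedged usage sketch (not part of the original module): building a command such as
#   execute as <selector> at <selector> if score <a> >= <b> run <cmd>
# The selector, score and command objects are assumed to come from the sibling modules
# imported above (.core, .scoreboard, .selector); the names below are illustrative only.
#
#   cmd = ExecuteChain() \
#       .as_entity(some_entity_ref) \
#       .at(some_entity_ref) \
#       .cond('if').score(target_ref, '>=', source_ref) \
#       .run(inner_command)
#
# A chain may also end with finish() instead of run(), but only after a condition has
# been added (see can_terminate above); store('result'/'success') redirects the outcome
# into a score, NBT path, or bossbar before the chain is terminated.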
|