code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
from cleverhans.attacks import MultiModelIterativeMethod
import numpy as np
from PIL import Image
import tensorflow as tf
from tensorflow.contrib.slim.nets import inception
import inception_resnet_v2
slim = tf.contrib.slim
tf.flags.DEFINE_string(
'master', '', 'The address of the TensorFlow master to use.')
tf.flags.DEFINE_string(
'checkpoint_path1', '', 'Path to checkpoint for inception network.')
tf.flags.DEFINE_string(
'checkpoint_path2', '', 'Path to checkpoint for adversarial trained inception network.')
tf.flags.DEFINE_string(
'checkpoint_path3', '', 'Path to checkpoint for adversarial trained inception-resnet network.')
tf.flags.DEFINE_string(
'input_dir', '', 'Input directory with images.')
tf.flags.DEFINE_string(
'output_dir', '', 'Output directory with images.')
tf.flags.DEFINE_float(
'max_epsilon', 16.0, 'Maximum size of adversarial perturbation.')
tf.flags.DEFINE_integer(
'image_width', 299, 'Width of each input images.')
tf.flags.DEFINE_integer(
'image_height', 299, 'Height of each input images.')
tf.flags.DEFINE_integer(
'batch_size', 10, 'How many images process at one time.')
FLAGS = tf.flags.FLAGS
def load_images(input_dir, batch_shape):
    """Read png images from input directory in batches.

    Args:
        input_dir: input directory containing *.png files
        batch_shape: shape of minibatch array, i.e. [batch_size, height, width, 3]

    Yields:
        filenames: list of file names without path of each image.
            Length of this list could be less than batch_size; in this case only
            the first few images of the result are elements of the minibatch.
        images: array with all images from this batch, scaled to [-1, 1]
    """
    images = np.zeros(batch_shape)
    filenames = []
    idx = 0
    batch_size = batch_shape[0]
    filepaths = tf.gfile.Glob(os.path.join(input_dir, '*.png'))
    for filepath in filepaths:
        with tf.gfile.Open(filepath) as f:
            # `np.float` is a deprecated alias of the builtin float
            # (removed in NumPy 1.24); use float directly.
            image = np.array(Image.open(f).convert('RGB')).astype(float) / 255.0
        # Images for inception classifier are normalized to be in [-1, 1] interval.
        images[idx, :, :, :] = image * 2.0 - 1.0
        filenames.append(os.path.basename(filepath))
        idx += 1
        if idx == batch_size:
            yield filenames, images
            filenames = []
            images = np.zeros(batch_shape)
            idx = 0
    if idx > 0:
        # Flush the final, possibly partial batch.
        yield filenames, images
def save_images(images, filenames, output_dir):
    """Saves images to the output directory.

    Args:
        images: array with minibatch of images in [-1, 1]
        filenames: list of filenames without path.
            If the number of file names in this list is less than the number of
            images in the minibatch then only the first len(filenames) images
            will be saved.
        output_dir: directory where to save images
    """
    for i, filename in enumerate(filenames):
        # Images for inception classifier are normalized to be in [-1, 1]
        # interval, so rescale them back to [0, 255].
        img = np.round(255.0 * (images[i, :, :, :] + 1.0) * 0.5).astype(np.uint8)
        # PNG data is binary, so the file must be opened in binary mode
        # ('wb'); text mode ('w') fails under Python 3.
        with tf.gfile.Open(os.path.join(output_dir, filename), 'wb') as f:
            Image.fromarray(img).save(f, format='PNG')
class InceptionModel(object):
    """Model class for CleverHans library.

    Wraps slim's InceptionV3 so that calling the instance on an input
    tensor returns class probabilities.
    """

    def __init__(self, num_classes, scope=''):
        # num_classes: number of output classes.
        self.num_classes = num_classes
        # Becomes True after the first graph construction so later calls
        # reuse the already-created variables.
        self.built = False
        # Variable scope name; keeps multiple model instances' variables apart.
        self.scope = scope

    def __call__(self, x_input):
        """Constructs model and return probabilities for given input."""
        # Reuse variables on the second and subsequent constructions.
        reuse = True if self.built else None
        with slim.arg_scope(inception.inception_v3_arg_scope()):
            _, end_points = inception.inception_v3(
                x_input, num_classes=self.num_classes, is_training=False, reuse=reuse, scope=self.scope)
        self.built = True
        output = end_points['Predictions']
        # Strip off the extra reshape op at the output
        probs = output.op.inputs[0]
        return probs
class IrNetModel(object):
    """Model class for CleverHans library.

    Same wrapper pattern as InceptionModel, but for InceptionResnetV2.
    """

    def __init__(self, num_classes, scope=''):
        # num_classes: number of output classes.
        self.num_classes = num_classes
        # Becomes True after the first graph construction so later calls
        # reuse the already-created variables.
        self.built = False
        # Variable scope name; keeps multiple model instances' variables apart.
        self.scope = scope

    def __call__(self, x_input):
        """Constructs model and return probabilities for given input."""
        # Reuse variables on the second and subsequent constructions.
        reuse = True if self.built else None
        with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope()):
            _, end_points = inception_resnet_v2.inception_resnet_v2(x_input, num_classes=self.num_classes,
                                                                    reuse=reuse, is_training=False, scope=self.scope)
        self.built = True
        output = end_points['Predictions']
        # Strip off the extra reshape op at the output
        probs = output.op.inputs[0]
        return probs
def main(_):
    """Build the multi-model attack graph, restore the three checkpoints,
    and write adversarial versions of every input image batch."""
    start_time = time.time()
    # Images for inception classifier are normalized to be in [-1, 1] interval,
    # eps is a difference between pixels so it should be in [0, 2] interval.
    # Renormalizing epsilon from [0, 255] to [0, 2].
    eps = 2.0 * FLAGS.max_epsilon / 255.0
    batch_shape = [FLAGS.batch_size, FLAGS.image_height, FLAGS.image_width, 3]
    num_classes = 1001
    tf.logging.set_verbosity(tf.logging.INFO)
    with tf.Graph().as_default():
        # ---------------------------------
        # define graph
        x_input = tf.placeholder(tf.float32, shape=batch_shape)
        # Two InceptionV3 instances (plain + adversarially trained) plus one
        # InceptionResnetV2, each in its own scope so variables don't collide.
        model1 = InceptionModel(num_classes, scope='sc1')
        model2 = InceptionModel(num_classes, scope='sc2')
        model3 = IrNetModel(num_classes, scope='sc3')
        method = MultiModelIterativeMethod([model1, model2, model3])
        x_adv = method.generate(x_input, eps=eps, clip_min=-1., clip_max=1., nb_iter=17)
        # ---------------------------------
        # set input
        all_vars = tf.global_variables()
        # print(all_vars)
        # NOTE(review): unique_name_headers is never used; kept for debugging.
        unique_name_headers = set([k.name.split('/')[0] for k in all_vars])
        # Partition the graph variables by the scope prefix they were built under.
        model1_vars = [k for k in all_vars if k.name.startswith('sc1')]
        model2_vars = [k for k in all_vars if k.name.startswith('sc2')]
        model3_vars = [k for k in all_vars if k.name.startswith('sc3')]
        # name of variable `my_var:0` corresponds `my_var` for loader; [:-2]
        # strips the ':0' suffix, and the scope prefix is mapped back to the
        # name the variables carry inside each checkpoint file.
        model1_keys = [s.name.replace('sc1', 'InceptionV3')[:-2] for s in model1_vars]
        model2_keys = [s.name.replace('sc2', 'InceptionV3')[:-2] for s in model2_vars]
        model3_keys = [s.name.replace('sc3', 'InceptionResnetV2')[:-2] for s in model3_vars]
        # One saver per model, each with its own checkpoint-name -> variable map.
        saver1 = tf.train.Saver(dict(zip(model1_keys, model1_vars)))
        saver2 = tf.train.Saver(dict(zip(model2_keys, model2_vars)))
        saver3 = tf.train.Saver(dict(zip(model3_keys, model3_vars)))
        session_creator = tf.train.ChiefSessionCreator(master=FLAGS.master)
        with tf.train.MonitoredSession(session_creator=session_creator) as sess:
            saver1.restore(sess, FLAGS.checkpoint_path1)
            saver2.restore(sess, FLAGS.checkpoint_path2)
            saver3.restore(sess, FLAGS.checkpoint_path3)
            for filenames, images in load_images(FLAGS.input_dir, batch_shape):
                adv_images = sess.run(x_adv, feed_dict={x_input: images})
                save_images(adv_images, filenames, FLAGS.output_dir)
    elapsed_time = time.time() - start_time
    print('elapsed time: {0:.0f} [s]'.format(elapsed_time))
if __name__ == '__main__':
tf.app.run() | winners/nontargeted-attack/teaflow/attack_iter.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
from cleverhans.attacks import MultiModelIterativeMethod
import numpy as np
from PIL import Image
import tensorflow as tf
from tensorflow.contrib.slim.nets import inception
import inception_resnet_v2
slim = tf.contrib.slim
tf.flags.DEFINE_string(
'master', '', 'The address of the TensorFlow master to use.')
tf.flags.DEFINE_string(
'checkpoint_path1', '', 'Path to checkpoint for inception network.')
tf.flags.DEFINE_string(
'checkpoint_path2', '', 'Path to checkpoint for adversarial trained inception network.')
tf.flags.DEFINE_string(
'checkpoint_path3', '', 'Path to checkpoint for adversarial trained inception-resnet network.')
tf.flags.DEFINE_string(
'input_dir', '', 'Input directory with images.')
tf.flags.DEFINE_string(
'output_dir', '', 'Output directory with images.')
tf.flags.DEFINE_float(
'max_epsilon', 16.0, 'Maximum size of adversarial perturbation.')
tf.flags.DEFINE_integer(
'image_width', 299, 'Width of each input images.')
tf.flags.DEFINE_integer(
'image_height', 299, 'Height of each input images.')
tf.flags.DEFINE_integer(
'batch_size', 10, 'How many images process at one time.')
FLAGS = tf.flags.FLAGS
def load_images(input_dir, batch_shape):
    """Read png images from input directory in batches.

    Args:
        input_dir: input directory containing *.png files
        batch_shape: shape of minibatch array, i.e. [batch_size, height, width, 3]

    Yields:
        filenames: list of file names without path of each image.
            Length of this list could be less than batch_size; in this case only
            the first few images of the result are elements of the minibatch.
        images: array with all images from this batch, scaled to [-1, 1]
    """
    images = np.zeros(batch_shape)
    filenames = []
    idx = 0
    batch_size = batch_shape[0]
    filepaths = tf.gfile.Glob(os.path.join(input_dir, '*.png'))
    for filepath in filepaths:
        with tf.gfile.Open(filepath) as f:
            # `np.float` is a deprecated alias of the builtin float
            # (removed in NumPy 1.24); use float directly.
            image = np.array(Image.open(f).convert('RGB')).astype(float) / 255.0
        # Images for inception classifier are normalized to be in [-1, 1] interval.
        images[idx, :, :, :] = image * 2.0 - 1.0
        filenames.append(os.path.basename(filepath))
        idx += 1
        if idx == batch_size:
            yield filenames, images
            filenames = []
            images = np.zeros(batch_shape)
            idx = 0
    if idx > 0:
        # Flush the final, possibly partial batch.
        yield filenames, images
def save_images(images, filenames, output_dir):
    """Saves images to the output directory.

    Args:
        images: array with minibatch of images in [-1, 1]
        filenames: list of filenames without path.
            If the number of file names in this list is less than the number of
            images in the minibatch then only the first len(filenames) images
            will be saved.
        output_dir: directory where to save images
    """
    for i, filename in enumerate(filenames):
        # Images for inception classifier are normalized to be in [-1, 1]
        # interval, so rescale them back to [0, 255].
        img = np.round(255.0 * (images[i, :, :, :] + 1.0) * 0.5).astype(np.uint8)
        # PNG data is binary, so the file must be opened in binary mode
        # ('wb'); text mode ('w') fails under Python 3.
        with tf.gfile.Open(os.path.join(output_dir, filename), 'wb') as f:
            Image.fromarray(img).save(f, format='PNG')
class InceptionModel(object):
    """Model class for CleverHans library.

    Wraps slim's InceptionV3 so that calling the instance on an input
    tensor returns class probabilities.
    """

    def __init__(self, num_classes, scope=''):
        # num_classes: number of output classes.
        self.num_classes = num_classes
        # Becomes True after the first graph construction so later calls
        # reuse the already-created variables.
        self.built = False
        # Variable scope name; keeps multiple model instances' variables apart.
        self.scope = scope

    def __call__(self, x_input):
        """Constructs model and return probabilities for given input."""
        # Reuse variables on the second and subsequent constructions.
        reuse = True if self.built else None
        with slim.arg_scope(inception.inception_v3_arg_scope()):
            _, end_points = inception.inception_v3(
                x_input, num_classes=self.num_classes, is_training=False, reuse=reuse, scope=self.scope)
        self.built = True
        output = end_points['Predictions']
        # Strip off the extra reshape op at the output
        probs = output.op.inputs[0]
        return probs
class IrNetModel(object):
    """Model class for CleverHans library.

    Same wrapper pattern as InceptionModel, but for InceptionResnetV2.
    """

    def __init__(self, num_classes, scope=''):
        # num_classes: number of output classes.
        self.num_classes = num_classes
        # Becomes True after the first graph construction so later calls
        # reuse the already-created variables.
        self.built = False
        # Variable scope name; keeps multiple model instances' variables apart.
        self.scope = scope

    def __call__(self, x_input):
        """Constructs model and return probabilities for given input."""
        # Reuse variables on the second and subsequent constructions.
        reuse = True if self.built else None
        with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope()):
            _, end_points = inception_resnet_v2.inception_resnet_v2(x_input, num_classes=self.num_classes,
                                                                    reuse=reuse, is_training=False, scope=self.scope)
        self.built = True
        output = end_points['Predictions']
        # Strip off the extra reshape op at the output
        probs = output.op.inputs[0]
        return probs
def main(_):
    """Build the multi-model attack graph, restore the three checkpoints,
    and write adversarial versions of every input image batch."""
    start_time = time.time()
    # Images for inception classifier are normalized to be in [-1, 1] interval,
    # eps is a difference between pixels so it should be in [0, 2] interval.
    # Renormalizing epsilon from [0, 255] to [0, 2].
    eps = 2.0 * FLAGS.max_epsilon / 255.0
    batch_shape = [FLAGS.batch_size, FLAGS.image_height, FLAGS.image_width, 3]
    num_classes = 1001
    tf.logging.set_verbosity(tf.logging.INFO)
    with tf.Graph().as_default():
        # ---------------------------------
        # define graph
        x_input = tf.placeholder(tf.float32, shape=batch_shape)
        # Two InceptionV3 instances (plain + adversarially trained) plus one
        # InceptionResnetV2, each in its own scope so variables don't collide.
        model1 = InceptionModel(num_classes, scope='sc1')
        model2 = InceptionModel(num_classes, scope='sc2')
        model3 = IrNetModel(num_classes, scope='sc3')
        method = MultiModelIterativeMethod([model1, model2, model3])
        x_adv = method.generate(x_input, eps=eps, clip_min=-1., clip_max=1., nb_iter=17)
        # ---------------------------------
        # set input
        all_vars = tf.global_variables()
        # print(all_vars)
        # NOTE(review): unique_name_headers is never used; kept for debugging.
        unique_name_headers = set([k.name.split('/')[0] for k in all_vars])
        # Partition the graph variables by the scope prefix they were built under.
        model1_vars = [k for k in all_vars if k.name.startswith('sc1')]
        model2_vars = [k for k in all_vars if k.name.startswith('sc2')]
        model3_vars = [k for k in all_vars if k.name.startswith('sc3')]
        # name of variable `my_var:0` corresponds `my_var` for loader; [:-2]
        # strips the ':0' suffix, and the scope prefix is mapped back to the
        # name the variables carry inside each checkpoint file.
        model1_keys = [s.name.replace('sc1', 'InceptionV3')[:-2] for s in model1_vars]
        model2_keys = [s.name.replace('sc2', 'InceptionV3')[:-2] for s in model2_vars]
        model3_keys = [s.name.replace('sc3', 'InceptionResnetV2')[:-2] for s in model3_vars]
        # One saver per model, each with its own checkpoint-name -> variable map.
        saver1 = tf.train.Saver(dict(zip(model1_keys, model1_vars)))
        saver2 = tf.train.Saver(dict(zip(model2_keys, model2_vars)))
        saver3 = tf.train.Saver(dict(zip(model3_keys, model3_vars)))
        session_creator = tf.train.ChiefSessionCreator(master=FLAGS.master)
        with tf.train.MonitoredSession(session_creator=session_creator) as sess:
            saver1.restore(sess, FLAGS.checkpoint_path1)
            saver2.restore(sess, FLAGS.checkpoint_path2)
            saver3.restore(sess, FLAGS.checkpoint_path3)
            for filenames, images in load_images(FLAGS.input_dir, batch_shape):
                adv_images = sess.run(x_adv, feed_dict={x_input: images})
                save_images(adv_images, filenames, FLAGS.output_dir)
    elapsed_time = time.time() - start_time
    print('elapsed time: {0:.0f} [s]'.format(elapsed_time))
if __name__ == '__main__':
tf.app.run() | 0.881417 | 0.361616 |
import bpy
import os

# Mesh data and name of the currently selected Blender object.
data = bpy.context.active_object.data
name = bpy.context.active_object.name

# Extract vertex color information. Translate color into a bitfield
# so that exact colors do not matter.
vert_colors = {}
for i in range(0, len(data.vertex_colors[0].data)):
    # Channels are floats; +0.5 rounds each to 0 or 1.
    r = int(data.vertex_colors[0].data[i].color[0] + 0.5)
    g = int(data.vertex_colors[0].data[i].color[1] + 0.5)
    b = int(data.vertex_colors[0].data[i].color[2] + 0.5)
    # Pack R/G/B bits into a 3-bit value (r=4, g=2, b=1).
    c = (r * 4) + (g * 2) + b
    vert_colors[data.loops[i].vertex_index] = c

# Translation table for vertex color to corner/edge piece color
# index used during the cube model creation code.
color_table = {0: -1, 7: 0, 4: 1, 2: 2}
# Bitfield 1 = pure blue, which marks shared edges in the model.
shared_edge_color = 1

# Process polygons. We will add vertices as we need them because we
# want to duplicate shared edges (marked in blue in the model).
verts = []
vert_map = {}
shared_vert_map = {0: {}, 2: {}, 4: {}, 7: {}}
index = []
for poly in data.polygons:
    # Determine the primary (non-shared) color of this polygon
    primary = None
    for i in poly.vertices:
        if vert_colors[i] != shared_edge_color:
            if (primary is not None) and (primary != vert_colors[i]):
                raise Exception("Shared colors not marked shared")
            primary = vert_colors[i]
    if len(poly.vertices) != 3:
        raise Exception("Polygon is not a triangle")
    for i in poly.vertices:
        if vert_colors[i] == shared_edge_color:
            # This is a shared edge. Use the shared_vert_map, which
            # also includes the primary color for unique checks. This
            # will copy the vertex and allow a hard color edge in
            # the final model.
            if i not in shared_vert_map[primary]:
                shared_vert_map[primary][i] = len(verts)
                verts.append((data.vertices[i], color_table[primary]))
            index.append(shared_vert_map[primary][i])
        else:
            if i not in vert_map:
                vert_map[i] = len(verts)
                verts.append((data.vertices[i], color_table[primary]))
            index.append(vert_map[i])

# Output model: walk up from this script to the 'tools' directory, then
# write the generated Rust source into the sibling src/ directory.
path = __file__
while os.path.basename(path) != 'tools':
    path = os.path.dirname(path)
path = os.path.join(os.path.dirname(path), f"src/{name}_generated.rs")
f = open(path, 'w')
f.write('// This file was autogenerated by tools/blend_to_struct.py\n')
f.write('use crate::cube::SourceVertex;\n\n')
f.write('pub const ' + name.upper() + '_SOURCE_VERTS: &\'static [SourceVertex] = &[\n')
for vert in verts:
    pos = vert[0].co
    normal = vert[0].normal
    normal = [normal.x, normal.y, normal.z]
    # Correct normals of faces: int(x * 1.5) snaps components with
    # |x| > 2/3 to +/-1 and the rest to 0 — presumably to make outer-face
    # normals exactly axis-aligned; confirm against the model.
    if abs(pos.x) == 1 or abs(pos.y) == 1 or abs(pos.z) == 1:
        normal[0] = float(int(normal[0] * 1.5))
        normal[1] = float(int(normal[1] * 1.5))
        normal[2] = float(int(normal[2] * 1.5))
    f.write(' SourceVertex {\n')
    f.write(f' pos: [{pos.x:.7}, {pos.y:.7}, {pos.z:.7}],\n')
    f.write(f' normal: [{normal[0]:.7}, {normal[1]:.7}, {normal[2]:.7}],\n')
    f.write(f' color: {vert[1]},\n')
    f.write(' },\n')
f.write('];\n\n')
f.write('pub const ' + name.upper() + '_INDEX: &\'static [u16] = &[\n ')
for i in range(0, len(index)):
    # Break the index list every 32 entries to keep lines readable.
    if i != 0 and (i % 32) == 0:
        f.write('\n ')
    f.write(f'{index[i]}')
    if (i + 1) < len(index):
        f.write(', ')
f.write('\n];\n')
f.close()
f.close() | tools/blend_to_struct.py | import bpy
import os

# Mesh data and name of the currently selected Blender object.
data = bpy.context.active_object.data
name = bpy.context.active_object.name

# Extract vertex color information. Translate color into a bitfield
# so that exact colors do not matter.
vert_colors = {}
for i in range(0, len(data.vertex_colors[0].data)):
    # Channels are floats; +0.5 rounds each to 0 or 1.
    r = int(data.vertex_colors[0].data[i].color[0] + 0.5)
    g = int(data.vertex_colors[0].data[i].color[1] + 0.5)
    b = int(data.vertex_colors[0].data[i].color[2] + 0.5)
    # Pack R/G/B bits into a 3-bit value (r=4, g=2, b=1).
    c = (r * 4) + (g * 2) + b
    vert_colors[data.loops[i].vertex_index] = c

# Translation table for vertex color to corner/edge piece color
# index used during the cube model creation code.
color_table = {0: -1, 7: 0, 4: 1, 2: 2}
# Bitfield 1 = pure blue, which marks shared edges in the model.
shared_edge_color = 1

# Process polygons. We will add vertices as we need them because we
# want to duplicate shared edges (marked in blue in the model).
verts = []
vert_map = {}
shared_vert_map = {0: {}, 2: {}, 4: {}, 7: {}}
index = []
for poly in data.polygons:
    # Determine the primary (non-shared) color of this polygon
    primary = None
    for i in poly.vertices:
        if vert_colors[i] != shared_edge_color:
            if (primary is not None) and (primary != vert_colors[i]):
                raise Exception("Shared colors not marked shared")
            primary = vert_colors[i]
    if len(poly.vertices) != 3:
        raise Exception("Polygon is not a triangle")
    for i in poly.vertices:
        if vert_colors[i] == shared_edge_color:
            # This is a shared edge. Use the shared_vert_map, which
            # also includes the primary color for unique checks. This
            # will copy the vertex and allow a hard color edge in
            # the final model.
            if i not in shared_vert_map[primary]:
                shared_vert_map[primary][i] = len(verts)
                verts.append((data.vertices[i], color_table[primary]))
            index.append(shared_vert_map[primary][i])
        else:
            if i not in vert_map:
                vert_map[i] = len(verts)
                verts.append((data.vertices[i], color_table[primary]))
            index.append(vert_map[i])

# Output model: walk up from this script to the 'tools' directory, then
# write the generated Rust source into the sibling src/ directory.
path = __file__
while os.path.basename(path) != 'tools':
    path = os.path.dirname(path)
path = os.path.join(os.path.dirname(path), f"src/{name}_generated.rs")
f = open(path, 'w')
f.write('// This file was autogenerated by tools/blend_to_struct.py\n')
f.write('use crate::cube::SourceVertex;\n\n')
f.write('pub const ' + name.upper() + '_SOURCE_VERTS: &\'static [SourceVertex] = &[\n')
for vert in verts:
    pos = vert[0].co
    normal = vert[0].normal
    normal = [normal.x, normal.y, normal.z]
    # Correct normals of faces: int(x * 1.5) snaps components with
    # |x| > 2/3 to +/-1 and the rest to 0 — presumably to make outer-face
    # normals exactly axis-aligned; confirm against the model.
    if abs(pos.x) == 1 or abs(pos.y) == 1 or abs(pos.z) == 1:
        normal[0] = float(int(normal[0] * 1.5))
        normal[1] = float(int(normal[1] * 1.5))
        normal[2] = float(int(normal[2] * 1.5))
    f.write(' SourceVertex {\n')
    f.write(f' pos: [{pos.x:.7}, {pos.y:.7}, {pos.z:.7}],\n')
    f.write(f' normal: [{normal[0]:.7}, {normal[1]:.7}, {normal[2]:.7}],\n')
    f.write(f' color: {vert[1]},\n')
    f.write(' },\n')
f.write('];\n\n')
f.write('pub const ' + name.upper() + '_INDEX: &\'static [u16] = &[\n ')
for i in range(0, len(index)):
    # Break the index list every 32 entries to keep lines readable.
    if i != 0 and (i % 32) == 0:
        f.write('\n ')
    f.write(f'{index[i]}')
    if (i + 1) < len(index):
        f.write(', ')
f.write('\n];\n')
f.close() | 0.429788 | 0.41253 |
from flask import Flask
from decision_tree import DecisionTree
import json
from flask import request
from flask import render_template
import random
import sys
__author__ = "fatRabit"
__version__ = 1.6
app = Flask(__name__)
#The p_tree,which stored in P_TREE.txt, is a decison
#tree which decide how to prevent the player winning game.
#The w_tree,which stored in W_TREE.txt,is a decison
#tree which decide how to win the game.
p_labels = ["your self","neighbor1","neighbor2","neighbor3"]
p_tree = DecisionTree(path="P_TREE.txt")
w_labels = ["your self","neighbor1","neighbor2","neighbor3","neighbor4"]
w_tree = DecisionTree(path="W_TREE.txt")
#Return the home page to client.
@app.route("/five",methods=["GET","POST"])
def home_page():
return render_template("index.html")
#To create a vector based on the direction.
#Number 1 mean that there is a chess,which have same color with the chess of AI,here.
#Number 0 mean that it is empty here.
#Number -1 mean that this position can't put any chess.
#This function is used in the function named answer.
def build_vector(i: int, j: int, table: list, des: str, sta: bool):
    """Build a feature vector describing the line of cells extending from
    (i, j) in direction `des`.

    Each entry means:
        1  -- a stone of the same color as the stone at (i, j)
        0  -- an empty cell
        -1 -- off the board (no stone can be placed) or an opposing stone

    Args:
        i, j: board coordinates of the starting stone.
        table: 2-D board; 0 marks an empty cell.
        des: direction name ("LEFT", "UP RIGHT", ...). An unknown name
            yields just [1], matching the original fall-through behavior.
        sta: True -> look 3 cells ahead (prevention tree input),
             False -> look 4 cells ahead (winning tree input).

    Returns:
        List of ints starting with 1 for (i, j) itself.
    """
    # (row delta, column delta) per direction. This table replaces eight
    # nearly identical copy-pasted branches from the original code.
    deltas = {
        "LEFT": (0, -1),
        "RIGHT": (0, 1),
        "UP": (-1, 0),
        "DOWN": (1, 0),
        "UP LEFT": (-1, -1),
        "UP RIGHT": (-1, 1),
        "DOWN LEFT": (1, -1),
        "DOWN RIGHT": (1, 1),
    }
    step = 4 if sta else 5
    vector = [1]
    chess = table[i][j]
    if des not in deltas:
        # Unknown direction: the original if/elif chain matched nothing and
        # returned only the first element.
        return vector
    di, dj = deltas[des]
    for k in range(1, step):
        r, c = i + k * di, j + k * dj
        # Negative indices would wrap around in Python, so treat them as
        # off-board explicitly; indices past the end raise IndexError below.
        if r < 0 or c < 0:
            vector.append(-1)
            continue
        try:
            cell = table[r][c]
        except IndexError:
            vector.append(-1)
            continue
        if cell == chess:
            vector.append(1)
        elif cell == 0:
            vector.append(0)
        else:
            vector.append(-1)
    return vector
#To get the new point where AI plan to put the chess
#The param named des and step was decide by the decison tree.
#This function is used in the function named answer.
def get_new_p(i: int, j: int, des: str, step: int):
    """Translate the point (i, j) by `step` cells along direction `des`.

    An unrecognized direction is treated as "DOWN RIGHT", matching the
    final `else` branch of the original if/elif chain.

    Returns:
        (row, column) tuple of the translated point.
    """
    offsets = {
        "LEFT": (0, -step),
        "RIGHT": (0, step),
        "UP": (-step, 0),
        "DOWN": (step, 0),
        "UP LEFT": (-step, -step),
        "UP RIGHT": (-step, step),
        "DOWN LEFT": (step, -step),
    }
    di, dj = offsets.get(des, (step, step))
    return i + di, j + dj
#To get the the coordinates of place where AI want to put chess.
#AI will think whether it should stop you winning the game or not first.
#Then the AI will think about how to beat you.
#If AI can't get answer from these two methods,it will give two
#random number from between 0 and 18 as answer.
#By the way,The file named log.txt will record that how does the AI think.
#This function is used in the function named ai_answer.
def answer(ai_color: int, color: str, table):
    """Decide where the AI puts its next stone.

    Scans every occupied cell: opponent stones are fed to p_tree to find a
    line that must be blocked; the AI's own stones are fed to w_tree to
    find a winning extension. If neither tree yields a move, a random empty
    cell in [0, 15) x [0, 15) is chosen. Progress is appended to log.txt.

    Args:
        ai_color: the AI's stone value in `table` (converted with int()).
        color: "BLACK" or "WHITE"; not used in the decision itself.
        table: 2-D board; 0 marks an empty cell.

    Returns:
        (row, column) of the chosen move.
    """
    ai_color = int(ai_color)
    des = ("LEFT", "RIGHT", "UP", "DOWN", "UP LEFT", "UP RIGHT",
           "DOWN LEFT", "DOWN RIGHT")
    win_answer = None
    win_w = 0   # best winning-tree weight seen so far
    pre_w = 0   # best prevention-tree weight seen so far
    for i in range(len(table)):
        for j in range(len(table[i])):
            if table[i][j] != 0:
                if table[i][j] != ai_color:
                    # Opponent stone: look for a line that must be blocked.
                    record = [-1, "", []]
                    for d in des:
                        vector = build_vector(i, j, table, d, True)
                        assert len(vector) == len(p_labels), \
                            "p_tree %s error,vector is %s" % (d, vector)
                        res = p_tree.decision((vector, p_labels))
                        if res:
                            res = json.loads(res)
                            # res = [step, weight]; keep the strongest threat.
                            if res[0] != -1 and res[1] > pre_w:
                                record[0] = res[0]
                                record[1] = d
                                record[2] = vector
                                pre_w = res[1]
                                # Weight 5: the player wins next move, so
                                # block immediately.
                                if res[1] == 5:
                                    return get_new_p(i, j, record[1], record[0])
                    if record[0] != -1:
                        new_p = get_new_p(i, j, record[1], record[0])
                        log_info(str(vector) + "---->I prevent it! I put on %s,%s\n" % (new_p))
                        return new_p
                else:
                    # Own stone: look for a move extending toward a win.
                    w_record = [-1, ""]
                    for d in des:
                        vector = build_vector(i, j, table, d, False)
                        assert len(vector) == len(w_labels), \
                            "w_tree %s error,vector is %s" % (d, vector)
                        res = w_tree.decision((vector, w_labels))
                        if res:
                            res = json.loads(res)
                            if res[0] != -1 and res[1] > win_w:
                                w_record[0] = res[0]
                                w_record[1] = d
                                # Weight 5 is an immediate win: take it now.
                                if res[1] == 5:
                                    return get_new_p(i, j, w_record[1], w_record[0])
                                win_w = res[1]
                    # Only making the decision at this point?
                    if w_record[0] != -1:
                        win_answer = get_new_p(i, j, w_record[1], w_record[0])
    if win_answer:
        log_info("Maybe I will win! I put on %s,%s\n" % (win_answer))
        return win_answer
    # Fall back to a random empty cell.
    i = random.randrange(0, 15, 1)
    j = random.randrange(0, 15, 1)
    while table[i][j] != 0:
        i = random.randrange(0, 15, 1)
        j = random.randrange(0, 15, 1)
    log_info("I don't know where I should put! But I still put on %s,%s\n" % (i, j))
    return i, j
def log_info(message):
    """Append `message` to log.txt, prefixed with the author tag.

    Uses a context manager so the file handle is flushed and closed even
    if the write raises, replacing the manual open/flush/close sequence.
    """
    with open("log.txt", "a") as log:
        log.write("cxm:" + message)
#Return the coordinates of place where AI want to put chess to client.
@app.route("/five/ai",methods=["POST"])
def ai_answer():
ai_color = request.form.get("ai_color",None)
if not ai_color:
return json.dumps({"sta":"failed","reason":"give me ai_color!"})
elif "1" == str(ai_color):
color = "BLACK"
else:
color ="WHITE"
table = request.form.get("data",None)
if not table:
return json.dumps({"sta":"failed","reason":"give me table!"})
table = json.loads(table)
i,j = answer(ai_color,color,table)
res = {
"sta":"succ",
"location":[i,j]
}
return json.dumps(res)
if __name__ == "__main__":
try:
port = int(sys.argv[1])
except IndexError:
port = 80
app.run(port=port) | BetaMeow/ai.py | from flask import Flask
from decision_tree import DecisionTree
import json
from flask import request
from flask import render_template
import random
import sys
__author__ = "fatRabit"
__version__ = 1.6
app = Flask(__name__)
#The p_tree,which stored in P_TREE.txt, is a decison
#tree which decide how to prevent the player winning game.
#The w_tree,which stored in W_TREE.txt,is a decison
#tree which decide how to win the game.
p_labels = ["your self","neighbor1","neighbor2","neighbor3"]
p_tree = DecisionTree(path="P_TREE.txt")
w_labels = ["your self","neighbor1","neighbor2","neighbor3","neighbor4"]
w_tree = DecisionTree(path="W_TREE.txt")
#Return the home page to client.
@app.route("/five",methods=["GET","POST"])
def home_page():
return render_template("index.html")
#To create a vector based on the direction.
#Number 1 mean that there is a chess,which have same color with the chess of AI,here.
#Number 0 mean that it is empty here.
#Number -1 mean that this position can't put any chess.
#This function is used in the function named answer.
def build_vector(i: int, j: int, table: list, des: str, sta: bool):
    """Build a feature vector describing the line of cells extending from
    (i, j) in direction `des`.

    Each entry means:
        1  -- a stone of the same color as the stone at (i, j)
        0  -- an empty cell
        -1 -- off the board (no stone can be placed) or an opposing stone

    Args:
        i, j: board coordinates of the starting stone.
        table: 2-D board; 0 marks an empty cell.
        des: direction name ("LEFT", "UP RIGHT", ...). An unknown name
            yields just [1], matching the original fall-through behavior.
        sta: True -> look 3 cells ahead (prevention tree input),
             False -> look 4 cells ahead (winning tree input).

    Returns:
        List of ints starting with 1 for (i, j) itself.
    """
    # (row delta, column delta) per direction. This table replaces eight
    # nearly identical copy-pasted branches from the original code.
    deltas = {
        "LEFT": (0, -1),
        "RIGHT": (0, 1),
        "UP": (-1, 0),
        "DOWN": (1, 0),
        "UP LEFT": (-1, -1),
        "UP RIGHT": (-1, 1),
        "DOWN LEFT": (1, -1),
        "DOWN RIGHT": (1, 1),
    }
    step = 4 if sta else 5
    vector = [1]
    chess = table[i][j]
    if des not in deltas:
        # Unknown direction: the original if/elif chain matched nothing and
        # returned only the first element.
        return vector
    di, dj = deltas[des]
    for k in range(1, step):
        r, c = i + k * di, j + k * dj
        # Negative indices would wrap around in Python, so treat them as
        # off-board explicitly; indices past the end raise IndexError below.
        if r < 0 or c < 0:
            vector.append(-1)
            continue
        try:
            cell = table[r][c]
        except IndexError:
            vector.append(-1)
            continue
        if cell == chess:
            vector.append(1)
        elif cell == 0:
            vector.append(0)
        else:
            vector.append(-1)
    return vector
#To get the new point where AI plan to put the chess
#The param named des and step was decide by the decison tree.
#This function is used in the function named answer.
def get_new_p(i: int, j: int, des: str, step: int):
    """Translate the point (i, j) by `step` cells along direction `des`.

    An unrecognized direction is treated as "DOWN RIGHT", matching the
    final `else` branch of the original if/elif chain.

    Returns:
        (row, column) tuple of the translated point.
    """
    offsets = {
        "LEFT": (0, -step),
        "RIGHT": (0, step),
        "UP": (-step, 0),
        "DOWN": (step, 0),
        "UP LEFT": (-step, -step),
        "UP RIGHT": (-step, step),
        "DOWN LEFT": (step, -step),
    }
    di, dj = offsets.get(des, (step, step))
    return i + di, j + dj
#To get the the coordinates of place where AI want to put chess.
#AI will think whether it should stop you winning the game or not first.
#Then the AI will think about how to beat you.
#If AI can't get answer from these two methods,it will give two
#random number from between 0 and 18 as answer.
#By the way,The file named log.txt will record that how does the AI think.
#This function is used in the function named ai_answer.
def answer(ai_color: int, color: str, table):
    """Decide where the AI puts its next stone.

    Scans every occupied cell: opponent stones are fed to p_tree to find a
    line that must be blocked; the AI's own stones are fed to w_tree to
    find a winning extension. If neither tree yields a move, a random empty
    cell in [0, 15) x [0, 15) is chosen. Progress is appended to log.txt.

    Args:
        ai_color: the AI's stone value in `table` (converted with int()).
        color: "BLACK" or "WHITE"; not used in the decision itself.
        table: 2-D board; 0 marks an empty cell.

    Returns:
        (row, column) of the chosen move.
    """
    ai_color = int(ai_color)
    des = ("LEFT", "RIGHT", "UP", "DOWN", "UP LEFT", "UP RIGHT",
           "DOWN LEFT", "DOWN RIGHT")
    win_answer = None
    win_w = 0   # best winning-tree weight seen so far
    pre_w = 0   # best prevention-tree weight seen so far
    for i in range(len(table)):
        for j in range(len(table[i])):
            if table[i][j] != 0:
                if table[i][j] != ai_color:
                    # Opponent stone: look for a line that must be blocked.
                    record = [-1, "", []]
                    for d in des:
                        vector = build_vector(i, j, table, d, True)
                        assert len(vector) == len(p_labels), \
                            "p_tree %s error,vector is %s" % (d, vector)
                        res = p_tree.decision((vector, p_labels))
                        if res:
                            res = json.loads(res)
                            # res = [step, weight]; keep the strongest threat.
                            if res[0] != -1 and res[1] > pre_w:
                                record[0] = res[0]
                                record[1] = d
                                record[2] = vector
                                pre_w = res[1]
                                # Weight 5: the player wins next move, so
                                # block immediately.
                                if res[1] == 5:
                                    return get_new_p(i, j, record[1], record[0])
                    if record[0] != -1:
                        new_p = get_new_p(i, j, record[1], record[0])
                        log_info(str(vector) + "---->I prevent it! I put on %s,%s\n" % (new_p))
                        return new_p
                else:
                    # Own stone: look for a move extending toward a win.
                    w_record = [-1, ""]
                    for d in des:
                        vector = build_vector(i, j, table, d, False)
                        assert len(vector) == len(w_labels), \
                            "w_tree %s error,vector is %s" % (d, vector)
                        res = w_tree.decision((vector, w_labels))
                        if res:
                            res = json.loads(res)
                            if res[0] != -1 and res[1] > win_w:
                                w_record[0] = res[0]
                                w_record[1] = d
                                # Weight 5 is an immediate win: take it now.
                                if res[1] == 5:
                                    return get_new_p(i, j, w_record[1], w_record[0])
                                win_w = res[1]
                    # Only making the decision at this point?
                    if w_record[0] != -1:
                        win_answer = get_new_p(i, j, w_record[1], w_record[0])
    if win_answer:
        log_info("Maybe I will win! I put on %s,%s\n" % (win_answer))
        return win_answer
    # Fall back to a random empty cell.
    i = random.randrange(0, 15, 1)
    j = random.randrange(0, 15, 1)
    while table[i][j] != 0:
        i = random.randrange(0, 15, 1)
        j = random.randrange(0, 15, 1)
    log_info("I don't know where I should put! But I still put on %s,%s\n" % (i, j))
    return i, j
def log_info(message):
    """Append `message` to log.txt, prefixed with the author tag.

    Uses a context manager so the file handle is flushed and closed even
    if the write raises, replacing the manual open/flush/close sequence.
    """
    with open("log.txt", "a") as log:
        log.write("cxm:" + message)
# Return the coordinates of the cell where the AI wants to put its stone.
@app.route("/five/ai",methods=["POST"])
def ai_answer():
    """Flask endpoint: compute the AI move for the posted board state."""
    ai_color = request.form.get("ai_color",None)
    if not ai_color:
        return json.dumps({"sta":"failed","reason":"give me ai_color!"})
    # Colour code "1" is black, anything else is white.
    color = "BLACK" if "1" == str(ai_color) else "WHITE"
    table = request.form.get("data",None)
    if not table:
        return json.dumps({"sta":"failed","reason":"give me table!"})
    i,j = answer(ai_color, color, json.loads(table))
    return json.dumps({"sta":"succ","location":[i,j]})
if __name__ == "__main__":
    try:
        port = int(sys.argv[1])
    # BUG FIX: also catch ValueError so a non-numeric port argument falls
    # back to the default instead of crashing at startup.
    except (IndexError, ValueError):
        port = 80
app.run(port=port) | 0.293506 | 0.363845 |
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from webdriver_manager.chrome import ChromeDriverManager
import time
import logging
logging.basicConfig(filename="/var/log/python-log/rain-read-log", filemode="w", level=logging.DEBUG, format='%(asctime)s %(levelname)-8s %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
def extract_number(data):
    """Parse a Czech-formatted measurement cell into a float.

    'neměřitelné' (unmeasurable) and empty cells count as 0; the decimal
    comma of the first token is converted to a dot.  Raises OSError when
    the token is not numeric.
    """
    if data in ("neměřitelné", ""):
        return 0
    try:
        first_token = data.split()[0].replace(',', '.')
        return float(first_token)
    except ValueError as e:
        logging.error(f"Data in record is not number or anyting expected and error is: {e}")
        raise OSError
class WaterFallData:
    """Scrape the CHMI precipitation page and extract the Plzeň-Mikulka rainfall."""
    def __init__(self) -> None:
        # Row of table cell texts for the station; filled by get_record().
        self.record = None
    def get_html(self):
        """Render the CHMI precipitation page in Chrome and keep its HTML in self.html."""
        try:
            browser = webdriver.Chrome(service=Service(ChromeDriverManager().install()))
        except FileNotFoundError as e:
            logging.error(f"Could not open new browser tab with error: {e}")
            raise OSError
        browser.get("https://www.chmi.cz/aktualni-situace/aktualni-stav-pocasi/ceska-republika/stanice/profesionalni-stanice/tabulky/srazky")
        self.html = browser.page_source
        time.sleep(2)
        browser.close()
    def get_record(self):
        """Locate the second styled table and store its sixth data row in self.record."""
        soup = BeautifulSoup(self.html, features="html.parser")
        tables = soup.findAll("table", attrs={"style": "border-top: solid #14387f 1pt; border-bottom: solid #14387f 1pt"})
        # BUG FIX: the old code indexed [1] before checking, so the
        # "no table" branch was unreachable (a Tag never equals []) and a
        # missing table surfaced as a bare IndexError.  Validate first.
        if len(tables) < 2:
            logging.info("No table found in recieved html code")
            raise OSError
        table = tables[1]
        datasets = []
        for row in table.find_all("tr")[1:]:
            dataset = [td.get_text() for td in row.find_all("td")]
            datasets.append(dataset)
        try:
            self.record = datasets[5]
        except IndexError as e:
            # BUG FIX: the f-prefix was missing, so "{e}" was logged literally.
            logging.info(f"Dataset from table was not found properly, error: {e}")
            raise OSError
    def get_rain_value(self):
        """Sum the station's precipitation columns; OSError when the row is wrong."""
        if self.record[0] != "Plzeň-Mikulka":
            logging.error("Wrong dataset read from webpage")
            raise OSError
        self.rain = 0
        # Columns 2-4 hold the precipitation readings being accumulated
        # (presumably different accumulation windows -- confirm on the page).
        for i in range(2, 5):
            self.rain += extract_number(self.record[i])
        return self.rain
    def display_html(self):
        """Debug helper: dump the fetched page source."""
        print(self.html)
    def display_record(self):
        """Debug helper: dump the selected station row."""
        print(self.record)
if __name__ == '__main__':
    # Manual smoke test: fetch the live page, pick the station row,
    # show it and print the summed rain value.
    klassa = WaterFallData()
    klassa.get_html()
    klassa.get_record()
    klassa.display_record()
print(klassa.get_rain_value()) | Backend/water_fall.py | from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from webdriver_manager.chrome import ChromeDriverManager
import time
import logging
logging.basicConfig(filename="/var/log/python-log/rain-read-log", filemode="w", level=logging.DEBUG, format='%(asctime)s %(levelname)-8s %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
def extract_number(data):
    """Parse a Czech-formatted measurement cell into a float.

    'neměřitelné' (unmeasurable) and empty cells count as 0; the decimal
    comma of the first token is converted to a dot.  Raises OSError when
    the token is not numeric.
    """
    if data in ("neměřitelné", ""):
        return 0
    try:
        first_token = data.split()[0].replace(',', '.')
        return float(first_token)
    except ValueError as e:
        logging.error(f"Data in record is not number or anyting expected and error is: {e}")
        raise OSError
class WaterFallData:
    """Scrape the CHMI precipitation page and extract the Plzeň-Mikulka rainfall."""
    def __init__(self) -> None:
        # Row of table cell texts for the station; filled by get_record().
        self.record = None
    def get_html(self):
        """Render the CHMI precipitation page in Chrome and keep its HTML in self.html."""
        try:
            browser = webdriver.Chrome(service=Service(ChromeDriverManager().install()))
        except FileNotFoundError as e:
            logging.error(f"Could not open new browser tab with error: {e}")
            raise OSError
        browser.get("https://www.chmi.cz/aktualni-situace/aktualni-stav-pocasi/ceska-republika/stanice/profesionalni-stanice/tabulky/srazky")
        self.html = browser.page_source
        time.sleep(2)
        browser.close()
    def get_record(self):
        """Locate the second styled table and store its sixth data row in self.record."""
        soup = BeautifulSoup(self.html, features="html.parser")
        tables = soup.findAll("table", attrs={"style": "border-top: solid #14387f 1pt; border-bottom: solid #14387f 1pt"})
        # BUG FIX: the old code indexed [1] before checking, so the
        # "no table" branch was unreachable (a Tag never equals []) and a
        # missing table surfaced as a bare IndexError.  Validate first.
        if len(tables) < 2:
            logging.info("No table found in recieved html code")
            raise OSError
        table = tables[1]
        datasets = []
        for row in table.find_all("tr")[1:]:
            dataset = [td.get_text() for td in row.find_all("td")]
            datasets.append(dataset)
        try:
            self.record = datasets[5]
        except IndexError as e:
            # BUG FIX: the f-prefix was missing, so "{e}" was logged literally.
            logging.info(f"Dataset from table was not found properly, error: {e}")
            raise OSError
    def get_rain_value(self):
        """Sum the station's precipitation columns; OSError when the row is wrong."""
        if self.record[0] != "Plzeň-Mikulka":
            logging.error("Wrong dataset read from webpage")
            raise OSError
        self.rain = 0
        # Columns 2-4 hold the precipitation readings being accumulated
        # (presumably different accumulation windows -- confirm on the page).
        for i in range(2, 5):
            self.rain += extract_number(self.record[i])
        return self.rain
    def display_html(self):
        """Debug helper: dump the fetched page source."""
        print(self.html)
    def display_record(self):
        """Debug helper: dump the selected station row."""
        print(self.record)
if __name__ == '__main__':
    # Manual smoke test: fetch the live page, pick the station row,
    # show it and print the summed rain value.
    klassa = WaterFallData()
    klassa.get_html()
    klassa.get_record()
    klassa.display_record()
print(klassa.get_rain_value()) | 0.28587 | 0.139631 |
import logging
import traceback
from fastapi import APIRouter, Cookie, HTTPException, Request, Response
from httpx import AsyncClient
from starlette.responses import RedirectResponse
from pixels.constants import Discord, Server
from pixels.utils import auth
log = logging.getLogger(__name__)
router = APIRouter(include_in_schema=False)
@router.get("/authorize")
async def authorize() -> Response:
    """
    Send the user into Discord's OAuth2 authorization flow.

    Open this endpoint in a browser; after the user consents, Discord
    redirects back to /callback, which finishes the flow.
    """
    return RedirectResponse(Discord.AUTH_URL)
@router.get("/show_token")
async def show_token(request: Request, token: str = Cookie(None)) -> Response:  # noqa: B008
    """Render the API-token page, or the cookie-disabled fallback when no token cookie is set."""
    context = {"request": request}
    if not token:
        return Server.TEMPLATES.TemplateResponse("cookie_disabled.html", context)
    context["token"] = token
    return Server.TEMPLATES.TemplateResponse("api_token.html", context)
def build_oauth_token_request(code: str) -> tuple[dict, dict]:
    """Return (form data, headers) needed to finish the OAuth2 code exchange."""
    headers = {"Content-Type": "application/x-www-form-urlencoded"}
    query = dict(
        client_id=Discord.CLIENT_ID,
        client_secret=Discord.CLIENT_SECRET,
        grant_type="authorization_code",
        code=code,
        redirect_uri=f"{Server.BASE_URL}/callback",
        scope="identify",
    )
    return query, headers
@router.get("/callback")
async def auth_callback(request: Request) -> Response:
    """
    Create the user given the authorization code and output the token.

    This endpoint is only used as a redirect target from Discord.
    """
    try:
        async with AsyncClient() as client:
            # Exchange the authorization code for a Discord bearer token.
            token_params, token_headers = build_oauth_token_request(request.query_params["code"])
            auth_token = (await client.post(Discord.TOKEN_URL, data=token_params, headers=token_headers)).json()
            # Fetch the Discord user so we know whose API token to (re)issue.
            auth_header = {"Authorization": f"Bearer {auth_token['access_token']}"}
            user = (await client.get(Discord.USER_URL, headers=auth_header)).json()
            token = await auth.reset_user_token(request.state.db_conn, user["id"])
    except KeyError:
        # Covers a missing "code" query param, a token response without
        # "access_token", or a user response without "id".
        # Ensure that users don't land on the show_pixel page
        log.error(traceback.format_exc())
        raise HTTPException(401, "Unknown error while creating token")
    except PermissionError:
        # Presumably raised by auth.reset_user_token for banned users -- confirm.
        raise HTTPException(401, "You are banned")
    # Redirect so that a user doesn't refresh the page and spam discord
    redirect = RedirectResponse("/show_token", status_code=303)
    # Short-lived, path-scoped cookie: /show_token reads it once to display the token.
    redirect.set_cookie(
        key='token',
        value=token,
        httponly=True,
        max_age=10,
        path='/show_token',
    )
return redirect | pixels/endpoints/authorization.py | import logging
import traceback
from fastapi import APIRouter, Cookie, HTTPException, Request, Response
from httpx import AsyncClient
from starlette.responses import RedirectResponse
from pixels.constants import Discord, Server
from pixels.utils import auth
log = logging.getLogger(__name__)
router = APIRouter(include_in_schema=False)
@router.get("/authorize")
async def authorize() -> Response:
    """
    Send the user into Discord's OAuth2 authorization flow.

    Open this endpoint in a browser; after the user consents, Discord
    redirects back to /callback, which finishes the flow.
    """
    return RedirectResponse(Discord.AUTH_URL)
@router.get("/show_token")
async def show_token(request: Request, token: str = Cookie(None)) -> Response:  # noqa: B008
    """Render the API-token page, or the cookie-disabled fallback when no token cookie is set."""
    context = {"request": request}
    if not token:
        return Server.TEMPLATES.TemplateResponse("cookie_disabled.html", context)
    context["token"] = token
    return Server.TEMPLATES.TemplateResponse("api_token.html", context)
def build_oauth_token_request(code: str) -> tuple[dict, dict]:
    """Return (form data, headers) needed to finish the OAuth2 code exchange."""
    headers = {"Content-Type": "application/x-www-form-urlencoded"}
    query = dict(
        client_id=Discord.CLIENT_ID,
        client_secret=Discord.CLIENT_SECRET,
        grant_type="authorization_code",
        code=code,
        redirect_uri=f"{Server.BASE_URL}/callback",
        scope="identify",
    )
    return query, headers
@router.get("/callback")
async def auth_callback(request: Request) -> Response:
    """
    Create the user given the authorization code and output the token.

    This endpoint is only used as a redirect target from Discord.
    """
    try:
        async with AsyncClient() as client:
            # Exchange the authorization code for a Discord bearer token.
            token_params, token_headers = build_oauth_token_request(request.query_params["code"])
            auth_token = (await client.post(Discord.TOKEN_URL, data=token_params, headers=token_headers)).json()
            # Fetch the Discord user so we know whose API token to (re)issue.
            auth_header = {"Authorization": f"Bearer {auth_token['access_token']}"}
            user = (await client.get(Discord.USER_URL, headers=auth_header)).json()
            token = await auth.reset_user_token(request.state.db_conn, user["id"])
    except KeyError:
        # Covers a missing "code" query param, a token response without
        # "access_token", or a user response without "id".
        # Ensure that users don't land on the show_pixel page
        log.error(traceback.format_exc())
        raise HTTPException(401, "Unknown error while creating token")
    except PermissionError:
        # Presumably raised by auth.reset_user_token for banned users -- confirm.
        raise HTTPException(401, "You are banned")
    # Redirect so that a user doesn't refresh the page and spam discord
    redirect = RedirectResponse("/show_token", status_code=303)
    # Short-lived, path-scoped cookie: /show_token reads it once to display the token.
    redirect.set_cookie(
        key='token',
        value=token,
        httponly=True,
        max_age=10,
        path='/show_token',
    )
return redirect | 0.682468 | 0.083516 |
from sqlalchemy import Column, create_engine, orm, types
from sqlalchemy.ext.declarative import declarative_base
from django.http import Http404
from django.test import SimpleTestCase
from rest_framework.test import APIRequestFactory
from rest_witchcraft import serializers, viewsets
factory = APIRequestFactory()
engine = create_engine("sqlite://")
session = orm.scoped_session(orm.sessionmaker(bind=engine))
Base = declarative_base()
Base.query = session.query_property()
class RouterTestModel(Base):
    """Minimal SQLAlchemy model used as the viewset target in these tests."""
    __tablename__ = "routertest"
    id = Column(types.Integer(), default=3, primary_key=True)
    text = Column(types.String(length=200))
Base.metadata.create_all(engine)
class RouterTestModelSerializer(serializers.ModelSerializer):
    """Serializer bound to RouterTestModel; lets viewsets infer the model from Meta."""
    class Meta:
        model = RouterTestModel
        session = session
        fields = "__all__"
class TestModelRoutes(SimpleTestCase):
    """Tests for ModelViewSet model resolution and object lookup."""
    def test_get_model_using_queryset(self):
        # get_model() should infer the model from the queryset.
        class RouterTestViewSet(viewsets.ModelViewSet):
            queryset = RouterTestModel.query
            serializer_class = RouterTestModelSerializer
        model = RouterTestViewSet.get_model()
        self.assertEqual(model, RouterTestModel)
    def test_get_model_using_serializer(self):
        # Without a queryset, get_model() falls back to the serializer's Meta.model.
        class RouterTestViewSet(viewsets.ModelViewSet):
            serializer_class = RouterTestModelSerializer
        model = RouterTestViewSet.get_model()
        self.assertEqual(model, RouterTestModel)
    def test_get_model_fails_with_assert_error(self):
        # Neither queryset nor serializer configured: get_model() must assert.
        class RouterTestViewSet(viewsets.ModelViewSet):
            pass
        with self.assertRaises(AssertionError):
            RouterTestViewSet.get_model()
    def test_get_object_raises_404(self):
        # Empty table: a failed pk lookup must surface as Http404.
        class RouterTestViewSet(viewsets.ModelViewSet):
            queryset = RouterTestModel.query
            serializer_class = RouterTestModelSerializer
            lookup_field = "id"
            lookup_url_kwarg = "pk"
        viewset = RouterTestViewSet()
        viewset.kwargs = {"pk": 1}
        with self.assertRaises(Http404):
viewset.get_object() | tests/test_generics.py | from sqlalchemy import Column, create_engine, orm, types
from sqlalchemy.ext.declarative import declarative_base
from django.http import Http404
from django.test import SimpleTestCase
from rest_framework.test import APIRequestFactory
from rest_witchcraft import serializers, viewsets
factory = APIRequestFactory()
engine = create_engine("sqlite://")
session = orm.scoped_session(orm.sessionmaker(bind=engine))
Base = declarative_base()
Base.query = session.query_property()
class RouterTestModel(Base):
    """Minimal SQLAlchemy model used as the viewset target in these tests."""
    __tablename__ = "routertest"
    id = Column(types.Integer(), default=3, primary_key=True)
    text = Column(types.String(length=200))
Base.metadata.create_all(engine)
class RouterTestModelSerializer(serializers.ModelSerializer):
    """Serializer bound to RouterTestModel; lets viewsets infer the model from Meta."""
    class Meta:
        model = RouterTestModel
        session = session
        fields = "__all__"
class TestModelRoutes(SimpleTestCase):
    """Tests for ModelViewSet model resolution and object lookup."""
    def test_get_model_using_queryset(self):
        # get_model() should infer the model from the queryset.
        class RouterTestViewSet(viewsets.ModelViewSet):
            queryset = RouterTestModel.query
            serializer_class = RouterTestModelSerializer
        model = RouterTestViewSet.get_model()
        self.assertEqual(model, RouterTestModel)
    def test_get_model_using_serializer(self):
        # Without a queryset, get_model() falls back to the serializer's Meta.model.
        class RouterTestViewSet(viewsets.ModelViewSet):
            serializer_class = RouterTestModelSerializer
        model = RouterTestViewSet.get_model()
        self.assertEqual(model, RouterTestModel)
    def test_get_model_fails_with_assert_error(self):
        # Neither queryset nor serializer configured: get_model() must assert.
        class RouterTestViewSet(viewsets.ModelViewSet):
            pass
        with self.assertRaises(AssertionError):
            RouterTestViewSet.get_model()
    def test_get_object_raises_404(self):
        # Empty table: a failed pk lookup must surface as Http404.
        class RouterTestViewSet(viewsets.ModelViewSet):
            queryset = RouterTestModel.query
            serializer_class = RouterTestModelSerializer
            lookup_field = "id"
            lookup_url_kwarg = "pk"
        viewset = RouterTestViewSet()
        viewset.kwargs = {"pk": 1}
        with self.assertRaises(Http404):
viewset.get_object() | 0.562177 | 0.266662 |
import argparse
import numpy as np
import torch
import os
import sys
import IPython
import logging
sys.path.append("..")
import time
import IPython
from rlmamr.my_env.osd_ma_single_room import ObjSearchDelivery_v4 as OSD_S_4
from rlmamr.MA_cen_condi_ddrqn.utils.utils import Linear_Decay, get_conditional_argmax, get_conditional_action
from rlmamr.MA_cen_condi_ddrqn.utils.Cen_ctrl import Cen_Controller
from IPython.core.debugger import set_trace
ENVIRONMENTS = {
'OSD_S_4':OSD_S_4}
formatter = logging.Formatter('%(message)s')
def setup_logger(name, log_file, level=logging.INFO):
    """Create a named logger writing to *log_file* with the module formatter."""
    file_handler = logging.FileHandler(log_file)
    file_handler.setFormatter(formatter)
    new_logger = logging.getLogger(name)
    new_logger.setLevel(level)
    new_logger.addHandler(file_handler)
    return new_logger
def get_actions_and_h_states(env, agent, joint_obs, h_state, last_action, last_valid):
    """Pick a joint action greedily from the centralized Q-network.

    When at least one entry of last_valid is 1 (an agent finished its
    macro-action), the policy net is queried and a conditional argmax is
    taken -- presumably keeping unfinished agents' actions fixed (TODO
    confirm against get_conditional_argmax).  Otherwise -1 placeholders
    are returned and the hidden state is passed through unchanged.
    """
    with torch.no_grad():
        if max(last_valid) == 1.0:
            # Concatenate per-agent observations into a (1, 1, sum(obs_size)) sequence.
            Q, h = agent.policy_net(torch.cat(joint_obs).view(1,1,np.sum(env.obs_size)), h_state)
            # Flat joint-action index -> per-agent action tuple.
            a = get_conditional_argmax(Q, get_conditional_action(torch.cat(last_action).view(1,-1), torch.cat(last_valid).view(1,-1)), env.n_action).item()
            actions = np.unravel_index(a, env.n_action)
            new_h_state = h
        else:
            # NOTE(review): hard-coded 3 placeholders -- presumably should follow n_agent.
            actions = [-1] * 3
            new_h_state = h_state
    return actions, new_h_state
def get_init_inputs(env, n_agent):
    """Reset the environment; return (per-agent float obs tensors, None hidden state)."""
    observations = env.reset(True)
    return [torch.from_numpy(obs).float() for obs in observations], None
def test(env_name, env_terminate_step,n_agent, n_episode, tbot_speed):
    """Roll out a saved centralized controller in the chosen environment.

    Loads ./policy_nns/cen_controller.pt, then runs n_episode episodes,
    sleeping between steps so the behaviour can be watched live.
    (env_terminate_step is accepted but unused in this function.)
    """
    ENV = ENVIRONMENTS[env_name]
    env = ENV(tbot_speed=tbot_speed)
    agent = Cen_Controller()
    agent.idx = 0
    agent.policy_net = torch.load("./policy_nns/" + "cen_controller.pt")
    agent.policy_net.eval()
    R = 0  # cumulative reward across episodes (accumulated but never reported)
    for e in range(n_episode):
        t = 0  # terminal flag returned by env.step
        last_obs, h_states = get_init_inputs(env, n_agent)
        if e==0:
            # NOTE(review): debugger breakpoint left in -- drops into ipdb
            # on the first episode (and again after every episode below).
            set_trace()
        last_valid = [torch.tensor([[1]]).byte()] * n_agent
        last_action = [torch.tensor([[-1]])] * n_agent
        step = 0
        while not t:
            a, h_states = get_actions_and_h_states(env, agent, last_obs, h_states, last_action, last_valid)
            time.sleep(0.4)
            a, last_obs, r, t, v = env.step(a,True)
            # Convert the env's numpy outputs back into tensors for the next query.
            last_obs = [torch.from_numpy(o).float() for o in last_obs]
            last_action = [torch.tensor(a_idx).view(1,1) for a_idx in a]
            last_valid = [torch.tensor(_v, dtype=torch.uint8).view(1,-1) for _v in v]
            R += r
            step += 1
        set_trace()
        time.sleep(0.2)
def main():
    """Entry point: parse command-line options and run the evaluation."""
    option_specs = (
        ('--env_name', str, 'OSD_S_4'),
        ('--env_terminate_step', int, 150),
        ('--n_agent', int, 3),
        ('--n_episode', int, 1),
        ('--tbot_speed', float, 0.6),
    )
    parser = argparse.ArgumentParser()
    for flag, value_type, default in option_specs:
        parser.add_argument(flag, action='store', type=value_type, default=default)
    test(**vars(parser.parse_args()))
if __name__ == '__main__':
main() | test/test_osd_s_policy.py | import argparse
import numpy as np
import torch
import os
import sys
import IPython
import logging
sys.path.append("..")
import time
import IPython
from rlmamr.my_env.osd_ma_single_room import ObjSearchDelivery_v4 as OSD_S_4
from rlmamr.MA_cen_condi_ddrqn.utils.utils import Linear_Decay, get_conditional_argmax, get_conditional_action
from rlmamr.MA_cen_condi_ddrqn.utils.Cen_ctrl import Cen_Controller
from IPython.core.debugger import set_trace
ENVIRONMENTS = {
'OSD_S_4':OSD_S_4}
formatter = logging.Formatter('%(message)s')
def setup_logger(name, log_file, level=logging.INFO):
    """Create a named logger writing to *log_file* with the module formatter."""
    file_handler = logging.FileHandler(log_file)
    file_handler.setFormatter(formatter)
    new_logger = logging.getLogger(name)
    new_logger.setLevel(level)
    new_logger.addHandler(file_handler)
    return new_logger
def get_actions_and_h_states(env, agent, joint_obs, h_state, last_action, last_valid):
    """Pick a joint action greedily from the centralized Q-network.

    When at least one entry of last_valid is 1 (an agent finished its
    macro-action), the policy net is queried and a conditional argmax is
    taken -- presumably keeping unfinished agents' actions fixed (TODO
    confirm against get_conditional_argmax).  Otherwise -1 placeholders
    are returned and the hidden state is passed through unchanged.
    """
    with torch.no_grad():
        if max(last_valid) == 1.0:
            # Concatenate per-agent observations into a (1, 1, sum(obs_size)) sequence.
            Q, h = agent.policy_net(torch.cat(joint_obs).view(1,1,np.sum(env.obs_size)), h_state)
            # Flat joint-action index -> per-agent action tuple.
            a = get_conditional_argmax(Q, get_conditional_action(torch.cat(last_action).view(1,-1), torch.cat(last_valid).view(1,-1)), env.n_action).item()
            actions = np.unravel_index(a, env.n_action)
            new_h_state = h
        else:
            # NOTE(review): hard-coded 3 placeholders -- presumably should follow n_agent.
            actions = [-1] * 3
            new_h_state = h_state
    return actions, new_h_state
def get_init_inputs(env, n_agent):
    """Reset the environment; return (per-agent float obs tensors, None hidden state)."""
    observations = env.reset(True)
    return [torch.from_numpy(obs).float() for obs in observations], None
def test(env_name, env_terminate_step,n_agent, n_episode, tbot_speed):
    """Roll out a saved centralized controller in the chosen environment.

    Loads ./policy_nns/cen_controller.pt, then runs n_episode episodes,
    sleeping between steps so the behaviour can be watched live.
    (env_terminate_step is accepted but unused in this function.)
    """
    ENV = ENVIRONMENTS[env_name]
    env = ENV(tbot_speed=tbot_speed)
    agent = Cen_Controller()
    agent.idx = 0
    agent.policy_net = torch.load("./policy_nns/" + "cen_controller.pt")
    agent.policy_net.eval()
    R = 0  # cumulative reward across episodes (accumulated but never reported)
    for e in range(n_episode):
        t = 0  # terminal flag returned by env.step
        last_obs, h_states = get_init_inputs(env, n_agent)
        if e==0:
            # NOTE(review): debugger breakpoint left in -- drops into ipdb
            # on the first episode (and again after every episode below).
            set_trace()
        last_valid = [torch.tensor([[1]]).byte()] * n_agent
        last_action = [torch.tensor([[-1]])] * n_agent
        step = 0
        while not t:
            a, h_states = get_actions_and_h_states(env, agent, last_obs, h_states, last_action, last_valid)
            time.sleep(0.4)
            a, last_obs, r, t, v = env.step(a,True)
            # Convert the env's numpy outputs back into tensors for the next query.
            last_obs = [torch.from_numpy(o).float() for o in last_obs]
            last_action = [torch.tensor(a_idx).view(1,1) for a_idx in a]
            last_valid = [torch.tensor(_v, dtype=torch.uint8).view(1,-1) for _v in v]
            R += r
            step += 1
        set_trace()
        time.sleep(0.2)
def main():
    """Entry point: parse command-line options and run the evaluation."""
    option_specs = (
        ('--env_name', str, 'OSD_S_4'),
        ('--env_terminate_step', int, 150),
        ('--n_agent', int, 3),
        ('--n_episode', int, 1),
        ('--tbot_speed', float, 0.6),
    )
    parser = argparse.ArgumentParser()
    for flag, value_type, default in option_specs:
        parser.add_argument(flag, action='store', type=value_type, default=default)
    test(**vars(parser.parse_args()))
if __name__ == '__main__':
main() | 0.32146 | 0.181844 |
import sys
import code
import re
from typing import Callable
from contextlib import redirect_stdout, redirect_stderr
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
class LineEdit(QLineEdit):
    """QLineEdit with a history buffer for recalling previous lines.

    Tab is accepted as input (inserted as four spaces).  Up/Down walk the
    history, Home/End jump to its first/last entry, and Return emits the
    `newline` signal with the stripped text.
    """
    newline = pyqtSignal(str)  # Emitted with the stripped line when return is pressed

    def __init__(self, history: int = 100) -> None:
        # BUG FIX: __init__ was annotated '-> LineEdit'; it returns None.
        super().__init__()
        self.historymax = history
        self.clearhistory()
        self.promptpattern = re.compile('^[>\.]')

    def clearhistory(self) -> None:
        """Clear history buffer"""
        self.historyindex = 0
        self.historylist = []

    def event(self, ev: QEvent) -> bool:
        """Intercept tab and arrow key presses.

        Insert 4 spaces when tab is pressed instead of moving to the next
        control.  When arrow up or down is pressed, select a line from the
        history buffer.  Emit the newline signal when return is pressed.
        """
        if ev.type() == QEvent.KeyPress:
            if ev.key() == int(Qt.Key_Tab):
                self.insert('    ')
                return True
            elif ev.key() == int(Qt.Key_Up):
                self.recall(self.historyindex - 1)
                return True
            elif ev.key() == int(Qt.Key_Down):
                self.recall(self.historyindex + 1)
                return True
            elif ev.key() == int(Qt.Key_Home):
                self.recall(0)
                return True
            elif ev.key() == int(Qt.Key_End):
                self.recall(len(self.historylist) - 1)
                return True
            elif ev.key() == int(Qt.Key_Return):
                self.returnkey()
                return True
        return super().event(ev)

    def returnkey(self) -> None:
        """Record the current line in history, emit `newline`, clear the edit."""
        text = self.text().rstrip()
        self.record(text)
        self.newline.emit(text)
        self.setText('')

    def recall(self, index: int) -> None:
        """Select a line from the history list (index clamped to the valid range)."""
        length = len(self.historylist)
        if length > 0:
            index = max(0, min(index, length - 1))
            self.setText(self.historylist[index])
            self.historyindex = index

    def record(self, line: str) -> None:
        """Add line to the history buffer, discarding the oldest entries when full."""
        self.historyindex += 1
        while len(self.historylist) >= self.historymax - 1:
            # BUG FIX: pop() removed the NEWEST entry, so once the buffer
            # filled up, recent lines were silently discarded while the
            # oldest ones stayed forever.  pop(0) drops the oldest instead.
            self.historylist.pop(0)
        self.historylist.append(line)
        self.historyindex = min(self.historyindex, len(self.historylist))
class Redirect:
    """File-like adapter whose write() forwards each line to *func*.

    Used to point sys.stdout/sys.stderr at an arbitrary callback.
    """
    def __init__(self, func: Callable) -> None:
        # BUG FIX: __init__ was annotated '-> Redirect'; it returns None.
        self.func = func

    def write(self, line: str) -> None:
        """Forward *line* to the wrapped callback."""
        self.func(line)
class Console(QWidget):
    """A GUI version of code.InteractiveConsole."""
    def __init__(
        self,
        context=locals(), # context for interpreter
        history: int = 20, # max lines in history buffer
        blockcount: int = 500 # max lines in output buffer
    ) -> None:
        # NOTE(review): the context default calls locals() ONCE at class
        # definition time, not per instance -- presumably intended to be the
        # caller's namespace; confirm before relying on it.
        super().__init__()
        self.setcontext(context)
        self.buffer = []  # pending input lines of a (possibly multi-line) command
        self.content = QGridLayout(self)
        self.content.setContentsMargins(0, 0, 0, 0)
        self.content.setSpacing(0)
        # Display for output and stderr
        self.outdisplay = QPlainTextEdit(self)
        self.outdisplay.setMaximumBlockCount(blockcount)
        self.outdisplay.setReadOnly(True)
        self.content.addWidget(self.outdisplay, 0, 0, 1, 2)
        # Use color to differentiate input, output and stderr
        self.inpfmt = self.outdisplay.currentCharFormat()
        self.outfmt = QTextCharFormat(self.inpfmt)
        self.outfmt.setForeground(QBrush(QColor(0, 0, 255)))
        self.errfmt = QTextCharFormat(self.inpfmt)
        self.errfmt.setForeground(QBrush(QColor(255, 0, 0)))
        # Display input prompt left of input edit
        self.promptdisp = QLineEdit(self)
        self.promptdisp.setReadOnly(True)
        self.promptdisp.setFixedWidth(15)
        self.promptdisp.setFrame(False)
        self.content.addWidget(self.promptdisp, 1, 0)
        self.setprompt('> ')
        # Enter commands here
        self.inpedit = LineEdit(history=history)
        self.inpedit.newline.connect(self.push)
        self.inpedit.setFrame(False)
        self.content.addWidget(self.inpedit, 1, 1)
    def setcontext(self, context):
        """Set context for interpreter"""
        self.interp = code.InteractiveInterpreter(context)
    def resetbuffer(self) -> None:
        """Reset the input buffer."""
        self.buffer = []
    def setprompt(self, text: str):
        """Set the prompt text shown left of the input edit."""
        self.prompt = text
        self.promptdisp.setText(text)
    def push(self, line: str) -> None:
        """Execute entered command. Command may span multiple lines"""
        if line == 'clear':
            # 'clear' is handled locally: wipe history and the output pane.
            self.inpedit.clearhistory()
            self.outdisplay.clear()
        else:
            lines = line.split('\n')
            for line in lines:
                # Strip a leading '> ' or '. ' prompt from pasted lines.
                if re.match('^[\>\.] ', line):
                    line = line[2:]
                # Echo the input line (with prompt) into the output pane.
                self.writeoutput(self.prompt + line, self.inpfmt)
                self.setprompt('. ')
                self.buffer.append(line)
            # Built a command string from lines in the buffer
            source = "\n".join(self.buffer)
            # runsource returns True while the statement is incomplete.
            more = self.interp.runsource(source, '<console>')
            if not more:
                self.setprompt('> ')
                self.resetbuffer()
    def setfont(self, font: QFont) -> None:
        """Set font for input and display widgets. Should be monospaced"""
        self.outdisplay.setFont(font)
        self.inpedit.setFont(font)
    def write(self, line: str) -> None:
        """Capture stdout and display in outdisplay"""
        # Skip bare newline writes (ord 10) emitted by print().
        if len(line) != 1 or ord(line[0]) != 10:
            self.writeoutput(line.rstrip(), self.outfmt)
    def errorwrite(self, line: str) -> None:
        """Capture stderr and display in outdisplay"""
        self.writeoutput(line, self.errfmt)
    def writeoutput(self, line: str, fmt: QTextCharFormat = None) -> None:
        """Set text formatting and display line in outdisplay"""
        if fmt is not None:
            self.outdisplay.setCurrentCharFormat(fmt)
        self.outdisplay.appendPlainText(line.rstrip())
if __name__ == '__main__':
    app = QApplication(sys.argv)
    console = Console()
    console.setWindowTitle('Console')
    console.setfont(QFont('Lucida Sans Typewriter', 10))
    # Redirect stdout to console.write and stderr to console.errorwrite
    redirect = Redirect(console.errorwrite)
    # NOTE: streams stay redirected for the whole event loop, so any
    # print() output from the app itself lands in the console widget.
    with redirect_stdout(console), redirect_stderr(redirect):
        console.show()
sys.exit(app.exec_()) | vvs_app/Terminal_Test.py | import sys
import code
import re
from typing import Callable
from contextlib import redirect_stdout, redirect_stderr
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
class LineEdit(QLineEdit):
    """QLineEdit with a history buffer for recalling previous lines.

    Tab is accepted as input (inserted as four spaces).  Up/Down walk the
    history, Home/End jump to its first/last entry, and Return emits the
    `newline` signal with the stripped text.
    """
    newline = pyqtSignal(str)  # Emitted with the stripped line when return is pressed

    def __init__(self, history: int = 100) -> None:
        # BUG FIX: __init__ was annotated '-> LineEdit'; it returns None.
        super().__init__()
        self.historymax = history
        self.clearhistory()
        self.promptpattern = re.compile('^[>\.]')

    def clearhistory(self) -> None:
        """Clear history buffer"""
        self.historyindex = 0
        self.historylist = []

    def event(self, ev: QEvent) -> bool:
        """Intercept tab and arrow key presses.

        Insert 4 spaces when tab is pressed instead of moving to the next
        control.  When arrow up or down is pressed, select a line from the
        history buffer.  Emit the newline signal when return is pressed.
        """
        if ev.type() == QEvent.KeyPress:
            if ev.key() == int(Qt.Key_Tab):
                self.insert('    ')
                return True
            elif ev.key() == int(Qt.Key_Up):
                self.recall(self.historyindex - 1)
                return True
            elif ev.key() == int(Qt.Key_Down):
                self.recall(self.historyindex + 1)
                return True
            elif ev.key() == int(Qt.Key_Home):
                self.recall(0)
                return True
            elif ev.key() == int(Qt.Key_End):
                self.recall(len(self.historylist) - 1)
                return True
            elif ev.key() == int(Qt.Key_Return):
                self.returnkey()
                return True
        return super().event(ev)

    def returnkey(self) -> None:
        """Record the current line in history, emit `newline`, clear the edit."""
        text = self.text().rstrip()
        self.record(text)
        self.newline.emit(text)
        self.setText('')

    def recall(self, index: int) -> None:
        """Select a line from the history list (index clamped to the valid range)."""
        length = len(self.historylist)
        if length > 0:
            index = max(0, min(index, length - 1))
            self.setText(self.historylist[index])
            self.historyindex = index

    def record(self, line: str) -> None:
        """Add line to the history buffer, discarding the oldest entries when full."""
        self.historyindex += 1
        while len(self.historylist) >= self.historymax - 1:
            # BUG FIX: pop() removed the NEWEST entry, so once the buffer
            # filled up, recent lines were silently discarded while the
            # oldest ones stayed forever.  pop(0) drops the oldest instead.
            self.historylist.pop(0)
        self.historylist.append(line)
        self.historyindex = min(self.historyindex, len(self.historylist))
class Redirect:
    """File-like adapter whose write() forwards each line to *func*.

    Used to point sys.stdout/sys.stderr at an arbitrary callback.
    """
    def __init__(self, func: Callable) -> None:
        # BUG FIX: __init__ was annotated '-> Redirect'; it returns None.
        self.func = func

    def write(self, line: str) -> None:
        """Forward *line* to the wrapped callback."""
        self.func(line)
class Console(QWidget):
    """A GUI version of code.InteractiveConsole."""
    def __init__(
        self,
        context=locals(), # context for interpreter
        history: int = 20, # max lines in history buffer
        blockcount: int = 500 # max lines in output buffer
    ) -> None:
        # NOTE(review): the context default calls locals() ONCE at class
        # definition time, not per instance -- presumably intended to be the
        # caller's namespace; confirm before relying on it.
        super().__init__()
        self.setcontext(context)
        self.buffer = []  # pending input lines of a (possibly multi-line) command
        self.content = QGridLayout(self)
        self.content.setContentsMargins(0, 0, 0, 0)
        self.content.setSpacing(0)
        # Display for output and stderr
        self.outdisplay = QPlainTextEdit(self)
        self.outdisplay.setMaximumBlockCount(blockcount)
        self.outdisplay.setReadOnly(True)
        self.content.addWidget(self.outdisplay, 0, 0, 1, 2)
        # Use color to differentiate input, output and stderr
        self.inpfmt = self.outdisplay.currentCharFormat()
        self.outfmt = QTextCharFormat(self.inpfmt)
        self.outfmt.setForeground(QBrush(QColor(0, 0, 255)))
        self.errfmt = QTextCharFormat(self.inpfmt)
        self.errfmt.setForeground(QBrush(QColor(255, 0, 0)))
        # Display input prompt left of input edit
        self.promptdisp = QLineEdit(self)
        self.promptdisp.setReadOnly(True)
        self.promptdisp.setFixedWidth(15)
        self.promptdisp.setFrame(False)
        self.content.addWidget(self.promptdisp, 1, 0)
        self.setprompt('> ')
        # Enter commands here
        self.inpedit = LineEdit(history=history)
        self.inpedit.newline.connect(self.push)
        self.inpedit.setFrame(False)
        self.content.addWidget(self.inpedit, 1, 1)
    def setcontext(self, context):
        """Set context for interpreter"""
        self.interp = code.InteractiveInterpreter(context)
    def resetbuffer(self) -> None:
        """Reset the input buffer."""
        self.buffer = []
    def setprompt(self, text: str):
        """Set the prompt text shown left of the input edit."""
        self.prompt = text
        self.promptdisp.setText(text)
    def push(self, line: str) -> None:
        """Execute entered command. Command may span multiple lines"""
        if line == 'clear':
            # 'clear' is handled locally: wipe history and the output pane.
            self.inpedit.clearhistory()
            self.outdisplay.clear()
        else:
            lines = line.split('\n')
            for line in lines:
                # Strip a leading '> ' or '. ' prompt from pasted lines.
                if re.match('^[\>\.] ', line):
                    line = line[2:]
                # Echo the input line (with prompt) into the output pane.
                self.writeoutput(self.prompt + line, self.inpfmt)
                self.setprompt('. ')
                self.buffer.append(line)
            # Built a command string from lines in the buffer
            source = "\n".join(self.buffer)
            # runsource returns True while the statement is incomplete.
            more = self.interp.runsource(source, '<console>')
            if not more:
                self.setprompt('> ')
                self.resetbuffer()
    def setfont(self, font: QFont) -> None:
        """Set font for input and display widgets. Should be monospaced"""
        self.outdisplay.setFont(font)
        self.inpedit.setFont(font)
    def write(self, line: str) -> None:
        """Capture stdout and display in outdisplay"""
        # Skip bare newline writes (ord 10) emitted by print().
        if len(line) != 1 or ord(line[0]) != 10:
            self.writeoutput(line.rstrip(), self.outfmt)
    def errorwrite(self, line: str) -> None:
        """Capture stderr and display in outdisplay"""
        self.writeoutput(line, self.errfmt)
    def writeoutput(self, line: str, fmt: QTextCharFormat = None) -> None:
        """Set text formatting and display line in outdisplay"""
        if fmt is not None:
            self.outdisplay.setCurrentCharFormat(fmt)
        self.outdisplay.appendPlainText(line.rstrip())
if __name__ == '__main__':
    app = QApplication(sys.argv)
    console = Console()
    console.setWindowTitle('Console')
    console.setfont(QFont('Lucida Sans Typewriter', 10))
    # Redirect stdout to console.write and stderr to console.errorwrite
    redirect = Redirect(console.errorwrite)
    # NOTE: streams stay redirected for the whole event loop, so any
    # print() output from the app itself lands in the console widget.
    with redirect_stdout(console), redirect_stderr(redirect):
        console.show()
sys.exit(app.exec_()) | 0.561215 | 0.155687 |
import requests
import json
import re
from scraper import Scraper
from house import House
from bs4 import BeautifulSoup
class BeumerUtrecht(Scraper):
    """Scraper for beumerutrecht.nl house listings (Utrecht, for sale)."""
    url = 'https://beumerutrecht.nl/woningen/'
    def getPossiblePrices(self) -> list[int]:
        """Price breakpoints used to build (min, max) search ranges."""
        return [
            75000,
            100000,
            150000,
            250000,
            350000,
            450000,
            550000,
            650000,
        ]
    def getHouses(self) -> list[House]:
        """Fetch every detail page and parse each one into a House."""
        urls = self.getDetailsUrls()
        houses = []
        for url in urls:
            html = requests.get(url).text
            soup = BeautifulSoup(html, 'html.parser')
            # The <h1><span> on a detail page holds the street address.
            address = soup.find('h1').span.text
            price = soup.find('div', {'class': 'wonen__price'})
            price = self.onlyDigits(price.text)
            # Living area: the number before the unit, in the cell after the label.
            size = soup.find(text='Woonoppervlakte')
            size = size.find_next('td').text.split()[0]
            houses.append(
                House(
                    address=address,
                    link=url,
                    price=price,
                    size=size
                )
            )
        return houses
    def getDetailsUrls(self):
        """POST the live-search form and return absolute detail-page URLs."""
        # NOTE(review): 'min'/'max' shadow the builtins -- rename when touching this.
        min, max = self.getPriceRange()
        # Form payload mirroring the site's live search widget.
        post_data = {
            '__live': '1',
            '__templates[]': ['search', 'loop'],
            '__maps': 'all',
            'makelaar[]': ['beumermaarssen.nl', 'beumerutrecht.nl', 'beumervleutendemeern.nl', 'beumerwijkbijduurstede.nl'],
            'koophuur': 'koop',
            'plaats_postcode': 'Utrecht',
            'radiuscustom': '',
            'typewoning': '',
            'prijs[min]': str(min),
            'prijs[max]': str(max),
            'status[]': ['beschikbaar', ''],
            'woningsoort[]': None,
            'liggingen[]': None,
            'buitenruimtes[]': None,
            'bouwperiode[]': None,
            'energielabel[]': None,
            'voorzieningen[]': None,
            'openHuis[]': None,
            'nieuwAanbod[]': None,
            'woonOppervlakte': '',
            'perceelOppervlakte': '',
            'aantalKamers': '',
            'slaapkamers': '',
            'subscribe_email': '',
            'orderby': 'custom_order:asc,publicatiedatum:desc',
        }
        response = requests.post(self.url, data=post_data)
        # 'maps' entries carry the relative detail path under key 'a'.
        data = json.loads(response.text)['maps']
        urls = list()
        for entry in data:
            urls.append(self.url + entry['a'])
return urls | scrapers/beumerutrecht.py | import requests
import json
import re
from scraper import Scraper
from house import House
from bs4 import BeautifulSoup
class BeumerUtrecht(Scraper):
    """Scrapes for-sale house listings from beumerutrecht.nl."""

    url = 'https://beumerutrecht.nl/woningen/'

    def getPossiblePrices(self) -> list[int]:
        # Price breakpoints (EUR) from which search ranges are composed.
        return [75000, 100000, 150000, 250000, 350000, 450000, 550000, 650000]

    def getHouses(self) -> list[House]:
        """Visit each matching detail page and turn it into a House record."""
        found = []
        for detail_url in self.getDetailsUrls():
            page = BeautifulSoup(requests.get(detail_url).text, 'html.parser')
            # The address sits in the <span> inside the page's <h1>.
            street = page.find('h1').span.text
            price_node = page.find('div', {'class': 'wonen__price'})
            asking_price = self.onlyDigits(price_node.text)
            # Living area: the value cell after the 'Woonoppervlakte' label.
            area_label = page.find(text='Woonoppervlakte')
            living_area = area_label.find_next('td').text.split()[0]
            found.append(House(address=street,
                               link=detail_url,
                               price=asking_price,
                               size=living_area))
        return found

    def getDetailsUrls(self):
        """POST the live-search form and collect listing detail URLs."""
        low, high = self.getPriceRange()
        form = {
            '__live': '1',
            '__templates[]': ['search', 'loop'],
            '__maps': 'all',
            'makelaar[]': ['beumermaarssen.nl', 'beumerutrecht.nl', 'beumervleutendemeern.nl', 'beumerwijkbijduurstede.nl'],
            'koophuur': 'koop',
            'plaats_postcode': 'Utrecht',
            'radiuscustom': '',
            'typewoning': '',
            'prijs[min]': str(low),
            'prijs[max]': str(high),
            'status[]': ['beschikbaar', ''],
            'woningsoort[]': None,
            'liggingen[]': None,
            'buitenruimtes[]': None,
            'bouwperiode[]': None,
            'energielabel[]': None,
            'voorzieningen[]': None,
            'openHuis[]': None,
            'nieuwAanbod[]': None,
            'woonOppervlakte': '',
            'perceelOppervlakte': '',
            'aantalKamers': '',
            'slaapkamers': '',
            'subscribe_email': '',
            'orderby': 'custom_order:asc,publicatiedatum:desc',
        }
        reply = requests.post(self.url, data=form)
        listing_entries = json.loads(reply.text)['maps']
        return [self.url + entry['a'] for entry in listing_entries]
from csv import reader
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import datetime
from collections import Counter
class JobData(object):
    """Loads scraped job postings from CSV and computes/plots summaries.

    Attributes:
        col_names: column names applied to the header-less CSV.
        environment: 'test' or 'live'; 'live' also shows plots on screen.
        df: the loaded DataFrame of postings.
        date: scrape timestamp, set by count_days().
        state_dict: state -> post count, set by count_states().
        time_dict: days-ago ('0'..'7') -> post count, set by count_days().
        top_ten: tag -> count for the ten most common tags, set by count_tags().
    """

    def __init__(self, environment=None):
        """Load the CSV appropriate for the given environment.

        Args:
            environment: 'test' loads the bundled sample CSV; None loads
                the live 'jobs.csv'.

        Raises:
            ValueError: if environment is neither 'test' nor None.
        """
        self.col_names = [
            'date',
            'link',
            'title',
            'posted',
            'company',
            'location',
            'tags',
        ]
        if environment == 'test':
            self.environment = 'test'
            self.df = pd.read_csv(
                'src/sample_jobs.csv',
                delimiter='$',
                names=self.col_names,
                header=None)
        elif environment is None:  # pragma no cover
            self.environment = 'live'
            self.df = pd.read_csv(
                'jobs.csv',
                delimiter='$',
                names=self.col_names,
                header=None)
        else:
            # Previously an unknown value silently left self.environment and
            # self.df unset, so later calls died with AttributeError; fail fast.
            raise ValueError("environment must be 'test' or None")
        self.date = None
        self.state_dict = None
        self.time_dict = None
        self.top_ten = None

    def count_states(self):
        """Count postings per state parsed from 'City, ST' locations."""
        state_list = []
        for item in self.df['location']:
            # Skip the comma and the following space to get the state code.
            index = item.find(',')
            state = str(item[index + 2:])
            state_list.append(state)
        self.state_dict = dict(Counter(state_list))
        return self.state_dict

    def chart_states(self):
        """Chart the per-state counts on a bar graph.

        Raises:
            ValueError: if count_states() has not been run yet.
        """
        if self.state_dict:
            states = list(self.state_dict.keys())
            y_pos = np.arange(len(states))
            count = list(self.state_dict.values())
            plt.bar(y_pos, count, align='center', alpha=0.5)
            plt.xticks(y_pos, states)
            plt.ylabel('Number of Posts')
            plt.title('Number of Posts Per State')
            if self.environment == 'live':
                plt.show()  # pragma no cover
            return [states, count]
        else:
            raise ValueError('No states to chart.')

    def count_days(self):
        """Bucket postings by age in days into keys '0'..'7'.

        Parses 'posted' strings of the form '2h ago', '3d ago', '1w ago'
        or 'yesterday'.  Day counts outside the tracked 0-7 window are
        ignored (previously they raised KeyError).
        """
        self.time_dict = {}
        for i in range(8):
            self.time_dict[str(i)] = 0
        # The first row's 'date' column holds the scrape timestamp.
        today = self.df['date'][0]
        self.date = datetime.datetime.strptime(today, '%Y-%m-%d %H:%M:%S.%f')
        for item in self.df['posted']:
            if item == 'yesterday':
                self.time_dict['1'] += 1
                continue  # no '<N>h/d/w ago' suffix to parse
            index = item.find(' ago')
            age = item[:index]
            if age[-1] == 'h':
                hours = age.replace("h", "")
                if '<' in hours:
                    # '<1h ago' style postings count as zero hours old.
                    hours = 0
                get_date = self.date - datetime.timedelta(hours=int(hours))
                if self.date.date() == get_date.date():
                    self.time_dict['0'] += 1
                    self.time_dict['1'] += 1
            if age[-1] == 'd':
                days = age.replace("d", "")
                # Guard against postings older than the tracked week.
                if days in self.time_dict:
                    self.time_dict[days] += 1
            if age[-1] == 'w':
                # Anything a week or more old lands in the last bucket.
                self.time_dict['7'] += 1
        return self.time_dict

    def chart_days(self):
        """Chart the number of posts over the previous week.

        Raises:
            ValueError: if count_days() has not been run yet.
        """
        if self.time_dict:
            days = list(self.time_dict.keys())
            count = list(self.time_dict.values())
            plt.plot(days, count)
            x = np.arange(8)
            week_list = self._get_weekdays()
            plt.ylabel('Posts')
            plt.title('Post Frequency')
            plt.xticks(x, week_list)
            if self.environment == 'live':
                plt.show()  # pragma no cover
            return [days, count]
        else:
            raise ValueError('No days available')

    def _get_weekdays(self):
        """Build the 8 x-axis labels, rotated so the scrape day is first."""
        days = ['Sunday',
                'Saturday',
                'Friday',
                'Thursday',
                'Wednesday',
                'Tuesday',
                'Monday']
        weekday = self.date.weekday()
        if weekday == 0:
            week = days[:]
        else:
            week = days[weekday:] + days[:weekday]
        # First slot is always the scrape day; last slot closes the window.
        week[0] = 'Today'
        week.append('One Week')
        return week

    def count_tags(self):
        """Return the ten most common tags across all posts."""
        tag_list = []
        tag_count = []
        for item in self.df['tags']:
            # Split the comma-separated tag string and drop empty entries.
            to_list = [x.strip() for x in item.split(',')]
            filter_list = list(filter(None, to_list))
            tag_list = tag_list + filter_list
        tag_dict = dict(Counter(tag_list))
        tag_names = sorted(tag_dict, key=tag_dict.get, reverse=True)[:10]
        for item in tag_names:
            tag_count.append(tag_dict[item])
        self.top_ten = dict(zip(tag_names, tag_count))
        return self.top_ten

    def chart_tags(self):
        """Chart the top-ten tag counts on a bar graph.

        Raises:
            ValueError: if count_tags() has not been run yet.
        """
        if self.top_ten:
            count = list(self.top_ten.values())
            tags = list(self.top_ten.keys())
            y_pos = np.arange(len(tags))
            plt.bar(y_pos, count, align='center', alpha=0.5)
            plt.xticks(y_pos, tags)
            plt.ylabel('Number of Posts')
            plt.title('Top 10 Tags')
            if self.environment == 'live':
                plt.show()  # pragma no cover
            return [tags, count]
        else:
            raise ValueError('Top Ten not available')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import datetime
from collections import Counter
class JobData(object):
    """JobData class for working with data from CSV file."""

    def __init__(self, environment=None):
        """Initialize instance of JobData with necessary properties.

        environment: 'test' loads the bundled sample CSV; None loads the
        live 'jobs.csv'.  The CSV is '$'-delimited with no header row.
        """
        self.col_names = [
            'date',
            'link',
            'title',
            'posted',
            'company',
            'location',
            'tags',
        ]
        if environment == 'test':
            self.environment = 'test'
            self.df = pd.read_csv(
                'src/sample_jobs.csv',
                delimiter='$',
                names=self.col_names,
                header=None)
        if environment is None:  # pragma no cover
            self.environment = 'live'
            self.df = pd.read_csv(
                'jobs.csv',
                delimiter='$',
                names=self.col_names,
                header=None)
        # NOTE(review): any other environment value leaves self.environment
        # and self.df unset — later method calls would raise AttributeError.
        self.date = None
        self.state_dict = None
        self.time_dict = None
        self.top_ten = None

    def count_states(self):
        """Function to count instances of states."""
        state_list = []
        for item in self.df['location']:
            # Locations look like 'City, ST'; take what follows ', '.
            index = item.find(',')
            state = str(item[index + 2:])
            state_list.append(state)
        self.state_dict = dict(Counter(state_list))
        return self.state_dict

    def chart_states(self):
        """Function to chart state count on bar graph."""
        if self.state_dict:
            states = list(self.state_dict.keys())
            y_pos = np.arange(len(states))
            count = list(self.state_dict.values())
            plt.bar(y_pos, count, align='center', alpha=0.5)
            plt.xticks(y_pos, states)
            plt.ylabel('Number of Posts')
            plt.title('Number of Posts Per State')
            if self.environment == 'live':
                plt.show()  # pragma no cover
            return [states, count]
        else:
            raise ValueError('No states to chart.')

    def count_days(self):
        """Function to organize times of posts.

        Buckets 'posted' strings ('2h ago', '3d ago', '1w ago',
        'yesterday') into keys '0'..'7' of self.time_dict.
        """
        self.time_dict = {}
        for i in range(8):
            self.time_dict[str(i)] = 0
        # The first row's 'date' column holds the scrape timestamp.
        today = self.df['date'][0]
        self.date = datetime.datetime.strptime(today, '%Y-%m-%d %H:%M:%S.%f')
        for item in self.df['posted']:
            if item == 'yesterday':
                self.time_dict['1'] += 1
            # For 'yesterday' the parsing below is a harmless no-op:
            # find() returns -1 and the suffix matches no branch.
            index = item.find(' ago')
            time = item[:index]
            if time[-1] == 'h':
                hours = time.replace("h", "")
                if '<' in hours:
                    # '<1h ago' style postings count as zero hours old.
                    hours = 0
                get_date = self.date - datetime.timedelta(hours=int(hours))
                if self.date.date() == get_date.date():
                    self.time_dict['0'] += 1
                    self.time_dict['1'] += 1
            if time[-1] == 'd':
                days = time.replace("d", "")
                # NOTE(review): a day count above 7 would raise KeyError here.
                self.time_dict[days] += 1
            if time[-1] == 'w':
                # Anything a week or more old lands in the last bucket.
                self.time_dict['7'] += 1
        return self.time_dict

    def chart_days(self):
        """Function to chart the number of posts over the previous week."""
        if self.time_dict:
            days = list(self.time_dict.keys())
            count = list(self.time_dict.values())
            plt.plot(days, count)
            x = np.arange(8)
            week_list = self._get_weekdays()
            plt.ylabel('Posts')
            plt.title('Post Frequency')
            plt.xticks(x, week_list)
            if self.environment == 'live':
                plt.show()  # pragma no cover
            return [days, count]
        else:
            raise ValueError('No days available')

    def _get_weekdays(self):
        """Function to get day of the week and create list accordingly."""
        days = ['Sunday',
                'Saturday',
                'Friday',
                'Thursday',
                'Wednesday',
                'Tuesday',
                'Monday']
        # Rotate the list by the scrape day's weekday, then relabel ends.
        weekday = self.date.weekday()
        if weekday == 0:
            week = days[:]
        else:
            week = days[weekday:] + days[:weekday]
        week[0] = 'Today'
        week.append('One Week')
        return week

    def count_tags(self):
        """Function to return top ten tags for all posts."""
        tag_list = []
        tag_count = []
        for item in self.df['tags']:
            # Split the comma-separated tag string and drop empty entries.
            to_list = [x.strip() for x in item.split(',')]
            filter_list = list(filter(None, to_list))
            tag_list = tag_list + filter_list
        tag_dict = dict(Counter(tag_list))
        tag_names = sorted(tag_dict, key=tag_dict.get, reverse=True)[:10]
        for item in tag_names:
            tag_count.append(tag_dict[item])
        self.top_ten = dict(zip(tag_names, tag_count))
        return self.top_ten

    def chart_tags(self):
        """Function to chart tag count on bar graph."""
        if self.top_ten:
            count = list(self.top_ten.values())
            tags = list(self.top_ten.keys())
            y_pos = np.arange(len(tags))
            plt.bar(y_pos, count, align='center', alpha=0.5)
            plt.xticks(y_pos, tags)
            plt.ylabel('Number of Posts')
            plt.title('Top 10 Tags')
            if self.environment == 'live':
                plt.show()  # pragma no cover
            return [tags, count]
        else:
            raise ValueError('Top Ten not available') | 0.660501 | 0.349089
from docsisMon.ipDevices import ipDevice
from docsisMon.mibs import mibs
class Cmts(ipDevice):
    """A CMTS device reachable over SNMP.

    Inherited from ipDevice, which provides:
      - ipMngmt: the device's management IP
      - snmpIf: the SNMP interface used for all queries
    Use getCm() to obtain a CmInCmts view of a single cable modem.
    """

    def __init__(self, ip):
        # The ipDevice base class sets up IP management and SNMP access.
        super().__init__(ip)
        # Tag this device as a CMTS.
        self.__deviceType = "CMTS"
        # Class used by getCm() to wrap one modem entry of this CMTS.
        self.cm = CmInCmts

    def getCm(self, cmMac):
        """Return a CmInCmts object for the modem with MAC ``cmMac``."""
        return self.cm(self.snmpIf, cmMac)
class CmInCmts():
    """A cable modem as seen from inside a CMTS, accessed via snmpIf.

    The modem's row pointer in the CMTS table is resolved once at
    construction time; IP address and status are fetched through it.
    """

    def __init__(self, snmpIf, cmMac):
        self.snmpIf = snmpIf
        # NOTE(review): "CmInCTMS" looks like a typo of "CmInCmts" — kept
        # as-is in case other code compares against this exact string.
        self.__deviceType = "CmInCTMS"
        self.__mac = cmMac
        # Row pointer into the CMTS modem table; None when the CMTS does
        # not know this MAC.
        self.__ptr = self.getPtr()
        self.__ip = self.getIP()

    def _query(self, oid):
        """Fetch a single OID over SNMP and return its value."""
        return self.snmpIf.get(oid)[oid]

    def getPtr(self):
        """Return the CMTS table pointer for this modem's MAC."""
        return self._query(mibs.oid['docsIfCmtsCmPtr'] + '.' + getMacDec(self.__mac))

    def getIP(self):
        """Return the modem's IP address, or None without a table pointer."""
        # 'is None' replaces the original '== None' (identity is the idiom).
        if self.__ptr is None:
            return None
        return self._query(mibs.oid['docsIfCmtsCmStatusIpAddress'] + '.' + self.__ptr)

    def getStatus(self):
        """Return the modem's status value, or None without a table pointer."""
        if self.__ptr is None:
            return None
        return self._query(mibs.oid['docsIfCmtsCmStatusValue'] + '.' + self.__ptr)
def getMacDec(cmMac):
    """Convert a 12-hex-digit MAC string into a dotted-decimal OID suffix.

    Example: '001a2b3c4d5e' -> '0.26.43.60.77.94'

    Args:
        cmMac: MAC address as 12 hex characters, no separators.

    Returns:
        The six octet values as decimal integers joined with '.'.
    """
    # One pass over the six 2-character octets replaces the original
    # six-term string concatenation.
    return '.'.join(str(int(cmMac[i:i + 2], 16)) for i in range(0, 12, 2))
from docsisMon.mibs import mibs
class Cmts(ipDevice):
    """ Represents a CMTS: This is a inheritance from ipDevice
    public methods and attributes:
    - ipMngmt: Device Ip management
    - snmpIf: SNMP InterFace used to get all the infomation
    - getCm(): return CmInCmts Object
    """

    def __init__(self, ip):
        # inheritance from IP_device: base class sets up IP management
        # and the SNMP interface (snmpIf) used by getCm().
        ipDevice.__init__(self, ip)
        # setting deviceType to CMTS type
        self.__deviceType = "CMTS"
        # cm virtual is a CM object in a CMTS: the class instantiated
        # by getCm() for a single modem.
        self.cm = CmInCmts

    def getCm(self, cmMac):
        """Return a CmInCmts wrapper for the modem with the given MAC."""
        return self.cm(self.snmpIf, cmMac)
class CmInCmts():
    """Represents a CM in a CMTS acceced via snmpIf"""

    def __init__(self, snmpIf, cmMac):
        self.snmpIf = snmpIf
        # NOTE(review): "CmInCTMS" looks like a typo of "CmInCmts".
        self.__deviceType = "CmInCTMS"
        self.__mac = cmMac
        # Resolve the CMTS table row pointer once; getIP()/getStatus()
        # reuse it for their lookups.
        self.__ptr = self.getPtr()
        self.__ip = self.getIP()

    def getPtr(self):
        """Return the CMTS table pointer for this modem's MAC."""
        # OID is <docsIfCmtsCmPtr>.<MAC rendered in dotted decimal>.
        oid = (mibs.oid['docsIfCmtsCmPtr'] + '.' + getMacDec(self.__mac),)
        SnmpObj = self.snmpIf.get(*oid)
        return SnmpObj[mibs.oid['docsIfCmtsCmPtr'] + '.' + getMacDec(self.__mac)]

    def getIP(self):
        """Return the modem's IP address, or None if it has no pointer."""
        # No pointer means the CMTS does not know this modem.
        if self.__ptr == None: return None
        oid = (mibs.oid['docsIfCmtsCmStatusIpAddress'] + '.' + self.__ptr,)
        SnmpObj = self.snmpIf.get(*oid)
        return SnmpObj[mibs.oid['docsIfCmtsCmStatusIpAddress'] + '.' + self.__ptr]

    def getStatus(self):
        """Return the modem's status value, or None if it has no pointer."""
        if self.__ptr == None: return None
        oid = (mibs.oid['docsIfCmtsCmStatusValue'] + '.' + self.__ptr,)
        SnmpObj = self.snmpIf.get(*oid)
        return SnmpObj[mibs.oid['docsIfCmtsCmStatusValue'] + '.' + self.__ptr]
def getMacDec(cmMac):
    """Translate a 12-hex-digit MAC string into a dotted-decimal OID fragment."""
    octets = []
    for start in (0, 2, 4, 6, 8, 10):
        # Each pair of hex characters is one octet.
        octets.append(str(int('0x' + cmMac[start:start + 2], 16)))
    return '.'.join(octets)
from itertools import chain
import time
import os
import math
from tornado_sqlalchemy import as_future
from tornado.gen import multi
from PIL import Image, ImageDraw, ImageColor, ImageFont
from models import DotaProPlayer, DotaHeroes, DotaItem, DotaProTeam
from image_generation.helpers import draw_text_outlined_center_align, draw_text_right_align, draw_image, \
draw_image_centered
class PostGameMixin:
    """Mixin that renders a post-game summary image for one Dota match."""

    async def generate_post_game(self, game_id):
        """Render the post-game scoreboard and gold/XP graph for ``game_id``.

        Writes ``<generated_root>/post_game-<game_id>.png`` (removing any
        previous file first).  Match data comes from an OpenDota JSON dump;
        reference data (heroes, items, players, teams) is loaded from the
        database session.  Layout constants below assume a 1920x1080
        background — TODO confirm against background3.png.
        """
        generated_path = os.path.join(self.generated_root, "post_game-" + str(game_id) + ".png")
        if os.path.exists(generated_path):
            os.remove(generated_path)
        # Generate image
        composition = Image.open(os.path.join(self.assets_root, 'background3.png')).convert('RGBA')
        image_draw = ImageDraw.Draw(composition)
        # Prepare fonts
        rift_player_nickname = ImageFont.truetype(
            os.path.join(self.assets_root, 'fonts', 'rift', 'fort_foundry_rift_bold_italic.otf'), 46)
        noto_cjk_player_nickname = ImageFont.truetype(
            os.path.join(self.assets_root, 'fonts', 'noto', 'noto_sans_cjk_bold.otf'), 38)
        rift_player_name = ImageFont.truetype(
            os.path.join(self.assets_root, 'fonts', 'rift', 'fort_foundry_rift_regular.otf'), 26)
        rift_kda = ImageFont.truetype(
            os.path.join(self.assets_root, 'fonts', 'rift', 'fort_foundry_rift_bold.otf'), 32)
        rift_dmg = ImageFont.truetype(
            os.path.join(self.assets_root, 'fonts', 'rift', 'fort_foundry_rift_regular.otf'), 32)
        rift_team = ImageFont.truetype(
            os.path.join(self.assets_root, 'fonts', 'rift', 'fort_foundry_rift_bold.otf'), 50)
        # Get data
        game_json = self.download_opendata_if_necessary(game_id)
        if game_json is None or game_json['version'] is None:
            # Unparsed/unavailable replay: render an error placeholder image
            # (timestamped so repeated failures are distinguishable).
            image_draw.text([100, 100], str(time.time()), font=rift_player_nickname, fill=self.colors['light_red'])
            image_draw.text([100, 200], "ERROR WITH OPENDOTA", font=rift_player_nickname, fill=self.colors['light_red'])
            composition.save(generated_path)
            return
        # Layout constants for the left-hand scoreboard column.
        hero_x = 350
        hero_y_side_padding = 30
        hero_height = 90
        hero_width = int(256 * hero_height / 144)
        hero_y_padding = 10
        item_padding = 4
        item_height = int((hero_height - item_padding) / 2)
        item_width = int(88 * item_height / 64)
        player_name_x_padding = -40
        player_name_y_padding = 0
        player_nickname_y_padding = 50
        kda_padding_x = 5
        # Y offset per player_slot: slots 0-4 fill the top rows,
        # slots 128-132 the bottom rows.
        hero_y = {0: hero_y_side_padding,
                  1: hero_y_side_padding + hero_height + hero_y_padding,
                  2: hero_y_side_padding + 2 * (hero_height + hero_y_padding),
                  3: hero_y_side_padding + 3 * (hero_height + hero_y_padding),
                  4: hero_y_side_padding + 4 * (hero_height + hero_y_padding),
                  128: 1080 - hero_y_side_padding - hero_height * 5 - hero_y_padding * 4,
                  129: 1080 - hero_y_side_padding - hero_height * 4 - hero_y_padding * 3,
                  130: 1080 - hero_y_side_padding - hero_height * 3 - hero_y_padding * 2,
                  131: 1080 - hero_y_side_padding - hero_height * 2 - hero_y_padding,
                  132: 1080 - hero_y_side_padding - hero_height}
        # Per-slot accent colors for the stripe next to each hero portrait.
        hero_color = {0: self.colors['hero_blue'],
                      1: self.colors['hero_teal'],
                      2: self.colors['hero_purple'],
                      3: self.colors['hero_yellow'],
                      4: self.colors['hero_orange'],
                      128: self.colors['hero_pink'],
                      129: self.colors['hero_grey'],
                      130: self.colors['hero_aqua'],
                      131: self.colors['hero_green'],
                      132: self.colors['hero_brown']}
        hero_color_width = 10
        # Get database data (the four reference tables, fetched concurrently).
        heroes, items, players, teams = await multi([as_future(self.session.query(DotaHeroes).all),
                                                     as_future(self.session.query(DotaItem).all),
                                                     as_future(self.session.query(DotaProPlayer).all),
                                                     as_future(self.session.query(DotaProTeam).all)])
        # Draw Heroes & Items
        for player in game_json['players']:
            hero = next((hero for hero in heroes if hero.id == player['hero_id']), None)
            # Unknown ids fall back to the 'error' placeholder asset.
            if hero is None:
                short_name = 'error'
            else:
                short_name = hero.short_name
            hero_image = Image.open(os.path.join(self.assets_root, 'dota', 'hero_rectangle', short_name + '.png')) \
                .convert('RGBA')
            draw_image(composition, hero_image, [hero_x, hero_y[player['player_slot']]], [None, hero_height])
            # Draw Items (2 rows x 3 columns of inventory slots).
            for j in range(0, 2):
                for i in range(0, 3):
                    key = 'item_{0}'.format(j * 3 + i)
                    if player[key] != 0:
                        item = next((item for item in items if item.id == player[key]), None)
                        if item is None:
                            short_name = 'error'
                        else:
                            short_name = item.name
                        item_path = os.path.join(self.assets_root, 'dota', 'item_rectangle', short_name + '.png')
                        if not os.path.exists(item_path):
                            item_path = os.path.join(self.assets_root, 'dota', 'item_rectangle', 'default.png')
                        item_image = Image.open(item_path).convert('RGBA')
                    else:
                        # Empty slot: draw the empty-slot background.
                        item_image = Image.open(os.path.join(self.assets_root, 'dota', 'item_rectangle', 'empty.png')) \
                            .convert('RGBA')
                    draw_image(composition,
                               item_image,
                               [hero_x + hero_width + (i + 1) * item_padding + i * item_width,
                                hero_y[player['player_slot']] + j * (item_height + item_padding)],
                               [None, item_height])
            # Draw neutral item (a seventh slot, vertically centered).
            if player["item_neutral"] != 0:
                item = next((item for item in items if item.id == player["item_neutral"]), None)
                if item is None:
                    short_name = 'error'
                else:
                    short_name = item.name
                item_path = os.path.join(self.assets_root, 'dota', 'item_rectangle', short_name + '.png')
                if not os.path.exists(item_path):
                    item_path = os.path.join(self.assets_root, 'dota', 'item_rectangle', 'default.png')
                item_image = Image.open(item_path).convert('RGBA')
            else:
                item_image = Image.open(os.path.join(self.assets_root, 'dota', 'item_rectangle', 'empty.png')) \
                    .convert('RGBA')
            draw_image(composition,
                       item_image,
                       [hero_x + hero_width + 5 * item_padding + 3 * item_width,
                        hero_y[player['player_slot']] + int((item_height + item_padding) / 2)],
                       [None, item_height])
            # Draw icons (sword = hero damage marker, pasted via an overlay
            # layer so alpha compositing keeps transparency intact).
            sword_image = Image.open(os.path.join(self.assets_root, 'icons', 'sword.png')).convert('RGBA')
            sword_image = sword_image.resize([int(item_height / 2), int(item_height / 2)], Image.LANCZOS)
            in_place_sword = Image.new('RGBA', (composition.size[0], composition.size[1]))
            in_place_sword.paste(sword_image,
                                 box=[hero_x + hero_width + 4 * (item_width + item_padding + kda_padding_x),
                                      hero_y[player['player_slot']] + item_height + 15],
                                 mask=sword_image)
            composition = Image.alpha_composite(composition, in_place_sword)
            # Draw kda skull (marker for the K/D/A line).
            skull_image = Image.open(os.path.join(self.assets_root, 'icons', 'skull.png')).convert('RGBA')
            skull_image = skull_image.resize([int(item_height / 2), int(item_height / 2)], Image.LANCZOS)
            in_place_skull = Image.new('RGBA', (composition.size[0], composition.size[1]))
            in_place_skull.paste(skull_image,
                                 box=[hero_x + hero_width + 4 * (item_width + item_padding + kda_padding_x),
                                      hero_y[player['player_slot']] + 12],
                                 mask=skull_image)
            composition = Image.alpha_composite(composition, in_place_skull)
        # Draw colors (re-acquire the draw handle: alpha_composite above
        # replaced the composition object).
        image_draw = ImageDraw.Draw(composition)
        for player in game_json['players']:
            image_draw.rectangle([hero_x - hero_color_width,
                                  hero_y[player['player_slot']],
                                  hero_x,
                                  hero_y[player['player_slot']] + hero_height - 1],
                                 fill=hero_color[player['player_slot']])
        # Draw player names & pseudo
        for player in game_json['players']:
            pro_player = next((pro_player for pro_player in players if pro_player.account_id == player['account_id']),
                              None)
            player_name_font = rift_player_nickname
            if pro_player is None:
                name = '-'
                nickname = '-'
            else:
                name = pro_player.name
                nickname = pro_player.nickname
            # Non-ASCII nicknames (byte length != char length) need the
            # CJK-capable font.
            if not len(nickname) == len(nickname.encode()):
                player_name_font = noto_cjk_player_nickname
            draw_text_right_align(image_draw, [hero_x + player_name_x_padding,
                                               hero_y[player['player_slot']] + player_name_y_padding],
                                  nickname, player_name_font, fill=self.colors['white'])
            draw_text_right_align(image_draw, [hero_x + player_name_x_padding,
                                               hero_y[player[
                                                   'player_slot']] + player_name_y_padding + player_nickname_y_padding],
                                  name, rift_player_name, fill=self.colors['white'])
            kda = "{0}/{1}/{2}".format(player['kills'], player['deaths'], player['assists'])
            image_draw.text([hero_x + hero_width + 4 * (item_width + item_padding + kda_padding_x) + int(
                item_height / 2) + 3 * item_padding,
                             hero_y[player['player_slot']]],
                            text=kda, font=rift_kda, fill=self.colors['white'])
            image_draw.text([hero_x + hero_width + 4 * (item_width + item_padding + kda_padding_x) + int(
                item_height / 2) + 3 * item_padding,
                             hero_y[player['player_slot']] + item_height + item_padding],
                            text=str(player['hero_damage']), font=rift_dmg, fill=self.colors['white'])
        # Draw graph: gold/XP advantage over time, centered on y=540 and
        # scaled so the largest absolute swing (in thousands) fits.
        radiant_gold_adv = game_json['radiant_gold_adv']
        radiant_xp_adv = game_json['radiant_xp_adv']
        graph_start_x = 910
        graph_end_x = 1850
        graph_y = 400
        graph_width = 4
        graph_graduation_x = 10
        gold_xp_max = 0
        for item in chain(radiant_gold_adv, radiant_xp_adv):
            if abs(item) > gold_xp_max: gold_xp_max = abs(item)
        # Round the max down to whole thousands, plus one unit of headroom.
        gold_xp_max = int((gold_xp_max - gold_xp_max % 1000) / 1000 + 1)
        duration = math.ceil(game_json['duration'] / 60)
        graph_x_step = math.floor((graph_end_x - graph_start_x) / duration)
        graph_y_step = math.floor(graph_y / gold_xp_max)
        # Axes: horizontal zero line and vertical value axis.
        image_draw.line([graph_start_x, 540 - int(graph_width / 2), graph_end_x, 540 - int(graph_width / 2)],
                        fill=self.colors['white'], width=graph_width)
        image_draw.line(
            [graph_start_x - int(graph_width / 2), 540 - graph_y, graph_start_x - int(graph_width / 2), 540 + graph_y],
            fill=self.colors['white'], width=graph_width)
        # Faint gridlines every 5k above and below the zero line.
        i = 5
        while i < gold_xp_max:
            image_draw.line([graph_start_x,
                             540 + graph_y_step * i,
                             graph_end_x,
                             540 + graph_y_step * i],
                            fill=self.colors['grey'], width=1)
            image_draw.line([graph_start_x,
                             540 - graph_y_step * i,
                             graph_end_x,
                             540 - graph_y_step * i],
                            fill=self.colors['grey'], width=1)
            i += 5
        # Tick marks every 5 minutes along the time axis.
        i = 5
        while i < duration:
            image_draw.line([graph_start_x + i * graph_x_step,
                             540 - graph_graduation_x - 2,
                             graph_start_x + i * graph_x_step,
                             540 + graph_graduation_x - 1],
                            fill=self.colors['white'], width=graph_width)
            i += 5
        # Plot the per-minute XP (blue) and gold (yellow) advantage lines.
        for i in range(1, duration):
            image_draw.line([graph_start_x + (i - 1) * graph_x_step,
                             540 - int(graph_y_step * (radiant_xp_adv[i - 1] / 1000)),
                             graph_start_x + i * graph_x_step,
                             540 - int(graph_y_step * (radiant_xp_adv[i] / 1000))], fill=self.colors['blue'], width=6)
            image_draw.line([graph_start_x + (i - 1) * graph_x_step,
                             540 - int(graph_y_step * (radiant_gold_adv[i - 1] / 1000)),
                             graph_start_x + i * graph_x_step,
                             540 - int(graph_y_step * (radiant_gold_adv[i] / 1000))], fill=self.colors['yellow'],
                            width=6)
        # Mark courier/Roshan/building kills along the time axis, above or
        # below the graph depending on which side they belong to.
        for objectif in game_json['objectives']:
            objectif_x = 0
            objectif_y = 0
            image = 'error'
            if objectif['type'] in ['CHAT_MESSAGE_COURIER_LOST', 'building_kill', 'CHAT_MESSAGE_ROSHAN_KILL']:
                objectif_x = graph_start_x + int(graph_x_step * objectif['time'] / 60)
                if objectif['type'] == 'CHAT_MESSAGE_COURIER_LOST':
                    image = 'chick_kill'
                    if objectif['team'] == 2:
                        objectif_y = 540 - graph_y - 35
                    else:
                        objectif_y = 540 + graph_y + 35
                elif objectif['type'] == 'CHAT_MESSAGE_ROSHAN_KILL':
                    image = 'roshan_kill'
                    if objectif['team'] == 2:
                        objectif_y = 540 - graph_y - 35
                    else:
                        objectif_y = 540 + graph_y + 35
                else:
                    # building_kill: the key names the destroyed building;
                    # 'badguys' buildings go on the top band.
                    if 'badguys' in objectif['key']:
                        objectif_y = 540 - graph_y - 35
                    else:
                        objectif_y = 540 + graph_y + 35
                    if 'tower' in objectif['key']:
                        image = 'tower_kill'
                    elif 'healers' in objectif['key']:
                        image = 'shrine_kill'
                    elif 'melee_rax' in objectif['key']:
                        image = 'rax_kill'
            if image == 'error':
                # Objective types we do not visualize are skipped.
                continue
            image_icon = Image.open(os.path.join(self.assets_root, 'icons', image + '.png')).convert('RGBA')
            composition = draw_image_centered(composition, image_icon, [objectif_x, objectif_y], [35, 35])
        # Mark key item purchase timings above/below the graph per side.
        for player in game_json['players']:
            for item_purchase in player['purchase_log']:
                if item_purchase['key'] in ['black_king_bar', 'blink', 'sheepstick', 'silver_edge', 'refresher',
                                            'orchid']:
                    if player['player_slot'] > 100:
                        item_y = 540 + graph_y
                    else:
                        item_y = 540 - graph_y
                    item_x = graph_start_x + int(graph_x_step * item_purchase['time'] / 60)
                    image_icon = Image.open(
                        os.path.join(self.assets_root, 'icons', 'item_' + item_purchase['key'] + '.png')).convert(
                        'RGBA')
                    composition = draw_image_centered(composition, image_icon, [item_x, item_y], [35, 35])
        # Draw titles (team names; the winner gets laurels and full color,
        # the loser is greyed out).
        image_draw = ImageDraw.Draw(composition)
        radiant_team = '?'
        dire_team = '?'
        radiant_team_info = next((team for team in teams if team.id == game_json['radiant_team_id']), None)
        if radiant_team_info is not None:
            radiant_team = radiant_team_info.name
        dire_team_info = next((team for team in teams if team.id == game_json['dire_team_id']), None)
        if dire_team_info is not None:
            dire_team = dire_team_info.name
        radiant_color = self.colors['ti_green']
        dire_color = self.colors['ti_green']
        laurels_icon = Image.open(os.path.join(self.assets_root, 'icons', 'laurels.png')).convert('RGBA')
        if game_json['radiant_win']:
            # Laurels flank the winning team's name on both sides.
            laurels_x = [int((graph_start_x + graph_end_x - image_draw.textsize(radiant_team, rift_team)[0]) / 2) - 40,
                         int((graph_start_x + graph_end_x + image_draw.textsize(radiant_team, rift_team)[0]) / 2) + 40]
            laurels_y = 45
            dire_color = self.colors['grey']
        else:
            laurels_x = [int((graph_start_x + graph_end_x - image_draw.textsize(dire_team, rift_team)[0]) / 2) - 40,
                         int((graph_start_x + graph_end_x + image_draw.textsize(dire_team, rift_team)[0]) / 2) + 40]
            laurels_y = 1035
            radiant_color = self.colors['grey']
        draw_text_outlined_center_align(image_draw, [int((graph_start_x + graph_end_x) / 2), 15], radiant_team,
                                        font=rift_team, fill=radiant_color, outline_fill=self.colors['black'],
                                        outline_width=4)
        draw_text_outlined_center_align(image_draw, [int((graph_start_x + graph_end_x) / 2), 1005], dire_team,
                                        font=rift_team, fill=dire_color, outline_fill=self.colors['black'],
                                        outline_width=4)
        composition = draw_image_centered(composition, laurels_icon, [laurels_x[0], laurels_y], [40, 40])
        composition = draw_image_centered(composition, laurels_icon, [laurels_x[1], laurels_y], [40, 40])
        composition.save(generated_path) | backend/api/image_generation/mixins/post_game.py | from itertools import chain
import time
import os
import math
from tornado_sqlalchemy import as_future
from tornado.gen import multi
from PIL import Image, ImageDraw, ImageColor, ImageFont
from models import DotaProPlayer, DotaHeroes, DotaItem, DotaProTeam
from image_generation.helpers import draw_text_outlined_center_align, draw_text_right_align, draw_image, \
draw_image_centered
class PostGameMixin:
async def generate_post_game(self, game_id):
generated_path = os.path.join(self.generated_root, "post_game-" + str(game_id) + ".png")
if os.path.exists(generated_path):
os.remove(generated_path)
# Generate image
composition = Image.open(os.path.join(self.assets_root, 'background3.png')).convert('RGBA')
image_draw = ImageDraw.Draw(composition)
# Prepare fonts
rift_player_nickname = ImageFont.truetype(
os.path.join(self.assets_root, 'fonts', 'rift', 'fort_foundry_rift_bold_italic.otf'), 46)
noto_cjk_player_nickname = ImageFont.truetype(
os.path.join(self.assets_root, 'fonts', 'noto', 'noto_sans_cjk_bold.otf'), 38)
rift_player_name = ImageFont.truetype(
os.path.join(self.assets_root, 'fonts', 'rift', 'fort_foundry_rift_regular.otf'), 26)
rift_kda = ImageFont.truetype(
os.path.join(self.assets_root, 'fonts', 'rift', 'fort_foundry_rift_bold.otf'), 32)
rift_dmg = ImageFont.truetype(
os.path.join(self.assets_root, 'fonts', 'rift', 'fort_foundry_rift_regular.otf'), 32)
rift_team = ImageFont.truetype(
os.path.join(self.assets_root, 'fonts', 'rift', 'fort_foundry_rift_bold.otf'), 50)
# Get data
game_json = self.download_opendata_if_necessary(game_id)
if game_json is None or game_json['version'] is None:
image_draw.text([100, 100], str(time.time()), font=rift_player_nickname, fill=self.colors['light_red'])
image_draw.text([100, 200], "ERROR WITH OPENDOTA", font=rift_player_nickname, fill=self.colors['light_red'])
composition.save(generated_path)
return
hero_x = 350
hero_y_side_padding = 30
hero_height = 90
hero_width = int(256 * hero_height / 144)
hero_y_padding = 10
item_padding = 4
item_height = int((hero_height - item_padding) / 2)
item_width = int(88 * item_height / 64)
player_name_x_padding = -40
player_name_y_padding = 0
player_nickname_y_padding = 50
kda_padding_x = 5
hero_y = {0: hero_y_side_padding,
1: hero_y_side_padding + hero_height + hero_y_padding,
2: hero_y_side_padding + 2 * (hero_height + hero_y_padding),
3: hero_y_side_padding + 3 * (hero_height + hero_y_padding),
4: hero_y_side_padding + 4 * (hero_height + hero_y_padding),
128: 1080 - hero_y_side_padding - hero_height * 5 - hero_y_padding * 4,
129: 1080 - hero_y_side_padding - hero_height * 4 - hero_y_padding * 3,
130: 1080 - hero_y_side_padding - hero_height * 3 - hero_y_padding * 2,
131: 1080 - hero_y_side_padding - hero_height * 2 - hero_y_padding,
132: 1080 - hero_y_side_padding - hero_height}
hero_color = {0: self.colors['hero_blue'],
1: self.colors['hero_teal'],
2: self.colors['hero_purple'],
3: self.colors['hero_yellow'],
4: self.colors['hero_orange'],
128: self.colors['hero_pink'],
129: self.colors['hero_grey'],
130: self.colors['hero_aqua'],
131: self.colors['hero_green'],
132: self.colors['hero_brown']}
hero_color_width = 10
# Get database data
heroes, items, players, teams = await multi([as_future(self.session.query(DotaHeroes).all),
as_future(self.session.query(DotaItem).all),
as_future(self.session.query(DotaProPlayer).all),
as_future(self.session.query(DotaProTeam).all)])
# Draw Heroes & Items
for player in game_json['players']:
hero = next((hero for hero in heroes if hero.id == player['hero_id']), None)
if hero is None:
short_name = 'error'
else:
short_name = hero.short_name
hero_image = Image.open(os.path.join(self.assets_root, 'dota', 'hero_rectangle', short_name + '.png')) \
.convert('RGBA')
draw_image(composition, hero_image, [hero_x, hero_y[player['player_slot']]], [None, hero_height])
# Draw Items
for j in range(0, 2):
for i in range(0, 3):
key = 'item_{0}'.format(j * 3 + i)
if player[key] != 0:
item = next((item for item in items if item.id == player[key]), None)
if item is None:
short_name = 'error'
else:
short_name = item.name
item_path = os.path.join(self.assets_root, 'dota', 'item_rectangle', short_name + '.png')
if not os.path.exists(item_path):
item_path = os.path.join(self.assets_root, 'dota', 'item_rectangle', 'default.png')
item_image = Image.open(item_path).convert('RGBA')
else:
item_image = Image.open(os.path.join(self.assets_root, 'dota', 'item_rectangle', 'empty.png')) \
.convert('RGBA')
draw_image(composition,
item_image,
[hero_x + hero_width + (i + 1) * item_padding + i * item_width,
hero_y[player['player_slot']] + j * (item_height + item_padding)],
[None, item_height])
# Draw neutral item
if player["item_neutral"] != 0:
item = next((item for item in items if item.id == player["item_neutral"]), None)
if item is None:
short_name = 'error'
else:
short_name = item.name
item_path = os.path.join(self.assets_root, 'dota', 'item_rectangle', short_name + '.png')
if not os.path.exists(item_path):
item_path = os.path.join(self.assets_root, 'dota', 'item_rectangle', 'default.png')
item_image = Image.open(item_path).convert('RGBA')
else:
item_image = Image.open(os.path.join(self.assets_root, 'dota', 'item_rectangle', 'empty.png')) \
.convert('RGBA')
draw_image(composition,
item_image,
[hero_x + hero_width + 5*item_padding + 3*item_width,
hero_y[player['player_slot']] + int((item_height + item_padding)/2)],
[None, item_height])
# Draw icons
sword_image = Image.open(os.path.join(self.assets_root, 'icons', 'sword.png')).convert('RGBA')
sword_image = sword_image.resize([int(item_height / 2), int(item_height / 2)], Image.LANCZOS)
in_place_sword = Image.new('RGBA', (composition.size[0], composition.size[1]))
in_place_sword.paste(sword_image,
box=[hero_x + hero_width + 4 * (item_width + item_padding + kda_padding_x),
hero_y[player['player_slot']] + item_height + 15],
mask=sword_image)
composition = Image.alpha_composite(composition, in_place_sword)
# Draw kda skull
skull_image = Image.open(os.path.join(self.assets_root, 'icons', 'skull.png')).convert('RGBA')
skull_image = skull_image.resize([int(item_height / 2), int(item_height / 2)], Image.LANCZOS)
in_place_skull = Image.new('RGBA', (composition.size[0], composition.size[1]))
in_place_skull.paste(skull_image,
box=[hero_x + hero_width + 4 * (item_width + item_padding + kda_padding_x),
hero_y[player['player_slot']] + 12],
mask=skull_image)
composition = Image.alpha_composite(composition, in_place_skull)
# Draw colors
image_draw = ImageDraw.Draw(composition)
for player in game_json['players']:
image_draw.rectangle([hero_x - hero_color_width,
hero_y[player['player_slot']],
hero_x,
hero_y[player['player_slot']] + hero_height - 1],
fill=hero_color[player['player_slot']])
# Draw player names & pseudo
for player in game_json['players']:
pro_player = next((pro_player for pro_player in players if pro_player.account_id == player['account_id']),
None)
player_name_font = rift_player_nickname
if pro_player is None:
name = '-'
nickname = '-'
else:
name = pro_player.name
nickname = pro_player.nickname
if not len(nickname) == len(nickname.encode()):
player_name_font = noto_cjk_player_nickname
draw_text_right_align(image_draw, [hero_x + player_name_x_padding,
hero_y[player['player_slot']] + player_name_y_padding],
nickname, player_name_font, fill=self.colors['white'])
draw_text_right_align(image_draw, [hero_x + player_name_x_padding,
hero_y[player[
'player_slot']] + player_name_y_padding + player_nickname_y_padding],
name, rift_player_name, fill=self.colors['white'])
kda = "{0}/{1}/{2}".format(player['kills'], player['deaths'], player['assists'])
image_draw.text([hero_x + hero_width + 4 * (item_width + item_padding + kda_padding_x) + int(
item_height / 2) + 3 * item_padding,
hero_y[player['player_slot']]],
text=kda, font=rift_kda, fill=self.colors['white'])
image_draw.text([hero_x + hero_width + 4 * (item_width + item_padding + kda_padding_x) + int(
item_height / 2) + 3 * item_padding,
hero_y[player['player_slot']] + item_height + item_padding],
text=str(player['hero_damage']), font=rift_dmg, fill=self.colors['white'])
# Draw graph
radiant_gold_adv = game_json['radiant_gold_adv']
radiant_xp_adv = game_json['radiant_xp_adv']
graph_start_x = 910
graph_end_x = 1850
graph_y = 400
graph_width = 4
graph_graduation_x = 10
gold_xp_max = 0
for item in chain(radiant_gold_adv, radiant_xp_adv):
if abs(item) > gold_xp_max: gold_xp_max = abs(item)
gold_xp_max = int((gold_xp_max - gold_xp_max % 1000) / 1000 + 1)
duration = math.ceil(game_json['duration'] / 60)
graph_x_step = math.floor((graph_end_x - graph_start_x) / duration)
graph_y_step = math.floor(graph_y / gold_xp_max)
image_draw.line([graph_start_x, 540 - int(graph_width / 2), graph_end_x, 540 - int(graph_width / 2)],
fill=self.colors['white'], width=graph_width)
image_draw.line(
[graph_start_x - int(graph_width / 2), 540 - graph_y, graph_start_x - int(graph_width / 2), 540 + graph_y],
fill=self.colors['white'], width=graph_width)
i = 5
while i < gold_xp_max:
image_draw.line([graph_start_x,
540 + graph_y_step * i,
graph_end_x,
540 + graph_y_step * i],
fill=self.colors['grey'], width=1)
image_draw.line([graph_start_x,
540 - graph_y_step * i,
graph_end_x,
540 - graph_y_step * i],
fill=self.colors['grey'], width=1)
i += 5
i = 5
while i < duration:
image_draw.line([graph_start_x + i * graph_x_step,
540 - graph_graduation_x - 2,
graph_start_x + i * graph_x_step,
540 + graph_graduation_x - 1],
fill=self.colors['white'], width=graph_width)
i += 5
for i in range(1, duration):
image_draw.line([graph_start_x + (i - 1) * graph_x_step,
540 - int(graph_y_step * (radiant_xp_adv[i - 1] / 1000)),
graph_start_x + i * graph_x_step,
540 - int(graph_y_step * (radiant_xp_adv[i] / 1000))], fill=self.colors['blue'], width=6)
image_draw.line([graph_start_x + (i - 1) * graph_x_step,
540 - int(graph_y_step * (radiant_gold_adv[i - 1] / 1000)),
graph_start_x + i * graph_x_step,
540 - int(graph_y_step * (radiant_gold_adv[i] / 1000))], fill=self.colors['yellow'],
width=6)
for objectif in game_json['objectives']:
objectif_x = 0
objectif_y = 0
image = 'error'
if objectif['type'] in ['CHAT_MESSAGE_COURIER_LOST', 'building_kill', 'CHAT_MESSAGE_ROSHAN_KILL']:
objectif_x = graph_start_x + int(graph_x_step * objectif['time'] / 60)
if objectif['type'] == 'CHAT_MESSAGE_COURIER_LOST':
image = 'chick_kill'
if objectif['team'] == 2:
objectif_y = 540 - graph_y - 35
else:
objectif_y = 540 + graph_y + 35
elif objectif['type'] == 'CHAT_MESSAGE_ROSHAN_KILL':
image = 'roshan_kill'
if objectif['team'] == 2:
objectif_y = 540 - graph_y - 35
else:
objectif_y = 540 + graph_y + 35
else:
if 'badguys' in objectif['key']:
objectif_y = 540 - graph_y - 35
else:
objectif_y = 540 + graph_y + 35
if 'tower' in objectif['key']:
image = 'tower_kill'
elif 'healers' in objectif['key']:
image = 'shrine_kill'
elif 'melee_rax' in objectif['key']:
image = 'rax_kill'
if image == 'error':
continue
image_icon = Image.open(os.path.join(self.assets_root, 'icons', image + '.png')).convert('RGBA')
composition = draw_image_centered(composition, image_icon, [objectif_x, objectif_y], [35, 35])
for player in game_json['players']:
for item_purchase in player['purchase_log']:
if item_purchase['key'] in ['black_king_bar', 'blink', 'sheepstick', 'silver_edge', 'refresher',
'orchid']:
if player['player_slot'] > 100:
item_y = 540 + graph_y
else:
item_y = 540 - graph_y
item_x = graph_start_x + int(graph_x_step * item_purchase['time'] / 60)
image_icon = Image.open(
os.path.join(self.assets_root, 'icons', 'item_' + item_purchase['key'] + '.png')).convert(
'RGBA')
composition = draw_image_centered(composition, image_icon, [item_x, item_y], [35, 35])
# Draw titles
image_draw = ImageDraw.Draw(composition)
radiant_team = '?'
dire_team = '?'
radiant_team_info = next((team for team in teams if team.id == game_json['radiant_team_id']), None)
if radiant_team_info is not None:
radiant_team = radiant_team_info.name
dire_team_info = next((team for team in teams if team.id == game_json['dire_team_id']), None)
if dire_team_info is not None:
dire_team = dire_team_info.name
radiant_color = self.colors['ti_green']
dire_color = self.colors['ti_green']
laurels_icon = Image.open(os.path.join(self.assets_root, 'icons', 'laurels.png')).convert('RGBA')
if game_json['radiant_win']:
laurels_x = [int((graph_start_x + graph_end_x - image_draw.textsize(radiant_team, rift_team)[0]) / 2) - 40,
int((graph_start_x + graph_end_x + image_draw.textsize(radiant_team, rift_team)[0]) / 2) + 40]
laurels_y = 45
dire_color = self.colors['grey']
else:
laurels_x = [int((graph_start_x + graph_end_x - image_draw.textsize(dire_team, rift_team)[0]) / 2) - 40,
int((graph_start_x + graph_end_x + image_draw.textsize(dire_team, rift_team)[0]) / 2) + 40]
laurels_y = 1035
radiant_color = self.colors['grey']
draw_text_outlined_center_align(image_draw, [int((graph_start_x + graph_end_x) / 2), 15], radiant_team,
font=rift_team, fill=radiant_color, outline_fill=self.colors['black'],
outline_width=4)
draw_text_outlined_center_align(image_draw, [int((graph_start_x + graph_end_x) / 2), 1005], dire_team,
font=rift_team, fill=dire_color, outline_fill=self.colors['black'],
outline_width=4)
composition = draw_image_centered(composition, laurels_icon, [laurels_x[0], laurels_y], [40, 40])
composition = draw_image_centered(composition, laurels_icon, [laurels_x[1], laurels_y], [40, 40])
composition.save(generated_path) | 0.314471 | 0.064418 |
import os, xml.dom.minidom, unicodedata, shutil, glob
cldrnames = {}
_document = xml.dom.minidom.parse("CLDR/annotations/en.xml")
for _i in _document.getElementsByTagName("annotation"):
if _i.hasAttribute("type") and _i.getAttribute("type") == "tts":
cldrnames[_i.getAttribute("cp")] = _i.firstChild.wholeText
_document = xml.dom.minidom.parse("CLDR/annotationsDerived/en.xml")
for _i in _document.getElementsByTagName("annotation"):
if _i.hasAttribute("type") and _i.getAttribute("type") == "tts":
cldrnames[_i.getAttribute("cp")] = _i.firstChild.wholeText
rcldrnames = dict(zip(cldrnames.values(), (i.casefold() for i in cldrnames.keys())))
for i in list(cldrnames.keys())[:]:
cldrnames[i.replace("\u200D", "")] = cldrnames[i]
def get_cldrname(ucs):
if ucs in cldrnames:
return cldrnames[ucs]
if len(ucs) == 1:
altname = unicodedata.name(ucs, None)
if altname:
altname = altname.title()
if altname.casefold() in rcldrnames:
altname = "Unicode " + altname
return altname
if len(ucs) > 1:
n = [get_cldrname(i) for i in ucs]
if None not in n:
ret = ", ".join(n)
if ret.endswith("skin tone"):
ret = ":".join(ret.rsplit(",", 1))
return ret
return None
print("Sorting out names")
for pn in glob.glob("**/*.svg", recursive=True):
if "node_modules" in pn or "other" in pn or "sprites" in pn:
continue
i = os.path.basename(pn)
if "draft" in i.casefold():
continue
ucs = "".join(chr(int(j, 16)) for j in os.path.splitext(i)[0].replace("-BW", "").split("-"))
document = xml.dom.minidom.parse(pn)
cldrname = get_cldrname(ucs)
if cldrname:
if not document.getElementsByTagName("title"):
title = document.createElement("title")
title.appendChild(document.createTextNode(cldrname))
document.documentElement.insertBefore(title, document.documentElement.firstChild)
document.documentElement.insertBefore(document.createTextNode("\n "), title)
#print("Adding name to", i)
else:
title = document.getElementsByTagName("title")[0]
if "," not in cldrname and title.firstChild.wholeText.strip().casefold() != cldrname.casefold():
for j in title.childNodes:
title.removeChild(j)
title.appendChild(document.createTextNode(cldrname))
#print("Updating name of", i)
if cldrname.startswith("Unicode "):
cfn = cldrname[8:].casefold()
collision = rcldrnames[cfn]
collision = "-".join("{:04x}".format(ord(j)) for j in collision)
comment = "CLDR {!r} is {}".format(cfn, collision)
print(comment)
if title.lastChild.nodeName != "#comment": # i.e. if not already added.
title.appendChild(document.createComment(comment))
shutil.move(pn, pn + "~")
with open(pn, "w") as f:
x = document.toxml().replace("<?xml version=\"1.0\" ?>", "")
f.write(x)
os.unlink(pn + "~") | generate_names.py |
import os, xml.dom.minidom, unicodedata, shutil, glob
cldrnames = {}
_document = xml.dom.minidom.parse("CLDR/annotations/en.xml")
for _i in _document.getElementsByTagName("annotation"):
if _i.hasAttribute("type") and _i.getAttribute("type") == "tts":
cldrnames[_i.getAttribute("cp")] = _i.firstChild.wholeText
_document = xml.dom.minidom.parse("CLDR/annotationsDerived/en.xml")
for _i in _document.getElementsByTagName("annotation"):
if _i.hasAttribute("type") and _i.getAttribute("type") == "tts":
cldrnames[_i.getAttribute("cp")] = _i.firstChild.wholeText
rcldrnames = dict(zip(cldrnames.values(), (i.casefold() for i in cldrnames.keys())))
for i in list(cldrnames.keys())[:]:
cldrnames[i.replace("\u200D", "")] = cldrnames[i]
def get_cldrname(ucs):
if ucs in cldrnames:
return cldrnames[ucs]
if len(ucs) == 1:
altname = unicodedata.name(ucs, None)
if altname:
altname = altname.title()
if altname.casefold() in rcldrnames:
altname = "Unicode " + altname
return altname
if len(ucs) > 1:
n = [get_cldrname(i) for i in ucs]
if None not in n:
ret = ", ".join(n)
if ret.endswith("skin tone"):
ret = ":".join(ret.rsplit(",", 1))
return ret
return None
print("Sorting out names")
for pn in glob.glob("**/*.svg", recursive=True):
if "node_modules" in pn or "other" in pn or "sprites" in pn:
continue
i = os.path.basename(pn)
if "draft" in i.casefold():
continue
ucs = "".join(chr(int(j, 16)) for j in os.path.splitext(i)[0].replace("-BW", "").split("-"))
document = xml.dom.minidom.parse(pn)
cldrname = get_cldrname(ucs)
if cldrname:
if not document.getElementsByTagName("title"):
title = document.createElement("title")
title.appendChild(document.createTextNode(cldrname))
document.documentElement.insertBefore(title, document.documentElement.firstChild)
document.documentElement.insertBefore(document.createTextNode("\n "), title)
#print("Adding name to", i)
else:
title = document.getElementsByTagName("title")[0]
if "," not in cldrname and title.firstChild.wholeText.strip().casefold() != cldrname.casefold():
for j in title.childNodes:
title.removeChild(j)
title.appendChild(document.createTextNode(cldrname))
#print("Updating name of", i)
if cldrname.startswith("Unicode "):
cfn = cldrname[8:].casefold()
collision = rcldrnames[cfn]
collision = "-".join("{:04x}".format(ord(j)) for j in collision)
comment = "CLDR {!r} is {}".format(cfn, collision)
print(comment)
if title.lastChild.nodeName != "#comment": # i.e. if not already added.
title.appendChild(document.createComment(comment))
shutil.move(pn, pn + "~")
with open(pn, "w") as f:
x = document.toxml().replace("<?xml version=\"1.0\" ?>", "")
f.write(x)
os.unlink(pn + "~") | 0.128116 | 0.094177 |
import torch
from torch import nn
class BatchNormConv1d(nn.Module):
r"""A wrapper for Conv1d with BatchNorm. It sets the activation
function between Conv and BatchNorm layers. BatchNorm layer
is initialized with the TF default values for momentum and eps.
Args:
in_channels: size of each input sample
out_channels: size of each output samples
kernel_size: kernel size of conv filters
stride: stride of conv filters
padding: padding of conv filters
activation: activation function set b/w Conv1d and BatchNorm
Shapes:
- input: batch x dims
- output: batch x dims
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride,
padding,
activation=None):
super(BatchNormConv1d, self).__init__()
self.padding = padding
self.padder = nn.ConstantPad1d(padding, 0)
self.conv1d = nn.Conv1d(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=stride,
padding=0,
bias=False)
# Following tensorflow's default parameters
self.bn = nn.BatchNorm1d(out_channels, momentum=0.99, eps=1e-3)
self.activation = activation
# self.init_layers()
def init_layers(self):
if type(self.activation) == torch.nn.ReLU:
w_gain = 'relu'
elif type(self.activation) == torch.nn.Tanh:
w_gain = 'tanh'
elif self.activation is None:
w_gain = 'linear'
else:
raise RuntimeError('Unknown activation function')
torch.nn.init.xavier_uniform_(
self.conv1d.weight, gain=torch.nn.init.calculate_gain(w_gain))
def forward(self, x):
x = self.padder(x)
x = self.conv1d(x)
x = self.bn(x)
if self.activation is not None:
x = self.activation(x)
return x
class Highway(nn.Module):
# TODO: Try GLU layer
def __init__(self, in_size, out_size):
super(Highway, self).__init__()
self.H = nn.Linear(in_size, out_size)
self.H.bias.data.zero_()
self.T = nn.Linear(in_size, out_size)
self.T.bias.data.fill_(-1)
self.relu = nn.ReLU()
self.sigmoid = nn.Sigmoid()
# self.init_layers()
def init_layers(self):
torch.nn.init.xavier_uniform_(
self.H.weight, gain=torch.nn.init.calculate_gain('relu'))
torch.nn.init.xavier_uniform_(
self.T.weight, gain=torch.nn.init.calculate_gain('sigmoid'))
def forward(self, inputs):
H = self.relu(self.H(inputs))
T = self.sigmoid(self.T(inputs))
return H * T + inputs * (1.0 - T)
class CBHG(nn.Module):
"""CBHG module: a recurrent neural network composed of:
- 1-d convolution banks
- Highway networks + residual connections
- Bidirectional gated recurrent units
Args:
in_features (int): sample size
K (int): max filter size in conv bank
projections (list): conv channel sizes for conv projections
num_highways (int): number of highways layers
Shapes:
- input: B x D x T_in
- output: B x T_in x D*2
"""
def __init__(self,
in_features,
K=16,
conv_bank_features=128,
conv_projections=[128, 128],
highway_features=128,
gru_features=128,
num_highways=4):
super(CBHG, self).__init__()
self.in_features = in_features
self.conv_bank_features = conv_bank_features
self.highway_features = highway_features
self.gru_features = gru_features
self.conv_projections = conv_projections
self.relu = nn.ReLU()
# list of conv1d bank with filter size k=1...K
# TODO: try dilational layers instead
self.conv1d_banks = nn.ModuleList([
BatchNormConv1d(in_features,
conv_bank_features,
kernel_size=k,
stride=1,
padding=[(k - 1) // 2, k // 2],
activation=self.relu) for k in range(1, K + 1)
])
# max pooling of conv bank, with padding
# TODO: try average pooling OR larger kernel size
out_features = [K * conv_bank_features] + conv_projections[:-1]
activations = [self.relu] * (len(conv_projections) - 1)
activations += [None]
# setup conv1d projection layers
layer_set = []
for (in_size, out_size, ac) in zip(out_features, conv_projections,
activations):
layer = BatchNormConv1d(in_size,
out_size,
kernel_size=3,
stride=1,
padding=[1, 1],
activation=ac)
layer_set.append(layer)
self.conv1d_projections = nn.ModuleList(layer_set)
# setup Highway layers
if self.highway_features != conv_projections[-1]:
self.pre_highway = nn.Linear(conv_projections[-1],
highway_features,
bias=False)
self.highways = nn.ModuleList([
Highway(highway_features, highway_features)
for _ in range(num_highways)
])
# bi-directional GPU layer
self.gru = nn.GRU(gru_features,
gru_features,
1,
batch_first=True,
bidirectional=True)
def forward(self, inputs):
# (B, in_features, T_in)
x = inputs
# (B, hid_features*K, T_in)
# Concat conv1d bank outputs
outs = []
for conv1d in self.conv1d_banks:
out = conv1d(x)
outs.append(out)
x = torch.cat(outs, dim=1)
assert x.size(1) == self.conv_bank_features * len(self.conv1d_banks)
for conv1d in self.conv1d_projections:
x = conv1d(x)
x += inputs
x = x.transpose(1, 2)
if self.highway_features != self.conv_projections[-1]:
x = self.pre_highway(x)
# Residual connection
# TODO: try residual scaling as in Deep Voice 3
# TODO: try plain residual layers
for highway in self.highways:
x = highway(x)
# (B, T_in, hid_features*2)
# TODO: replace GRU with convolution as in Deep Voice 3
self.gru.flatten_parameters()
outputs, _ = self.gru(x)
return outputs
if __name__ == '__main__':
from cn_tacotron.text import label_to_sequence
import numpy as np
data = np.asanyarray(label_to_sequence("datasets/labels/000001.lab"))
input_data = torch.LongTensor(data)
model = CBHG(113)
model(input_data) | cn_tacotron/cbgh.py | import torch
from torch import nn
class BatchNormConv1d(nn.Module):
r"""A wrapper for Conv1d with BatchNorm. It sets the activation
function between Conv and BatchNorm layers. BatchNorm layer
is initialized with the TF default values for momentum and eps.
Args:
in_channels: size of each input sample
out_channels: size of each output samples
kernel_size: kernel size of conv filters
stride: stride of conv filters
padding: padding of conv filters
activation: activation function set b/w Conv1d and BatchNorm
Shapes:
- input: batch x dims
- output: batch x dims
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride,
padding,
activation=None):
super(BatchNormConv1d, self).__init__()
self.padding = padding
self.padder = nn.ConstantPad1d(padding, 0)
self.conv1d = nn.Conv1d(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=stride,
padding=0,
bias=False)
# Following tensorflow's default parameters
self.bn = nn.BatchNorm1d(out_channels, momentum=0.99, eps=1e-3)
self.activation = activation
# self.init_layers()
def init_layers(self):
if type(self.activation) == torch.nn.ReLU:
w_gain = 'relu'
elif type(self.activation) == torch.nn.Tanh:
w_gain = 'tanh'
elif self.activation is None:
w_gain = 'linear'
else:
raise RuntimeError('Unknown activation function')
torch.nn.init.xavier_uniform_(
self.conv1d.weight, gain=torch.nn.init.calculate_gain(w_gain))
def forward(self, x):
x = self.padder(x)
x = self.conv1d(x)
x = self.bn(x)
if self.activation is not None:
x = self.activation(x)
return x
class Highway(nn.Module):
# TODO: Try GLU layer
def __init__(self, in_size, out_size):
super(Highway, self).__init__()
self.H = nn.Linear(in_size, out_size)
self.H.bias.data.zero_()
self.T = nn.Linear(in_size, out_size)
self.T.bias.data.fill_(-1)
self.relu = nn.ReLU()
self.sigmoid = nn.Sigmoid()
# self.init_layers()
def init_layers(self):
torch.nn.init.xavier_uniform_(
self.H.weight, gain=torch.nn.init.calculate_gain('relu'))
torch.nn.init.xavier_uniform_(
self.T.weight, gain=torch.nn.init.calculate_gain('sigmoid'))
def forward(self, inputs):
H = self.relu(self.H(inputs))
T = self.sigmoid(self.T(inputs))
return H * T + inputs * (1.0 - T)
class CBHG(nn.Module):
"""CBHG module: a recurrent neural network composed of:
- 1-d convolution banks
- Highway networks + residual connections
- Bidirectional gated recurrent units
Args:
in_features (int): sample size
K (int): max filter size in conv bank
projections (list): conv channel sizes for conv projections
num_highways (int): number of highways layers
Shapes:
- input: B x D x T_in
- output: B x T_in x D*2
"""
def __init__(self,
in_features,
K=16,
conv_bank_features=128,
conv_projections=[128, 128],
highway_features=128,
gru_features=128,
num_highways=4):
super(CBHG, self).__init__()
self.in_features = in_features
self.conv_bank_features = conv_bank_features
self.highway_features = highway_features
self.gru_features = gru_features
self.conv_projections = conv_projections
self.relu = nn.ReLU()
# list of conv1d bank with filter size k=1...K
# TODO: try dilational layers instead
self.conv1d_banks = nn.ModuleList([
BatchNormConv1d(in_features,
conv_bank_features,
kernel_size=k,
stride=1,
padding=[(k - 1) // 2, k // 2],
activation=self.relu) for k in range(1, K + 1)
])
# max pooling of conv bank, with padding
# TODO: try average pooling OR larger kernel size
out_features = [K * conv_bank_features] + conv_projections[:-1]
activations = [self.relu] * (len(conv_projections) - 1)
activations += [None]
# setup conv1d projection layers
layer_set = []
for (in_size, out_size, ac) in zip(out_features, conv_projections,
activations):
layer = BatchNormConv1d(in_size,
out_size,
kernel_size=3,
stride=1,
padding=[1, 1],
activation=ac)
layer_set.append(layer)
self.conv1d_projections = nn.ModuleList(layer_set)
# setup Highway layers
if self.highway_features != conv_projections[-1]:
self.pre_highway = nn.Linear(conv_projections[-1],
highway_features,
bias=False)
self.highways = nn.ModuleList([
Highway(highway_features, highway_features)
for _ in range(num_highways)
])
# bi-directional GPU layer
self.gru = nn.GRU(gru_features,
gru_features,
1,
batch_first=True,
bidirectional=True)
def forward(self, inputs):
# (B, in_features, T_in)
x = inputs
# (B, hid_features*K, T_in)
# Concat conv1d bank outputs
outs = []
for conv1d in self.conv1d_banks:
out = conv1d(x)
outs.append(out)
x = torch.cat(outs, dim=1)
assert x.size(1) == self.conv_bank_features * len(self.conv1d_banks)
for conv1d in self.conv1d_projections:
x = conv1d(x)
x += inputs
x = x.transpose(1, 2)
if self.highway_features != self.conv_projections[-1]:
x = self.pre_highway(x)
# Residual connection
# TODO: try residual scaling as in Deep Voice 3
# TODO: try plain residual layers
for highway in self.highways:
x = highway(x)
# (B, T_in, hid_features*2)
# TODO: replace GRU with convolution as in Deep Voice 3
self.gru.flatten_parameters()
outputs, _ = self.gru(x)
return outputs
if __name__ == '__main__':
from cn_tacotron.text import label_to_sequence
import numpy as np
data = np.asanyarray(label_to_sequence("datasets/labels/000001.lab"))
input_data = torch.LongTensor(data)
model = CBHG(113)
model(input_data) | 0.838316 | 0.648209 |
import pytest
def test_audit_antibody_mismatched_in_review(testapp,
base_antibody_characterization,
inconsistent_biosample_type):
characterization_review_list = base_antibody_characterization.get('characterization_reviews')
characterization_review_list[0]['biosample_ontology'] = inconsistent_biosample_type['uuid']
testapp.patch_json(base_antibody_characterization['@id'],
{'characterization_reviews': characterization_review_list})
res = testapp.get(base_antibody_characterization['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert any(error['category'] == 'inconsistent ontology term' for error in errors_list)
def test_audit_antibody_duplicate_review_subobject(testapp, base_antibody_characterization, base_characterization_review, base_document):
characterization_review_list = base_antibody_characterization.get('characterization_reviews')
characterization_review_list.append(base_characterization_review)
testapp.patch_json(base_antibody_characterization['@id'], {'characterization_reviews': characterization_review_list})
res = testapp.get(base_antibody_characterization['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert any(error['category'] == 'duplicate lane review' for error in errors_list)
def test_audit_antibody_target_mismatch(testapp, base_antibody_characterization, base_target):
testapp.patch_json(base_antibody_characterization['@id'], {'target': base_target['@id']})
res = testapp.get(base_antibody_characterization['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert any(error['category'] == 'inconsistent target' for error in errors_list)
def test_audit_antibody_not_tag_antibody(testapp, base_antibody_characterization, recombinant_target):
testapp.patch_json(base_antibody_characterization['@id'], {'target': recombinant_target['@id']})
res = testapp.get(base_antibody_characterization['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert any(error['category'] == 'not tagged antibody' for error in errors_list)
def test_audit_antibody_target_tag_antibody(testapp, base_antibody_characterization, base_antibody, recombinant_target, tag_target):
base_antibody['targets'] = [tag_target['@id']]
tag_antibody = testapp.post_json('/antibody_lot', base_antibody).json['@graph'][0]
testapp.patch_json(base_antibody_characterization['@id'], {'target': recombinant_target['@id'], 'characterizes': tag_antibody['@id']})
res = testapp.get(base_antibody_characterization['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert any(error['category'] == 'mismatched tag target' for error in errors_list)
def test_audit_antibody_lane_status_pending_mismatch1(testapp, base_antibody_characterization, base_antibody, wrangler, standards_document):
reviewed_by = "/users/" + wrangler['uuid'] + "/"
testapp.patch_json(base_antibody_characterization['@id'], {'status': 'compliant', 'reviewed_by': reviewed_by, 'documents': [standards_document['uuid']]})
res = testapp.get(base_antibody_characterization['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert any(error['category'] == 'mismatched lane status' for error in errors_list)
def test_audit_antibody_lane_status_pending_mismatch2(testapp, base_antibody_characterization, base_antibody, ):
characterization_review_list = base_antibody_characterization.get('characterization_reviews')
characterization_review_list[0]['lane_status'] = 'compliant'
testapp.patch_json(base_antibody_characterization['@id'], {'characterization_reviews': characterization_review_list, 'primary_characterization_method': 'immunoblot', 'status': 'pending dcc review'})
res = testapp.get(base_antibody_characterization['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert any(error['category'] == 'mismatched lane status' for error in errors_list)
def test_audit_antibody_lane_status_compliant_mismatch(testapp, base_antibody_characterization, base_antibody, base_characterization_review2, wrangler, standards_document):
characterization_review_list = base_antibody_characterization.get('characterization_reviews')
characterization_review_list[0]['lane_status'] = 'not compliant'
characterization_review_list.append(base_characterization_review2)
reviewed_by = "/users/" + wrangler['uuid'] + "/"
testapp.patch_json(base_antibody_characterization['@id'], {'characterization_reviews': characterization_review_list, 'status': 'not compliant', 'reviewed_by': reviewed_by, 'documents': [standards_document['uuid']]})
res = testapp.get(base_antibody_characterization['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert any(error['category'] == 'mismatched lane status' for error in errors_list) | src/encoded/tests/test_audit_antibody_characterization.py | import pytest
def test_audit_antibody_mismatched_in_review(testapp,
base_antibody_characterization,
inconsistent_biosample_type):
characterization_review_list = base_antibody_characterization.get('characterization_reviews')
characterization_review_list[0]['biosample_ontology'] = inconsistent_biosample_type['uuid']
testapp.patch_json(base_antibody_characterization['@id'],
{'characterization_reviews': characterization_review_list})
res = testapp.get(base_antibody_characterization['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert any(error['category'] == 'inconsistent ontology term' for error in errors_list)
def test_audit_antibody_duplicate_review_subobject(testapp, base_antibody_characterization, base_characterization_review, base_document):
characterization_review_list = base_antibody_characterization.get('characterization_reviews')
characterization_review_list.append(base_characterization_review)
testapp.patch_json(base_antibody_characterization['@id'], {'characterization_reviews': characterization_review_list})
res = testapp.get(base_antibody_characterization['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert any(error['category'] == 'duplicate lane review' for error in errors_list)
def test_audit_antibody_target_mismatch(testapp, base_antibody_characterization, base_target):
testapp.patch_json(base_antibody_characterization['@id'], {'target': base_target['@id']})
res = testapp.get(base_antibody_characterization['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert any(error['category'] == 'inconsistent target' for error in errors_list)
def test_audit_antibody_not_tag_antibody(testapp, base_antibody_characterization, recombinant_target):
testapp.patch_json(base_antibody_characterization['@id'], {'target': recombinant_target['@id']})
res = testapp.get(base_antibody_characterization['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert any(error['category'] == 'not tagged antibody' for error in errors_list)
def test_audit_antibody_target_tag_antibody(testapp, base_antibody_characterization, base_antibody, recombinant_target, tag_target):
base_antibody['targets'] = [tag_target['@id']]
tag_antibody = testapp.post_json('/antibody_lot', base_antibody).json['@graph'][0]
testapp.patch_json(base_antibody_characterization['@id'], {'target': recombinant_target['@id'], 'characterizes': tag_antibody['@id']})
res = testapp.get(base_antibody_characterization['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert any(error['category'] == 'mismatched tag target' for error in errors_list)
def test_audit_antibody_lane_status_pending_mismatch1(testapp, base_antibody_characterization, base_antibody, wrangler, standards_document):
reviewed_by = "/users/" + wrangler['uuid'] + "/"
testapp.patch_json(base_antibody_characterization['@id'], {'status': 'compliant', 'reviewed_by': reviewed_by, 'documents': [standards_document['uuid']]})
res = testapp.get(base_antibody_characterization['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert any(error['category'] == 'mismatched lane status' for error in errors_list)
def test_audit_antibody_lane_status_pending_mismatch2(testapp, base_antibody_characterization, base_antibody, ):
characterization_review_list = base_antibody_characterization.get('characterization_reviews')
characterization_review_list[0]['lane_status'] = 'compliant'
testapp.patch_json(base_antibody_characterization['@id'], {'characterization_reviews': characterization_review_list, 'primary_characterization_method': 'immunoblot', 'status': 'pending dcc review'})
res = testapp.get(base_antibody_characterization['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert any(error['category'] == 'mismatched lane status' for error in errors_list)
def test_audit_antibody_lane_status_compliant_mismatch(testapp, base_antibody_characterization, base_antibody, base_characterization_review2, wrangler, standards_document):
characterization_review_list = base_antibody_characterization.get('characterization_reviews')
characterization_review_list[0]['lane_status'] = 'not compliant'
characterization_review_list.append(base_characterization_review2)
reviewed_by = "/users/" + wrangler['uuid'] + "/"
testapp.patch_json(base_antibody_characterization['@id'], {'characterization_reviews': characterization_review_list, 'status': 'not compliant', 'reviewed_by': reviewed_by, 'documents': [standards_document['uuid']]})
res = testapp.get(base_antibody_characterization['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert any(error['category'] == 'mismatched lane status' for error in errors_list) | 0.389547 | 0.274353 |
# imported necessary library
import tkinter
from tkinter import *
import tkinter as tk
import tkinter.messagebox as mbox
import re
# created main window
window = Tk()
window.geometry("1000x700")
window.title("Credit Card Numbers Authentication")
# ------------------ this is for adding gif image in the main window of application ---------------------------------------
frameCnt = 3
frames = [PhotoImage(file='Images/card.gif',format = 'gif -index %i' %(i)) for i in range(frameCnt)]
cnt = 0.0
def update(ind):
global cnt
frame = frames[ind]
if(cnt == 1.0):
cnt = 0
cnt = cnt + 0.2
ind += int(cnt)
if ind == frameCnt:
ind = 0
label.configure(image=frame)
window.after(100, update, ind)
label = Label(window)
label.place(x = 120, y = 80)
window.after(0, update, 0)
# --------------------------------------------------------------------
# function for showing validity rules
def valid_fun():
mbox.showinfo("Authentication Rules","A valid Credit Card Number need to fulfill following requirements :\n\n1.) Must start with 4, 5 or 6.\n\n2.) Must contains exactly 16 digits.\n\n3.) Must consist of only digits from 0 - 9.\n\n4.) Must have digits in groups of 4, separated by a hyphen '-'.\n\n5.) Must NOT use any other separator like ' ' , '_', etc.\n\n6.) Must not have 4 or more consecutive digits.")
# function to check authenticity of credit card number
def authenticate_fun():
global valid
entered_card = card_entry.get().strip()
pre_match = re.search(r'^[456]\d{3}(-?)\d{4}\1\d{4}\1\d{4}$', entered_card)
if pre_match:
processed_string = "".join(pre_match.group(0).split('-'))
final_match = re.search(r'(\d)\1{3,}', processed_string)
if final_match:
mbox.showerror("Authentication Error", "Entered Credit Card Number is NOT Authenticated.")
else:
mbox.showinfo("Authentication Success", "Entered Credit Card Number is Authenticated.")
else:
mbox.showerror("Authentication Error", "Entered Credit Card Number is NOT Authenticated.")
# top label
start1 = tk.Label(text = "Credit Card Numbers Authentication", font=("Arial", 45), fg="magenta") # same way bg
start1.place(x = 20, y = 10)
# Button for rules
vpcrb = Button(window, text="AUTHENTICATION RULES",command=valid_fun,font=("Arial", 25), bg = "light green", fg = "blue", borderwidth=3, relief="raised")
vpcrb.place(x =250 , y =450 )
# label for Entering credit card number ---------------------------------------------------------------------------------
sel_label = tk.Label(text = "Enter Credit Card Number : ", font=("Arial", 27), fg="brown") # same way bg
sel_label.place(x = 30, y = 540)
# Created Entry Box
card_entry = Entry(window, font=("Arial", 30), fg='orange', bg="light yellow", borderwidth=3, width=20)
card_entry.place(x=500, y=535)
# created Authenticate Button
authenticateb = Button(window, text="AUTHENTICATE",command=authenticate_fun,font=("Arial", 25), bg = "light green", fg = "blue", borderwidth=3, relief="raised")
authenticateb.place(x =80 , y =600 )
# function for clearing the entry box
def clear_entry():
card_entry.delete(0,END)
# created clear button
clearb = Button(window, text="CLEAR",command=clear_entry,font=("Arial", 25), bg = "orange", fg = "blue", borderwidth=3, relief="raised")
clearb.place(x =510 , y =600 )
# function for exiting
def exit_win():
if mbox.askokcancel("Exit", "Do you want to exit?"):
window.destroy()
# created exit button
exitb = Button(window, text="EXIT",command=exit_win,font=("Arial", 25), bg = "red", fg = "blue", borderwidth=3, relief="raised")
exitb.place(x =780 , y =600 )
window.protocol("WM_DELETE_WINDOW", exit_win)
window.mainloop() | GUIScripts/Credit Card Numbers Authentication/credit_card_numbers_authentication.py |
# imported necessary library
import tkinter
from tkinter import *
import tkinter as tk
import tkinter.messagebox as mbox
import re
# created main window
window = Tk()
window.geometry("1000x700")
window.title("Credit Card Numbers Authentication")
# ------------------ this is for adding gif image in the main window of application ---------------------------------------
frameCnt = 3
frames = [PhotoImage(file='Images/card.gif',format = 'gif -index %i' %(i)) for i in range(frameCnt)]
cnt = 0.0
def update(ind):
global cnt
frame = frames[ind]
if(cnt == 1.0):
cnt = 0
cnt = cnt + 0.2
ind += int(cnt)
if ind == frameCnt:
ind = 0
label.configure(image=frame)
window.after(100, update, ind)
label = Label(window)
label.place(x = 120, y = 80)
window.after(0, update, 0)
# --------------------------------------------------------------------
# function for showing validity rules
def valid_fun():
mbox.showinfo("Authentication Rules","A valid Credit Card Number need to fulfill following requirements :\n\n1.) Must start with 4, 5 or 6.\n\n2.) Must contains exactly 16 digits.\n\n3.) Must consist of only digits from 0 - 9.\n\n4.) Must have digits in groups of 4, separated by a hyphen '-'.\n\n5.) Must NOT use any other separator like ' ' , '_', etc.\n\n6.) Must not have 4 or more consecutive digits.")
# function to check authenticity of credit card number
def authenticate_fun():
global valid
entered_card = card_entry.get().strip()
pre_match = re.search(r'^[456]\d{3}(-?)\d{4}\1\d{4}\1\d{4}$', entered_card)
if pre_match:
processed_string = "".join(pre_match.group(0).split('-'))
final_match = re.search(r'(\d)\1{3,}', processed_string)
if final_match:
mbox.showerror("Authentication Error", "Entered Credit Card Number is NOT Authenticated.")
else:
mbox.showinfo("Authentication Success", "Entered Credit Card Number is Authenticated.")
else:
mbox.showerror("Authentication Error", "Entered Credit Card Number is NOT Authenticated.")
# top label
start1 = tk.Label(text = "Credit Card Numbers Authentication", font=("Arial", 45), fg="magenta") # same way bg
start1.place(x = 20, y = 10)
# Button for rules
vpcrb = Button(window, text="AUTHENTICATION RULES",command=valid_fun,font=("Arial", 25), bg = "light green", fg = "blue", borderwidth=3, relief="raised")
vpcrb.place(x =250 , y =450 )
# label for Entering credit card number ---------------------------------------------------------------------------------
sel_label = tk.Label(text = "Enter Credit Card Number : ", font=("Arial", 27), fg="brown") # same way bg
sel_label.place(x = 30, y = 540)
# Created Entry Box
card_entry = Entry(window, font=("Arial", 30), fg='orange', bg="light yellow", borderwidth=3, width=20)
card_entry.place(x=500, y=535)
# created Authenticate Button
authenticateb = Button(window, text="AUTHENTICATE",command=authenticate_fun,font=("Arial", 25), bg = "light green", fg = "blue", borderwidth=3, relief="raised")
authenticateb.place(x =80 , y =600 )
# function for clearing the entry box
def clear_entry():
card_entry.delete(0,END)
# created clear button
clearb = Button(window, text="CLEAR",command=clear_entry,font=("Arial", 25), bg = "orange", fg = "blue", borderwidth=3, relief="raised")
clearb.place(x =510 , y =600 )
# function for exiting
def exit_win():
if mbox.askokcancel("Exit", "Do you want to exit?"):
window.destroy()
# created exit button
exitb = Button(window, text="EXIT",command=exit_win,font=("Arial", 25), bg = "red", fg = "blue", borderwidth=3, relief="raised")
exitb.place(x =780 , y =600 )
window.protocol("WM_DELETE_WINDOW", exit_win)
window.mainloop() | 0.441191 | 0.15633 |
import inspect
from collections import namedtuple
from operator import attrgetter
import znc
Command = namedtuple("Command", "name func min_args max_args syntax help_msg include_cmd admin")
def command(name, min_args=0, max_args=None, syntax=None, help_msg=None, include_cmd=False, admin=False):
def _decorate(func):
nonlocal help_msg, syntax
try:
func_doc = func.__doc__
except AttributeError:
func_doc = None
if func_doc is None:
func_doc = ""
func_doc = inspect.cleandoc(func_doc).splitlines()
if help_msg is None and func_doc:
help_msg = func_doc.pop(0)
if syntax is None and func_doc:
syntax = func_doc.pop(0)
try:
handlers = func._cmd_handlers
except AttributeError:
handlers = []
func._cmd_handlers = handlers
handlers.append(Command(name, func, min_args, max_args, syntax, help_msg, include_cmd, admin))
return func
return _decorate
class SnooModule(znc.Module):
def __init__(self):
super().__init__()
self.cmd_handlers = self.find_cmds()
@classmethod
def find_cmds(cls):
handlers = []
classes = {cls, *cls.__bases__}
funcs = [obj for c in classes for obj in c.__dict__.values() if callable(obj)]
for obj in funcs:
try:
handlers.extend(obj._cmd_handlers)
except AttributeError:
pass
return {handler.name: handler for handler in handlers}
@command("help")
def cmd_help(self):
"""Returns help documentation for this module"""
help_table = znc.CTable()
help_table.AddColumn("Command")
help_table.AddColumn("Arguments")
help_table.AddColumn("Description")
for cmd in sorted(self.cmd_handlers.values(), key=attrgetter("name")):
help_table.AddRow()
help_table.SetCell("Command", cmd.name)
help_table.SetCell("Arguments", cmd.syntax or "")
help_table.SetCell("Description", cmd.help_msg or "")
return help_table
def OnModCommand(self, text):
cmd, *args = text.strip().split()
cmd = self.cmd_handlers.get(cmd.lower())
if not cmd:
self.PutModule("Unknown command")
return
user = self.GetUser()
if cmd.admin and not user.IsAdmin():
self.PutModule("Permission denied")
if len(args) < cmd.min_args:
self.PutModule("Invalid arguments for command")
return
args = list(args)
max_args = cmd.max_args
if max_args is None:
max_args = cmd.min_args
if max_args == 0:
args = []
if len(args) > max_args:
args[max_args] = " ".join(args[max_args:])
del args[max_args + 1:]
if cmd.include_cmd:
args = [cmd.name] + args
result = cmd.func(self, *args)
if result:
if isinstance(result, tuple):
for part in result:
self.PutModule(part)
else:
self.PutModule(result)
@property
def znc_core(self):
return znc.CZNC.Get() | snoomodule.py | import inspect
from collections import namedtuple
from operator import attrgetter
import znc
Command = namedtuple("Command", "name func min_args max_args syntax help_msg include_cmd admin")
def command(name, min_args=0, max_args=None, syntax=None, help_msg=None, include_cmd=False, admin=False):
def _decorate(func):
nonlocal help_msg, syntax
try:
func_doc = func.__doc__
except AttributeError:
func_doc = None
if func_doc is None:
func_doc = ""
func_doc = inspect.cleandoc(func_doc).splitlines()
if help_msg is None and func_doc:
help_msg = func_doc.pop(0)
if syntax is None and func_doc:
syntax = func_doc.pop(0)
try:
handlers = func._cmd_handlers
except AttributeError:
handlers = []
func._cmd_handlers = handlers
handlers.append(Command(name, func, min_args, max_args, syntax, help_msg, include_cmd, admin))
return func
return _decorate
class SnooModule(znc.Module):
def __init__(self):
super().__init__()
self.cmd_handlers = self.find_cmds()
@classmethod
def find_cmds(cls):
handlers = []
classes = {cls, *cls.__bases__}
funcs = [obj for c in classes for obj in c.__dict__.values() if callable(obj)]
for obj in funcs:
try:
handlers.extend(obj._cmd_handlers)
except AttributeError:
pass
return {handler.name: handler for handler in handlers}
@command("help")
def cmd_help(self):
"""Returns help documentation for this module"""
help_table = znc.CTable()
help_table.AddColumn("Command")
help_table.AddColumn("Arguments")
help_table.AddColumn("Description")
for cmd in sorted(self.cmd_handlers.values(), key=attrgetter("name")):
help_table.AddRow()
help_table.SetCell("Command", cmd.name)
help_table.SetCell("Arguments", cmd.syntax or "")
help_table.SetCell("Description", cmd.help_msg or "")
return help_table
def OnModCommand(self, text):
cmd, *args = text.strip().split()
cmd = self.cmd_handlers.get(cmd.lower())
if not cmd:
self.PutModule("Unknown command")
return
user = self.GetUser()
if cmd.admin and not user.IsAdmin():
self.PutModule("Permission denied")
if len(args) < cmd.min_args:
self.PutModule("Invalid arguments for command")
return
args = list(args)
max_args = cmd.max_args
if max_args is None:
max_args = cmd.min_args
if max_args == 0:
args = []
if len(args) > max_args:
args[max_args] = " ".join(args[max_args:])
del args[max_args + 1:]
if cmd.include_cmd:
args = [cmd.name] + args
result = cmd.func(self, *args)
if result:
if isinstance(result, tuple):
for part in result:
self.PutModule(part)
else:
self.PutModule(result)
@property
def znc_core(self):
return znc.CZNC.Get() | 0.456894 | 0.066782 |
__author__ = '<NAME>'
from pandas import (
concat,
read_csv,
Series
)
from sklearn.tree import DecisionTreeClassifier
class Titanic(object):
titanic_data = None
def __init__(self, titanic_csv):
self.titanic_data = read_csv(titanic_csv, index_col='PassengerId')
def _percents(self, field):
return self.titanic_data.groupby(field).size().apply(
lambda x: float(x) / self.titanic_data.groupby(field).size().sum() * 100
)
@property
def titanic_sex(self):
return '{male} {female}'.format(**self.titanic_data['Sex'].value_counts())
@property
def survived_percentage(self):
return '{0:.2f}'.format(self._percents('Survived')[1])
@property
def first_class_percentage(self):
return '{0:.2f}'.format(self._percents('Pclass')[1])
@property
def std_and_mean_for_age(self):
ages = self.titanic_data['Age'].dropna()
return '{0:.2f} {1:.2f}'.format(
ages.mean(),
ages.median()
)
@property
def correlation_sibsp_parch(self):
return '{0:.2f}'.format(
self.titanic_data['SibSp'].corr(self.titanic_data['Parch'])
)
def _get_first_name(self, full_name):
try:
return full_name.split('(')[1].replace(')', ' ').split(' ')[0].replace('"', '')
except IndexError:
try:
return full_name.split('Miss. ')[1].split(' ')[0].replace('"', '')
except IndexError:
try:
return full_name.split('Mrs. ')[1].split(' ')[0].replace('"', '')
except IndexError:
return None
@property
def most_popular_female_name(self):
return self.titanic_data.groupby(
self.titanic_data[self.titanic_data.Sex == 'female']['Name'].apply(self._get_first_name)
).count().idxmax().Name
@property
def survival_criteria(self):
values = self.titanic_data[['Pclass', 'Fare', 'Age', 'Sex', 'Survived']].dropna()
values['IsMale'] = values.Sex == 'male'
clf = DecisionTreeClassifier(random_state=241)
clf.fit(values[['Pclass', 'Fare', 'Age', 'IsMale']], values['Survived'])
criteria = concat([
Series(['Pclass', 'Fare', 'Age', 'Sex']),
Series(clf.feature_importances_)
], axis=1).sort_values(by=1, ascending=False)[:2][0]
return ' '.join(criteria) | titanic/__init__.py |
__author__ = '<NAME>'
from pandas import (
concat,
read_csv,
Series
)
from sklearn.tree import DecisionTreeClassifier
class Titanic(object):
titanic_data = None
def __init__(self, titanic_csv):
self.titanic_data = read_csv(titanic_csv, index_col='PassengerId')
def _percents(self, field):
return self.titanic_data.groupby(field).size().apply(
lambda x: float(x) / self.titanic_data.groupby(field).size().sum() * 100
)
@property
def titanic_sex(self):
return '{male} {female}'.format(**self.titanic_data['Sex'].value_counts())
@property
def survived_percentage(self):
return '{0:.2f}'.format(self._percents('Survived')[1])
@property
def first_class_percentage(self):
return '{0:.2f}'.format(self._percents('Pclass')[1])
@property
def std_and_mean_for_age(self):
ages = self.titanic_data['Age'].dropna()
return '{0:.2f} {1:.2f}'.format(
ages.mean(),
ages.median()
)
@property
def correlation_sibsp_parch(self):
return '{0:.2f}'.format(
self.titanic_data['SibSp'].corr(self.titanic_data['Parch'])
)
def _get_first_name(self, full_name):
try:
return full_name.split('(')[1].replace(')', ' ').split(' ')[0].replace('"', '')
except IndexError:
try:
return full_name.split('Miss. ')[1].split(' ')[0].replace('"', '')
except IndexError:
try:
return full_name.split('Mrs. ')[1].split(' ')[0].replace('"', '')
except IndexError:
return None
@property
def most_popular_female_name(self):
return self.titanic_data.groupby(
self.titanic_data[self.titanic_data.Sex == 'female']['Name'].apply(self._get_first_name)
).count().idxmax().Name
@property
def survival_criteria(self):
values = self.titanic_data[['Pclass', 'Fare', 'Age', 'Sex', 'Survived']].dropna()
values['IsMale'] = values.Sex == 'male'
clf = DecisionTreeClassifier(random_state=241)
clf.fit(values[['Pclass', 'Fare', 'Age', 'IsMale']], values['Survived'])
criteria = concat([
Series(['Pclass', 'Fare', 'Age', 'Sex']),
Series(clf.feature_importances_)
], axis=1).sort_values(by=1, ascending=False)[:2][0]
return ' '.join(criteria) | 0.725746 | 0.332812 |
import posixpath
import re
import six
import zipfile2
from attr import attr, attributes
from attr.validators import instance_of, optional
from okonomiyaki.errors import (
InvalidRequirementString, InvalidRequirementStringHyphen,
InvalidEggName, InvalidMetadataField,
MissingMetadata, UnsupportedMetadata)
from okonomiyaki.platforms.legacy import LegacyEPDPlatform
from okonomiyaki.platforms import (
EPDPlatform, PlatformABI, PythonABI, PythonImplementation)
from okonomiyaki.utils import (
compute_sha256, decode_if_needed, encode_if_needed, parse_assignments)
from okonomiyaki.utils.py3compat import StringIO, string_types
from okonomiyaki.versions import EnpkgVersion, MetadataVersion
from .legacy import (
_guess_abi_tag, _guess_platform_abi, _guess_platform_tag, _guess_python_tag)
from ._blacklist import (
EGG_PLATFORM_BLACK_LIST, EGG_PYTHON_TAG_BLACK_LIST,
may_be_in_platform_blacklist, may_be_in_python_tag_blacklist,
may_be_in_pkg_info_blacklist)
from ._package_info import (
PackageInfo, _convert_if_needed, _keep_position, _read_pkg_info)
_EGG_NAME_RE = re.compile(r"""
(?P<name>[\.\w]+)
-
(?P<version>[^-]+)
-
(?P<build>\d+)
\.egg$""", re.VERBOSE)
EGG_INFO_PREFIX = "EGG-INFO"
# Those may need to be public, depending on how well we can hide their
# locations or not.
_INFO_JSON_LOCATION = posixpath.join(EGG_INFO_PREFIX, "info.json")
_SPEC_DEPEND_LOCATION = posixpath.join(EGG_INFO_PREFIX, "spec", "depend")
_SPEC_LIB_DEPEND_LOCATION = posixpath.join(EGG_INFO_PREFIX, "spec",
"lib-depend")
_SPEC_SUMMARY_LOCATION = posixpath.join(EGG_INFO_PREFIX, "spec", "summary")
_USR_PREFIX_LOCATION = posixpath.join(EGG_INFO_PREFIX, "usr")
_TAG_METADATA_VERSION = "metadata_version"
_TAG_NAME = "name"
_TAG_VERSION = "version"
_TAG_BUILD = "build"
_TAG_ARCH = "arch"
_TAG_OSDIST = "osdist"
_TAG_PLATFORM = "platform"
_TAG_PYTHON = "python"
_TAG_PYTHON_PEP425_TAG = "python_tag"
_TAG_ABI_PEP425_TAG = "abi_tag"
_TAG_PLATFORM_PEP425_TAG = "platform_tag"
_TAG_PLATFORM_ABI = "platform_abi"
_TAG_PACKAGES = "packages"
M = MetadataVersion.from_string
_METADATA_VERSION_TO_KEYS = {
M("1.1"): (
_TAG_METADATA_VERSION, _TAG_NAME, _TAG_VERSION, _TAG_BUILD, _TAG_ARCH,
_TAG_PLATFORM, _TAG_OSDIST, _TAG_PYTHON, _TAG_PACKAGES
),
}
_METADATA_VERSION_TO_KEYS[M("1.2")] = \
_METADATA_VERSION_TO_KEYS[M("1.1")] + (_TAG_PYTHON_PEP425_TAG, )
_METADATA_VERSION_TO_KEYS[M("1.3")] = (
_METADATA_VERSION_TO_KEYS[M("1.2")]
+ (_TAG_ABI_PEP425_TAG, _TAG_PLATFORM_PEP425_TAG)
)
_METADATA_VERSION_TO_KEYS[M("1.4")] = (
_METADATA_VERSION_TO_KEYS[M("1.3")] + (_TAG_PLATFORM_ABI, )
)
_UNSUPPORTED = "unsupported"
def _are_compatible(left, right):
"""Return True if both arguments are compatible metadata versions.
Parameters
----------
left: MetadataVersion
right: MetadataVersion
"""
return left.major == right.major
def _highest_compatible(metadata_version):
    """Return the highest supported metadata version that is compatible
    (same major version) with the given one.

    Raises UnsupportedMetadata when no supported version is compatible.
    """
    candidates = [
        known for known in _METADATA_VERSION_TO_KEYS
        if _are_compatible(known, metadata_version)
    ]
    if not candidates:
        raise UnsupportedMetadata(metadata_version)
    return max(candidates)
def split_egg_name(s):
    """Split an egg filename such as 'Qt-4.8.5-2.egg' into its
    (name, version, build) components, with build as an int.

    Raises InvalidEggName when the string does not match the legacy
    egg-name format.
    """
    match = _EGG_NAME_RE.match(s)
    if match is None:
        raise InvalidEggName(s)
    name, version, build = match.group("name", "version", "build")
    return name, version, int(build)
def parse_rawspec(spec_string):
    """Parse the text content of an EGG-INFO/spec/depend file.

    Returns a dict mapping every key required by the declared
    metadata_version to its value, with byte strings decoded to unicode.

    Raises InvalidMetadataField when the metadata_version field is
    missing or any required key is absent.
    """
    spec = parse_assignments(StringIO(spec_string.replace('\r', '')))

    version_string = spec.get(_TAG_METADATA_VERSION)
    if version_string is None:
        raise InvalidMetadataField('metadata_version', version_string)

    metadata_version = MetadataVersion.from_string(version_string)
    if metadata_version not in _METADATA_VERSION_TO_KEYS:
        # Unknown but possibly compatible version: fall back to the highest
        # supported version with the same major number.
        metadata_version = _highest_compatible(metadata_version)

    res = {}
    for key in _METADATA_VERSION_TO_KEYS[metadata_version]:
        if key not in spec:
            raise InvalidMetadataField(key, InvalidMetadataField.undefined)
        res[key] = spec[key]

    # Some values are not string-like, so only decode the ones that need
    # conversion.
    for key, value in res.items():
        if isinstance(value, six.binary_type):
            res[key] = decode_if_needed(value)
    res[_TAG_PACKAGES] = [decode_if_needed(p) for p in res[_TAG_PACKAGES]]
    return res
def egg_name(name, version, build):
    """
    Return the egg filename (including the .egg extension) for the given
    arguments
    """
    return "%s-%s-%s.egg" % (name, version, build)
def is_egg_name_valid(s):
    """
    Return True if the given string is a valid egg name (not including the
    .egg, e.g. 'Qt-4.8.5-2')
    """
    return bool(_EGG_NAME_RE.match(s))
# Known malformed requirement strings found in existing eggs, mapped to
# their well-formed equivalent ("name version" instead of "name-version").
_INVALID_REQUIREMENTS = {
    u"numpy-1.8.0": u"numpy 1.8.0",
}


def _translate_invalid_requirement(s):
    # Replace a known-bad requirement string with its fixed form; any
    # other string passes through unchanged.
    return _INVALID_REQUIREMENTS.get(s, s)
def text_attr(**kw):
    """ An attrs.attr-like descriptor to describe fields that must be unicode.
    """
    # The validator is fixed by this helper, so callers may not supply one.
    if "validator" in kw:
        raise ValueError("Cannot pass 'validator' argument")
    return attr(validator=instance_of(six.text_type), **kw)
def text_or_none_attr(**kw):
    """ An attrs.attr-like descriptor to describe fields that must be unicode
    or None.
    """
    # The validator is fixed by this helper, so callers may not supply one.
    if "validator" in kw:
        raise ValueError("Cannot pass 'validator' argument")
    return attr(validator=optional(instance_of(six.text_type)), **kw)
@six.python_2_unicode_compatible
@attributes(frozen=True)
class Requirement(object):
    """
    Model for entries in the package metadata inside EGG-INFO/spec/depend
    """
    # Package name, e.g. u"Qt" (empty for a default-constructed instance)
    name = text_attr(default=u"")
    # Upstream version string, e.g. u"4.8.5"; empty means "any version"
    version_string = text_attr(default=u"")
    # Build number; a negative value means "no build constraint"
    build_number = attr(-1, validator=instance_of(int))

    @property
    def strictness(self):
        """How precise this requirement is: 1 (name only), 2 (name and
        version), or 3 (name, version and build)."""
        if len(self.version_string) == 0:
            return 1
        elif self.build_number < 0:
            return 2
        else:
            return 3

    @classmethod
    def from_string(cls, s, strictness=2):
        """
        Create a Requirement from string following a name-version-build
        format.

        Parameters
        ----------
        s: text type
            Egg name, e.g. u'Qt-4.8.5-2'.
        strictness: int
            Control strictness of string representation
        """
        name, version, build = split_egg_name(u"{0}.egg".format(s))
        if strictness >= 3:
            build_number = build
        else:
            build_number = -1

        if strictness >= 2:
            version_string = version
        else:
            version_string = u""

        return cls(name=name, version_string=version_string,
                   build_number=build_number)

    @classmethod
    def from_spec_string(cls, s):
        """
        Create a Requirement from a spec string (as used in
        EGG-INFO/spec/depend).

        Raises InvalidRequirementStringHyphen for a bare name containing a
        hyphen, and InvalidRequirementString for any other malformed input.
        """
        s = _translate_invalid_requirement(s)
        parts = s.split()
        if len(parts) == 1:
            name = parts[0]
            if "-" in name:
                raise InvalidRequirementStringHyphen(name)
            return cls(name=name)
        elif len(parts) == 2:
            name, version = parts
            parts = version.split("-")
            if len(parts) == 2:
                # "version-build" form, e.g. "1.8.0-2"
                upstream, build_number = parts
                build_number = int(build_number)
            else:
                # Plain version (or anything else): keep it verbatim with no
                # build constraint.
                upstream, build_number = version, -1
            return cls(name=name, version_string=upstream,
                       build_number=build_number)
        else:
            # BUG FIX: this previously raised with the unbound local `name`,
            # which produced a NameError instead of the intended exception.
            raise InvalidRequirementString(s)

    def __str__(self):
        if len(self.version_string) > 0:
            if self.build_number > 0:
                return u"{0} {1}-{2}".format(
                    self.name, self.version_string, self.build_number
                )
            else:
                return u"{0} {1}".format(self.name, self.version_string)
        else:
            return self.name
_METADATA_TEMPLATES = {
M("1.1"): """\
metadata_version = '1.1'
name = {name!r}
version = {version!r}
build = {build}
arch = {arch!r}
platform = {platform!r}
osdist = {osdist!r}
python = {python!r}
packages = {packages}
""",
M("1.2"): """\
metadata_version = '1.2'
name = {name!r}
version = {version!r}
build = {build}
arch = {arch!r}
platform = {platform!r}
osdist = {osdist!r}
python = {python!r}
python_tag = {python_tag!r}
packages = {packages}
""",
M("1.3"): """\
metadata_version = '1.3'
name = {name!r}
version = {version!r}
build = {build}
arch = {arch!r}
platform = {platform!r}
osdist = {osdist!r}
python = {python!r}
python_tag = {python_tag!r}
abi_tag = {abi_tag!r}
platform_tag = {platform_tag!r}
packages = {packages}
""",
M("1.4"): """\
metadata_version = '1.4'
name = {name!r}
version = {version!r}
build = {build}
arch = {arch!r}
platform = {platform!r}
osdist = {osdist!r}
python = {python!r}
python_tag = {python_tag!r}
abi_tag = {abi_tag!r}
platform_tag = {platform_tag!r}
platform_abi = {platform_abi!r}
packages = {packages}
"""
}
_METADATA_DEFAULT_VERSION_STRING = "1.4"
_METADATA_DEFAULT_VERSION = M(_METADATA_DEFAULT_VERSION_STRING)
def _epd_platform_from_raw_spec(raw_spec):
    """ Create an EPDPlatform instance from the metadata info returned by
    parse_rawspec.

    Returns None when no platform is defined ('platform' and 'osdist' both
    set to None), i.e. for cross-platform eggs.
    """
    legacy_platform = raw_spec[_TAG_PLATFORM]
    legacy_osdist = raw_spec[_TAG_OSDIST]
    if legacy_platform is None and legacy_osdist is None:
        return None
    return EPDPlatform._from_spec_depend_data(
        platform=legacy_platform,
        osdist=legacy_osdist,
        arch_name=raw_spec[_TAG_ARCH],
        platform_abi=raw_spec.get(_TAG_PLATFORM_ABI, 'None'),
        platform_tag=raw_spec.get(_TAG_PLATFORM_PEP425_TAG, 'None'),
        python_version=raw_spec[_TAG_PYTHON])
@attributes
class LegacySpecDepend(object):
    """
    This models the EGG-INFO/spec/depend content.
    """
    # Name is taken from egg path, so may be upper case
    name = text_attr()
    """
    Egg name
    """
    version = text_attr()
    """
    Upstream version (as a string).
    """
    build = attr(validator=instance_of(int))
    """
    Build number
    """
    python = text_or_none_attr()
    """
    Python version
    """
    python_tag = text_or_none_attr()
    """
    Python tag (as defined in PEP 425).
    """
    abi_tag = text_or_none_attr()
    """
    ABI tag (as defined in PEP 425), except that 'none' is None.
    """
    platform_tag = text_or_none_attr()
    """
    Platform tag (as defined in PEP 425), except that 'any' is None.
    """
    platform_abi = text_or_none_attr()
    """
    Platform abi. None if no abi.
    """
    packages = attr(validator=instance_of(list))
    """
    List of dependencies for this egg
    """
    # Optional LegacyEPDPlatform wrapper; None for cross-platform eggs.
    _epd_legacy_platform = attr(
        validator=optional(instance_of(LegacyEPDPlatform))
    )
    # Parsed MetadataVersion; exposed through the metadata_version property.
    _metadata_version = attr(validator=instance_of(MetadataVersion))

    @classmethod
    def _from_data(cls, data, epd_platform):
        # Build an instance from a 'normalized' metadata dict (as returned
        # by _normalized_info_from_string) and an optional EPDPlatform
        # (None for cross-platform eggs).
        args = data.copy()
        args[_TAG_METADATA_VERSION] = M(
            args.get(_TAG_METADATA_VERSION, _METADATA_DEFAULT_VERSION_STRING)
        )
        if epd_platform is None:
            _epd_legacy_platform = None
        else:
            _epd_legacy_platform = LegacyEPDPlatform(epd_platform)
        args["_epd_legacy_platform"] = _epd_legacy_platform
        # Package entries are stored as Requirement instances, not strings.
        args[_TAG_PACKAGES] = [
            Requirement.from_spec_string(s)
            for s in args.get(_TAG_PACKAGES, [])
        ]
        # Positional order must match the attribute declaration order above.
        return cls(
            args["name"],
            args["version"],
            args["build"],
            args["python"],
            args["python_tag"],
            args["abi_tag"],
            args["platform_tag"],
            args["platform_abi"],
            args["packages"],
            args["_epd_legacy_platform"],
            args["metadata_version"],
        )

    @classmethod
    def from_egg(cls, path_or_file):
        """ Parse the spec/depend metadata from an egg, given either as a
        path or as an open zipfile-like object.
        """
        sha256 = None
        # The sha256 is only needed to look the egg up in the metadata
        # blacklists, so for paths it is computed only when the egg may be
        # blacklisted.
        if isinstance(path_or_file, string_types):
            if (
                may_be_in_platform_blacklist(path_or_file)
                or may_be_in_python_tag_blacklist(path_or_file)
            ):
                sha256 = compute_sha256(path_or_file)
        else:
            with _keep_position(path_or_file.fp):
                sha256 = compute_sha256(path_or_file.fp)
        return cls._from_egg(path_or_file, sha256)

    @classmethod
    def _from_egg(cls, path_or_file, sha256):
        def _create_spec_depend(zp):
            # A blacklisted sha256 overrides the platform recorded inside
            # the egg itself.
            epd_platform_string = EGG_PLATFORM_BLACK_LIST.get(sha256)
            if epd_platform_string is None:
                epd_platform = None
            else:
                epd_platform = EPDPlatform.from_epd_string(epd_platform_string)
            try:
                spec_depend_string = zp.read(_SPEC_DEPEND_LOCATION).decode()
            except KeyError:
                msg = ("File {0!r} is not an Enthought egg (is missing {1})"
                       .format(path_or_file, _SPEC_DEPEND_LOCATION))
                raise MissingMetadata(msg)
            else:
                data, epd_platform = _normalized_info_from_string(
                    spec_depend_string, epd_platform, sha256
                )
                return cls._from_data(data, epd_platform)
        if isinstance(path_or_file, string_types):
            with zipfile2.ZipFile(path_or_file) as zp:
                return _create_spec_depend(zp)
        else:
            return _create_spec_depend(path_or_file)

    @classmethod
    def from_string(cls, spec_depend_string):
        """ Parse an instance directly from a spec/depend file content."""
        data, epd_platform = _normalized_info_from_string(spec_depend_string)
        return cls._from_data(data, epd_platform)

    @property
    def arch(self):
        """
        Egg architecture.
        """
        if self._epd_legacy_platform is None:
            return None
        else:
            return self._epd_legacy_platform.arch._legacy_name

    @property
    def egg_name(self):
        """
        Full egg name (including .egg extension).
        """
        return egg_name(self.name, self.version, self.build)

    @property
    def osdist(self):
        # Legacy OS distribution field; None for cross-platform eggs.
        if self._epd_legacy_platform is None:
            return None
        else:
            return self._epd_legacy_platform.osdist

    @property
    def platform(self):
        """
        The legacy platform name (sys.platform).
        """
        if self._epd_legacy_platform is None:
            return None
        else:
            return self._epd_legacy_platform.platform

    @property
    def metadata_version(self):
        # MetadataVersion of the underlying spec/depend format.
        return self._metadata_version

    @metadata_version.setter
    def metadata_version(self, value):
        self._metadata_version = value

    def _to_dict(self):
        # Raw mapping used to render the spec/depend template in to_string.
        raw_data = {
            _TAG_NAME: self.name,
            _TAG_VERSION: self.version,
            _TAG_BUILD: self.build,
            _TAG_ARCH: self.arch,
            _TAG_PLATFORM: self.platform,
            _TAG_OSDIST: self.osdist,
            _TAG_PACKAGES: [str(p) for p in self.packages],
            _TAG_PYTHON: self.python,
            _TAG_PYTHON_PEP425_TAG: self.python_tag,
            _TAG_ABI_PEP425_TAG: self.abi_tag,
            _TAG_PLATFORM_PEP425_TAG: self.platform_tag,
            _TAG_PLATFORM_ABI: self.platform_abi,
            _TAG_METADATA_VERSION: self.metadata_version
        }
        return raw_data

    def to_string(self):
        """
        Returns a string that is suitable for the depend file inside our
        legacy egg.
        """
        # NOTE(review): template is None for metadata versions absent from
        # _METADATA_TEMPLATES, which would fail on the .format call below --
        # callers appear to guarantee a supported version; confirm.
        template = _METADATA_TEMPLATES.get(self.metadata_version, None)
        data = self._to_dict()
        if six.PY2:
            # Hack to avoid the 'u' prefix to appear in the spec/depend entries
            for k, v in data.items():
                data[k] = encode_if_needed(v)
        # This is just to ensure the exact same string as the produced by the
        # legacy buildsystem
        if len(self.packages) == 0:
            data[_TAG_PACKAGES] = "[]"
        else:
            if six.PY2:
                packages = [decode_if_needed(p) for p in self.packages]
            else:
                packages = self.packages
            data[_TAG_PACKAGES] = (
                u"[\n{0}\n]".format(
                    "\n".join(" '{0}',".format(p) for p in packages)
                )
            )
        return template.format(**data)
class Dependencies(object):
    """ Object storing the various dependencies for an egg.
    Each attribute is a tuple of Requirement instances.
    """
    def __init__(self, runtime=None, build=None):
        # None/empty arguments normalize to an empty tuple.
        self.runtime = runtime or ()
        # BUG FIX: this previously read `runtime or ()`, silently storing
        # the runtime dependencies as the build dependencies and ignoring
        # the `build` argument entirely.
        self.build = build or ()
def _metadata_version_to_tuple(metadata_version):
    """ Convert a metadata version string to a tuple for comparison."""
    return tuple(map(int, metadata_version.split(".")))
def _normalized_info_from_string(spec_depend_string, epd_platform=None,
                                 sha256=None):
    """ Return a 'normalized' dictionary from the given spec/depend string.
    Note: the name value is NOT lower-cased, so that the egg filename may
    rebuilt from the data.
    """
    raw_data = parse_rawspec(spec_depend_string)
    data = {}
    for k in (_TAG_METADATA_VERSION,
              _TAG_NAME, _TAG_VERSION, _TAG_BUILD,
              _TAG_ARCH, _TAG_OSDIST, _TAG_PLATFORM,
              _TAG_PYTHON, _TAG_PACKAGES):
        data[k] = raw_data[k]
    # Fold the legacy (arch, platform, osdist) triplet into a single
    # EPDPlatform, unless one was forced by the caller (blacklist override).
    epd_platform = epd_platform or _epd_platform_from_raw_spec(raw_data)
    for k in (_TAG_ARCH, _TAG_PLATFORM, _TAG_OSDIST):
        data.pop(k)
    metadata_version = MetadataVersion.from_string(data[_TAG_METADATA_VERSION])
    # Blacklisted eggs get their python tag overridden.
    python_tag = EGG_PYTHON_TAG_BLACK_LIST.get(sha256)
    if python_tag:
        data[_TAG_PYTHON_PEP425_TAG] = python_tag
    else:
        # python_tag only exists from metadata 1.2 on; for older metadata
        # guess it from the recorded python version.
        if metadata_version < M("1.2"):
            data[_TAG_PYTHON_PEP425_TAG] = _guess_python_tag(
                raw_data[_TAG_PYTHON]
            )
        else:
            data[_TAG_PYTHON_PEP425_TAG] = raw_data[_TAG_PYTHON_PEP425_TAG]
    # abi_tag / platform_tag only exist from metadata 1.3 on.
    if metadata_version < M("1.3"):
        python_tag = data[_TAG_PYTHON_PEP425_TAG]
        data[_TAG_ABI_PEP425_TAG] = _guess_abi_tag(epd_platform, python_tag)
        data[_TAG_PLATFORM_PEP425_TAG] = _guess_platform_tag(epd_platform)
    else:
        data[_TAG_ABI_PEP425_TAG] = raw_data[_TAG_ABI_PEP425_TAG]
        data[_TAG_PLATFORM_PEP425_TAG] = raw_data[_TAG_PLATFORM_PEP425_TAG]
    # platform_abi only exists from metadata 1.4 on.
    if metadata_version < M("1.4"):
        python_tag = data[_TAG_PYTHON_PEP425_TAG]
        platform_abi = _guess_platform_abi(epd_platform, python_tag)
    else:
        platform_abi = raw_data[_TAG_PLATFORM_ABI]
    data[_TAG_PLATFORM_ABI] = platform_abi
    return data, epd_platform
_JSON_METADATA_VERSION = "metadata_version"
_JSON__RAW_NAME = "_raw_name"
_JSON_VERSION = "version"
_JSON_EPD_PLATFORM = "epd_platform"
_JSON_PYTHON_TAG = "python_tag"
_JSON_ABI_TAG = "abi_tag"
_JSON_PLATFORM_TAG = "platform_tag"
_JSON_PLATFORM_ABI_TAG = "platform_abi_tag"
_JSON_RUNTIME_DEPENDENCIES = "runtime_dependencies"
_JSON_SUMMARY = "summary"
class EggMetadata(object):
""" Enthought egg metadata for format 1.x.
"""
HIGHEST_SUPPORTED_METADATA_VERSION = _METADATA_DEFAULT_VERSION
""" Highest supported metadata version (as a MetadataVersion object).
If the parsed metadata is higher, it will not be possible to write back
the metadata. If the parsed metadata version is not compatible (different
major version), then parsing will raise an UnsupportedMetadata exception as
well.
"""
@staticmethod
def _may_be_in_blacklist(path):
return (
may_be_in_platform_blacklist(path)
or may_be_in_pkg_info_blacklist(path)
or may_be_in_python_tag_blacklist(path)
)
@classmethod
def from_egg(cls, path_or_file, strict=True):
""" Create a EggMetadata instance from an existing Enthought egg.
Parameters
----------
path: str or file-like object.
If a string, understood as the path to the egg. Otherwise,
understood as a zipfile-like object.
strict: bool
If True, will fail if metadata cannot be decoded correctly (e.g.
unicode errors in EGG-INFO/PKG-INFO). If false, will ignore those
errors, at the risk of data loss.
"""
sha256 = None
if isinstance(path_or_file, string_types):
if cls._may_be_in_blacklist(path_or_file):
sha256 = compute_sha256(path_or_file)
else:
with _keep_position(path_or_file.fp):
sha256 = compute_sha256(path_or_file.fp)
return cls._from_egg(path_or_file, sha256, strict)
@classmethod
def from_json_dict(cls, json_dict, pkg_info):
version = EnpkgVersion.from_string(json_dict[_JSON_VERSION])
if json_dict[_JSON_PYTHON_TAG] is not None:
python = PythonImplementation.from_string(json_dict[_JSON_PYTHON_TAG])
else:
python = None
if json_dict[_JSON_EPD_PLATFORM] is None:
epd_platform = None
else:
epd_platform = EPDPlatform.from_epd_string(json_dict[_JSON_EPD_PLATFORM])
dependencies = Dependencies(tuple(json_dict[_JSON_RUNTIME_DEPENDENCIES]))
metadata_version = MetadataVersion.from_string(
json_dict[_JSON_METADATA_VERSION]
)
return cls(
json_dict[_JSON__RAW_NAME], version, epd_platform, python,
json_dict[_JSON_ABI_TAG], json_dict[_JSON_PLATFORM_ABI_TAG],
dependencies, pkg_info, json_dict[_JSON_SUMMARY],
metadata_version=metadata_version
)
@classmethod
def _from_egg(cls, path_or_file, sha256, strict=True):
def _read_summary(fp):
summary_arcname = "EGG-INFO/spec/summary"
try:
summary = fp.read(summary_arcname)
except KeyError:
# the summary file may not exist for eggs built with
# endist/repack
summary = b""
return summary.decode("utf8")
def _compute_all_metadata(fp):
summary = _read_summary(fp)
pkg_info_data = _read_pkg_info(fp)
if pkg_info_data is None:
pkg_info_string = None
else:
pkg_info_string = _convert_if_needed(
pkg_info_data, sha256, strict
)
spec_depend = LegacySpecDepend._from_egg(fp, sha256)
return summary, pkg_info_string, spec_depend
if isinstance(path_or_file, string_types):
with zipfile2.ZipFile(path_or_file) as zp:
summary, pkg_info_string, spec_depend = _compute_all_metadata(zp)
else:
summary, pkg_info_string, spec_depend = _compute_all_metadata(
path_or_file
)
return cls._from_spec_depend(spec_depend, pkg_info_string, summary)
@classmethod
def _from_spec_depend(cls, spec_depend, pkg_info, summary,
metadata_version=None):
raw_name = spec_depend.name
version = EnpkgVersion.from_upstream_and_build(spec_depend.version,
spec_depend.build)
python_tag = spec_depend.python_tag
abi_tag = spec_depend.abi_tag
platform_abi = spec_depend.platform_abi
if spec_depend._epd_legacy_platform is None:
platform = None
else:
platform = spec_depend._epd_legacy_platform._epd_platform
dependencies = Dependencies(
tuple(dep for dep in spec_depend.packages)
)
metadata_version = metadata_version or spec_depend.metadata_version
return cls(raw_name, version, platform, python_tag, abi_tag,
platform_abi, dependencies, pkg_info, summary,
metadata_version)
@classmethod
def from_egg_metadata(cls, egg_metadata, **kw):
""" Utility ctor to create a new EggMetadata instance from an existing
one, potentially updating some metadata.
Any keyword argument (except `egg_metadata`) is understood as an
argument to EggMetadata.__init__.
Parameters
----------
egg_metadata: EggMetadata
"""
passed_kw = {"raw_name": egg_metadata._raw_name}
for k in (
"version", "platform", "python", "abi_tag", "pkg_info", "summary",
"metadata_version", "platform_abi",
):
passed_kw[k] = getattr(egg_metadata, k)
passed_kw["dependencies"] = Dependencies(
egg_metadata.runtime_dependencies
)
passed_kw.update(**kw)
return cls(**passed_kw)
def __init__(self, raw_name, version, platform, python, abi_tag,
platform_abi, dependencies, pkg_info, summary,
metadata_version=None):
""" EggMetadata instances encompass Enthought egg metadata.
Note: the constructor is considered private, please use one of the
from_* class methods.
Parameters
----------
raw_name: str
The 'raw' name, i.e. the name value in spec/depend.
version: EnpkgVersion
The full version
platform: EPDPlatform
An EPDPlatform instance, or None for cross-platform eggs
python: Python
The python implementation
abi_tag: str
The ABI tag, e.g. 'cp27m'. May be None.
platform_abi: str
The platform abi, e.g. 'msvc2008', 'gnu', etc. May be None.
dependencies: Dependencies
A Dependencies instance.
pkg_info: PackageInfo or str or None
Instance modeling the PKG-INFO content of the egg. If a string is
passed, it is assumed to be the PKG-INFO content, and is lazily
parsed into a PackageInfo when pkg_info is accessed for the first
time.
summary: str
The summary. Models the string in EGG-INFO/spec/summary. May
be empty.
"""
self._raw_name = raw_name
self.version = version
""" The version, as an EnpkgVersion instance."""
self.platform = platform
""" The platform, as a Platform instance."""
if isinstance(python, string_types):
python = PythonImplementation.from_string(python)
self.python = python
""" The python implementation."""
if abi_tag is not None and isinstance(abi_tag, six.string_types):
abi_tag = PythonABI(abi_tag)
self.abi = abi_tag
""" The ABI tag, following the PEP425 format, except that no ABI
is sorted as None."""
if (
platform_abi is not None
and isinstance(platform_abi, six.string_types)):
platform_abi = PlatformABI(platform_abi)
self.platform_abi = platform_abi
self.runtime_dependencies = tuple(dependencies.runtime)
""" List of runtime dependencies (as strings)."""
self.metadata_version = metadata_version or _METADATA_DEFAULT_VERSION
""" The version format of the underlying metadata."""
self._pkg_info = pkg_info
""" A PackageInfo instance modeling the underlying PKG-INFO. May
be None for eggs without an PKG-INFO file."""
self.summary = summary
""" The summary string."""
@property
def abi_tag(self):
if self.abi is None:
return None
else:
return self.abi.pep425_tag
@property
def abi_tag_string(self):
return PythonABI.pep425_tag_string(self.abi)
@property
def build(self):
""" The build number."""
return self.version.build
@property
def egg_basename(self):
""" The egg "base name", i.e. the name part of the egg filename."""
return self._raw_name
@property
def egg_name(self):
""" The egg filename."""
return self._spec_depend.egg_name
@property
def is_strictly_supported(self):
""" Returns True if the given metadata_version is fully supported.
A metadata_version is fully supported iff:
- metadata_version.major ==
EggMetadata.HIGHEST_SUPPORTED_METADATA_VERSION.major
- and metadata_version.minor <=
EggMetadata.HIGHEST_SUPPORTED_METADATA_VERSION.minor
"""
max_supported = EggMetadata.HIGHEST_SUPPORTED_METADATA_VERSION
return (
_are_compatible(self.metadata_version, max_supported)
and self.metadata_version.minor <= max_supported.minor
)
@property
def kind(self):
return "egg"
@property
def name(self):
""" The package name."""
return self._raw_name.lower().replace("-", "_")
@property
def pkg_info(self):
if isinstance(self._pkg_info, six.string_types):
self._pkg_info = PackageInfo.from_string(self._pkg_info)
return self._pkg_info
@property
def platform_abi_tag(self):
if self.platform_abi is None:
return None
else:
return self.platform_abi.pep425_tag
@property
def platform_abi_tag_string(self):
return PlatformABI.pep425_tag_string(self.platform_abi)
@property
def platform_tag(self):
""" Platform tag following PEP425, except that no platform is
represented as None and not 'any'."""
if self.platform is None:
return None
else:
return self.platform.pep425_tag
@property
def platform_tag_string(self):
return EPDPlatform.pep425_tag_string(self.platform)
@property
def python_tag(self):
if self.python is None:
return None
else:
return self.python.pep425_tag
@property
def python_tag_string(self):
return PythonImplementation.pep425_tag_string(self.python)
@property
def spec_depend_string(self):
return self._spec_depend.to_string()
@property
def upstream_version(self):
return six.text_type(self.version.upstream)
@property
def _python(self):
if self.python is None:
return None
else:
return u"{0}.{1}".format(self.python.major, self.python.minor)
@property
def _spec_depend(self):
if not self.is_strictly_supported:
msg = "Cannot write back metadata with unsupported version {0!r}"
raise UnsupportedMetadata(
self.metadata_version, msg.format(str(self.metadata_version))
)
if self.platform is None:
epd_platform = None
else:
legacy_epd_platform = LegacyEPDPlatform(self.platform)
epd_platform = legacy_epd_platform._epd_platform
args = {
"name": self._raw_name,
"version": self.upstream_version,
"build": self.build,
"python": self._python,
"python_tag": self.python_tag,
"abi_tag": self.abi_tag,
"platform_tag": self.platform_tag,
"platform_abi": self.platform_abi_tag,
"packages": [six.text_type(p) for p in self.runtime_dependencies],
"metadata_version": six.text_type(self.metadata_version),
}
return LegacySpecDepend._from_data(args, epd_platform)
# Public methods
def dump(self, path):
""" Write the metadata to the given path as a metadata egg.
A metadata egg is a zipfile using the same structured as an egg, except
that it only contains metadata.
Parameters
----------
path : str
The path to write the zipped metadata into.
"""
with zipfile2.ZipFile(path, "w", zipfile2.ZIP_DEFLATED) as zp:
zp.writestr(
_SPEC_DEPEND_LOCATION, self.spec_depend_string.encode()
)
zp.writestr(
_SPEC_SUMMARY_LOCATION, self.summary.encode()
)
if self.pkg_info:
self.pkg_info._dump_as_zip(zp)
def to_json_dict(self):
if self.platform is None:
epd_platform = None
else:
epd_platform = six.text_type(self.platform)
return {
_JSON_METADATA_VERSION: six.text_type(self.metadata_version),
_JSON__RAW_NAME: self._raw_name,
_JSON_VERSION: six.text_type(self.version),
_JSON_EPD_PLATFORM: epd_platform,
_JSON_PYTHON_TAG: self.python_tag,
_JSON_ABI_TAG: self.abi_tag,
_JSON_PLATFORM_TAG: self.platform_tag,
_JSON_PLATFORM_ABI_TAG: self.platform_abi_tag,
_JSON_RUNTIME_DEPENDENCIES: [
six.text_type(p) for p in self.runtime_dependencies
],
_JSON_SUMMARY: self.summary,
}
# Protocol implementations
def __eq__(self, other):
    """ Equality is defined on the serialized metadata: two instances are
    equal when their spec/depend rendering, summary and PKG-INFO agree.

    Raises TypeError when compared against any other type (deliberate:
    cross-type comparison is not supported by this class).
    """
    if isinstance(other, self.__class__):
        return (
            self.spec_depend_string == other.spec_depend_string
            and self.summary == other.summary
            and self.pkg_info == other.pkg_info
        )
    else:
        raise TypeError(
            "Only equality between EggMetadata instances is supported"
        )

def __ne__(self, other):
    # BUG FIX: the original line was fused with non-Python residue
    # (extraction artifact); restored to the plain negation of __eq__,
    # which is required for Python 2 where __ne__ is not derived.
    return not self == other
import re
import six
import zipfile2
from attr import attr, attributes
from attr.validators import instance_of, optional
from okonomiyaki.errors import (
InvalidRequirementString, InvalidRequirementStringHyphen,
InvalidEggName, InvalidMetadataField,
MissingMetadata, UnsupportedMetadata)
from okonomiyaki.platforms.legacy import LegacyEPDPlatform
from okonomiyaki.platforms import (
EPDPlatform, PlatformABI, PythonABI, PythonImplementation)
from okonomiyaki.utils import (
compute_sha256, decode_if_needed, encode_if_needed, parse_assignments)
from okonomiyaki.utils.py3compat import StringIO, string_types
from okonomiyaki.versions import EnpkgVersion, MetadataVersion
from .legacy import (
_guess_abi_tag, _guess_platform_abi, _guess_platform_tag, _guess_python_tag)
from ._blacklist import (
EGG_PLATFORM_BLACK_LIST, EGG_PYTHON_TAG_BLACK_LIST,
may_be_in_platform_blacklist, may_be_in_python_tag_blacklist,
may_be_in_pkg_info_blacklist)
from ._package_info import (
PackageInfo, _convert_if_needed, _keep_position, _read_pkg_info)
_EGG_NAME_RE = re.compile(r"""
(?P<name>[\.\w]+)
-
(?P<version>[^-]+)
-
(?P<build>\d+)
\.egg$""", re.VERBOSE)
EGG_INFO_PREFIX = "EGG-INFO"
# Those may need to be public, depending on how well we can hide their
# locations or not.
_INFO_JSON_LOCATION = posixpath.join(EGG_INFO_PREFIX, "info.json")
_SPEC_DEPEND_LOCATION = posixpath.join(EGG_INFO_PREFIX, "spec", "depend")
_SPEC_LIB_DEPEND_LOCATION = posixpath.join(EGG_INFO_PREFIX, "spec",
"lib-depend")
_SPEC_SUMMARY_LOCATION = posixpath.join(EGG_INFO_PREFIX, "spec", "summary")
_USR_PREFIX_LOCATION = posixpath.join(EGG_INFO_PREFIX, "usr")
_TAG_METADATA_VERSION = "metadata_version"
_TAG_NAME = "name"
_TAG_VERSION = "version"
_TAG_BUILD = "build"
_TAG_ARCH = "arch"
_TAG_OSDIST = "osdist"
_TAG_PLATFORM = "platform"
_TAG_PYTHON = "python"
_TAG_PYTHON_PEP425_TAG = "python_tag"
_TAG_ABI_PEP425_TAG = "abi_tag"
_TAG_PLATFORM_PEP425_TAG = "platform_tag"
_TAG_PLATFORM_ABI = "platform_abi"
_TAG_PACKAGES = "packages"
M = MetadataVersion.from_string
_METADATA_VERSION_TO_KEYS = {
M("1.1"): (
_TAG_METADATA_VERSION, _TAG_NAME, _TAG_VERSION, _TAG_BUILD, _TAG_ARCH,
_TAG_PLATFORM, _TAG_OSDIST, _TAG_PYTHON, _TAG_PACKAGES
),
}
_METADATA_VERSION_TO_KEYS[M("1.2")] = \
_METADATA_VERSION_TO_KEYS[M("1.1")] + (_TAG_PYTHON_PEP425_TAG, )
_METADATA_VERSION_TO_KEYS[M("1.3")] = (
_METADATA_VERSION_TO_KEYS[M("1.2")]
+ (_TAG_ABI_PEP425_TAG, _TAG_PLATFORM_PEP425_TAG)
)
_METADATA_VERSION_TO_KEYS[M("1.4")] = (
_METADATA_VERSION_TO_KEYS[M("1.3")] + (_TAG_PLATFORM_ABI, )
)
_UNSUPPORTED = "unsupported"
def _are_compatible(left, right):
    """Return True if both arguments are compatible metadata versions.

    Parameters
    ----------
    left: MetadataVersion
    right: MetadataVersion
    """
    # Compatibility is defined as sharing the same major version.
    return right.major == left.major
def _highest_compatible(metadata_version):
    """ Returns the highest known metadata version that is compatible with
    the given version.

    Raises UnsupportedMetadata when no known version is compatible.
    """
    candidates = [
        known for known in _METADATA_VERSION_TO_KEYS
        if _are_compatible(known, metadata_version)
    ]
    if not candidates:
        raise UnsupportedMetadata(metadata_version)
    return max(candidates)
def split_egg_name(s):
    """ Split an egg filename into a (name, version, build) triple.

    Raises InvalidEggName if the string does not match the egg naming
    convention.
    """
    match = _EGG_NAME_RE.match(s)
    if match is None:
        raise InvalidEggName(s)
    name, version, build = match.groups()
    return name, version, int(build)
def parse_rawspec(spec_string):
    """ Parse a spec/depend file content into a dict of raw values.

    Raises InvalidMetadataField when the metadata version is missing, or
    when a field required by that version is absent.
    """
    spec = parse_assignments(StringIO(spec_string.replace('\r', '')))
    metadata_version_string = spec.get(_TAG_METADATA_VERSION)
    if metadata_version_string is not None:
        metadata_version = MetadataVersion.from_string(metadata_version_string)
    else:
        metadata_version = None
    if metadata_version is None:
        raise InvalidMetadataField('metadata_version', metadata_version_string)
    elif metadata_version not in _METADATA_VERSION_TO_KEYS:
        # Unknown but compatible versions fall back to the key set of the
        # highest known compatible version.
        metadata_version = _highest_compatible(metadata_version)
    res = {}
    keys = _METADATA_VERSION_TO_KEYS.get(metadata_version)
    for key in keys:
        try:
            res[key] = spec[key]
        except KeyError:
            raise InvalidMetadataField(key, InvalidMetadataField.undefined)
    for k, v in res.items():
        # Some values are not string-like, so filter on the type that needs
        # conversion
        if isinstance(v, six.binary_type):
            res[k] = decode_if_needed(v)
    res[_TAG_PACKAGES] = [decode_if_needed(v) for v in res[_TAG_PACKAGES]]
    return res
def egg_name(name, version, build):
    """
    Return the egg filename (including the .egg extension) for the given
    arguments
    """
    return "%s-%s-%s.egg" % (name, version, build)
def is_egg_name_valid(s):
    """
    Return True if the given string is a valid egg name (not including the
    .egg, e.g. 'Qt-4.8.5-2')
    """
    return bool(_EGG_NAME_RE.match(s))
_INVALID_REQUIREMENTS = {
u"numpy-1.8.0": u"numpy 1.8.0",
}
def _translate_invalid_requirement(s):
    # Remap known-broken historical spec strings to their valid form;
    # any other string passes through unchanged.
    translated = _INVALID_REQUIREMENTS.get(s)
    return s if translated is None else translated
def text_attr(**kw):
    """ An attrs.attr-like descriptor to describe fields that must be unicode.
    """
    # The validator is fixed by this helper, so the caller may not supply one.
    if "validator" in kw:
        raise ValueError("Cannot pass '{0}' argument".format("validator"))
    return attr(validator=instance_of(six.text_type), **kw)
def text_or_none_attr(**kw):
    """ An attrs.attr-like descriptor to describe fields that must be unicode
    or None.
    """
    # The validator is fixed by this helper, so the caller may not supply one.
    if "validator" in kw:
        raise ValueError("Cannot pass '{0}' argument".format("validator"))
    return attr(validator=optional(instance_of(six.text_type)), **kw)
@six.python_2_unicode_compatible
@attributes(frozen=True)
class Requirement(object):
    """
    Model for entries in the package metadata inside EGG-INFO/spec/depend
    """
    name = text_attr(default=u"")
    version_string = text_attr(default=u"")
    build_number = attr(-1, validator=instance_of(int))

    @property
    def strictness(self):
        # 1: name only; 2: name + version; 3: name + version + build.
        if len(self.version_string) == 0:
            return 1
        elif self.build_number < 0:
            return 2
        else:
            return 3

    @classmethod
    def from_string(cls, s, strictness=2):
        """
        Create a Requirement from string following a name-version-build
        format.
        Parameters
        ----------
        s: text type
            Egg name, e.g. u'Qt-4.8.5-2'.
        strictness: int
            Control strictness of string representation
        """
        name, version, build = split_egg_name(u"{0}.egg".format(s))
        if strictness >= 3:
            build_number = build
        else:
            build_number = -1
        if strictness >= 2:
            version_string = version
        else:
            version_string = u""
        return cls(name=name, version_string=version_string,
                   build_number=build_number)

    @classmethod
    def from_spec_string(cls, s):
        """
        Create a Requirement from a spec string (as used in
        EGG-INFO/spec/depend).

        Raises
        ------
        InvalidRequirementStringHyphen
            If a name-only requirement contains a hyphen.
        InvalidRequirementString
            If the string has more than two whitespace-separated parts.
        """
        s = _translate_invalid_requirement(s)
        parts = s.split()
        if len(parts) == 1:
            name = parts[0]
            if "-" in name:
                raise InvalidRequirementStringHyphen(name)
            return cls(name=name)
        elif len(parts) == 2:
            name, version = parts
            parts = version.split("-")
            if len(parts) == 2:
                upstream, build_number = parts
                build_number = int(build_number)
            else:
                # No build number in the version part.
                upstream, build_number = version, -1
            return cls(name=name, version_string=upstream,
                       build_number=build_number)
        else:
            # BUG FIX: `name` is never bound on this path, so the original
            # `raise InvalidRequirementString(name)` produced a NameError
            # instead of the intended exception. Raise with the input string.
            raise InvalidRequirementString(s)

    def __str__(self):
        if len(self.version_string) > 0:
            if self.build_number > 0:
                return u"{0} {1}-{2}".format(
                    self.name, self.version_string, self.build_number
                )
            else:
                return u"{0} {1}".format(self.name, self.version_string)
        else:
            return self.name
_METADATA_TEMPLATES = {
M("1.1"): """\
metadata_version = '1.1'
name = {name!r}
version = {version!r}
build = {build}
arch = {arch!r}
platform = {platform!r}
osdist = {osdist!r}
python = {python!r}
packages = {packages}
""",
M("1.2"): """\
metadata_version = '1.2'
name = {name!r}
version = {version!r}
build = {build}
arch = {arch!r}
platform = {platform!r}
osdist = {osdist!r}
python = {python!r}
python_tag = {python_tag!r}
packages = {packages}
""",
M("1.3"): """\
metadata_version = '1.3'
name = {name!r}
version = {version!r}
build = {build}
arch = {arch!r}
platform = {platform!r}
osdist = {osdist!r}
python = {python!r}
python_tag = {python_tag!r}
abi_tag = {abi_tag!r}
platform_tag = {platform_tag!r}
packages = {packages}
""",
M("1.4"): """\
metadata_version = '1.4'
name = {name!r}
version = {version!r}
build = {build}
arch = {arch!r}
platform = {platform!r}
osdist = {osdist!r}
python = {python!r}
python_tag = {python_tag!r}
abi_tag = {abi_tag!r}
platform_tag = {platform_tag!r}
platform_abi = {platform_abi!r}
packages = {packages}
"""
}
_METADATA_DEFAULT_VERSION_STRING = "1.4"
_METADATA_DEFAULT_VERSION = M(_METADATA_DEFAULT_VERSION_STRING)
def _epd_platform_from_raw_spec(raw_spec):
    """ Create an EPDPlatform instance from the metadata info returned by
    parse_rawspec.

    Returns None when no platform is defined ('platform' and 'osdist' both
    set to None), i.e. for cross-platform eggs.
    """
    legacy_platform = raw_spec[_TAG_PLATFORM]
    legacy_osdist = raw_spec[_TAG_OSDIST]
    if legacy_platform is None and legacy_osdist is None:
        return None
    return EPDPlatform._from_spec_depend_data(
        platform=legacy_platform,
        osdist=legacy_osdist,
        arch_name=raw_spec[_TAG_ARCH],
        platform_abi=raw_spec.get(_TAG_PLATFORM_ABI, 'None'),
        platform_tag=raw_spec.get(_TAG_PLATFORM_PEP425_TAG, 'None'),
        python_version=raw_spec[_TAG_PYTHON])
@attributes
class LegacySpecDepend(object):
"""
This models the EGG-INFO/spec/depend content.
"""
# Name is taken from egg path, so may be upper case
name = text_attr()
"""
Egg name
"""
version = text_attr()
"""
Upstream version (as a string).
"""
build = attr(validator=instance_of(int))
"""
Build number
"""
python = text_or_none_attr()
"""
Python version
"""
python_tag = text_or_none_attr()
"""
Python tag (as defined in PEP 425).
"""
abi_tag = text_or_none_attr()
"""
ABI tag (as defined in PEP 425), except that 'none' is None.
"""
platform_tag = text_or_none_attr()
"""
Platform tag (as defined in PEP 425), except that 'any' is None.
"""
platform_abi = text_or_none_attr()
"""
Platform abi. None if no abi.
"""
packages = attr(validator=instance_of(list))
"""
List of dependencies for this egg
"""
_epd_legacy_platform = attr(
validator=optional(instance_of(LegacyEPDPlatform))
)
_metadata_version = attr(validator=instance_of(MetadataVersion))
@classmethod
def _from_data(cls, data, epd_platform):
args = data.copy()
args[_TAG_METADATA_VERSION] = M(
args.get(_TAG_METADATA_VERSION, _METADATA_DEFAULT_VERSION_STRING)
)
if epd_platform is None:
_epd_legacy_platform = None
else:
_epd_legacy_platform = LegacyEPDPlatform(epd_platform)
args["_epd_legacy_platform"] = _epd_legacy_platform
args[_TAG_PACKAGES] = [
Requirement.from_spec_string(s)
for s in args.get(_TAG_PACKAGES, [])
]
return cls(
args["name"],
args["version"],
args["build"],
args["python"],
args["python_tag"],
args["abi_tag"],
args["platform_tag"],
args["platform_abi"],
args["packages"],
args["_epd_legacy_platform"],
args["metadata_version"],
)
@classmethod
def from_egg(cls, path_or_file):
sha256 = None
if isinstance(path_or_file, string_types):
if (
may_be_in_platform_blacklist(path_or_file)
or may_be_in_python_tag_blacklist(path_or_file)
):
sha256 = compute_sha256(path_or_file)
else:
with _keep_position(path_or_file.fp):
sha256 = compute_sha256(path_or_file.fp)
return cls._from_egg(path_or_file, sha256)
@classmethod
def _from_egg(cls, path_or_file, sha256):
def _create_spec_depend(zp):
epd_platform_string = EGG_PLATFORM_BLACK_LIST.get(sha256)
if epd_platform_string is None:
epd_platform = None
else:
epd_platform = EPDPlatform.from_epd_string(epd_platform_string)
try:
spec_depend_string = zp.read(_SPEC_DEPEND_LOCATION).decode()
except KeyError:
msg = ("File {0!r} is not an Enthought egg (is missing {1})"
.format(path_or_file, _SPEC_DEPEND_LOCATION))
raise MissingMetadata(msg)
else:
data, epd_platform = _normalized_info_from_string(
spec_depend_string, epd_platform, sha256
)
return cls._from_data(data, epd_platform)
if isinstance(path_or_file, string_types):
with zipfile2.ZipFile(path_or_file) as zp:
return _create_spec_depend(zp)
else:
return _create_spec_depend(path_or_file)
@classmethod
def from_string(cls, spec_depend_string):
data, epd_platform = _normalized_info_from_string(spec_depend_string)
return cls._from_data(data, epd_platform)
@property
def arch(self):
"""
Egg architecture.
"""
if self._epd_legacy_platform is None:
return None
else:
return self._epd_legacy_platform.arch._legacy_name
@property
def egg_name(self):
"""
Full egg name (including .egg extension).
"""
return egg_name(self.name, self.version, self.build)
@property
def osdist(self):
if self._epd_legacy_platform is None:
return None
else:
return self._epd_legacy_platform.osdist
@property
def platform(self):
"""
The legacy platform name (sys.platform).
"""
if self._epd_legacy_platform is None:
return None
else:
return self._epd_legacy_platform.platform
@property
def metadata_version(self):
return self._metadata_version
@metadata_version.setter
def metadata_version(self, value):
self._metadata_version = value
def _to_dict(self):
raw_data = {
_TAG_NAME: self.name,
_TAG_VERSION: self.version,
_TAG_BUILD: self.build,
_TAG_ARCH: self.arch,
_TAG_PLATFORM: self.platform,
_TAG_OSDIST: self.osdist,
_TAG_PACKAGES: [str(p) for p in self.packages],
_TAG_PYTHON: self.python,
_TAG_PYTHON_PEP425_TAG: self.python_tag,
_TAG_ABI_PEP425_TAG: self.abi_tag,
_TAG_PLATFORM_PEP425_TAG: self.platform_tag,
_TAG_PLATFORM_ABI: self.platform_abi,
_TAG_METADATA_VERSION: self.metadata_version
}
return raw_data
    def to_string(self):
        """
        Returns a string that is suitable for the depend file inside our
        legacy egg.
        """
        # NOTE(review): template is None for an unknown metadata version, in
        # which case the final .format() call raises AttributeError —
        # presumably callers only use supported versions; confirm.
        template = _METADATA_TEMPLATES.get(self.metadata_version, None)
        data = self._to_dict()
        if six.PY2:
            # Hack to avoid the 'u' prefix to appear in the spec/depend entries
            for k, v in data.items():
                data[k] = encode_if_needed(v)
        # This is just to ensure the exact same string as the produced by the
        # legacy buildsystem
        if len(self.packages) == 0:
            data[_TAG_PACKAGES] = "[]"
        else:
            if six.PY2:
                packages = [decode_if_needed(p) for p in self.packages]
            else:
                packages = self.packages
            data[_TAG_PACKAGES] = (
                u"[\n{0}\n]".format(
                    "\n".join(" '{0}',".format(p) for p in packages)
                )
            )
        return template.format(**data)
class Dependencies(object):
    """ Object storing the various dependencies for an egg.
    Each attribute is a tuple of Requirement instances.

    Parameters
    ----------
    runtime : tuple or None
        Runtime dependencies; defaults to an empty tuple.
    build : tuple or None
        Build-time dependencies; defaults to an empty tuple.
    """
    def __init__(self, runtime=None, build=None):
        self.runtime = runtime or ()
        # BUG FIX: this previously read `runtime or ()`, silently discarding
        # the `build` argument and aliasing build deps to runtime deps.
        self.build = build or ()
def _metadata_version_to_tuple(metadata_version):
""" Convert a metadata version string to a tuple for comparison."""
return tuple(int(s) for s in metadata_version.split("."))
def _normalized_info_from_string(spec_depend_string, epd_platform=None,
                                 sha256=None):
    """ Return a 'normalized' dictionary from the given spec/depend string.
    Note: the name value is NOT lower-cased, so that the egg filename may
    rebuilt from the data.

    Parameters
    ----------
    spec_depend_string : str
        Raw content of the spec/depend file.
    epd_platform : EPDPlatform or None
        When given, overrides the platform inferred from the raw spec.
    sha256 : str or None
        Egg checksum, used to look up per-egg python-tag overrides.

    Returns
    -------
    (data, epd_platform) tuple.
    """
    raw_data = parse_rawspec(spec_depend_string)
    data = {}
    for k in (_TAG_METADATA_VERSION,
              _TAG_NAME, _TAG_VERSION, _TAG_BUILD,
              _TAG_ARCH, _TAG_OSDIST, _TAG_PLATFORM,
              _TAG_PYTHON, _TAG_PACKAGES):
        data[k] = raw_data[k]
    epd_platform = epd_platform or _epd_platform_from_raw_spec(raw_data)
    # arch/platform/osdist are folded into epd_platform, so drop the raw keys.
    for k in (_TAG_ARCH, _TAG_PLATFORM, _TAG_OSDIST):
        data.pop(k)
    metadata_version = MetadataVersion.from_string(data[_TAG_METADATA_VERSION])
    # Blacklisted eggs (keyed by sha256) get a hard-coded python tag.
    python_tag = EGG_PYTHON_TAG_BLACK_LIST.get(sha256)
    if python_tag:
        data[_TAG_PYTHON_PEP425_TAG] = python_tag
    else:
        # python tag was only recorded from metadata version 1.2 on; guess
        # it from the python version for older eggs.
        if metadata_version < M("1.2"):
            data[_TAG_PYTHON_PEP425_TAG] = _guess_python_tag(
                raw_data[_TAG_PYTHON]
            )
        else:
            data[_TAG_PYTHON_PEP425_TAG] = raw_data[_TAG_PYTHON_PEP425_TAG]
    # abi/platform tags appeared in 1.3; guess them for older eggs.
    if metadata_version < M("1.3"):
        python_tag = data[_TAG_PYTHON_PEP425_TAG]
        data[_TAG_ABI_PEP425_TAG] = _guess_abi_tag(epd_platform, python_tag)
        data[_TAG_PLATFORM_PEP425_TAG] = _guess_platform_tag(epd_platform)
    else:
        data[_TAG_ABI_PEP425_TAG] = raw_data[_TAG_ABI_PEP425_TAG]
        data[_TAG_PLATFORM_PEP425_TAG] = raw_data[_TAG_PLATFORM_PEP425_TAG]
    # platform_abi appeared in 1.4; guess it for older eggs.
    if metadata_version < M("1.4"):
        python_tag = data[_TAG_PYTHON_PEP425_TAG]
        platform_abi = _guess_platform_abi(epd_platform, python_tag)
    else:
        platform_abi = raw_data[_TAG_PLATFORM_ABI]
    data[_TAG_PLATFORM_ABI] = platform_abi
    return data, epd_platform
# Keys used for the EggMetadata.to_json_dict()/from_json_dict() round trip.
_JSON_METADATA_VERSION = "metadata_version"
_JSON__RAW_NAME = "_raw_name"
_JSON_VERSION = "version"
_JSON_EPD_PLATFORM = "epd_platform"
_JSON_PYTHON_TAG = "python_tag"
_JSON_ABI_TAG = "abi_tag"
_JSON_PLATFORM_TAG = "platform_tag"
_JSON_PLATFORM_ABI_TAG = "platform_abi_tag"
_JSON_RUNTIME_DEPENDENCIES = "runtime_dependencies"
_JSON_SUMMARY = "summary"
class EggMetadata(object):
""" Enthought egg metadata for format 1.x.
"""
HIGHEST_SUPPORTED_METADATA_VERSION = _METADATA_DEFAULT_VERSION
""" Highest supported metadata version (as a MetadataVersion object).
If the parsed metadata is higher, it will not be possible to write back
the metadata. If the parsed metadata version is not compatible (different
major version), then parsing will raise an UnsupportedMetadata exception as
well.
"""
    @staticmethod
    def _may_be_in_blacklist(path):
        # Cheap pre-check: True when `path` may match one of the platform /
        # pkg-info / python-tag blacklists (helpers defined elsewhere), in
        # which case a sha256-keyed metadata override may apply.
        return (
            may_be_in_platform_blacklist(path)
            or may_be_in_pkg_info_blacklist(path)
            or may_be_in_python_tag_blacklist(path)
        )
    @classmethod
    def from_egg(cls, path_or_file, strict=True):
        """ Create a EggMetadata instance from an existing Enthought egg.
        Parameters
        ----------
        path: str or file-like object.
            If a string, understood as the path to the egg. Otherwise,
            understood as a zipfile-like object.
        strict: bool
            If True, will fail if metadata cannot be decoded correctly (e.g.
            unicode errors in EGG-INFO/PKG-INFO). If false, will ignore those
            errors, at the risk of data loss.
        """
        sha256 = None
        if isinstance(path_or_file, string_types):
            # For paths, only hash the egg when it might be blacklisted.
            if cls._may_be_in_blacklist(path_or_file):
                sha256 = compute_sha256(path_or_file)
        else:
            # File-like objects are always hashed, restoring the stream's
            # read position afterwards.
            with _keep_position(path_or_file.fp):
                sha256 = compute_sha256(path_or_file.fp)
        return cls._from_egg(path_or_file, sha256, strict)
@classmethod
def from_json_dict(cls, json_dict, pkg_info):
version = EnpkgVersion.from_string(json_dict[_JSON_VERSION])
if json_dict[_JSON_PYTHON_TAG] is not None:
python = PythonImplementation.from_string(json_dict[_JSON_PYTHON_TAG])
else:
python = None
if json_dict[_JSON_EPD_PLATFORM] is None:
epd_platform = None
else:
epd_platform = EPDPlatform.from_epd_string(json_dict[_JSON_EPD_PLATFORM])
dependencies = Dependencies(tuple(json_dict[_JSON_RUNTIME_DEPENDENCIES]))
metadata_version = MetadataVersion.from_string(
json_dict[_JSON_METADATA_VERSION]
)
return cls(
json_dict[_JSON__RAW_NAME], version, epd_platform, python,
json_dict[_JSON_ABI_TAG], json_dict[_JSON_PLATFORM_ABI_TAG],
dependencies, pkg_info, json_dict[_JSON_SUMMARY],
metadata_version=metadata_version
)
@classmethod
def _from_egg(cls, path_or_file, sha256, strict=True):
def _read_summary(fp):
summary_arcname = "EGG-INFO/spec/summary"
try:
summary = fp.read(summary_arcname)
except KeyError:
# the summary file may not exist for eggs built with
# endist/repack
summary = b""
return summary.decode("utf8")
def _compute_all_metadata(fp):
summary = _read_summary(fp)
pkg_info_data = _read_pkg_info(fp)
if pkg_info_data is None:
pkg_info_string = None
else:
pkg_info_string = _convert_if_needed(
pkg_info_data, sha256, strict
)
spec_depend = LegacySpecDepend._from_egg(fp, sha256)
return summary, pkg_info_string, spec_depend
if isinstance(path_or_file, string_types):
with zipfile2.ZipFile(path_or_file) as zp:
summary, pkg_info_string, spec_depend = _compute_all_metadata(zp)
else:
summary, pkg_info_string, spec_depend = _compute_all_metadata(
path_or_file
)
return cls._from_spec_depend(spec_depend, pkg_info_string, summary)
@classmethod
def _from_spec_depend(cls, spec_depend, pkg_info, summary,
metadata_version=None):
raw_name = spec_depend.name
version = EnpkgVersion.from_upstream_and_build(spec_depend.version,
spec_depend.build)
python_tag = spec_depend.python_tag
abi_tag = spec_depend.abi_tag
platform_abi = spec_depend.platform_abi
if spec_depend._epd_legacy_platform is None:
platform = None
else:
platform = spec_depend._epd_legacy_platform._epd_platform
dependencies = Dependencies(
tuple(dep for dep in spec_depend.packages)
)
metadata_version = metadata_version or spec_depend.metadata_version
return cls(raw_name, version, platform, python_tag, abi_tag,
platform_abi, dependencies, pkg_info, summary,
metadata_version)
@classmethod
def from_egg_metadata(cls, egg_metadata, **kw):
""" Utility ctor to create a new EggMetadata instance from an existing
one, potentially updating some metadata.
Any keyword argument (except `egg_metadata`) is understood as an
argument to EggMetadata.__init__.
Parameters
----------
egg_metadata: EggMetadata
"""
passed_kw = {"raw_name": egg_metadata._raw_name}
for k in (
"version", "platform", "python", "abi_tag", "pkg_info", "summary",
"metadata_version", "platform_abi",
):
passed_kw[k] = getattr(egg_metadata, k)
passed_kw["dependencies"] = Dependencies(
egg_metadata.runtime_dependencies
)
passed_kw.update(**kw)
return cls(**passed_kw)
def __init__(self, raw_name, version, platform, python, abi_tag,
platform_abi, dependencies, pkg_info, summary,
metadata_version=None):
""" EggMetadata instances encompass Enthought egg metadata.
Note: the constructor is considered private, please use one of the
from_* class methods.
Parameters
----------
raw_name: str
The 'raw' name, i.e. the name value in spec/depend.
version: EnpkgVersion
The full version
platform: EPDPlatform
An EPDPlatform instance, or None for cross-platform eggs
python: Python
The python implementation
abi_tag: str
The ABI tag, e.g. 'cp27m'. May be None.
platform_abi: str
The platform abi, e.g. 'msvc2008', 'gnu', etc. May be None.
dependencies: Dependencies
A Dependencies instance.
pkg_info: PackageInfo or str or None
Instance modeling the PKG-INFO content of the egg. If a string is
passed, it is assumed to be the PKG-INFO content, and is lazily
parsed into a PackageInfo when pkg_info is accessed for the first
time.
summary: str
The summary. Models the string in EGG-INFO/spec/summary. May
be empty.
"""
self._raw_name = raw_name
self.version = version
""" The version, as an EnpkgVersion instance."""
self.platform = platform
""" The platform, as a Platform instance."""
if isinstance(python, string_types):
python = PythonImplementation.from_string(python)
self.python = python
""" The python implementation."""
if abi_tag is not None and isinstance(abi_tag, six.string_types):
abi_tag = PythonABI(abi_tag)
self.abi = abi_tag
""" The ABI tag, following the PEP425 format, except that no ABI
is sorted as None."""
if (
platform_abi is not None
and isinstance(platform_abi, six.string_types)):
platform_abi = PlatformABI(platform_abi)
self.platform_abi = platform_abi
self.runtime_dependencies = tuple(dependencies.runtime)
""" List of runtime dependencies (as strings)."""
self.metadata_version = metadata_version or _METADATA_DEFAULT_VERSION
""" The version format of the underlying metadata."""
self._pkg_info = pkg_info
""" A PackageInfo instance modeling the underlying PKG-INFO. May
be None for eggs without an PKG-INFO file."""
self.summary = summary
""" The summary string."""
@property
def abi_tag(self):
if self.abi is None:
return None
else:
return self.abi.pep425_tag
@property
def abi_tag_string(self):
return PythonABI.pep425_tag_string(self.abi)
@property
def build(self):
""" The build number."""
return self.version.build
@property
def egg_basename(self):
""" The egg "base name", i.e. the name part of the egg filename."""
return self._raw_name
@property
def egg_name(self):
""" The egg filename."""
return self._spec_depend.egg_name
@property
def is_strictly_supported(self):
""" Returns True if the given metadata_version is fully supported.
A metadata_version is fully supported iff:
- metadata_version.major ==
EggMetadata.HIGHEST_SUPPORTED_METADATA_VERSION.major
- and metadata_version.minor <=
EggMetadata.HIGHEST_SUPPORTED_METADATA_VERSION.minor
"""
max_supported = EggMetadata.HIGHEST_SUPPORTED_METADATA_VERSION
return (
_are_compatible(self.metadata_version, max_supported)
and self.metadata_version.minor <= max_supported.minor
)
@property
def kind(self):
return "egg"
@property
def name(self):
""" The package name."""
return self._raw_name.lower().replace("-", "_")
@property
def pkg_info(self):
if isinstance(self._pkg_info, six.string_types):
self._pkg_info = PackageInfo.from_string(self._pkg_info)
return self._pkg_info
@property
def platform_abi_tag(self):
if self.platform_abi is None:
return None
else:
return self.platform_abi.pep425_tag
@property
def platform_abi_tag_string(self):
return PlatformABI.pep425_tag_string(self.platform_abi)
@property
def platform_tag(self):
""" Platform tag following PEP425, except that no platform is
represented as None and not 'any'."""
if self.platform is None:
return None
else:
return self.platform.pep425_tag
@property
def platform_tag_string(self):
return EPDPlatform.pep425_tag_string(self.platform)
@property
def python_tag(self):
if self.python is None:
return None
else:
return self.python.pep425_tag
@property
def python_tag_string(self):
return PythonImplementation.pep425_tag_string(self.python)
@property
def spec_depend_string(self):
return self._spec_depend.to_string()
@property
def upstream_version(self):
return six.text_type(self.version.upstream)
@property
def _python(self):
if self.python is None:
return None
else:
return u"{0}.{1}".format(self.python.major, self.python.minor)
    @property
    def _spec_depend(self):
        """ Rebuild a LegacySpecDepend object from this metadata.

        Raises UnsupportedMetadata when the metadata version is too new to
        be written back in the legacy format.
        """
        if not self.is_strictly_supported:
            msg = "Cannot write back metadata with unsupported version {0!r}"
            raise UnsupportedMetadata(
                self.metadata_version, msg.format(str(self.metadata_version))
            )
        if self.platform is None:
            epd_platform = None
        else:
            legacy_epd_platform = LegacyEPDPlatform(self.platform)
            epd_platform = legacy_epd_platform._epd_platform
        args = {
            "name": self._raw_name,
            "version": self.upstream_version,
            "build": self.build,
            "python": self._python,
            "python_tag": self.python_tag,
            "abi_tag": self.abi_tag,
            "platform_tag": self.platform_tag,
            "platform_abi": self.platform_abi_tag,
            # Requirements are serialized to text for the legacy format.
            "packages": [six.text_type(p) for p in self.runtime_dependencies],
            "metadata_version": six.text_type(self.metadata_version),
        }
        return LegacySpecDepend._from_data(args, epd_platform)
# Public methods
def dump(self, path):
""" Write the metadata to the given path as a metadata egg.
A metadata egg is a zipfile using the same structured as an egg, except
that it only contains metadata.
Parameters
----------
path : str
The path to write the zipped metadata into.
"""
with zipfile2.ZipFile(path, "w", zipfile2.ZIP_DEFLATED) as zp:
zp.writestr(
_SPEC_DEPEND_LOCATION, self.spec_depend_string.encode()
)
zp.writestr(
_SPEC_SUMMARY_LOCATION, self.summary.encode()
)
if self.pkg_info:
self.pkg_info._dump_as_zip(zp)
def to_json_dict(self):
if self.platform is None:
epd_platform = None
else:
epd_platform = six.text_type(self.platform)
return {
_JSON_METADATA_VERSION: six.text_type(self.metadata_version),
_JSON__RAW_NAME: self._raw_name,
_JSON_VERSION: six.text_type(self.version),
_JSON_EPD_PLATFORM: epd_platform,
_JSON_PYTHON_TAG: self.python_tag,
_JSON_ABI_TAG: self.abi_tag,
_JSON_PLATFORM_TAG: self.platform_tag,
_JSON_PLATFORM_ABI_TAG: self.platform_abi_tag,
_JSON_RUNTIME_DEPENDENCIES: [
six.text_type(p) for p in self.runtime_dependencies
],
_JSON_SUMMARY: self.summary,
}
# Protocol implementations
    def __eq__(self, other):
        # Equality means "produces identical egg metadata": same spec/depend
        # serialization, summary and PKG-INFO.  Comparing against any other
        # type raises TypeError instead of returning NotImplemented.
        if isinstance(other, self.__class__):
            return (
                self.spec_depend_string == other.spec_depend_string
                and self.summary == other.summary
                and self.pkg_info == other.pkg_info
            )
        else:
            raise TypeError(
                "Only equality between EggMetadata instances is supported"
            )
def __ne__(self, other):
return not self == other | 0.540924 | 0.084909 |
import pandas as pd
from datetime import datetime
from datetime import timedelta
ESSENCE = ['EventID', 'TimeWritten']
class EventProc:
    """ Load Windows logon/logoff events from a CSV export and expose them
    as active/inactive time spans for a one-day "rhythm" donut chart.
    """
    def __init__(self, csv_path=r'static/logon_rhythm.csv'):
        # `csv_path` is now a parameter (default keeps the original
        # hard-coded location) so alternate exports can be processed.
        raw_data = pd.read_csv(csv_path, header=1, encoding='utf-8')
        self.df = pd.DataFrame(raw_data, columns=ESSENCE)
        # Korean AM/PM markers must be mapped before datetime parsing.
        self.df = self.df.replace(['오전', '오후'], ['AM', 'PM'], regex=True)
        self.df['TimeWritten'] = pd.to_datetime(self.df['TimeWritten'], format='%Y-%m-%d %p %I:%M:%S')
        # delta pin: timestamp of the event currently being processed
        self.pin = None
        # last computed span: { 'delta': minutes, 'flag': active/deactive }
        self.result = dict()
    def today_rhythm(self):
        """
        today rhythm - each dataframe row to donut chart
        output: generator yielding {'delta': minutes, 'flag': ...} — one per
        event since midnight, then the span up to "now", then the remainder
        of the day.
        """
        now = datetime.today()
        now_date = now - datetime(now.year, now.month, now.day)
        today = now - now_date  # midnight of the current day
        checkpoint = today
        # select query pin: only today's events
        evt_today = self.df[['TimeWritten', 'EventID']]
        evt_today = evt_today[evt_today['TimeWritten'] > today]
        # loop reverse index in dataframe (oldest event first)
        for _, element in evt_today.iloc[::-1].iterrows():
            # type: datetime.datetime
            self.pin = element['TimeWritten']
            # type: int
            evt_id = element['EventID']
            # 7001/506 close a 'deactive' span; 7002/507 close an 'active'
            # span (event-id semantics assumed from usage — TODO confirm).
            if evt_id in [7001, 506]:
                self.result = self.calc_delta(self.pin, checkpoint, 'deactive')
            if evt_id in [7002, 507]:
                self.result = self.calc_delta(self.pin, checkpoint, 'active')
            checkpoint = self.pin
            yield self.result
        # current time
        yield self.calc_delta(now, checkpoint, 'active')
        # remaining time until next midnight ("tommorrow" typo fixed)
        tomorrow = today + timedelta(days=1)
        yield self.calc_delta(tomorrow, now, 'remain')
    def calc_delta(self, after, before, flag):
        """
        Calculation delta time between `before` and `after`.
        output: {'delta': whole minutes (float), 'flag': flag}
        (The original docstring wrongly claimed it sets `self.result`; it
        only returns the dict.)
        """
        delta = after - before
        return {
            'delta': round(delta.total_seconds() / 60, 0),
            'flag': flag
        }
import pandas as pd
from datetime import datetime
from datetime import timedelta
ESSENCE = ['EventID', 'TimeWritten']
class EventProc:
def __init__(self):
raw_data = pd.read_csv(r'static/logon_rhythm.csv', header=1, encoding='utf-8')
self.df = pd.DataFrame(raw_data, columns=ESSENCE)
self.df = self.df.replace(['오전', '오후'], ['AM', 'PM'], regex=True)
self.df['TimeWritten'] = pd.to_datetime(self.df['TimeWritten'], format='%Y-%m-%d %p %I:%M:%S')
# delta pin
self.pin = None
# { delta: ###, flag: active/deactive }
self.result = dict()
def today_rhythm(self):
"""
today rhythm - each dataframe row to donut chart
output: generator
"""
now = datetime.today()
now_date = now - datetime(now.year, now.month, now.day)
today = now - now_date
checkpoint = today
# select query pin
evt_today = self.df[['TimeWritten', 'EventID']]
evt_today = evt_today[evt_today['TimeWritten'] > today]
# loop reverse index in dataframe
for _, element in evt_today.iloc[::-1].iterrows():
# type: datetime.datetime
self.pin = element['TimeWritten']
# type: int
evt_id = element['EventID']
# Refresh
if evt_id in [7001, 506]:
self.result = self.calc_delta(self.pin, checkpoint, 'deactive')
if evt_id in [7002, 507]:
self.result = self.calc_delta(self.pin, checkpoint, 'active')
checkpoint = self.pin
yield self.result
# current time
yield self.calc_delta(now, checkpoint, 'active')
# remaining time
tommorrow = today + timedelta(days=1)
yield self.calc_delta(tommorrow, now, 'remain')
def calc_delta(self, after, before, flag):
"""
Calculation delta time
output: `self.result` dictionary
"""
delta = after - before
return {
'delta': round(delta.total_seconds() / 60, 0),
'flag': flag
} | 0.441191 | 0.166472 |
from math import *
import Spheral
import mpi
#-------------------------------------------------------------------------------
# A class for tracking the history of a given set of nodes.
#-------------------------------------------------------------------------------
class NodeHistory:
def __init__(self,
nodeList,
nodeIndices,
sampleMethod,
filename,
header = None,
labels = None):
self.restart = Spheral.RestartableObject(self)
self.nodeList = nodeList
self.sampleMethod = sampleMethod
self.filename = filename
self.cycleHistory = []
self.timeHistory = []
self.sampleHistory = []
# Figure out the dimensionality.
FieldConstructor = None
if isinstance(nodeList, Spheral.NodeList1d):
FieldConstructor = Spheral.IntField1d
elif isinstance(nodeList, Spheral.NodeList2d):
FieldConstructor = Spheral.IntField2d
elif isinstance(nodeList, Spheral.NodeList3d):
FieldConstructor = Spheral.IntField3d
assert FieldConstructor is not None
# Store the set of nodes we're going to sample as a field of flags.
# This should automatically be safe as NodeLists/Fields get renumbered,
# redistributed, deleted, added, or what have you.
self.nodeFlags = FieldConstructor("flag nodes", nodeList, 0)
if nodeIndices is None:
nodeIndices = range(nodeList.numInternalNodes)
self.nodeIndices = nodeIndices
if isinstance(nodeIndices, list):
for i in nodeIndices:
assert i >= 0 and i < nodeList.numInternalNodes
self.nodeFlags[i] = 1
else:
self.currentNodeIndices()
# Open the history file.
self.file = None
if mpi.rank == 0:
self.file = open(self.filename, "w")
assert self.file is not None
# Write the optional header string.
if header:
self.file.write(header + "\n")
# Write the optional label line
if labels:
self.file.write(("# " + ((len(labels) + 2)*'"%20s" ') + "\n") % (("cycle", "time") + labels))
return
def currentNodeIndices(self):
if isinstance(self.nodeIndices, list):
return [i for i in range(self.nodeList.numInternalNodes)
if self.nodeFlags[i] == 1]
else:
result = self.nodeIndices(self.nodeList)
self.nodeFlags.Zero()
for i in result:
assert i >= 0 and i < self.nodeList.numInternalNodes
self.nodeFlags[i] = 1
return result
def sample(self, cycle, t, dt):
# Get the set of nodes.
nodeIndices = self.currentNodeIndices()
# Get the result of the sampling method.
result = self.sampleMethod(self.nodeList, nodeIndices)
# Update our history variables.
self.cycleHistory.append(cycle)
self.timeHistory.append(t)
self.sampleHistory.append(result)
# Update the history file.
if mpi.rank == 0:
assert self.file is not None
if isinstance(result, tuple):
samplestr = ""
for x in result:
samplestr += str(x) + " "
else:
samplestr = str(result)
self.file.write("%i \t %g \t %s\n" % (cycle, t, samplestr))
self.file.flush()
return
def flushHistory(self):
if mpi.rank == 0:
assert self.file is not None
n = len(self.cycleHistory)
assert len(self.timeHistory) == n
assert len(self.sampleHistory) == n
if mpi.rank == 0:
for i in xrange(n):
if isinstance(self.sampleHistory[i], tuple):
samplestr = ""
for x in self.sampleHistory[i]:
samplestr += str(x) + " "
else:
samplestr = str(self.sampleHistory[i])
self.file.write("%i \t %g \t %s\n" % (self.cycleHistory[i],
self.timeHistory[i],
samplestr))
self.file.flush()
return
def label(self):
return "NodeHistory"
def dumpState(self, file, path):
file.writeObject(self.filename, path + "/filename")
file.writeObject(self.cycleHistory, path + "/cycleHistory")
file.writeObject(self.timeHistory, path + "/timeHistory")
file.writeObject(self.sampleHistory, path + "/sampleHistory")
file.write(self.nodeFlags, path + "/nodeFlags")
return
def restoreState(self, file, path):
try:
self.filename = file.readObject(path + "/filename")
self.cycleHistory = file.readObject(path + "/cycleHistory")
self.timeHistory = file.readObject(path + "/timeHistory")
self.sampleHistory = file.readObject(path + "/sampleHistory")
file.read(self.nodeFlags, path + "/nodeFlags")
self.flushHistory()
except RuntimeError:
print "WARNING: unable to restore NodeHistory restart state"
return
def __call__(self, cycle, t, dt):
self.sample(cycle, t, dt)
return | src/SimulationControl/NodeHistory.py | from math import *
import Spheral
import mpi
#-------------------------------------------------------------------------------
# A class for tracking the history of a given set of nodes.
#-------------------------------------------------------------------------------
class NodeHistory:
def __init__(self,
nodeList,
nodeIndices,
sampleMethod,
filename,
header = None,
labels = None):
self.restart = Spheral.RestartableObject(self)
self.nodeList = nodeList
self.sampleMethod = sampleMethod
self.filename = filename
self.cycleHistory = []
self.timeHistory = []
self.sampleHistory = []
# Figure out the dimensionality.
FieldConstructor = None
if isinstance(nodeList, Spheral.NodeList1d):
FieldConstructor = Spheral.IntField1d
elif isinstance(nodeList, Spheral.NodeList2d):
FieldConstructor = Spheral.IntField2d
elif isinstance(nodeList, Spheral.NodeList3d):
FieldConstructor = Spheral.IntField3d
assert FieldConstructor is not None
# Store the set of nodes we're going to sample as a field of flags.
# This should automatically be safe as NodeLists/Fields get renumbered,
# redistributed, deleted, added, or what have you.
self.nodeFlags = FieldConstructor("flag nodes", nodeList, 0)
if nodeIndices is None:
nodeIndices = range(nodeList.numInternalNodes)
self.nodeIndices = nodeIndices
if isinstance(nodeIndices, list):
for i in nodeIndices:
assert i >= 0 and i < nodeList.numInternalNodes
self.nodeFlags[i] = 1
else:
self.currentNodeIndices()
# Open the history file.
self.file = None
if mpi.rank == 0:
self.file = open(self.filename, "w")
assert self.file is not None
# Write the optional header string.
if header:
self.file.write(header + "\n")
# Write the optional label line
if labels:
self.file.write(("# " + ((len(labels) + 2)*'"%20s" ') + "\n") % (("cycle", "time") + labels))
return
def currentNodeIndices(self):
if isinstance(self.nodeIndices, list):
return [i for i in range(self.nodeList.numInternalNodes)
if self.nodeFlags[i] == 1]
else:
result = self.nodeIndices(self.nodeList)
self.nodeFlags.Zero()
for i in result:
assert i >= 0 and i < self.nodeList.numInternalNodes
self.nodeFlags[i] = 1
return result
def sample(self, cycle, t, dt):
# Get the set of nodes.
nodeIndices = self.currentNodeIndices()
# Get the result of the sampling method.
result = self.sampleMethod(self.nodeList, nodeIndices)
# Update our history variables.
self.cycleHistory.append(cycle)
self.timeHistory.append(t)
self.sampleHistory.append(result)
# Update the history file.
if mpi.rank == 0:
assert self.file is not None
if isinstance(result, tuple):
samplestr = ""
for x in result:
samplestr += str(x) + " "
else:
samplestr = str(result)
self.file.write("%i \t %g \t %s\n" % (cycle, t, samplestr))
self.file.flush()
return
def flushHistory(self):
if mpi.rank == 0:
assert self.file is not None
n = len(self.cycleHistory)
assert len(self.timeHistory) == n
assert len(self.sampleHistory) == n
if mpi.rank == 0:
for i in xrange(n):
if isinstance(self.sampleHistory[i], tuple):
samplestr = ""
for x in self.sampleHistory[i]:
samplestr += str(x) + " "
else:
samplestr = str(self.sampleHistory[i])
self.file.write("%i \t %g \t %s\n" % (self.cycleHistory[i],
self.timeHistory[i],
samplestr))
self.file.flush()
return
def label(self):
return "NodeHistory"
def dumpState(self, file, path):
file.writeObject(self.filename, path + "/filename")
file.writeObject(self.cycleHistory, path + "/cycleHistory")
file.writeObject(self.timeHistory, path + "/timeHistory")
file.writeObject(self.sampleHistory, path + "/sampleHistory")
file.write(self.nodeFlags, path + "/nodeFlags")
return
def restoreState(self, file, path):
try:
self.filename = file.readObject(path + "/filename")
self.cycleHistory = file.readObject(path + "/cycleHistory")
self.timeHistory = file.readObject(path + "/timeHistory")
self.sampleHistory = file.readObject(path + "/sampleHistory")
file.read(self.nodeFlags, path + "/nodeFlags")
self.flushHistory()
except RuntimeError:
print "WARNING: unable to restore NodeHistory restart state"
return
def __call__(self, cycle, t, dt):
self.sample(cycle, t, dt)
return | 0.672547 | 0.314794 |
import copy
import operator
import warnings
import re
from sentinels import NOTHING
from six import (
iteritems,
itervalues,
string_types,
)
from .__version__ import __version__
try:
from bson import ObjectId
except ImportError:
from .object_id import ObjectId
__all__ = ['Connection', 'Database', 'Collection', 'ObjectId']
RE_TYPE = type(re.compile(''))
def _force_list(v):
return v if isinstance(v, (list, tuple)) else [v]
def _not_nothing_and(f):
    """Wrap comparison `f` so it returns False when the document value is
    the NOTHING sentinel (i.e. the field was absent)."""
    def guarded(v, l):
        return v is not NOTHING and f(v, l)
    return guarded
def _all_op(doc_val, search_val):
dv = _force_list(doc_val)
return all(x in dv for x in search_val)
def _print_deprecation_warning(old_param_name, new_param_name):
warnings.warn("'%s' has been deprecated to be in line with pymongo implementation, "
"a new parameter '%s' should be used instead. the old parameter will be kept for backward "
"compatibility purposes." % old_param_name, new_param_name, DeprecationWarning)
# Maps Mongo query operators to predicates taking (document_value,
# search_value).  Ordering operators are guarded so a missing field (the
# NOTHING sentinel) never satisfies $gt/$gte/$lt/$lte.
OPERATOR_MAP = {'$ne': operator.ne,
                '$gt': _not_nothing_and(operator.gt),
                '$gte': _not_nothing_and(operator.ge),
                '$lt': _not_nothing_and(operator.lt),
                '$lte': _not_nothing_and(operator.le),
                '$all':_all_op,
                '$in':lambda dv, sv: any(x in sv for x in _force_list(dv)),
                '$nin':lambda dv, sv: all(x not in sv for x in _force_list(dv)),
                '$exists':lambda dv, sv: bool(sv) == (dv is not NOTHING),
                '$regex':lambda dv, sv: re.compile(sv).match(dv),  # truthy match object, not a bool
                '$where':lambda db, sv: True # ignore this complex filter
                }
def resolve_key_value(key, doc):
    """Resolve a (possibly dotted) key against a document.

    Dotted keys recurse into nested dicts; a missing key or a non-dict
    document resolves to the NOTHING sentinel.
    """
    if not doc or not isinstance(doc, dict):
        return NOTHING
    head, dot, rest = key.partition('.')
    if not dot:
        return doc.get(key, NOTHING)
    return resolve_key_value(rest, doc.get(head, {}))
class Connection(object):
    """In-memory stand-in for pymongo's Connection.

    Databases are created lazily on first access and cached per name.  All
    connection parameters are accepted for API compatibility but ignored:
    nothing is actually connected to.
    """
    def __init__(self, host = None, port = None, max_pool_size = 10,
                 network_timeout = None, document_class = dict,
                 tz_aware = False, _connect = True, **kwargs):
        super(Connection, self).__init__()
        self._databases = {}
        self._document_class = document_class
    def __getitem__(self, db_name):
        try:
            return self._databases[db_name]
        except KeyError:
            database = self._databases[db_name] = Database(self, db_name)
            return database
    def __getattr__(self, attr):
        # conn.foo is an alias for conn['foo'].
        return self[attr]
    def server_info(self):
        """Return a static description mimicking a MongoDB 2.0.6 server."""
        return {
            "version" : "2.0.6",
            "sysInfo" : "Mock",
            "versionArray" : [2, 0, 6, 0],
            "bits" : 64,
            "debug" : False,
            "maxBsonObjectSize" : 16777216,
            "ok" : 1
        }
class Database(object):
    """In-memory stand-in for a pymongo Database: a named set of lazily
    created Collection objects."""
    def __init__(self, conn, name):
        super(Database, self).__init__()
        # 'system.indexes' is always present, like in a real MongoDB.
        self._collections = {'system.indexes' : Collection(self, "system.indexes")}
        self._connection = conn
        self._name = name
    def __getitem__(self, collection_name):
        try:
            return self._collections[collection_name]
        except KeyError:
            collection = self._collections[collection_name] = Collection(self, collection_name)
            return collection
    def __getattr__(self, attr):
        # db.foo is an alias for db['foo'].
        return self[attr]
    def collection_names(self):
        """Return the names of all instantiated collections."""
        return list(self._collections.keys())
class Collection(object):
def __init__(self, db, name):
super(Collection, self).__init__()
self._documents = {}
self._database = db
self._name = name
    @property
    def full_name(self):
        # Dotted "<database>.<collection>" name.
        return "{}.{}".format(self._database._name, self._name)
    def insert(self, data, safe = None, continue_on_error = None):
        """Insert one document or a list of documents; returns the inserted
        _id (or a list of _ids).  `safe`/`continue_on_error` are ignored."""
        if isinstance(data, list):
            return [self._insert(element) for element in data]
        return self._insert(data)
    def _insert(self, document):
        # Store a (shallow) copy so later mutations of the caller's dict do
        # not leak into storage; generate an ObjectId when none is supplied.
        data = self._copy_for_insert(document)
        if not '_id' in data:
            data['_id'] = ObjectId()
        object_id = data['_id']
        # A duplicate _id is treated as a programming error in this mock.
        assert object_id not in self._documents
        self._documents[object_id] = data
        return object_id
def update(self, spec, document, upsert = False, manipulate = False,
safe = False, multi = False, _check_keys = False, **kwargs):
"""Updates document(s) in the collection."""
found = False
for existing_document in self._iter_documents(spec):
first = True
found = True
for k, v in iteritems(document):
if k == '$set':
existing_document.update(v)
elif k == '$unset':
for field, value in v.iteritems():
if value and existing_document.has_key(field):
del existing_document[field]
elif k == '$inc':
for field, value in iteritems(v):
new_value = existing_document.get(field, 0)
new_value = new_value + value
existing_document[field] = new_value
elif k == '$addToSet':
for field, value in iteritems(v):
container = existing_document.setdefault(field, [])
if value not in container:
container.append(value)
elif k == '$pull':
for field, value in iteritems(v):
arr = existing_document[field]
existing_document[field] = [obj for obj in arr if not obj == value]
elif k == '$pushAll':
for field, value in iteritems(v):
arr = existing_document.get(field)
if arr is None:
existing_document[field] = list(value)
else:
existing_document[field].append(list(value))
else:
if first:
# replace entire document
for key in document.keys():
if key.startswith('$'):
# can't mix modifiers with non-modifiers in update
raise ValueError('field names cannot start with $ [{}]'.format(k))
_id = spec.get('_id', existing_document.get('_id', None))
existing_document.clear()
if _id:
existing_document['_id'] = _id
existing_document.update(document)
if existing_document['_id'] != _id:
# id changed, fix index
del self._documents[_id]
self.insert(existing_document)
break
else:
# can't mix modifiers with non-modifiers in update
raise ValueError('Invalid modifier specified: {}'.format(k))
first = False
if not multi:
return
if not found and upsert:
if '$set' in document.keys():
new_document = document['$set']
new_document.update(spec)
self.insert(new_document)
    def find(self, spec = None, fields = None, filter = None, sort = None, slow = None, read_preference = None, limit = None):
        """Query the collection; returns a Cursor over copies of matching
        documents, restricted to `fields` when given.

        `filter` is a deprecated alias for `spec`; `slow`, `read_preference`
        and `limit` are accepted for API compatibility but ignored.
        """
        if filter is not None:
            _print_deprecation_warning('filter', 'spec')
            if spec is None:
                spec = filter
        dataset = (self._copy_only_fields(document, fields) for document in self._iter_documents(spec))
        cursor = Cursor(dataset)
        if sort is not None:
            # Apply sort keys in the order given.
            for key, value in dict(sort).items():
                cursor.sort(key, value)
        return cursor
    def _copy_for_insert(self, doc):
        # Shallow-copies the document, dropping any field whose value is a
        # dict containing '$exists'.  NOTE(review): '$exists' is a query
        # operator, so this skip looks like it strips query fragments from
        # upserted specs — confirm the intent.
        doc_copy = dict()
        for key, value in doc.items():
            if type(value) is dict and "$exists" in value:
                continue
            doc_copy[key] = value
        return doc_copy
    def _copy_only_fields(self, doc, fields):
        """Copy only the specified fields.

        fields=None copies every field; an empty fields list copies just
        '_id'.  The copy uses the connection's document_class.
        """
        if fields is None:
            fields = doc.keys()
        doc_copy = self._database._connection._document_class()
        if not fields:
            fields = ["_id"]
        for key in fields:
            if key in doc:
                doc_copy[key] = doc[key]
        return doc_copy
def _iter_documents(self, filter = None):
return (document for document in itervalues(self._documents) if self._filter_applies(filter, document))
def find_one(self, spec=None, **kwargs):
try:
return next(self.find(spec, **kwargs))
except StopIteration:
return None
def find_and_modify(self, query = {}, update = None, upsert = False, **kwargs):
old = self.find_one(query)
if not old:
if upsert:
old = {'_id':self.insert(query)}
else:
return None
self.update({'_id':old['_id']}, update)
if kwargs.get('new', False):
return self.find_one({'_id':old['_id']})
return old
def _filter_applies(self, search_filter, document):
"""Returns a boolean indicating whether @search_filter applies
to @document.
"""
if search_filter is None:
return True
elif isinstance(search_filter, ObjectId):
search_filter = {'_id': search_filter}
elif "$query" in search_filter:
search_filter = search_filter['$query']
for key, search in iteritems(search_filter):
doc_val = resolve_key_value(key, document)
if isinstance(search, dict):
is_match = all(
operator_string in OPERATOR_MAP and OPERATOR_MAP[operator_string] (doc_val, search_val)
for operator_string, search_val in iteritems(search)
)
elif isinstance(search, RE_TYPE) and isinstance(doc_val, string_types):
is_match = search.match(doc_val) is not None
elif key in OPERATOR_MAP:
OPERATOR_MAP[key] (doc_val, search)
elif search is None:
is_match = doc_val is None or doc_val == NOTHING
elif type(doc_val) is list:
is_match = search in doc_val or search == doc_val
else:
is_match = doc_val == search
if not is_match:
return False
return True
    def save(self, to_save, manipulate = True, safe = False, **kwargs):
        """Save a document: insert when it has no '_id', else upsert by '_id'.

        Returns the document's _id (a new ObjectId for fresh inserts).
        Raises TypeError when *to_save* is not a dict.
        """
        if not isinstance(to_save, dict):
            raise TypeError("cannot save object of type %s" % type(to_save))
        if "_id" not in to_save:
            return self.insert(to_save)
        else:
            # upsert=True (third positional arg) so an unknown _id creates
            # the document instead of silently doing nothing
            self.update({"_id": to_save["_id"]}, to_save, True,
                manipulate, safe, _check_keys = True, **kwargs)
            return to_save.get("_id", None)
def remove(self, spec_or_id = None, search_filter = None):
"""Remove objects matching spec_or_id from the collection."""
if search_filter is not None:
_print_deprecation_warning('search_filter', 'spec_or_id')
if spec_or_id is None:
spec_or_id = search_filter if search_filter else {}
if not isinstance(spec_or_id, dict):
spec_or_id = {'_id': spec_or_id}
to_delete = list(self.find(spec = spec_or_id))
for doc in to_delete:
doc_id = doc['_id']
del self._documents[doc_id]
    def count(self):
        """Return the number of documents stored in this collection."""
        return len(self._documents)
class Cursor(object):
    """Lazy iterator over a query's result set, mimicking pymongo's Cursor."""
    def __init__(self, dataset):
        super(Cursor, self).__init__()
        self._dataset = dataset
        self._limit = None   # remaining documents to yield; None = unlimited
        self._skip = None    # documents to discard before the first yield
    def __iter__(self):
        return self
    def __next__(self):
        # Apply a pending skip() lazily, on the first advance only.
        if self._skip:
            for i in range(self._skip):
                next(self._dataset)
            self._skip = None
        if self._limit is not None and self._limit <= 0:
            raise StopIteration()
        if self._limit is not None:
            self._limit -= 1
        return next(self._dataset)
    next = __next__  # Python 2 iterator protocol
    def sort(self, key, order):
        """Stable-sort remaining results by *key*; order < 0 sorts descending."""
        arr = [x for x in self._dataset]
        arr = sorted(arr, key = lambda x:x[key], reverse = order < 0)
        self._dataset = iter(arr)
        return self
    def count(self):
        """Count remaining results without consuming the cursor."""
        arr = [x for x in self._dataset]
        count = len(arr)
        self._dataset = iter(arr)
        return count
    def skip(self, count):
        self._skip = count
        return self
    def limit(self, count):
        self._limit = count
        return self
    def batch_size(self, count):
        # Batching is meaningless for an in-memory cursor; kept for API parity.
        return self
    def rewind(self):
        # BUG FIX: this line was fused with dataset-row residue
        # ("pass | mongomock/__init__.py | import copy"), a syntax error.
        pass
# start of the next concatenated module's imports (kept intact)
import copy
import operator
import warnings
import re
from sentinels import NOTHING
from six import (
iteritems,
itervalues,
string_types,
)
from .__version__ import __version__
try:
from bson import ObjectId
except ImportError:
from .object_id import ObjectId
__all__ = ['Connection', 'Database', 'Collection', 'ObjectId']
RE_TYPE = type(re.compile(''))
def _force_list(v):
return v if isinstance(v, (list, tuple)) else [v]
def _not_nothing_and(f):
    "wrap an operator to return False if the first arg is NOTHING"
    def wrapped(v, l):
        return v is not NOTHING and f(v, l)
    return wrapped
def _all_op(doc_val, search_val):
    """$all operator: every element of *search_val* must be present in the
    (listified) document value."""
    candidates = _force_list(doc_val)
    for wanted in search_val:
        if wanted not in candidates:
            return False
    return True
def _print_deprecation_warning(old_param_name, new_param_name):
warnings.warn("'%s' has been deprecated to be in line with pymongo implementation, "
"a new parameter '%s' should be used instead. the old parameter will be kept for backward "
"compatibility purposes." % old_param_name, new_param_name, DeprecationWarning)
# Maps MongoDB query operators to predicates of (document_value, search_value).
# Comparison operators are wrapped so a missing field (NOTHING) never matches.
OPERATOR_MAP = {'$ne': operator.ne,
                '$gt': _not_nothing_and(operator.gt),
                '$gte': _not_nothing_and(operator.ge),
                '$lt': _not_nothing_and(operator.lt),
                '$lte': _not_nothing_and(operator.le),
                '$all':_all_op,
                '$in':lambda dv, sv: any(x in sv for x in _force_list(dv)),
                '$nin':lambda dv, sv: all(x not in sv for x in _force_list(dv)),
                '$exists':lambda dv, sv: bool(sv) == (dv is not NOTHING),
                # NOTE(review): returns a match object or None (truthy/falsy),
                # not a strict bool -- callers only test truthiness.
                '$regex':lambda dv, sv: re.compile(sv).match(dv),
                '$where':lambda db, sv: True # ignore this complex filter
                }
def resolve_key_value(key, doc):
    """Resolve keys to their proper value in a document.
    Returns the appropriate nested value if the key includes dot notation,
    and the NOTHING sentinel when the path cannot be resolved.
    """
    if not isinstance(doc, dict) or not doc:
        return NOTHING
    first, _, remainder = key.partition('.')
    if not remainder:
        return doc.get(key, NOTHING)
    return resolve_key_value(remainder, doc.get(first, {}))
class Connection(object):
    """In-memory mock of a pymongo Connection.

    All constructor arguments are accepted for API compatibility but
    ignored, except *document_class*, which is used when projecting
    query results.
    """
    def __init__(self, host = None, port = None, max_pool_size = 10,
                 network_timeout = None, document_class = dict,
                 tz_aware = False, _connect = True, **kwargs):
        super(Connection, self).__init__()
        self._databases = {}
        self._document_class = document_class
    def __getitem__(self, db_name):
        # Databases are created lazily on first access.
        db = self._databases.get(db_name, None)
        if db is None:
            db = self._databases[db_name] = Database(self, db_name)
        return db
    def __getattr__(self, attr):
        # Attribute access aliases item access: conn.foo == conn['foo'],
        # so any unknown attribute silently creates a database.
        return self[attr]
    def server_info(self):
        """Static server metadata mimicking a MongoDB 2.0.6 instance."""
        return {
            "version" : "2.0.6",
            "sysInfo" : "Mock",
            "versionArray" : [
                2,
                0,
                6,
                0
            ],
            "bits" : 64,
            "debug" : False,
            "maxBsonObjectSize" : 16777216,
            "ok" : 1
        }
class Database(object):
    """In-memory mock of a pymongo Database: a lazy map of Collections."""
    def __init__(self, conn, name):
        super(Database, self).__init__()
        # 'system.indexes' exists up front, as on a real MongoDB database.
        self._collections = {'system.indexes' : Collection(self, "system.indexes")}
        self._connection = conn
        self._name = name
    def __getitem__(self, collection_name):
        # Collections are created lazily on first access.
        db = self._collections.get(collection_name, None)
        if db is None:
            db = self._collections[collection_name] = Collection(self, collection_name)
        return db
    def __getattr__(self, attr):
        # Attribute access aliases item access: db.foo == db['foo'].
        return self[attr]
    def collection_names(self):
        """Names of all instantiated collections (includes 'system.indexes')."""
        return list(self._collections.keys())
class Collection(object):
    """An in-memory stand-in for a pymongo Collection.

    Documents live in self._documents, keyed by their '_id'.  Only the
    subset of the pymongo API commonly exercised in tests is provided.
    """
    def __init__(self, db, name):
        super(Collection, self).__init__()
        self._documents = {}  # _id -> stored document
        self._database = db
        self._name = name
    @property
    def full_name(self):
        """Dotted "<database>.<collection>" name, as in pymongo."""
        return "{}.{}".format(self._database._name, self._name)
    def insert(self, data, safe = None, continue_on_error = None):
        """Insert one document or a list of documents; return the _id(s)."""
        if isinstance(data, list):
            return [self._insert(element) for element in data]
        return self._insert(data)
    def _insert(self, document):
        data = self._copy_for_insert(document)
        if not '_id' in data:
            data['_id'] = ObjectId()
        object_id = data['_id']
        # NOTE(review): assert vanishes under `python -O`, which would let a
        # duplicate _id silently overwrite; consider a real exception.
        assert object_id not in self._documents
        self._documents[object_id] = data
        return object_id
    def update(self, spec, document, upsert = False, manipulate = False,
               safe = False, multi = False, _check_keys = False, **kwargs):
        """Updates document(s) in the collection.

        *document* is either a dict of '$...' modifiers or a plain
        replacement document (the two may not be mixed).  With multi=False
        only the first matching document is touched.
        """
        found = False
        for existing_document in self._iter_documents(spec):
            first = True
            found = True
            for k, v in iteritems(document):
                if k == '$set':
                    existing_document.update(v)
                elif k == '$unset':
                    # BUG FIX: previously used v.iteritems() and
                    # existing_document.has_key(), which do not exist on
                    # Python 3 dicts; use the same idioms as the other
                    # modifier branches.
                    for field, value in iteritems(v):
                        if value and field in existing_document:
                            del existing_document[field]
                elif k == '$inc':
                    for field, value in iteritems(v):
                        new_value = existing_document.get(field, 0)
                        new_value = new_value + value
                        existing_document[field] = new_value
                elif k == '$addToSet':
                    for field, value in iteritems(v):
                        container = existing_document.setdefault(field, [])
                        if value not in container:
                            container.append(value)
                elif k == '$pull':
                    for field, value in iteritems(v):
                        arr = existing_document[field]
                        existing_document[field] = [obj for obj in arr if not obj == value]
                elif k == '$pushAll':
                    for field, value in iteritems(v):
                        arr = existing_document.get(field)
                        if arr is None:
                            existing_document[field] = list(value)
                        else:
                            # BUG FIX: $pushAll appends each element; the old
                            # code appended the whole list as one nested
                            # element, inconsistent with the branch above.
                            existing_document[field].extend(value)
                else:
                    if first:
                        # replace entire document
                        for key in document.keys():
                            if key.startswith('$'):
                                # can't mix modifiers with non-modifiers in update
                                # BUG FIX: report the offending key; the old
                                # message formatted the outer loop variable k.
                                raise ValueError('field names cannot start with $ [{}]'.format(key))
                        _id = spec.get('_id', existing_document.get('_id', None))
                        existing_document.clear()
                        if _id:
                            existing_document['_id'] = _id
                        existing_document.update(document)
                        if existing_document['_id'] != _id:
                            # id changed, fix index
                            del self._documents[_id]
                            self.insert(existing_document)
                        break
                    else:
                        # can't mix modifiers with non-modifiers in update
                        raise ValueError('Invalid modifier specified: {}'.format(k))
                first = False
            if not multi:
                return
        if not found and upsert:
            # NOTE(review): upsert only honors the '$set' modifier; plain
            # replacement documents and other modifiers are silently dropped.
            if '$set' in document.keys():
                # BUG FIX: copy before merging so the caller's
                # document['$set'] dict is not mutated with the spec keys.
                new_document = dict(document['$set'])
                new_document.update(spec)
                self.insert(new_document)
    def find(self, spec = None, fields = None, filter = None, sort = None, slow = None, read_preference = None, limit = None):
        """Return a Cursor over documents matching *spec*.

        `filter` is a deprecated alias for `spec`; `slow` and
        `read_preference` are accepted for API compatibility and ignored.
        """
        if filter is not None:
            _print_deprecation_warning('filter', 'spec')
            if spec is None:
                spec = filter
        dataset = (self._copy_only_fields(document, fields) for document in self._iter_documents(spec))
        cursor = Cursor(dataset)
        if sort is not None:
            sort_pairs = list(sort.items()) if isinstance(sort, dict) else list(sort)
            # Apply keys in reverse so the first key is the primary sort
            # (each Cursor.sort() is a full, stable re-sort).
            for key, value in reversed(sort_pairs):
                cursor.sort(key, value)
        if limit is not None:
            # BUG FIX: `limit` was accepted but silently ignored.
            cursor.limit(limit)
        return cursor
    def _copy_for_insert(self, doc):
        # Shallow copy, dropping {'$exists': ...} query fragments that can
        # leak into documents via upserts.
        doc_copy = dict()
        for key, value in doc.items():
            if type(value) is dict and "$exists" in value:
                continue
            doc_copy[key] = value
        return doc_copy
    def _copy_only_fields(self, doc, fields):
        """Copy only the specified fields (None = all, empty = just '_id')."""
        if fields is None:
            fields = doc.keys()
        doc_copy = self._database._connection._document_class()
        if not fields:
            fields = ["_id"]
        for key in fields:
            if key in doc:
                doc_copy[key] = doc[key]
        return doc_copy
    def _iter_documents(self, filter = None):
        # Lazy scan of the whole store; there are no indexes in this mock.
        return (document for document in itervalues(self._documents) if self._filter_applies(filter, document))
    def find_one(self, spec=None, **kwargs):
        """Return the first matching document, or None."""
        try:
            return next(self.find(spec, **kwargs))
        except StopIteration:
            return None
    def find_and_modify(self, query = None, update = None, upsert = False, **kwargs):
        """Find one document matching *query*, apply *update*, and return
        the pre-update document (post-update when kwargs['new'] is truthy)."""
        # BUG FIX: `query = {}` was a mutable default argument shared
        # between calls; use a None sentinel instead.
        if query is None:
            query = {}
        old = self.find_one(query)
        if not old:
            if upsert:
                old = {'_id':self.insert(query)}
            else:
                return None
        self.update({'_id':old['_id']}, update)
        if kwargs.get('new', False):
            return self.find_one({'_id':old['_id']})
        return old
    def _filter_applies(self, search_filter, document):
        """Returns a boolean indicating whether @search_filter applies
        to @document.
        """
        if search_filter is None:
            return True
        elif isinstance(search_filter, ObjectId):
            search_filter = {'_id': search_filter}
        elif "$query" in search_filter:
            search_filter = search_filter['$query']
        for key, search in iteritems(search_filter):
            doc_val = resolve_key_value(key, document)
            if isinstance(search, dict):
                is_match = all(
                    operator_string in OPERATOR_MAP and OPERATOR_MAP[operator_string](doc_val, search_val)
                    for operator_string, search_val in iteritems(search)
                )
            elif isinstance(search, RE_TYPE) and isinstance(doc_val, string_types):
                is_match = search.match(doc_val) is not None
            elif key in OPERATOR_MAP:
                # BUG FIX: the predicate's result was discarded, leaving
                # is_match unbound (or stale from a previous criterion).
                is_match = OPERATOR_MAP[key](doc_val, search)
            elif search is None:
                is_match = doc_val is None or doc_val == NOTHING
            elif type(doc_val) is list:
                is_match = search in doc_val or search == doc_val
            else:
                is_match = doc_val == search
            if not is_match:
                return False
        return True
    def save(self, to_save, manipulate = True, safe = False, **kwargs):
        """Insert *to_save* when it has no '_id', else upsert it by '_id'."""
        if not isinstance(to_save, dict):
            raise TypeError("cannot save object of type %s" % type(to_save))
        if "_id" not in to_save:
            return self.insert(to_save)
        else:
            self.update({"_id": to_save["_id"]}, to_save, True,
                manipulate, safe, _check_keys = True, **kwargs)
            return to_save.get("_id", None)
    def remove(self, spec_or_id = None, search_filter = None):
        """Remove objects matching spec_or_id from the collection."""
        if search_filter is not None:
            _print_deprecation_warning('search_filter', 'spec_or_id')
        if spec_or_id is None:
            spec_or_id = search_filter if search_filter else {}
        if not isinstance(spec_or_id, dict):
            spec_or_id = {'_id': spec_or_id}
        # materialize first so we do not mutate the store mid-iteration
        to_delete = list(self.find(spec = spec_or_id))
        for doc in to_delete:
            doc_id = doc['_id']
            del self._documents[doc_id]
    def count(self):
        """Return the number of documents stored in this collection."""
        return len(self._documents)
class Cursor(object):
    """Lazy iterator over a query's result set, mimicking pymongo's Cursor."""
    def __init__(self, dataset):
        super(Cursor, self).__init__()
        self._dataset = dataset
        self._limit = None   # remaining documents to yield; None = unlimited
        self._skip = None    # documents to discard before the first yield
    def __iter__(self):
        return self
    def __next__(self):
        # Apply a pending skip() lazily, on the first advance only.
        if self._skip:
            for i in range(self._skip):
                next(self._dataset)
            self._skip = None
        if self._limit is not None and self._limit <= 0:
            raise StopIteration()
        if self._limit is not None:
            self._limit -= 1
        return next(self._dataset)
    next = __next__  # Python 2 iterator protocol
    def sort(self, key, order):
        """Stable-sort remaining results by *key*; order < 0 sorts descending."""
        arr = [x for x in self._dataset]
        arr = sorted(arr, key = lambda x:x[key], reverse = order < 0)
        self._dataset = iter(arr)
        return self
    def count(self):
        """Count remaining results without consuming the cursor."""
        arr = [x for x in self._dataset]
        count = len(arr)
        self._dataset = iter(arr)
        return count
    def skip(self, count):
        self._skip = count
        return self
    def limit(self, count):
        self._limit = count
        return self
    def batch_size(self, count):
        # Batching is meaningless for an in-memory cursor; kept for API parity.
        return self
    def rewind(self):
        # BUG FIX: this line was fused with dataset-row residue
        # ("pass | 0.499756 | 0.118998 |"), a syntax error.
        pass
from datetime import timedelta
from traceback import format_exc
from mesh.exceptions import *
from scheme import current_timestamp
from spire.schema import *
from spire.support.logs import LogHelper
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.orm.collections import attribute_mapped_collection
from platoon.constants import *
from platoon.queue import ThreadPackage
from platoon.models.action import ProcessAction
from platoon.models.queue import Queue
from platoon.models.scheduledtask import ScheduledTask
from platoon.resources.process import InitiationResponse
log = LogHelper('platoon')
schema = Schema('platoon')
class Process(Model):
    """A process."""
    class meta:
        schema = schema
        tablename = 'process'
    id = Identifier()
    queue_id = ForeignKey('queue.id', nullable=False)
    executor_endpoint_id = ForeignKey('executor_endpoint.id')
    tag = Text(nullable=False)
    # interpreted as minutes by process_processes() below
    timeout = Integer()
    status = Enumeration('pending initiating executing aborting aborted completed failed timedout',
        nullable=False, default='pending')
    input = Json()
    output = Json()
    progress = Json()
    state = Json()
    started = DateTime(timezone=True)
    ended = DateTime(timezone=True)
    # last successful contact with the executor (set in verify())
    communicated = DateTime(timezone=True)
    executor_endpoint = relationship('ExecutorEndpoint',
        backref=backref('processes', lazy='dynamic'))
    queue = relationship(Queue, backref=backref('processes', lazy='dynamic'))
    # phase-name -> ScheduledTask mapping via the ProcessTask association object
    tasks = association_proxy('process_tasks', 'task',
        creator=lambda k, v: ProcessTask(phase=k, task=v))
    @property
    def endpoint(self):
        # Delegates to the assigned executor endpoint.
        return self.executor_endpoint.endpoint
    @property
    def executor(self):
        return self.executor_endpoint.executor
    def abandon(self, session):
        """Time out an executing process, after one last verify() attempt."""
        session.refresh(self, lockmode='update')
        if self.status != 'executing':
            return
        # verify() may itself end the process if the executor reports a
        # terminal status; re-check before declaring a timeout.
        self.verify(session, True)
        if self.status != 'executing':
            return
        self.status = 'timedout'
        self._schedule_task(session, 'report-timeout-to-executor', limit=10)
        self._schedule_task(session, 'report-timeout-to-queue', limit=10)
    def abort(self, session):
        """Request abortion of a pending or executing process."""
        session.refresh(self, lockmode='update')
        if self.status not in ('pending', 'executing'):
            return
        self.status = 'aborting'
        self._schedule_task(session, 'report-abortion', limit=10)
    def end(self, session, status='completed', output=None, bypass_checks=False):
        """Terminate the process with *status* and schedule the end report.

        With bypass_checks the row is not re-locked and the status guard is
        skipped (used by callers that already hold the lock).
        """
        if not bypass_checks:
            session.refresh(self, lockmode='update')
            if self.status not in ('aborting', 'executing', 'pending'):
                return
        self.ended = current_timestamp()
        self.status = status
        self.output = output
        self._schedule_task(session, 'report-end', limit=10)
    @classmethod
    def create(cls, session, queue_id, **attrs):
        """Create a process on *queue_id* and schedule its initiation.

        Raises OperationError for an unknown queue or when no executor
        endpoint can be assigned.
        """
        try:
            queue = Queue.load(session, id=queue_id)
        except NoResultFound:
            raise OperationError(token='invalid-queue-id')
        process = cls(queue_id=queue_id, **attrs)
        session.add(process)
        process.executor_endpoint = queue.assign_executor_endpoint(session)
        if not process.executor_endpoint:
            raise OperationError(token='no-executor-available')
        process._schedule_task(session, 'initiate-process')
        return process
    def initiate_process(self, session):
        """Send the initial request to the executor and record its response.

        Any request or response-parsing failure ends the process as 'failed'.
        """
        session.refresh(self, lockmode='update')
        if self.status != 'pending':
            return
        self.started = current_timestamp()
        payload = self._construct_payload(status='initiating', input=self.input)
        try:
            status, response = self.endpoint.request(payload)
            if status != COMPLETED:
                log('error', 'initiation of %s failed during initial request\n%s', repr(self), response)
                return self.end(session, 'failed', bypass_checks=True)
        except Exception, exception:
            log('exception', 'initiation of %s failed during initial request', repr(self))
            return self.end(session, 'failed', bypass_checks=True)
        try:
            response = InitiationResponse.process(response)
        except Exception, exception:
            log('exception', 'initiation of %s failed due to invalid response', repr(self))
            return self.end(session, 'failed', bypass_checks=True)
        self.status = response['status']
        if self.status in ('completed', 'failed'):
            return self.end(session, self.status, response.get('output'), True)
        state = response.get('state')
        if state:
            self.state = state
    @classmethod
    def process_processes(cls, taskqueue, session):
        """Enqueue an 'abandon' for every executing process past its timeout."""
        occurrence = current_timestamp()
        query = session.query(cls).filter(cls.timeout != None,
            cls.started != None, cls.status == 'executing')
        for process in query:
            if (process.started + timedelta(minutes=process.timeout)) < occurrence:
                log('info', 'abandoning %r due to timing out', process)
                taskqueue.enqueue(process, 'abandon')
    def report_abortion(self, session):
        # Tell the executor to abort; executor-bound payloads carry state.
        payload = self._construct_payload(status='aborting', for_executor=True)
        return self.endpoint.request(payload)
    def report_end(self, session):
        # Report the terminal status and output back to the queue's endpoint.
        payload = self._construct_payload(status=self.status, output=self.output)
        return self.queue.endpoint.request(payload)
    def report_progress(self, session):
        payload = self._construct_payload(status='executing', progress=self.progress)
        return self.queue.endpoint.request(payload)
    def report_timeout_to_executor(self, session):
        payload = self._construct_payload(status='timedout', for_executor=True)
        return self.endpoint.request(payload)
    def report_timeout_to_queue(self, session):
        payload = self._construct_payload(status='timedout')
        return self.queue.endpoint.request(payload)
    def update(self, session, status=None, output=None, progress=None, state=None):
        """Apply a status/progress update reported for this process."""
        if status == 'aborting':
            self.abort(session)
        elif status in ('aborted', 'completed', 'failed'):
            self.end(session, status, output)
        elif progress:
            self.progress = progress
            if state:
                self.state = state
            self._schedule_task(session, 'report-progress', limit=3)
    def verify(self, session, bypass_checks=False):
        """Poll the executor for liveness/status of an executing process.

        A failed request or unparseable response ends the process as
        'failed'; a terminal status in the response ends it accordingly.
        """
        if not bypass_checks:
            session.refresh(self, lockmode='update')
            if self.status != 'executing':
                return
        payload = self._construct_payload(status='executing', for_executor=True)
        try:
            status, response = self.endpoint.request(payload)
            if status != COMPLETED:
                log('error', 'verification of %s failed during initial request\n%s', repr(self), response)
                return self.end(session, 'failed', bypass_checks=True)
        except Exception:
            log('exception', 'verification of %s failed during initial request', repr(self))
            return self.end(session, 'failed', bypass_checks=True)
        self.communicated = current_timestamp()
        try:
            response = InitiationResponse.process(response)
        except Exception:
            log('exception', 'verification of %s failed due to invalid response', repr(self))
            return self.end(session, 'failed', bypass_checks=True)
        status = response['status']
        if status in ('completed', 'failed'):
            return self.end(session, status, response.get('output'), True)
        state = response.get('state')
        if state:
            self.state = state
    def _construct_payload(self, for_executor=False, **params):
        """Build a request payload; executor-bound payloads include state."""
        params.update(id=self.id, tag=self.tag, subject=self.queue.subject)
        if for_executor:
            params['state'] = self.state
        return params
    def _schedule_task(self, session, action, delta=None, limit=0, timeout=120, backoff=1.4):
        """Create (or replace) the ScheduledTask driving *action* for this process."""
        self.tasks[action] = ScheduledTask.create(session,
            tag='%s:%s' % (action, self.tag),
            action=ProcessAction(process_id=self.id, action=action),
            delta=delta,
            retry_limit=limit,
            retry_timeout=timeout,
            retry_backoff=backoff)
class ProcessTask(Model):
    """A process task."""
    class meta:
        constraints = [UniqueConstraint('process_id', 'task_id', 'phase')]
        schema = schema
        tablename = 'process_task'
    id = Identifier()
    process_id = ForeignKey('process.id', nullable=False, ondelete='CASCADE')
    task_id = ForeignKey('scheduled_task.task_id', nullable=False, ondelete='CASCADE')
    # phase names correspond to the actions passed to Process._schedule_task
    phase = Enumeration(PROCESS_TASK_ACTIONS, nullable=False)
    # keyed by phase on the Process side (attribute_mapped_collection)
    process = relationship(Process, backref=backref('process_tasks',
        collection_class=attribute_mapped_collection('phase'),
        cascade='all,delete-orphan', passive_deletes=True))
    task = relationship(ScheduledTask, cascade='all,delete-orphan', single_parent=True)
from datetime import timedelta
from traceback import format_exc
from mesh.exceptions import *
from scheme import current_timestamp
from spire.schema import *
from spire.support.logs import LogHelper
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.orm.collections import attribute_mapped_collection
from platoon.constants import *
from platoon.queue import ThreadPackage
from platoon.models.action import ProcessAction
from platoon.models.queue import Queue
from platoon.models.scheduledtask import ScheduledTask
from platoon.resources.process import InitiationResponse
log = LogHelper('platoon')
schema = Schema('platoon')
class Process(Model):
"""A process."""
class meta:
schema = schema
tablename = 'process'
id = Identifier()
queue_id = ForeignKey('queue.id', nullable=False)
executor_endpoint_id = ForeignKey('executor_endpoint.id')
tag = Text(nullable=False)
timeout = Integer()
status = Enumeration('pending initiating executing aborting aborted completed failed timedout',
nullable=False, default='pending')
input = Json()
output = Json()
progress = Json()
state = Json()
started = DateTime(timezone=True)
ended = DateTime(timezone=True)
communicated = DateTime(timezone=True)
executor_endpoint = relationship('ExecutorEndpoint',
backref=backref('processes', lazy='dynamic'))
queue = relationship(Queue, backref=backref('processes', lazy='dynamic'))
tasks = association_proxy('process_tasks', 'task',
creator=lambda k, v: ProcessTask(phase=k, task=v))
@property
def endpoint(self):
return self.executor_endpoint.endpoint
@property
def executor(self):
return self.executor_endpoint.executor
def abandon(self, session):
session.refresh(self, lockmode='update')
if self.status != 'executing':
return
self.verify(session, True)
if self.status != 'executing':
return
self.status = 'timedout'
self._schedule_task(session, 'report-timeout-to-executor', limit=10)
self._schedule_task(session, 'report-timeout-to-queue', limit=10)
def abort(self, session):
session.refresh(self, lockmode='update')
if self.status not in ('pending', 'executing'):
return
self.status = 'aborting'
self._schedule_task(session, 'report-abortion', limit=10)
def end(self, session, status='completed', output=None, bypass_checks=False):
if not bypass_checks:
session.refresh(self, lockmode='update')
if self.status not in ('aborting', 'executing', 'pending'):
return
self.ended = current_timestamp()
self.status = status
self.output = output
self._schedule_task(session, 'report-end', limit=10)
@classmethod
def create(cls, session, queue_id, **attrs):
try:
queue = Queue.load(session, id=queue_id)
except NoResultFound:
raise OperationError(token='invalid-queue-id')
process = cls(queue_id=queue_id, **attrs)
session.add(process)
process.executor_endpoint = queue.assign_executor_endpoint(session)
if not process.executor_endpoint:
raise OperationError(token='no-executor-available')
process._schedule_task(session, 'initiate-process')
return process
def initiate_process(self, session):
session.refresh(self, lockmode='update')
if self.status != 'pending':
return
self.started = current_timestamp()
payload = self._construct_payload(status='initiating', input=self.input)
try:
status, response = self.endpoint.request(payload)
if status != COMPLETED:
log('error', 'initiation of %s failed during initial request\n%s', repr(self), response)
return self.end(session, 'failed', bypass_checks=True)
except Exception, exception:
log('exception', 'initiation of %s failed during initial request', repr(self))
return self.end(session, 'failed', bypass_checks=True)
try:
response = InitiationResponse.process(response)
except Exception, exception:
log('exception', 'initiation of %s failed due to invalid response', repr(self))
return self.end(session, 'failed', bypass_checks=True)
self.status = response['status']
if self.status in ('completed', 'failed'):
return self.end(session, self.status, response.get('output'), True)
state = response.get('state')
if state:
self.state = state
@classmethod
def process_processes(cls, taskqueue, session):
occurrence = current_timestamp()
query = session.query(cls).filter(cls.timeout != None,
cls.started != None, cls.status == 'executing')
for process in query:
if (process.started + timedelta(minutes=process.timeout)) < occurrence:
log('info', 'abandoning %r due to timing out', process)
taskqueue.enqueue(process, 'abandon')
def report_abortion(self, session):
payload = self._construct_payload(status='aborting', for_executor=True)
return self.endpoint.request(payload)
def report_end(self, session):
payload = self._construct_payload(status=self.status, output=self.output)
return self.queue.endpoint.request(payload)
def report_progress(self, session):
payload = self._construct_payload(status='executing', progress=self.progress)
return self.queue.endpoint.request(payload)
def report_timeout_to_executor(self, session):
payload = self._construct_payload(status='timedout', for_executor=True)
return self.endpoint.request(payload)
def report_timeout_to_queue(self, session):
payload = self._construct_payload(status='timedout')
return self.queue.endpoint.request(payload)
def update(self, session, status=None, output=None, progress=None, state=None):
if status == 'aborting':
self.abort(session)
elif status in ('aborted', 'completed', 'failed'):
self.end(session, status, output)
elif progress:
self.progress = progress
if state:
self.state = state
self._schedule_task(session, 'report-progress', limit=3)
def verify(self, session, bypass_checks=False):
if not bypass_checks:
session.refresh(self, lockmode='update')
if self.status != 'executing':
return
payload = self._construct_payload(status='executing', for_executor=True)
try:
status, response = self.endpoint.request(payload)
if status != COMPLETED:
log('error', 'verification of %s failed during initial request\n%s', repr(self), response)
return self.end(session, 'failed', bypass_checks=True)
except Exception:
log('exception', 'verification of %s failed during initial request', repr(self))
return self.end(session, 'failed', bypass_checks=True)
self.communicated = current_timestamp()
try:
response = InitiationResponse.process(response)
except Exception:
log('exception', 'verification of %s failed due to invalid response', repr(self))
return self.end(session, 'failed', bypass_checks=True)
status = response['status']
if status in ('completed', 'failed'):
return self.end(session, status, response.get('output'), True)
state = response.get('state')
if state:
self.state = state
def _construct_payload(self, for_executor=False, **params):
params.update(id=self.id, tag=self.tag, subject=self.queue.subject)
if for_executor:
params['state'] = self.state
return params
def _schedule_task(self, session, action, delta=None, limit=0, timeout=120, backoff=1.4):
self.tasks[action] = ScheduledTask.create(session,
tag='%s:%s' % (action, self.tag),
action=ProcessAction(process_id=self.id, action=action),
delta=delta,
retry_limit=limit,
retry_timeout=timeout,
retry_backoff=backoff)
class ProcessTask(Model):
"""A process task."""
class meta:
constraints = [UniqueConstraint('process_id', 'task_id', 'phase')]
schema = schema
tablename = 'process_task'
id = Identifier()
process_id = ForeignKey('process.id', nullable=False, ondelete='CASCADE')
task_id = ForeignKey('scheduled_task.task_id', nullable=False, ondelete='CASCADE')
phase = Enumeration(PROCESS_TASK_ACTIONS, nullable=False)
process = relationship(Process, backref=backref('process_tasks',
collection_class=attribute_mapped_collection('phase'),
cascade='all,delete-orphan', passive_deletes=True))
    task = relationship(ScheduledTask, cascade='all,delete-orphan', single_parent=True)
from absl.testing import parameterized
import tensorflow as tf
import tensorflow_federated as tff
from data_poor_fl import hypcluster_train
def create_dataset():
  """Return a 3-example dataset satisfying y = x + 1, batched by 1."""
  features = [[1.0], [2.0], [3.0]]
  labels = [[2.0], [3.0], [4.0]]
  return tf.data.Dataset.from_tensor_slices((features, labels)).batch(1)
def get_input_spec():
  """Return the element spec of the dataset built by create_dataset()."""
  return create_dataset().element_spec
def model_fn(initializer='zeros'):
  """Build a one-unit linear Keras model wrapped as a TFF model.

  Kernel and bias are both set from *initializer*; the input spec comes
  from get_input_spec() and the loss is mean squared error.
  """
  linear_layer = tf.keras.layers.Dense(
      1,
      kernel_initializer=initializer,
      bias_initializer=initializer,
      input_shape=(1,))
  return tff.learning.from_keras_model(
      keras_model=tf.keras.Sequential([linear_layer]),
      input_spec=get_input_spec(),
      loss=tf.keras.losses.MeanSquaredError())
@tff.tf_computation
def create_nested_structure():
  """TFF computation returning three dicts mixing (2, 2) int32 tensors and ints.

  NOTE(review): not referenced by the tests in this file -- presumably a
  fixture for structure-matching elsewhere; confirm before removing.
  """
  return [
      dict(a=tf.zeros((2, 2), dtype=tf.int32), b=1, c=3),
      dict(a=tf.ones((2, 2), dtype=tf.int32), b=2, c=4),
      dict(a=2 * tf.ones((2, 2), dtype=tf.int32), b=3, c=5),
  ]
def create_initial_models(num_models: int):
  """Return *num_models* copies of ones-initialized model weights as numpy arrays."""
  ones_model = model_fn(initializer='ones')
  weights_as_numpy = tf.nest.map_structure(
      lambda var: var.numpy(), tff.learning.ModelWeights.from_model(ones_model))
  return [weights_as_numpy for _ in range(num_models)]
class HypClusterTrainTest(tf.test.TestCase, parameterized.TestCase):
  """Tests for hypcluster_train.build_hypcluster_train."""
  # NOTE(review): create_initial_models() inside the decorators runs at
  # module import time, building Keras models before main() -- confirm this
  # is intended (it is a common absl/parameterized pattern but slows import).
  @parameterized.named_parameters(
      ('clusters1_with_init', 1, create_initial_models(1)),
      ('clusters1_without_init', 1, None),
      # NOTE(review): '_without_int' looks like a typo for '_without_init';
      # renaming changes the generated test names, so it is left as-is here.
      ('clusters2_without_int', 2, None),
      ('clusters3_with_init', 3, create_initial_models(3)),
      ('clusters5_without_init', 5, None),
  )
  def test_constructs_with_default_aggregator(self, num_clusters,
                                              initial_model_weights_list):
    """State holds one model/aggregator/finalizer per cluster; seeds honored."""
    client_optimizer = tff.learning.optimizers.build_sgdm(learning_rate=0.01)
    server_optimizer = tff.learning.optimizers.build_sgdm(learning_rate=1.0)
    hyp_alg = hypcluster_train.build_hypcluster_train(
        model_fn=model_fn,
        num_clusters=num_clusters,
        client_optimizer=client_optimizer,
        server_optimizer=server_optimizer,
        initial_model_weights_list=initial_model_weights_list)
    state = hyp_alg.initialize()
    self.assertLen(state.global_model_weights, num_clusters)
    self.assertLen(state.aggregator, num_clusters)
    self.assertLen(state.finalizer, num_clusters)
    # When explicit initial weights were given they must round-trip exactly.
    if initial_model_weights_list:
      tf.nest.map_structure(self.assertAllEqual, state.global_model_weights,
                            initial_model_weights_list)
  @parameterized.named_parameters(
      ('clusters1_with_init', 1, create_initial_models(1)),
      ('clusters1_without_init', 1, None),
      ('clusters2_without_int', 2, None),
      ('clusters3_with_init', 3, create_initial_models(3)),
      ('clusters5_without_init', 5, None),
  )
  def test_constructs_with_non_default_aggregator(self, num_clusters,
                                                  initial_model_weights_list):
    """Same as above, but with a robust (non-default) model aggregator."""
    client_optimizer = tff.learning.optimizers.build_sgdm(learning_rate=0.01)
    server_optimizer = tff.learning.optimizers.build_sgdm(learning_rate=1.0)
    hyp_alg = hypcluster_train.build_hypcluster_train(
        model_fn=model_fn,
        num_clusters=num_clusters,
        client_optimizer=client_optimizer,
        server_optimizer=server_optimizer,
        model_aggregator=tff.learning.robust_aggregator(),
        initial_model_weights_list=initial_model_weights_list)
    state = hyp_alg.initialize()
    self.assertLen(state.global_model_weights, num_clusters)
    self.assertLen(state.aggregator, num_clusters)
    self.assertLen(state.finalizer, num_clusters)
    if initial_model_weights_list:
      tf.nest.map_structure(self.assertAllEqual, state.global_model_weights,
                            initial_model_weights_list)
  def test_construction_fails_with_mismatched_initial_models(self):
    """Supplying 2 initial models for 1 cluster must raise ValueError."""
    num_clusters = 1
    initial_model_weights_list = create_initial_models(2)
    client_optimizer = tff.learning.optimizers.build_sgdm(learning_rate=0.01)
    server_optimizer = tff.learning.optimizers.build_sgdm(learning_rate=1.0)
    with self.assertRaisesRegex(ValueError, 'does not equal'):
      hypcluster_train.build_hypcluster_train(
          model_fn=model_fn,
          num_clusters=num_clusters,
          client_optimizer=client_optimizer,
          server_optimizer=server_optimizer,
          initial_model_weights_list=initial_model_weights_list)
  def test_matches_fed_avg_with_one_cluster(self):
    """With one cluster, hypcluster must track weighted FedAvg exactly."""
    client_optimizer = tff.learning.optimizers.build_sgdm(learning_rate=0.01)
    server_optimizer = tff.learning.optimizers.build_sgdm(learning_rate=1.0)
    # NOTE(review): optimizer *instances* are passed to the *_fn arguments
    # of build_weighted_fed_avg -- TFF accepts tff.learning.optimizers
    # objects there; confirm against the pinned TFF version.
    fed_avg = tff.learning.algorithms.build_weighted_fed_avg(
        model_fn=model_fn,
        client_optimizer_fn=client_optimizer,
        server_optimizer_fn=server_optimizer)
    hyp_alg = hypcluster_train.build_hypcluster_train(
        model_fn=model_fn,
        num_clusters=1,
        client_optimizer=client_optimizer,
        server_optimizer=server_optimizer)
    fed_avg_state = fed_avg.initialize()
    hyp_alg_state = hyp_alg.initialize()
    self.assertAllClose(
        fed_avg.get_model_weights(fed_avg_state).trainable,
        hyp_alg.get_model_weights(hyp_alg_state)[0].trainable)
    federated_data = [create_dataset(), create_dataset()]
    # Run both algorithms in lockstep and compare trainable weights each round.
    for _ in range(5):
      fed_avg_output = fed_avg.next(fed_avg_state, federated_data)
      fed_avg_state = fed_avg_output.state
      hyp_alg_output = hyp_alg.next(hyp_alg_state, federated_data)
      hyp_alg_state = hyp_alg_output.state
      self.assertAllClose(
          fed_avg.get_model_weights(fed_avg_state).trainable,
          hyp_alg.get_model_weights(hyp_alg_state)[0].trainable)
if __name__ == '__main__':
tf.test.main() | data_poor_fl/hypcluster_train_test.py |
from absl.testing import parameterized
import tensorflow as tf
import tensorflow_federated as tff
from data_poor_fl import hypcluster_train
def create_dataset():
  """Builds a tiny 3-example dataset satisfying y = x + 1, batched singly."""
  features = [[1.0], [2.0], [3.0]]
  labels = [[value + 1.0 for value in row] for row in features]
  dataset = tf.data.Dataset.from_tensor_slices((features, labels))
  return dataset.batch(1)
def get_input_spec():
  """Returns the element spec of the synthetic regression dataset."""
  spec = create_dataset().element_spec
  return spec
def model_fn(initializer='zeros'):
  """Builds a one-unit linear Keras model wrapped for TFF.

  Args:
    initializer: Keras initializer spec applied to both kernel and bias.

  Returns:
    A `tff.learning` model with mean-squared-error loss.
  """
  dense_layer = tf.keras.layers.Dense(
      1,
      kernel_initializer=initializer,
      bias_initializer=initializer,
      input_shape=(1,))
  keras_model = tf.keras.Sequential([dense_layer])
  return tff.learning.from_keras_model(
      keras_model=keras_model,
      input_spec=get_input_spec(),
      loss=tf.keras.losses.MeanSquaredError())
@tff.tf_computation
def create_nested_structure():
  """Returns a fixed 3-element list of dicts mixing tensor and int leaves."""
  return [
      dict(a=tf.zeros((2, 2), dtype=tf.int32), b=1, c=3),
      dict(a=tf.ones((2, 2), dtype=tf.int32), b=2, c=4),
      dict(a=2 * tf.ones((2, 2), dtype=tf.int32), b=3, c=5),
  ]
def create_initial_models(num_models: int):
  """Returns `num_models` references to one ones-initialized ModelWeights.

  Note: the SAME weights structure is repeated (not copied), matching the
  aliasing behavior of the original list comprehension.
  """
  model = model_fn(initializer='ones')
  weights = tf.nest.map_structure(
      lambda var: var.numpy(), tff.learning.ModelWeights.from_model(model))
  return [weights] * num_models
class HypClusterTrainTest(tf.test.TestCase, parameterized.TestCase):
  """Integration tests for `hypcluster_train.build_hypcluster_train`."""
  @parameterized.named_parameters(
      ('clusters1_with_init', 1, create_initial_models(1)),
      ('clusters1_without_init', 1, None),
      ('clusters2_without_int', 2, None),
      ('clusters3_with_init', 3, create_initial_models(3)),
      ('clusters5_without_init', 5, None),
  )
  def test_constructs_with_default_aggregator(self, num_clusters,
                                              initial_model_weights_list):
    """State has one weights/aggregator/finalizer slot per cluster; optional seeding is honored."""
    client_optimizer = tff.learning.optimizers.build_sgdm(learning_rate=0.01)
    server_optimizer = tff.learning.optimizers.build_sgdm(learning_rate=1.0)
    hyp_alg = hypcluster_train.build_hypcluster_train(
        model_fn=model_fn,
        num_clusters=num_clusters,
        client_optimizer=client_optimizer,
        server_optimizer=server_optimizer,
        initial_model_weights_list=initial_model_weights_list)
    state = hyp_alg.initialize()
    self.assertLen(state.global_model_weights, num_clusters)
    self.assertLen(state.aggregator, num_clusters)
    self.assertLen(state.finalizer, num_clusters)
    if initial_model_weights_list:
      # Seeded construction must carry the provided weights into the state.
      tf.nest.map_structure(self.assertAllEqual, state.global_model_weights,
                            initial_model_weights_list)
  @parameterized.named_parameters(
      ('clusters1_with_init', 1, create_initial_models(1)),
      ('clusters1_without_init', 1, None),
      ('clusters2_without_int', 2, None),
      ('clusters3_with_init', 3, create_initial_models(3)),
      ('clusters5_without_init', 5, None),
  )
  def test_constructs_with_non_default_aggregator(self, num_clusters,
                                                  initial_model_weights_list):
    """Same structural checks as above, but with a robust (non-default) aggregator."""
    client_optimizer = tff.learning.optimizers.build_sgdm(learning_rate=0.01)
    server_optimizer = tff.learning.optimizers.build_sgdm(learning_rate=1.0)
    hyp_alg = hypcluster_train.build_hypcluster_train(
        model_fn=model_fn,
        num_clusters=num_clusters,
        client_optimizer=client_optimizer,
        server_optimizer=server_optimizer,
        model_aggregator=tff.learning.robust_aggregator(),
        initial_model_weights_list=initial_model_weights_list)
    state = hyp_alg.initialize()
    self.assertLen(state.global_model_weights, num_clusters)
    self.assertLen(state.aggregator, num_clusters)
    self.assertLen(state.finalizer, num_clusters)
    if initial_model_weights_list:
      tf.nest.map_structure(self.assertAllEqual, state.global_model_weights,
                            initial_model_weights_list)
  def test_construction_fails_with_mismatched_initial_models(self):
    """Providing 2 initial models for 1 cluster must raise a ValueError."""
    num_clusters = 1
    initial_model_weights_list = create_initial_models(2)
    client_optimizer = tff.learning.optimizers.build_sgdm(learning_rate=0.01)
    server_optimizer = tff.learning.optimizers.build_sgdm(learning_rate=1.0)
    with self.assertRaisesRegex(ValueError, 'does not equal'):
      hypcluster_train.build_hypcluster_train(
          model_fn=model_fn,
          num_clusters=num_clusters,
          client_optimizer=client_optimizer,
          server_optimizer=server_optimizer,
          initial_model_weights_list=initial_model_weights_list)
  def test_matches_fed_avg_with_one_cluster(self):
    """With a single cluster, HypCluster must track weighted FedAvg round-for-round."""
    client_optimizer = tff.learning.optimizers.build_sgdm(learning_rate=0.01)
    server_optimizer = tff.learning.optimizers.build_sgdm(learning_rate=1.0)
    fed_avg = tff.learning.algorithms.build_weighted_fed_avg(
        model_fn=model_fn,
        client_optimizer_fn=client_optimizer,
        server_optimizer_fn=server_optimizer)
    hyp_alg = hypcluster_train.build_hypcluster_train(
        model_fn=model_fn,
        num_clusters=1,
        client_optimizer=client_optimizer,
        server_optimizer=server_optimizer)
    fed_avg_state = fed_avg.initialize()
    hyp_alg_state = hyp_alg.initialize()
    self.assertAllClose(
        fed_avg.get_model_weights(fed_avg_state).trainable,
        hyp_alg.get_model_weights(hyp_alg_state)[0].trainable)
    federated_data = [create_dataset(), create_dataset()]
    # Run both algorithms for five rounds on identical client data and
    # compare trainable weights after each round.
    for _ in range(5):
      fed_avg_output = fed_avg.next(fed_avg_state, federated_data)
      fed_avg_state = fed_avg_output.state
      hyp_alg_output = hyp_alg.next(hyp_alg_state, federated_data)
      hyp_alg_state = hyp_alg_output.state
      self.assertAllClose(
          fed_avg.get_model_weights(fed_avg_state).trainable,
          hyp_alg.get_model_weights(hyp_alg_state)[0].trainable)
if __name__ == '__main__':
tf.test.main() | 0.843734 | 0.543469 |
import taichi as ti
import bounds3
from bounds3 import Bounds3
import sphere
from sphere import Sphere
import triangle
from triangle import Triangle
import material
from utils import FMAX
from tqdm import tqdm
import time
# Flat BVH node stored in a Taichi field. Child links are indices into the
# node array (-1 means "none"); leaves reference one primitive via
# (obj_type, obj_idx) into the per-type primitive arrays.
BVHNode = ti.types.struct(
    bounds=Bounds3,
    left=ti.i32,   # index of left child node, -1 if leaf
    right=ti.i32,  # index of right child node, -1 if leaf
    obj_type=ti.i32,  # 0 = sphere, 1 = triangle, -1 = interior node
    obj_idx=ti.i32,   # index into the matching typed primitive array (leaves)
    area=ti.f32       # total surface area of the subtree's primitives
)
def init():
    """Returns an empty BVH node with all sentinel (-1) fields."""
    return BVHNode(bounds=bounds3.init(), left=-1, right=-1, obj_type=-1, obj_idx=-1, area=-1)
def get_obj_bounds_func(obj_type):
    """Returns the bounds-computation function for a primitive type code.

    Args:
        obj_type: 0 for spheres, 1 for triangles.

    Raises:
        ValueError: if `obj_type` is neither 0 nor 1.
    """
    dispatch = {0: sphere.get_bounds, 1: triangle.get_bounds}
    func = dispatch.get(obj_type)
    if func is None:
        raise ValueError("Invalid object type {}".format(str(obj_type)))
    return func
def get_obj_area_func(obj_type):
    """Returns the surface-area function for a primitive type code.

    Args:
        obj_type: 0 for spheres, 1 for triangles.

    Raises:
        ValueError: if `obj_type` is neither 0 nor 1.
    """
    dispatch = {0: sphere.get_area, 1: triangle.get_area}
    func = dispatch.get(obj_type)
    if func is None:
        raise ValueError("Invalid object type {}".format(str(obj_type)))
    return func
@ti.data_oriented
class BVHAccel:
    def __init__(self, spheres_py, triangles_py, max_prims=1, stack_width=512, stack_height=512, stack_depth=20):
        """Builds the BVH on the host, then copies nodes/primitives to Taichi fields.

        Args:
            spheres_py: Python-side sphere primitives.
            triangles_py: Python-side triangle primitives.
            max_prims: stored but not otherwise used here -- leaves hold one object.
            stack_width, stack_height: grid of private traversal stacks
                (presumably one per pixel -- confirm against the renderer).
            stack_depth: maximum tree depth supported during traversal.
        """
        self.stack_depth = stack_depth
        # Explicit per-(i, j) stacks: Taichi device functions cannot recurse.
        self.stack = ti.field(ti.i32, shape=(stack_width, stack_height, stack_depth))
        self.stack_pt = ti.field(ti.i32, shape=(stack_width, stack_height))
        self.max_prims = max_prims
        self.nodes_py = list()
        print("Building BVH...")
        tic = time.time()
        # Concatenate primitives; type codes and per-type offsets let a flat
        # global index map back to an index within its typed array.
        self.objects_py = spheres_py + triangles_py
        self.obj_types = [0] * len(spheres_py) + [1] * len(triangles_py) # 0-sphere, 1-triangle
        self.type_begin = [0, len(spheres_py)]
        self.recursive_build_py(range(len(self.objects_py)))
        toc = time.time()
        print("Done, time elapsed: {:.3f}".format(toc - tic))
        # move BVHNodePy to taichi scope
        self.n_nodes = len(self.nodes_py)
        print("Number of Spheres: ", len(spheres_py))
        print("Number of Triangles: ", len(triangles_py))
        print("Number of BVH Nodes: ", self.n_nodes)
        print("Moving BVH to Taichi fields...")
        self.nodes = BVHNode.field()
        ti.root.dense(ti.i, self.n_nodes).place(self.nodes)
        self.spheres = Sphere.field()
        # max(..., 1): a dense field must have nonzero extent even when the
        # scene has no primitives of that type.
        ti.root.dense(ti.i, max(len(spheres_py), 1)).place(self.spheres)
        self.triangles = Triangle.field()
        ti.root.dense(ti.i, max(len(triangles_py), 1)).place(self.triangles)
        self.move_to_taichi_field()
        # Host-side staging data is no longer needed after the copy.
        del self.objects_py
        del self.obj_types
        del self.type_begin
        del self.nodes_py
        print("Done.")
    def recursive_build_py(self, indices):
        """Recursively builds a median-split BVH over `indices`.

        Nodes are appended to `self.nodes_py` pre-order, so a node's index is
        fixed before its children are built. Returns the new node's index.
        """
        node = init()
        # NOTE(review): `idx` (and `is_leaf` below) are not declared fields of
        # the BVHNode struct type -- they appear to be host-side Python
        # attributes only, and are not read in the device-side traversal here;
        # confirm they are dropped harmlessly when copied to the Taichi field.
        node.idx = len(self.nodes_py)
        self.nodes_py.append(node)
        bounds = bounds3.init()
        for i in indices:
            bounds = bounds3.bounds_union(bounds, get_obj_bounds_func(self.obj_types[i])(self.objects_py[i]))
        if len(indices) == 1:
            # Leaf: record primitive type and its index within the typed array.
            node.bounds = bounds
            node.obj_type = self.obj_types[indices[0]]
            node.obj_idx = indices[0] - self.type_begin[node.obj_type]
            node.area = get_obj_area_func(node.obj_type)(self.objects_py[indices[0]])
            node.is_leaf = True
        elif len(indices) == 2:
            # Two primitives: one leaf per child, no split heuristic needed.
            node.bounds = bounds
            node.left = self.recursive_build_py([indices[0]])
            node.right = self.recursive_build_py([indices[1]])
            node.area = self.nodes_py[node.left].area + self.nodes_py[node.right].area
        else:
            # Median split along the axis of maximum centroid extent.
            centroid_bounds = bounds3.init()
            for i in indices:
                centroid_bounds = bounds3.bounds_vec_union(
                    centroid_bounds,
                    bounds3.get_centroid(get_obj_bounds_func(self.obj_types[i])(self.objects_py[i]))
                )
            dim = bounds3.max_extent(centroid_bounds)
            indices = sorted(
                indices,
                key=lambda x: bounds3.get_centroid(get_obj_bounds_func(self.obj_types[x])(self.objects_py[x]))[dim]
            )
            mid = len(indices) // 2
            node.left = self.recursive_build_py(indices[:mid])
            node.right = self.recursive_build_py(indices[mid:])
            node.bounds = bounds
            node.area = self.nodes_py[node.left].area + self.nodes_py[node.right].area
        return node.idx
    def move_to_taichi_field(self):
        """Copies host-side primitives and BVH nodes into the Taichi fields."""
        for i in tqdm(range(len(self.objects_py))):
            t = self.obj_types[i]
            idx = i - self.type_begin[t]  # index within the typed array
            if t == 0:
                self.spheres[idx] = self.objects_py[i]
            elif t == 1:
                self.triangles[idx] = self.objects_py[i]
            else:
                raise ValueError("Invalid object type {}".format(str(t)))
        for i in tqdm(range(self.n_nodes)):
            self.nodes[i] = self.nodes_py[i]
    @ti.func
    def stack_push(self, i, j, node_idx):
        # Push a node index onto the (i, j) slot's private traversal stack.
        self.stack_pt[i, j] += 1
        self.stack[i, j, self.stack_pt[i, j]] = node_idx
    @ti.func
    def stack_pop(self, i, j):
        # Pop and return the top node index (no underflow check).
        res = self.stack[i, j, self.stack_pt[i, j]]
        self.stack_pt[i, j] -= 1
        return res
    @ti.func
    def stack_top(self, i, j):
        # Peek at the top node index without popping.
        return self.stack[i, j, self.stack_pt[i, j]]
    @ti.func
    def stack_empty(self, i, j):
        # A stack pointer of -1 marks an empty stack.
        return self.stack_pt[i, j] < 0
    @ti.func
    def node_intersetc_p(self, node, ray, t_min=1e-3, t_max=1e9):
        # (sic: "intersetc" -- name kept as-is because callers use it.)
        # Intersects `ray` with the single primitive referenced by leaf `node`.
        # Returns (is_hit, t, coords, normal, face_front, u, v, material);
        # the defaults below are returned unchanged when nothing is hit.
        is_hit = False
        t = FMAX
        coords = ti.Vector([0.0, 0.0, 0.0])
        normal = ti.Vector([0.0, 0.0, 0.0])
        face_front = True
        u = 0.0
        v = 0.0
        m = material.init()
        if node.obj_type == 0:
            obj = self.spheres[node.obj_idx]
            is_hit, t, coords, normal, face_front, m = sphere.intersect_p(obj, ray, t_min, t_max)
            u, v = sphere.get_uv(obj, coords)
        elif node.obj_type == 1:
            obj = self.triangles[node.obj_idx]
            is_hit, t, coords, normal, face_front, m = triangle.intersect_p(obj, ray, t_min, t_max)
            u, v = triangle.get_uv(obj, coords)
        return is_hit, t, coords, normal, face_front, u, v, m
    @ti.func
    def intersect_p(self, ray, si, sj, t_min=1e-3, t_max=1e9):
        """Traverses the BVH for `ray` using the (si, sj) private stack.

        Returns (is_hit, t, coords, u, v, normal, face_front, material) for
        the nearest hit; `t` shrinks as closer hits are found and is passed
        as the max distance for subsequent leaf tests, pruning farther hits.
        """
        is_hit = False
        t = FMAX
        coords = ti.Vector([0.0, 0.0, 0.0])
        u = 0.0
        v = 0.0
        normal = ti.Vector([0.0, 0.0, 0.0])
        face_front = True
        m = material.init()
        self.stack_pt[si, sj] = -1  # reset this slot's stack before traversal
        # iterative pre-order traversal
        node_idx = 0
        while node_idx >= 0 or not self.stack_empty(si, sj):
            while node_idx >= 0:
                node = self.nodes[node_idx]
                if bounds3.bounds_intersect(node.bounds, ray):
                    if node.obj_type != -1:
                        # Leaf: test its primitive and keep the nearest hit.
                        is_hit_tmp, t_tmp, coords_tmp, normal_tmp, f_tmp, u_tmp, v_tmp, m_tmp = \
                            self.node_intersetc_p(node, ray, t_min, t)
                        if is_hit_tmp:
                            is_hit = True
                            t = t_tmp
                            coords = coords_tmp
                            normal = normal_tmp
                            face_front = f_tmp
                            u = u_tmp
                            v = v_tmp
                            m = m_tmp
                        break
                    else:
                        # Interior node: descend left, remember it so the
                        # right subtree is visited after the left finishes.
                        self.stack_push(si, sj, node_idx)
                        node_idx = node.left
                else:
                    break
            if not self.stack_empty(si, sj):
                # Resume at the right child of the most recent interior node.
                node_idx = self.stack_pop(si, sj)
                node_idx = self.nodes[node_idx].right
            else:
                break
return is_hit, t, coords, u, v, normal, face_front, m | BVH.py | import taichi as ti
import bounds3
from bounds3 import Bounds3
import sphere
from sphere import Sphere
import triangle
from triangle import Triangle
import material
from utils import FMAX
from tqdm import tqdm
import time
BVHNode = ti.types.struct(
bounds=Bounds3,
left=ti.i32,
right=ti.i32,
obj_type=ti.i32,
obj_idx=ti.i32,
area=ti.f32
)
def init():
return BVHNode(bounds=bounds3.init(), left=-1, right=-1, obj_type=-1, obj_idx=-1, area=-1)
def get_obj_bounds_func(obj_type):
if obj_type == 0:
return sphere.get_bounds
elif obj_type == 1:
return triangle.get_bounds
else:
raise ValueError("Invalid object type {}".format(str(obj_type)))
def get_obj_area_func(obj_type):
if obj_type == 0:
return sphere.get_area
elif obj_type == 1:
return triangle.get_area
else:
raise ValueError("Invalid object type {}".format(str(obj_type)))
@ti.data_oriented
class BVHAccel:
def __init__(self, spheres_py, triangles_py, max_prims=1, stack_width=512, stack_height=512, stack_depth=20):
self.stack_depth = stack_depth
self.stack = ti.field(ti.i32, shape=(stack_width, stack_height, stack_depth))
self.stack_pt = ti.field(ti.i32, shape=(stack_width, stack_height))
self.max_prims = max_prims
self.nodes_py = list()
print("Building BVH...")
tic = time.time()
self.objects_py = spheres_py + triangles_py
self.obj_types = [0] * len(spheres_py) + [1] * len(triangles_py) # 0-sphere, 1-triangle
self.type_begin = [0, len(spheres_py)]
self.recursive_build_py(range(len(self.objects_py)))
toc = time.time()
print("Done, time elapsed: {:.3f}".format(toc - tic))
# move BVHNodePy to taichi scope
self.n_nodes = len(self.nodes_py)
print("Number of Spheres: ", len(spheres_py))
print("Number of Triangles: ", len(triangles_py))
print("Number of BVH Nodes: ", self.n_nodes)
print("Moving BVH to Taichi fields...")
self.nodes = BVHNode.field()
ti.root.dense(ti.i, self.n_nodes).place(self.nodes)
self.spheres = Sphere.field()
ti.root.dense(ti.i, max(len(spheres_py), 1)).place(self.spheres)
self.triangles = Triangle.field()
ti.root.dense(ti.i, max(len(triangles_py), 1)).place(self.triangles)
self.move_to_taichi_field()
del self.objects_py
del self.obj_types
del self.type_begin
del self.nodes_py
print("Done.")
def recursive_build_py(self, indices):
node = init()
node.idx = len(self.nodes_py)
self.nodes_py.append(node)
bounds = bounds3.init()
for i in indices:
bounds = bounds3.bounds_union(bounds, get_obj_bounds_func(self.obj_types[i])(self.objects_py[i]))
if len(indices) == 1:
node.bounds = bounds
node.obj_type = self.obj_types[indices[0]]
node.obj_idx = indices[0] - self.type_begin[node.obj_type]
node.area = get_obj_area_func(node.obj_type)(self.objects_py[indices[0]])
node.is_leaf = True
elif len(indices) == 2:
node.bounds = bounds
node.left = self.recursive_build_py([indices[0]])
node.right = self.recursive_build_py([indices[1]])
node.area = self.nodes_py[node.left].area + self.nodes_py[node.right].area
else:
centroid_bounds = bounds3.init()
for i in indices:
centroid_bounds = bounds3.bounds_vec_union(
centroid_bounds,
bounds3.get_centroid(get_obj_bounds_func(self.obj_types[i])(self.objects_py[i]))
)
dim = bounds3.max_extent(centroid_bounds)
indices = sorted(
indices,
key=lambda x: bounds3.get_centroid(get_obj_bounds_func(self.obj_types[x])(self.objects_py[x]))[dim]
)
mid = len(indices) // 2
node.left = self.recursive_build_py(indices[:mid])
node.right = self.recursive_build_py(indices[mid:])
node.bounds = bounds
node.area = self.nodes_py[node.left].area + self.nodes_py[node.right].area
return node.idx
def move_to_taichi_field(self):
for i in tqdm(range(len(self.objects_py))):
t = self.obj_types[i]
idx = i - self.type_begin[t]
if t == 0:
self.spheres[idx] = self.objects_py[i]
elif t == 1:
self.triangles[idx] = self.objects_py[i]
else:
raise ValueError("Invalid object type {}".format(str(t)))
for i in tqdm(range(self.n_nodes)):
self.nodes[i] = self.nodes_py[i]
@ti.func
def stack_push(self, i, j, node_idx):
self.stack_pt[i, j] += 1
self.stack[i, j, self.stack_pt[i, j]] = node_idx
@ti.func
def stack_pop(self, i, j):
res = self.stack[i, j, self.stack_pt[i, j]]
self.stack_pt[i, j] -= 1
return res
@ti.func
def stack_top(self, i, j):
return self.stack[i, j, self.stack_pt[i, j]]
@ti.func
def stack_empty(self, i, j):
return self.stack_pt[i, j] < 0
@ti.func
def node_intersetc_p(self, node, ray, t_min=1e-3, t_max=1e9):
is_hit = False
t = FMAX
coords = ti.Vector([0.0, 0.0, 0.0])
normal = ti.Vector([0.0, 0.0, 0.0])
face_front = True
u = 0.0
v = 0.0
m = material.init()
if node.obj_type == 0:
obj = self.spheres[node.obj_idx]
is_hit, t, coords, normal, face_front, m = sphere.intersect_p(obj, ray, t_min, t_max)
u, v = sphere.get_uv(obj, coords)
elif node.obj_type == 1:
obj = self.triangles[node.obj_idx]
is_hit, t, coords, normal, face_front, m = triangle.intersect_p(obj, ray, t_min, t_max)
u, v = triangle.get_uv(obj, coords)
return is_hit, t, coords, normal, face_front, u, v, m
@ti.func
def intersect_p(self, ray, si, sj, t_min=1e-3, t_max=1e9):
is_hit = False
t = FMAX
coords = ti.Vector([0.0, 0.0, 0.0])
u = 0.0
v = 0.0
normal = ti.Vector([0.0, 0.0, 0.0])
face_front = True
m = material.init()
self.stack_pt[si, sj] = -1
# iterative pre-order traversal
node_idx = 0
while node_idx >= 0 or not self.stack_empty(si, sj):
while node_idx >= 0:
node = self.nodes[node_idx]
if bounds3.bounds_intersect(node.bounds, ray):
if node.obj_type != -1:
is_hit_tmp, t_tmp, coords_tmp, normal_tmp, f_tmp, u_tmp, v_tmp, m_tmp = \
self.node_intersetc_p(node, ray, t_min, t)
if is_hit_tmp:
is_hit = True
t = t_tmp
coords = coords_tmp
normal = normal_tmp
face_front = f_tmp
u = u_tmp
v = v_tmp
m = m_tmp
break
else:
self.stack_push(si, sj, node_idx)
node_idx = node.left
else:
break
if not self.stack_empty(si, sj):
node_idx = self.stack_pop(si, sj)
node_idx = self.nodes[node_idx].right
else:
break
return is_hit, t, coords, u, v, normal, face_front, m | 0.612773 | 0.271057 |
from gui import Animation
from d_star_lite import DStarLite
from grid import OccupancyGridMap, SLAM
OBSTACLE = 255
UNOCCUPIED = 0
if __name__ == '__main__':
    """
    set initial values for the map occupancy grid
    |----------> y, column
    | (x=0,y=2)
    |
    V (x=2, y=0)
    x, row
    """
    x_dim = 100
    y_dim = 80
    start = (10, 10)
    goal = (40, 70)
    view_range = 5  # rescan radius handed to both the GUI and SLAM
    gui = Animation(title="D* Lite Path Planning",
                    width=10,
                    height=10,
                    margin=0,
                    x_dim=x_dim,
                    y_dim=y_dim,
                    start=start,
                    goal=goal,
                    viewing_range=view_range)
    # The GUI owns the ground-truth world; the planner only sees what SLAM
    # has sensed so far.
    new_map = gui.world
    old_map = new_map
    new_position = start
    last_position = start
    # new_observation = None
    # type = OBSTACLE
    # D* Lite (optimized)
    dstar = DStarLite(map=new_map,
                      s_start=start,
                      s_goal=goal)
    # SLAM to detect vertices
    slam = SLAM(map=new_map,
                view_range=view_range)
    # move and compute path
    path, g, rhs = dstar.move_and_replan(robot_position=new_position)
    while not gui.done:
        # update the map
        # print(path)
        # drive gui
        gui.run_game(path=path)
        new_position = gui.current
        new_observation = gui.observation
        new_map = gui.world
        """
        if new_observation is not None:
            if new_observation["type"] == OBSTACLE:
                dstar.global_map.set_obstacle(pos=new_observation["pos"])
            if new_observation["pos"] == UNOCCUPIED:
                dstar.global_map.remove_obstacle(pos=new_observation["pos"])
        """
        # NOTE(review): the quoted block above is disabled direct
        # obstacle-update code, superseded by the SLAM path below.
        if new_observation is not None:
            old_map = new_map
            slam.set_ground_truth_map(gt_map=new_map)
        if new_position != last_position:
            last_position = new_position
            # slam
            # Rescan around the robot, then feed changed edges to D* Lite so
            # it can repair the plan incrementally.
            new_edges_and_old_costs, slam_map = slam.rescan(global_position=new_position)
            dstar.new_edges_and_old_costs = new_edges_and_old_costs
            dstar.sensed_map = slam_map
            # d star
path, g, rhs = dstar.move_and_replan(robot_position=new_position) | python/python/main.py | from gui import Animation
from d_star_lite import DStarLite
from grid import OccupancyGridMap, SLAM
OBSTACLE = 255
UNOCCUPIED = 0
if __name__ == '__main__':
"""
set initial values for the map occupancy grid
|----------> y, column
| (x=0,y=2)
|
V (x=2, y=0)
x, row
"""
x_dim = 100
y_dim = 80
start = (10, 10)
goal = (40, 70)
view_range = 5
gui = Animation(title="D* Lite Path Planning",
width=10,
height=10,
margin=0,
x_dim=x_dim,
y_dim=y_dim,
start=start,
goal=goal,
viewing_range=view_range)
new_map = gui.world
old_map = new_map
new_position = start
last_position = start
# new_observation = None
# type = OBSTACLE
# D* Lite (optimized)
dstar = DStarLite(map=new_map,
s_start=start,
s_goal=goal)
# SLAM to detect vertices
slam = SLAM(map=new_map,
view_range=view_range)
# move and compute path
path, g, rhs = dstar.move_and_replan(robot_position=new_position)
while not gui.done:
# update the map
# print(path)
# drive gui
gui.run_game(path=path)
new_position = gui.current
new_observation = gui.observation
new_map = gui.world
"""
if new_observation is not None:
if new_observation["type"] == OBSTACLE:
dstar.global_map.set_obstacle(pos=new_observation["pos"])
if new_observation["pos"] == UNOCCUPIED:
dstar.global_map.remove_obstacle(pos=new_observation["pos"])
"""
if new_observation is not None:
old_map = new_map
slam.set_ground_truth_map(gt_map=new_map)
if new_position != last_position:
last_position = new_position
# slam
new_edges_and_old_costs, slam_map = slam.rescan(global_position=new_position)
dstar.new_edges_and_old_costs = new_edges_and_old_costs
dstar.sensed_map = slam_map
# d star
path, g, rhs = dstar.move_and_replan(robot_position=new_position) | 0.513912 | 0.226185 |
import os
from collections import OrderedDict, deque
from datetime import datetime, date
from enum import Enum, IntEnum
from scripts import SmartJson
class Test:
    """Sample object exercising the serializer's supported field types.

    Attributes cover strings, datetimes, lists, complex numbers, bytes,
    nested dicts, booleans, floats, ints and filesystem paths.
    """

    def __init__(self):
        self.name = "test"
        self.date = datetime.now()
        self.list = ["is list item", 1, datetime.now()]
        self.complex = complex(2, -3)
        self.dict = {'url': "https://pypi.org/project/smartjson/", 'version': "2.0.0", 'author': "K.J.O",
                     'date': date(2019, 10, 1)}
        self.bool = True
        self.float = 9500.50
        self.int = 12
        self.path = os.getcwd()
        # Single bytes field: the original assigned `self.bytes` twice and the
        # first value ("Me") was dead, immediately shadowed by this one.
        self.bytes = "pip install smartjson".encode("utf-8")
class MyObject:
    """Composite sample object with nested containers for serializer tests."""

    def __init__(self):
        self.object = Test()
        self.date = datetime.now()
        self.id = 1
        self.lastId = None
        self.set = ["1", 12, datetime.now()]
        self.list = [datetime.now(), 1]
        # Build the nested OrderedDicts from named parts for readability.
        inner_b = OrderedDict([("b", 2), ("a", datetime.now())])
        inner_a = OrderedDict([("b", 1), ("a", [((1, 'a'), (datetime.now(), 'b'))])])
        self.ordereddict = OrderedDict([("b", inner_b), ("a", inner_a)])
        self.deque = deque(deque(pair) for pair in ([1, 2], [3, 4]))
# Mixed-type payload used to exercise every serializer branch: scalars,
# bytes, dates, a custom object, nested ordered dicts, deques and a complex.
data = {
    "int": 1,
    "str": "SmartJson",
    "bytes": "pip install smartjson".encode("utf-8"),
    "date": date(2010, 1, 1),
    "datetime": datetime(2020, 1, 1, 18, 30, 0, 500),
    "pull": Test(),
    "set": (["1", 12, datetime.now()]),
    "list": [datetime.now(), Test()],
    "ordereddict": OrderedDict([
        ("b", OrderedDict([("b", Test()), ("a", datetime.now())])),
        ("a", OrderedDict([("b", 1), ("a", [((1, 'a'), (datetime.now(), 'b'))])])),
    ]),
    "deque": deque([
        deque([1, 2]),
        deque([3, 4]),
    ]),
    'complex': complex(42, 13)
}
class LoggerLevel(Enum):
    """Logging level names, with one deliberately odd member for testing."""
    CRITICAL = 'CRITICAL'
    ERROR = 'ERROR'
    WARNING = 'WARNING'
    INFO = 'INFO'
    DEBUG = 'DEBUG'
    # NOTE(review): unlike its siblings, NOTSET is bound to the module-level
    # `data` dict -- presumably to stress-test enum serialization; confirm
    # this is intentional and not a typo.
    NOTSET = data
class Status(IntEnum):
    """Integer status codes: 0 for success, 1 for failure."""
    success = 0
    failure = 1
if __name__ == '__main__':
    # Smoke-test the serializer on a complex number (to file) and on a
    # heterogeneous list (to string).
    print("")
    SmartJson(complex(1, 2)).serializeToJsonFile()
    print(SmartJson(["LoggerLevel", 1, datetime.now()]).serialize())
    # print(SmartJson(Test()).serialize())
"""
class Test:
def __init__(self):
self.test = "none"
self.id = 2
self.date = datetime.now()
self.tuple = [((1, 'a'), (2, 'b'))]
data = {
"int": 1,
"str": "SmartJson",
"bytes": "pip install smartjson".encode("utf-8"),
"date": date(2010, 1, 1),
"datetime": datetime(2020, 1, 1, 18, 30, 0, 500),
"pull": Test(),
"set": (["1", 12, datetime.now()]),
"list": [datetime.now(), Test()],
"ordereddict": OrderedDict([
("b", OrderedDict([("b", Test()), ("a", datetime.now())])),
("a", OrderedDict([("b", 1), ("a", [((1, 'a'), (datetime.now(), 'b'))])])),
]),
"deque": deque([
deque([1, 2]),
deque([3, 4]),
])
}
class Pull:
def __init__(self):
self.id = 2
self.title = "Iam pull"
self.author = "<NAME>."
self.subPull = Pull.SubPull()
self.data = data
self.date = datetime.now()
self.list = [1, datetime.now(), Pull.SubPull()]
class SubPull:
def __init__(self):
self.subId = 3
self.subTitle = "I am sub title"
self.subAuthor = "OKJ."
self.date = date(2010, 1, 1)
class Jobs:
def __init__(self):
self.name = 'John'
self.url = "5444"
self.id = 1
self.job = Jobs.Job()
self.data = {
"int": 1,
"str": "SmartJson",
"bytes": "pip install smartjson".encode("utf-8"),
"date": date(2010, 1, 1)
}
def name(self, set=None):
if set != None:
self.name = set
return self.name
class Job:
def __init__(self):
self.job_name = 'Test'
self.job_url = "_blank"
self.date = datetime.now().strftime('%m/%d/%Y')
self.date2 = datetime.now()
self.item = Jobs.Item()
self.pull = Pull()
class Item:
def __init__(self):
self.item_name = 'item 1'
self.item_boof = datetime.now()
self.mylist = [1, 2, 3]
self.another = Jobs.Item.Another()
class Another:
def __init__(self):
self.age = 26
self.precision = 99.56
self.ville = "Lille"
self.meteo = Jobs.Item.Another.Meteo()
class Meteo:
def __init__(self):
self.pluie = True
self.complex = complex(12, 78)
self.tuple = [((1, 'a'), (2, 'b'))]
self.none = None
""" | example.py | import os
from collections import OrderedDict, deque
from datetime import datetime, date
from enum import Enum, IntEnum
from scripts import SmartJson
class Test:
def __init__(self):
self.name = "test"
self.date = datetime.now()
self.list = ["is list item", 1, datetime.now()]
self.complex = complex(2, -3)
self.bytes = "Me".encode("utf-8")
self.dict = {'url': "https://pypi.org/project/smartjson/", 'version': "2.0.0", 'author': "K.J.O",
'date': date(2019, 10, 1)}
self.bool = True
self.float = 9500.50
self.int = 12
self.path = os.getcwd()
self.bytes = "pip install smartjson".encode("utf-8")
class MyObject:
def __init__(self):
self.object = Test()
self.date = datetime.now()
self.id = 1
self.lastId = None
self.set = (["1", 12, datetime.now()])
self.list = [datetime.now(), 1]
self.ordereddict = OrderedDict([
("b", OrderedDict([("b", 2), ("a", datetime.now())])),
("a", OrderedDict([("b", 1), ("a", [((1, 'a'), (datetime.now(), 'b'))])]))
])
self.deque = deque([
deque([1, 2]),
deque([3, 4]),
])
# self.data = data
data = {
"int": 1,
"str": "SmartJson",
"bytes": "pip install smartjson".encode("utf-8"),
"date": date(2010, 1, 1),
"datetime": datetime(2020, 1, 1, 18, 30, 0, 500),
"pull": Test(),
"set": (["1", 12, datetime.now()]),
"list": [datetime.now(), Test()],
"ordereddict": OrderedDict([
("b", OrderedDict([("b", Test()), ("a", datetime.now())])),
("a", OrderedDict([("b", 1), ("a", [((1, 'a'), (datetime.now(), 'b'))])])),
]),
"deque": deque([
deque([1, 2]),
deque([3, 4]),
]),
'complex': complex(42, 13)
}
class LoggerLevel(Enum):
CRITICAL = 'CRITICAL'
ERROR = 'ERROR'
WARNING = 'WARNING'
INFO = 'INFO'
DEBUG = 'DEBUG'
NOTSET = data
class Status(IntEnum):
success = 0
failure = 1
if __name__ == '__main__':
print("")
SmartJson(complex(1, 2)).serializeToJsonFile()
print(SmartJson(["LoggerLevel", 1, datetime.now()]).serialize())
# print(SmartJson(Test()).serialize())
"""
class Test:
def __init__(self):
self.test = "none"
self.id = 2
self.date = datetime.now()
self.tuple = [((1, 'a'), (2, 'b'))]
data = {
"int": 1,
"str": "SmartJson",
"bytes": "pip install smartjson".encode("utf-8"),
"date": date(2010, 1, 1),
"datetime": datetime(2020, 1, 1, 18, 30, 0, 500),
"pull": Test(),
"set": (["1", 12, datetime.now()]),
"list": [datetime.now(), Test()],
"ordereddict": OrderedDict([
("b", OrderedDict([("b", Test()), ("a", datetime.now())])),
("a", OrderedDict([("b", 1), ("a", [((1, 'a'), (datetime.now(), 'b'))])])),
]),
"deque": deque([
deque([1, 2]),
deque([3, 4]),
])
}
class Pull:
def __init__(self):
self.id = 2
self.title = "Iam pull"
self.author = "<NAME>."
self.subPull = Pull.SubPull()
self.data = data
self.date = datetime.now()
self.list = [1, datetime.now(), Pull.SubPull()]
class SubPull:
def __init__(self):
self.subId = 3
self.subTitle = "I am sub title"
self.subAuthor = "OKJ."
self.date = date(2010, 1, 1)
class Jobs:
def __init__(self):
self.name = 'John'
self.url = "5444"
self.id = 1
self.job = Jobs.Job()
self.data = {
"int": 1,
"str": "SmartJson",
"bytes": "pip install smartjson".encode("utf-8"),
"date": date(2010, 1, 1)
}
def name(self, set=None):
if set != None:
self.name = set
return self.name
class Job:
def __init__(self):
self.job_name = 'Test'
self.job_url = "_blank"
self.date = datetime.now().strftime('%m/%d/%Y')
self.date2 = datetime.now()
self.item = Jobs.Item()
self.pull = Pull()
class Item:
def __init__(self):
self.item_name = 'item 1'
self.item_boof = datetime.now()
self.mylist = [1, 2, 3]
self.another = Jobs.Item.Another()
class Another:
def __init__(self):
self.age = 26
self.precision = 99.56
self.ville = "Lille"
self.meteo = Jobs.Item.Another.Meteo()
class Meteo:
def __init__(self):
self.pluie = True
self.complex = complex(12, 78)
self.tuple = [((1, 'a'), (2, 'b'))]
self.none = None
""" | 0.436862 | 0.276639 |
import os
from urllib.request import urlretrieve, urlopen
import time
from datetime import datetime, timedelta
import pytz
import tqdm
# Import data science packages
import numpy as np
import pandas as pd
# Import reddit related packages
import praw
import pdb
import re
# Import Google NLP API
from google.cloud import language
from google.cloud.language import enums
from google.cloud.language import types
from google.oauth2 import service_account
from Preprocessing.base_class import Preprocessor
from Preprocessing.helpers import date_to_iso8601, date_to_interval
# Define
class Reddit_Scanner(Preprocessor):
"""
For a given subreddit:
1. Collect all comments made in a time period
2. Send them to Google Natural Language Processing AP for entity-sentiment analysis
3. Summarise results in a dataframe. Preprocessor should account for both the level of sentiment append
the number of comments being made (magnitude/vocality)
"""
def __init__(self, interval, start_time, end_time):
"""
:interval: Interval in minutes
:start_time: How far back to collect data, as a datetime object
:end_time: Latest datapoint, as a datetime object
"""
self.interval = interval
self.start_time = start_time
self.end_time = end_time
    def authenticate(self, client_ID, client_secret, include_sentiment_analysis=False):
        """Authenticates with Reddit and, optionally, Google Cloud NLP.

        Args:
            client_ID: Reddit API client id.
            client_secret: Reddit API client secret.
            include_sentiment_analysis: when True, also create a Google NLP
                client from a local service-account key file.
        """
        self.reddit = praw.Reddit(user_agent='Comment Extraction (by /u/kibbl3)',
                                  client_id=client_ID, client_secret=client_secret)
        # Initialize Google NLP API credentials
        self.include_sentiment_analysis = include_sentiment_analysis
        if self.include_sentiment_analysis:
            # NOTE(review): key path is hard-coded and relative to the working
            # directory -- confirm deployment layout before relying on it.
            creds = service_account.Credentials.from_service_account_file(
                './Traderbot-5d2e0a1af0a9.json')
            self.NLP_client = language.LanguageServiceClient(credentials=creds)
def get_training_data(self, topic):
"""
Call API to collect target dataset over the defined time period. Returns fully formatted data as a
dataframe and summarized into intervals.
:topic: Subreddit to collect data for
Note that training data here has not yet been split into data vs. targets
"""
start_timestamp = self.start_time.replace(tzinfo=pytz.utc).timestamp()
end_timestamp = self.end_time.replace(tzinfo=pytz.utc).timestamp()
raw_comments = self.get_raw_comments(topic)
raw_comments = self.scrub_reddit_comments(raw_comments, start_timestamp, end_timestamp)
# Calls GCloud NLP API for sentiment analysis
if self.include_sentiment_analysis:
for i, row in raw_comments.iterrows():
text = row['Comment_Text']
document = types.Document(
content=text,
type=enums.Document.Type.PLAIN_TEXT)
sentiment = self.NLP_client.analyze_sentiment(document=document)
row['Sentiment_Score'] = sentiment.document_sentiment.score
row['Sentiment_Magnitude'] = sentiment.document_sentiment.magnitude
# Convert dataframe from objects to float for numerical analysis
raw_comments[['Sentiment_Score','Sentiment_Magnitude', "ETH_Score", "ETH_Magnitude","BTC_Score", "BTC_Magnitude", "LTC_Score", "LTC_Magnitude"]] = raw_comments[['Sentiment_Score','Sentiment_Magnitude',"ETH_Score", "ETH_Magnitude", "BTC_Score", "BTC_Magnitude", "LTC_Score", "LTC_Magnitude"]].apply(pd.to_numeric)
print("just before groupby: /n", raw_comments.head())
# Group comments into periods to aggregate sentiment
reddit_sentiment = raw_comments.groupby('period')
reddit_sentiment = reddit_sentiment.agg({
'Comment_ID': 'count', # Gonna cheat and use this existing column as the count
'Sentiment_Score': np.mean,
'Sentiment_Magnitude': np.mean, # Average magnitude because don't want to skew to many low magnitude comments
'BTC_Score': np.mean,
'BTC_Magnitude': np.mean,
'ETH_Score': np.mean,
'ETH_Magnitude': np.mean,
'LTC_Score': np.mean,
'LTC_Magnitude': np.mean,
})
reddit_sentiment = reddit_sentiment.rename(columns={'Comment_ID': 'Volume'})
return reddit_sentiment
def get_test_data(self):
"""
Call API to collect data for 1 time period only starting from now. Returns fully formatted data in dataframe.
Note that this function will be significantly simpler than get_training_data since there is no need to loop through
multiple time periods and aggregate multiple API calls
"""
# Get all comments for rising or controversial posts
raise NotImplementedError("{} must override step()".format(self.__class__.__name__))
def get_raw_comments(self, subreddit):
"""
Creates and populates a dataframe of raw_comments for a given subreddit
"""
subreddit = self.reddit.subreddit(subreddit)
# Define the raw comment output DataFrame structure
columns = ["Post_ID", "Post_Date", "Post_Score",
"Comment_ID", "Comment_Text", "Comment_Date",
"Comment_Score", "Replying_to_ID",
"Sentiment_Score", "Sentiment_Magnitude",
"ETH_Score", "ETH_Magnitude",
"BTC_Score", "BTC_Magnitude",
"LTC_Score", "LTC_Magnitude"]
raw_comments = pd.DataFrame([], columns=columns, dtype=float)
all_posts = subreddit.hot(limit=5000)
counter = 0
for post in all_posts:
post.comments.replace_more(limit=None)
for comment in post.comments.list():
#print(datetime.fromtimestamp(comment.created_utc))
new_line = [[
post.id, post.created_utc, post.score,
comment.id, comment.body, comment.created_utc,
comment.score, comment.parent_id,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]] # 0.0 placeholders until NLP results returned
raw_comments = raw_comments.append(pd.DataFrame(new_line, columns=columns),ignore_index=True)
counter += 1
print('post {}'.format(counter))
return raw_comments
def scrub_reddit_comments(self, raw_comments, start_timestamp, end_timestamp):
"""
Scrubs a reddit dataframe defined by get_raw_comments to only the time period, and also cleans up naming, time periods, etc
"""
# Scrub data to remove low value comments
#print("raw shape is ", raw_comments.shape)
raw_comments = raw_comments[raw_comments['Comment_Date'] > start_timestamp]
#print("shape after removing comments before start date is ", raw_comments.shape)
raw_comments = raw_comments[raw_comments['Comment_Date'] < end_timestamp]
#print("shape after removing comments after end date is ", raw_comments.shape)
discarded_comments = raw_comments[raw_comments['Comment_Text'].map(len) < 100]
raw_comments = raw_comments[raw_comments['Comment_Text'].map(len) >= 100]
#print("final shape is ", raw_comments.shape)
#print(discarded_comments[:50]['Comment_Text']) # Check what's being discarded_comments
# Add periods for later aggregation
raw_comments['datetime'] = pd.to_datetime(raw_comments['Comment_Date'], unit='s') # Reformat unix timestamp as datetime
raw_comments['period'] = raw_comments['datetime'].map(lambda x: date_to_interval(x, self.interval))
# Replace synonyms for ETH, BTC, and LTC. If multiple terms comes up (e.g. ETH and ether)
raw_comments = raw_comments.apply(lambda x: x.astype(str).str.lower())
raw_comments = raw_comments.replace("ethereum", "ETH")
raw_comments = raw_comments.replace("ethereum's", "ETH")
raw_comments = raw_comments.replace("eth's", "ETH")
raw_comments = raw_comments.replace("ether's", "ETH")
raw_comments = raw_comments.replace("ether", "ETH")
raw_comments = raw_comments.replace("ethers", "ETH")
raw_comments = raw_comments.replace("etherium", "ETH")
raw_comments = raw_comments.replace("eth/usd", "ETH")
raw_comments = raw_comments.replace("eth/eur", "ETH")
raw_comments = raw_comments.replace("eth/cny", "ETH")
raw_comments = raw_comments.replace("bitcoin", "BTC")
raw_comments = raw_comments.replace("bitcoin's", "BTC")
raw_comments = raw_comments.replace("btc's", "BTC")
raw_comments = raw_comments.replace("bitc", "BTC")
raw_comments = raw_comments.replace("bitcoins", "BTC")
raw_comments = raw_comments.replace("btc/usd", "BTC")
raw_comments = raw_comments.replace("btc/eur", "BTC")
raw_comments = raw_comments.replace("btc/cny", "BTC")
raw_comments = raw_comments.replace("litecoin", "LTC")
raw_comments = raw_comments.replace("litcoin", "LTC")
raw_comments = raw_comments.replace("litecoin's", "LTC")
raw_comments = raw_comments.replace("litcoin's", "LTC")
raw_comments = raw_comments.replace("ltc's", "LTC")
raw_comments = raw_comments.replace("ltc/usd", "LTC")
raw_comments = raw_comments.replace("ltc/eur", "LTC")
raw_comments = raw_comments.replace("ltc/cny", "LTC")
return raw_comments | Preprocessing/reddit.py | import os
from urllib.request import urlretrieve, urlopen
import time
from datetime import datetime, timedelta
import pytz
import tqdm
# Import data science packages
import numpy as np
import pandas as pd
# Import reddit related packages
import praw
import pdb
import re
# Import Google NLP API
from google.cloud import language
from google.cloud.language import enums
from google.cloud.language import types
from google.oauth2 import service_account
from Preprocessing.base_class import Preprocessor
from Preprocessing.helpers import date_to_iso8601, date_to_interval
# Define
class Reddit_Scanner(Preprocessor):
"""
For a given subreddit:
1. Collect all comments made in a time period
2. Send them to Google Natural Language Processing AP for entity-sentiment analysis
3. Summarise results in a dataframe. Preprocessor should account for both the level of sentiment append
the number of comments being made (magnitude/vocality)
"""
def __init__(self, interval, start_time, end_time):
"""
:interval: Interval in minutes
:start_time: How far back to collect data, as a datetime object
:end_time: Latest datapoint, as a datetime object
"""
self.interval = interval
self.start_time = start_time
self.end_time = end_time
def authenticate(self, client_ID, client_secret, include_sentiment_analysis=False):
"""
Authenticate with Reddit and Google Cloud
"""
self.reddit = praw.Reddit(user_agent='Comment Extraction (by /u/kibbl3)',
client_id=client_ID, client_secret=client_secret)
# Initialize Google NLP API credentials
self.include_sentiment_analysis = include_sentiment_analysis
if self.include_sentiment_analysis:
creds = service_account.Credentials.from_service_account_file(
'./Traderbot-5d2e0a1af0a9.json')
self.NLP_client = language.LanguageServiceClient(credentials=creds)
def get_training_data(self, topic):
"""
Call API to collect target dataset over the defined time period. Returns fully formatted data as a
dataframe and summarized into intervals.
:topic: Subreddit to collect data for
Note that training data here has not yet been split into data vs. targets
"""
start_timestamp = self.start_time.replace(tzinfo=pytz.utc).timestamp()
end_timestamp = self.end_time.replace(tzinfo=pytz.utc).timestamp()
raw_comments = self.get_raw_comments(topic)
raw_comments = self.scrub_reddit_comments(raw_comments, start_timestamp, end_timestamp)
# Calls GCloud NLP API for sentiment analysis
if self.include_sentiment_analysis:
for i, row in raw_comments.iterrows():
text = row['Comment_Text']
document = types.Document(
content=text,
type=enums.Document.Type.PLAIN_TEXT)
sentiment = self.NLP_client.analyze_sentiment(document=document)
row['Sentiment_Score'] = sentiment.document_sentiment.score
row['Sentiment_Magnitude'] = sentiment.document_sentiment.magnitude
# Convert dataframe from objects to float for numerical analysis
raw_comments[['Sentiment_Score','Sentiment_Magnitude', "ETH_Score", "ETH_Magnitude","BTC_Score", "BTC_Magnitude", "LTC_Score", "LTC_Magnitude"]] = raw_comments[['Sentiment_Score','Sentiment_Magnitude',"ETH_Score", "ETH_Magnitude", "BTC_Score", "BTC_Magnitude", "LTC_Score", "LTC_Magnitude"]].apply(pd.to_numeric)
print("just before groupby: /n", raw_comments.head())
# Group comments into periods to aggregate sentiment
reddit_sentiment = raw_comments.groupby('period')
reddit_sentiment = reddit_sentiment.agg({
'Comment_ID': 'count', # Gonna cheat and use this existing column as the count
'Sentiment_Score': np.mean,
'Sentiment_Magnitude': np.mean, # Average magnitude because don't want to skew to many low magnitude comments
'BTC_Score': np.mean,
'BTC_Magnitude': np.mean,
'ETH_Score': np.mean,
'ETH_Magnitude': np.mean,
'LTC_Score': np.mean,
'LTC_Magnitude': np.mean,
})
reddit_sentiment = reddit_sentiment.rename(columns={'Comment_ID': 'Volume'})
return reddit_sentiment
def get_test_data(self):
"""
Call API to collect data for 1 time period only starting from now. Returns fully formatted data in dataframe.
Note that this function will be significantly simpler than get_training_data since there is no need to loop through
multiple time periods and aggregate multiple API calls
"""
# Get all comments for rising or controversial posts
raise NotImplementedError("{} must override step()".format(self.__class__.__name__))
def get_raw_comments(self, subreddit):
"""
Creates and populates a dataframe of raw_comments for a given subreddit
"""
subreddit = self.reddit.subreddit(subreddit)
# Define the raw comment output DataFrame structure
columns = ["Post_ID", "Post_Date", "Post_Score",
"Comment_ID", "Comment_Text", "Comment_Date",
"Comment_Score", "Replying_to_ID",
"Sentiment_Score", "Sentiment_Magnitude",
"ETH_Score", "ETH_Magnitude",
"BTC_Score", "BTC_Magnitude",
"LTC_Score", "LTC_Magnitude"]
raw_comments = pd.DataFrame([], columns=columns, dtype=float)
all_posts = subreddit.hot(limit=5000)
counter = 0
for post in all_posts:
post.comments.replace_more(limit=None)
for comment in post.comments.list():
#print(datetime.fromtimestamp(comment.created_utc))
new_line = [[
post.id, post.created_utc, post.score,
comment.id, comment.body, comment.created_utc,
comment.score, comment.parent_id,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]] # 0.0 placeholders until NLP results returned
raw_comments = raw_comments.append(pd.DataFrame(new_line, columns=columns),ignore_index=True)
counter += 1
print('post {}'.format(counter))
return raw_comments
def scrub_reddit_comments(self, raw_comments, start_timestamp, end_timestamp):
"""
Scrubs a reddit dataframe defined by get_raw_comments to only the time period, and also cleans up naming, time periods, etc
"""
# Scrub data to remove low value comments
#print("raw shape is ", raw_comments.shape)
raw_comments = raw_comments[raw_comments['Comment_Date'] > start_timestamp]
#print("shape after removing comments before start date is ", raw_comments.shape)
raw_comments = raw_comments[raw_comments['Comment_Date'] < end_timestamp]
#print("shape after removing comments after end date is ", raw_comments.shape)
discarded_comments = raw_comments[raw_comments['Comment_Text'].map(len) < 100]
raw_comments = raw_comments[raw_comments['Comment_Text'].map(len) >= 100]
#print("final shape is ", raw_comments.shape)
#print(discarded_comments[:50]['Comment_Text']) # Check what's being discarded_comments
# Add periods for later aggregation
raw_comments['datetime'] = pd.to_datetime(raw_comments['Comment_Date'], unit='s') # Reformat unix timestamp as datetime
raw_comments['period'] = raw_comments['datetime'].map(lambda x: date_to_interval(x, self.interval))
# Replace synonyms for ETH, BTC, and LTC. If multiple terms comes up (e.g. ETH and ether)
raw_comments = raw_comments.apply(lambda x: x.astype(str).str.lower())
raw_comments = raw_comments.replace("ethereum", "ETH")
raw_comments = raw_comments.replace("ethereum's", "ETH")
raw_comments = raw_comments.replace("eth's", "ETH")
raw_comments = raw_comments.replace("ether's", "ETH")
raw_comments = raw_comments.replace("ether", "ETH")
raw_comments = raw_comments.replace("ethers", "ETH")
raw_comments = raw_comments.replace("etherium", "ETH")
raw_comments = raw_comments.replace("eth/usd", "ETH")
raw_comments = raw_comments.replace("eth/eur", "ETH")
raw_comments = raw_comments.replace("eth/cny", "ETH")
raw_comments = raw_comments.replace("bitcoin", "BTC")
raw_comments = raw_comments.replace("bitcoin's", "BTC")
raw_comments = raw_comments.replace("btc's", "BTC")
raw_comments = raw_comments.replace("bitc", "BTC")
raw_comments = raw_comments.replace("bitcoins", "BTC")
raw_comments = raw_comments.replace("btc/usd", "BTC")
raw_comments = raw_comments.replace("btc/eur", "BTC")
raw_comments = raw_comments.replace("btc/cny", "BTC")
raw_comments = raw_comments.replace("litecoin", "LTC")
raw_comments = raw_comments.replace("litcoin", "LTC")
raw_comments = raw_comments.replace("litecoin's", "LTC")
raw_comments = raw_comments.replace("litcoin's", "LTC")
raw_comments = raw_comments.replace("ltc's", "LTC")
raw_comments = raw_comments.replace("ltc/usd", "LTC")
raw_comments = raw_comments.replace("ltc/eur", "LTC")
raw_comments = raw_comments.replace("ltc/cny", "LTC")
return raw_comments | 0.426083 | 0.381076 |
import logging
import threading
import os
import json
from time import time, sleep
import cv2
from src.common.timestamp import get_timestamp
from src.shell.shell import Shell
from src.configuration.shell_configuration import LocalShellConfiguration
from src.data_chunks.data_chunk_data import DataChunkImage
from src.configuration.config_provider import ConfigProvider
class LocalShell(Shell):
"""
This class describes a periodic shell
"""
def __init__(self):
self.config = LocalShellConfiguration()
self.name = None
self.time_scheduler = None
self.callback = None
def import_configuration(self, config_provider: ConfigProvider):
self.config.read(config_provider)
self.name = config_provider.provide_name()
def apply_configuration(self):
os.makedirs(self.config.storage_path, exist_ok=True)
self.time_scheduler = TimeScheduler(self.config.time_interval, self.execution_step)
self.time_scheduler.start()
def attach_callback(self, callback):
self.callback = callback
def execution_step(self):
timestamp = get_timestamp()
data_chunks = self.callback()
for chunk in data_chunks:
for chunk_piece_data in chunk.data:
if isinstance(chunk_piece_data, DataChunkImage):
for chunk_piece_metadata in chunk.metadata:
if chunk_piece_metadata.name == 'timestamp':
timestamp = chunk_piece_metadata.value
break
image_file_name = str(timestamp)+'.png'
image_file_fullpath = os.path.join(self.config.storage_path, image_file_name)
cv2.imwrite(image_file_fullpath, chunk_piece_data.value)
break
metadata_file_dict = {}
for chunk in data_chunks:
metadata_file_dict[chunk.name] = chunk.as_dict()
for chunk_piece in chunk.data:
if isinstance(chunk_piece, DataChunkImage):
metadata_file_dict[chunk.name].pop('data')
metadata_file_name = str(timestamp) + '.json'
metadata_file_fullpath = os.path.join(self.config.storage_path, metadata_file_name)
with open(metadata_file_fullpath, 'w') as json_file:
json.dump(metadata_file_dict, json_file, indent=4)
class TimeScheduler:
def __init__(self, time_interval: float, executed_function):
self.time_interval = time_interval
self.executed_function = executed_function
self.thread = None
self.stop_flag = False
def start(self):
self.stop_flag = False
self.thread = threading.Thread(target=self.single_step, args=[])
self.thread.start()
def stop(self):
self.stop_flag = True
def single_step(self):
cycle_begin = time() - self.time_interval / 1000.0
while not self.stop_flag:
print('Loop step')
logging.info('Loop step')
cycle_begin = cycle_begin + self.time_interval / 1000.0
if cycle_begin + 0.010 < time():
logging.error('Capturing skipped (consider increasing interval)')
continue
self.executed_function()
debug_str = 'Execution duration %i ms' % int((time() - cycle_begin) * 1000)
logging.debug(debug_str)
cycle_duration = time() - cycle_begin
if cycle_duration > self.time_interval / 1000.0:
logging.warning('Capturing takes longer ' + str(cycle_duration) + ' than given time intervals')
else:
sleep(max(self.time_interval / 1000.0 - (time() - cycle_begin), 0)) | src/shell/local_shell.py | import logging
import threading
import os
import json
from time import time, sleep
import cv2
from src.common.timestamp import get_timestamp
from src.shell.shell import Shell
from src.configuration.shell_configuration import LocalShellConfiguration
from src.data_chunks.data_chunk_data import DataChunkImage
from src.configuration.config_provider import ConfigProvider
class LocalShell(Shell):
"""
This class describes a periodic shell
"""
def __init__(self):
self.config = LocalShellConfiguration()
self.name = None
self.time_scheduler = None
self.callback = None
def import_configuration(self, config_provider: ConfigProvider):
self.config.read(config_provider)
self.name = config_provider.provide_name()
def apply_configuration(self):
os.makedirs(self.config.storage_path, exist_ok=True)
self.time_scheduler = TimeScheduler(self.config.time_interval, self.execution_step)
self.time_scheduler.start()
def attach_callback(self, callback):
self.callback = callback
def execution_step(self):
timestamp = get_timestamp()
data_chunks = self.callback()
for chunk in data_chunks:
for chunk_piece_data in chunk.data:
if isinstance(chunk_piece_data, DataChunkImage):
for chunk_piece_metadata in chunk.metadata:
if chunk_piece_metadata.name == 'timestamp':
timestamp = chunk_piece_metadata.value
break
image_file_name = str(timestamp)+'.png'
image_file_fullpath = os.path.join(self.config.storage_path, image_file_name)
cv2.imwrite(image_file_fullpath, chunk_piece_data.value)
break
metadata_file_dict = {}
for chunk in data_chunks:
metadata_file_dict[chunk.name] = chunk.as_dict()
for chunk_piece in chunk.data:
if isinstance(chunk_piece, DataChunkImage):
metadata_file_dict[chunk.name].pop('data')
metadata_file_name = str(timestamp) + '.json'
metadata_file_fullpath = os.path.join(self.config.storage_path, metadata_file_name)
with open(metadata_file_fullpath, 'w') as json_file:
json.dump(metadata_file_dict, json_file, indent=4)
class TimeScheduler:
def __init__(self, time_interval: float, executed_function):
self.time_interval = time_interval
self.executed_function = executed_function
self.thread = None
self.stop_flag = False
def start(self):
self.stop_flag = False
self.thread = threading.Thread(target=self.single_step, args=[])
self.thread.start()
def stop(self):
self.stop_flag = True
def single_step(self):
cycle_begin = time() - self.time_interval / 1000.0
while not self.stop_flag:
print('Loop step')
logging.info('Loop step')
cycle_begin = cycle_begin + self.time_interval / 1000.0
if cycle_begin + 0.010 < time():
logging.error('Capturing skipped (consider increasing interval)')
continue
self.executed_function()
debug_str = 'Execution duration %i ms' % int((time() - cycle_begin) * 1000)
logging.debug(debug_str)
cycle_duration = time() - cycle_begin
if cycle_duration > self.time_interval / 1000.0:
logging.warning('Capturing takes longer ' + str(cycle_duration) + ' than given time intervals')
else:
sleep(max(self.time_interval / 1000.0 - (time() - cycle_begin), 0)) | 0.555676 | 0.116337 |
import datetime
from ruv_dl.data import Entry, EntrySet, Episode
def test_entry_from_dict():
data = {
'fn': 'some_fn',
'url': 'some_url',
'date': '2017/06/14',
'etag': 'some_etag',
'episode': {'id': 'some_episode'},
}
e = Entry.from_dict(data)
assert e.fn == data['fn']
assert e.url == data['url']
assert e.date == datetime.datetime(2017, 6, 14)
assert e.etag == data['etag']
assert e.episode.data == data['episode']
del data['episode']
e = Entry.from_dict(data)
assert e.episode.data == {}
def test_entry_to_dict():
e = Entry(
'some_fn',
'some_url',
datetime.datetime(2017, 6, 14),
'some_etag',
{'id': 'some_episode'},
)
assert e.to_dict() == {
'fn': 'some_fn',
'url': 'some_url',
'date': '2017/06/14',
'etag': 'some_etag',
'episode': {'id': 'some_episode'},
}
def test_entry_equality():
assert Entry('fdsa', 'fdsa', None, 'etag') == Entry(
'asdf', 'asdf', datetime.datetime.min, 'etag'
)
assert Entry('fdsa', 'fdsa', None, 'etag') == Entry(
'fdsa', 'fdsa', None, 'etag'
)
assert Entry('fdsa', 'fdsa', None, 'etag') != Entry(
'fdsa', 'fdsa', None, 'not-same-etag'
)
def test_choose_best_item():
s = EntrySet()
episode_real = {
'id': 'some_id_from_ruv_api',
'number': 1,
}
episode_generated = {
'number': 2,
}
item1 = Entry('fn1', 'url1', datetime.datetime.min, 'etag1', None)
item2 = Entry('fn2', 'url2', datetime.datetime.min, 'etag2', episode_real)
assert s._choose_best_item(item1, item2).episode.to_dict() == episode_real
assert s._choose_best_item(item2, item1).episode.to_dict() == episode_real
item1.episode = Episode(episode_generated)
assert s._choose_best_item(item1, item2).episode.to_dict() == episode_real
assert s._choose_best_item(item2, item1).episode.to_dict() == episode_real
item2.episode = None
assert (
s._choose_best_item(item1, item2).episode.to_dict()
== episode_generated
)
assert (
s._choose_best_item(item2, item1).episode.to_dict()
== episode_generated
) | tests/test_data.py | import datetime
from ruv_dl.data import Entry, EntrySet, Episode
def test_entry_from_dict():
data = {
'fn': 'some_fn',
'url': 'some_url',
'date': '2017/06/14',
'etag': 'some_etag',
'episode': {'id': 'some_episode'},
}
e = Entry.from_dict(data)
assert e.fn == data['fn']
assert e.url == data['url']
assert e.date == datetime.datetime(2017, 6, 14)
assert e.etag == data['etag']
assert e.episode.data == data['episode']
del data['episode']
e = Entry.from_dict(data)
assert e.episode.data == {}
def test_entry_to_dict():
e = Entry(
'some_fn',
'some_url',
datetime.datetime(2017, 6, 14),
'some_etag',
{'id': 'some_episode'},
)
assert e.to_dict() == {
'fn': 'some_fn',
'url': 'some_url',
'date': '2017/06/14',
'etag': 'some_etag',
'episode': {'id': 'some_episode'},
}
def test_entry_equality():
assert Entry('fdsa', 'fdsa', None, 'etag') == Entry(
'asdf', 'asdf', datetime.datetime.min, 'etag'
)
assert Entry('fdsa', 'fdsa', None, 'etag') == Entry(
'fdsa', 'fdsa', None, 'etag'
)
assert Entry('fdsa', 'fdsa', None, 'etag') != Entry(
'fdsa', 'fdsa', None, 'not-same-etag'
)
def test_choose_best_item():
s = EntrySet()
episode_real = {
'id': 'some_id_from_ruv_api',
'number': 1,
}
episode_generated = {
'number': 2,
}
item1 = Entry('fn1', 'url1', datetime.datetime.min, 'etag1', None)
item2 = Entry('fn2', 'url2', datetime.datetime.min, 'etag2', episode_real)
assert s._choose_best_item(item1, item2).episode.to_dict() == episode_real
assert s._choose_best_item(item2, item1).episode.to_dict() == episode_real
item1.episode = Episode(episode_generated)
assert s._choose_best_item(item1, item2).episode.to_dict() == episode_real
assert s._choose_best_item(item2, item1).episode.to_dict() == episode_real
item2.episode = None
assert (
s._choose_best_item(item1, item2).episode.to_dict()
== episode_generated
)
assert (
s._choose_best_item(item2, item1).episode.to_dict()
== episode_generated
) | 0.444324 | 0.354014 |
from usermgnt.data import data_adapter as data_adapter
from usermgnt.common import common as common
from usermgnt.common.logs import LOG
# Profile content:
# {
# "device_id": "device/11111111d",
# "service_consumer": boolean,
# "resource_contributor": boolean
# }
# get_user_profile_by_id: Get user profile by ID
def get_user_profile_by_id(profile_id):
LOG.debug("[usermgnt.modules.um_profiling] [get_user_profile_by_id] profile_id=" + profile_id)
user_profile = data_adapter.get_user_profile_by_id(profile_id)
if user_profile is None:
return common.gen_response(500, 'Error', 'profile_id', profile_id, 'profile', {})
elif user_profile == -1:
return common.gen_response_ko('Warning: User profile not found', 'profile_id', profile_id, 'profile', {})
else:
return common.gen_response_ok('User found', 'profile_id', profile_id, 'profile', user_profile)
# get_current_user_profile: Get current user profile
def get_current_user_profile():
LOG.info("[usermgnt.modules.um_profiling] [get_current_user_profile] Getting current user-profile value ...")
user_profile = data_adapter.get_current_user_profile()
if user_profile is None:
return common.gen_response(500, 'Error', 'user_profile', 'not found / error', 'profile', {})
elif user_profile == -1:
return common.gen_response_ko('Warning: User profile not found', 'user_profile', 'not found / error', 'profile', {})
else:
return common.gen_response_ok('User found', 'user_profile', user_profile)
# Initializes users profile
def create_user_profile(data):
LOG.info("[usermgnt.modules.um_profiling] [create_user_profile] data=" + str(data))
# check if profile exists
device_id = data['device_id']
user_profile = data_adapter.get_user_profile(device_id)
data_adapter.save_device_id(device_id)
if user_profile == -1 or user_profile is None:
# register user/profile
user_profile = data_adapter.register_user(data)
if user_profile is None:
return None
else:
return user_profile
else:
return user_profile
# update_user_profile: Updates users profile
def update_user_profile_by_id(profile_id, data):
LOG.debug("[usermgnt.modules.um_profiling] [update_user_profile_by_id] profile_id=" + profile_id + ", data=" + str(data))
# update user
user_profile = data_adapter.update_user_profile_by_id(profile_id, data)
if user_profile is None:
return common.gen_response(500, 'Error', 'profile_id', profile_id, 'profile', {})
elif user_profile == -1:
return common.gen_response_ko('Warning: User profile not found', 'profile_id', profile_id, 'profile', {})
else:
return common.gen_response_ok('User updated', 'profile_id', profile_id, 'profile', user_profile)
# delete_user_profile: Deletes users profile
def delete_user_profile_by_id(profile_id):
LOG.info("[usermgnt.modules.um_profiling] [delete_user_profile_by_id] profile_id=" + profile_id)
# delete profile
if data_adapter.delete_user_profile_by_id(profile_id) is None:
return common.gen_response(500, 'Error', 'profile_id', profile_id)
else:
return common.gen_response_ok('Profile deleted', 'profile_id', profile_id) | usermgnt/modules/um_profiling.py | from usermgnt.data import data_adapter as data_adapter
from usermgnt.common import common as common
from usermgnt.common.logs import LOG
# Profile content:
# {
# "device_id": "device/11111111d",
# "service_consumer": boolean,
# "resource_contributor": boolean
# }
# get_user_profile_by_id: Get user profile by ID
def get_user_profile_by_id(profile_id):
LOG.debug("[usermgnt.modules.um_profiling] [get_user_profile_by_id] profile_id=" + profile_id)
user_profile = data_adapter.get_user_profile_by_id(profile_id)
if user_profile is None:
return common.gen_response(500, 'Error', 'profile_id', profile_id, 'profile', {})
elif user_profile == -1:
return common.gen_response_ko('Warning: User profile not found', 'profile_id', profile_id, 'profile', {})
else:
return common.gen_response_ok('User found', 'profile_id', profile_id, 'profile', user_profile)
# get_current_user_profile: Get current user profile
def get_current_user_profile():
LOG.info("[usermgnt.modules.um_profiling] [get_current_user_profile] Getting current user-profile value ...")
user_profile = data_adapter.get_current_user_profile()
if user_profile is None:
return common.gen_response(500, 'Error', 'user_profile', 'not found / error', 'profile', {})
elif user_profile == -1:
return common.gen_response_ko('Warning: User profile not found', 'user_profile', 'not found / error', 'profile', {})
else:
return common.gen_response_ok('User found', 'user_profile', user_profile)
# Initializes users profile
def create_user_profile(data):
LOG.info("[usermgnt.modules.um_profiling] [create_user_profile] data=" + str(data))
# check if profile exists
device_id = data['device_id']
user_profile = data_adapter.get_user_profile(device_id)
data_adapter.save_device_id(device_id)
if user_profile == -1 or user_profile is None:
# register user/profile
user_profile = data_adapter.register_user(data)
if user_profile is None:
return None
else:
return user_profile
else:
return user_profile
# update_user_profile: Updates users profile
def update_user_profile_by_id(profile_id, data):
LOG.debug("[usermgnt.modules.um_profiling] [update_user_profile_by_id] profile_id=" + profile_id + ", data=" + str(data))
# update user
user_profile = data_adapter.update_user_profile_by_id(profile_id, data)
if user_profile is None:
return common.gen_response(500, 'Error', 'profile_id', profile_id, 'profile', {})
elif user_profile == -1:
return common.gen_response_ko('Warning: User profile not found', 'profile_id', profile_id, 'profile', {})
else:
return common.gen_response_ok('User updated', 'profile_id', profile_id, 'profile', user_profile)
# delete_user_profile: Deletes users profile
def delete_user_profile_by_id(profile_id):
LOG.info("[usermgnt.modules.um_profiling] [delete_user_profile_by_id] profile_id=" + profile_id)
# delete profile
if data_adapter.delete_user_profile_by_id(profile_id) is None:
return common.gen_response(500, 'Error', 'profile_id', profile_id)
else:
return common.gen_response_ok('Profile deleted', 'profile_id', profile_id) | 0.384334 | 0.179999 |
import numpy as np
import pandas as pd
from scipy.spatial import Voronoi, ConvexHull
import signature.calculations as calc
from functools import partial
class MixedCrystalSignature:
    """Class for calculation of the Mixed Crystal Signature.

    Description in https://doi.org/10.1103/PhysRevE.96.011301
    """

    # Spherical-harmonic orders l used for the qlm calculation.
    L_VEC = np.array([4, 5, 6], dtype=np.int32)
    MAX_L = np.max(L_VEC)

    def __init__(self, solid_thresh=0.55, pool=None):
        """solid_thresh is a threshold between 0 (very disordered) and 1 (very crystalline).

        pool is a pool from the multiprocessing module.
        If no pool is provided, the calculation will be single-core.
        """
        self.solid_thresh = solid_thresh
        self.inner_bool = None        # per-point mask: True = inside the analysis volume
        self.indices = None           # all point indices
        self.outsider_indices = None  # indices outside the inner volume
        self.insider_indices = None   # indices inside the inner volume
        self.voro = None              # scipy Voronoi diagram
        self.neighborlist = None      # per-point list of Voronoi neighbors
        self.conv_hulls = None        # convex hull of each Voronoi cell
        self.voro_vols = None         # volume of each Voronoi cell
        self.qlm_arrays = None        # per-point flat array of qlm components
        self.signature = pd.DataFrame()
        self.datapoints = None
        self.p = None
        if pool is not None:
            self.p = pool
        # Precompute, for every l, the column slice of the flat qlm array
        # that holds the 2*l+1 components of that order.
        self.len_qlm = 0
        self.idx_qlm = dict()
        for i, l in enumerate(self.L_VEC):
            self.idx_qlm[l] = np.arange(self.len_qlm, self.len_qlm + 2 * l + 1, dtype=np.int32)
            self.len_qlm += (2 * self.L_VEC[i] + 1)

    def set_datapoints(self, data):
        """provide datapoints for signature calculation"""
        self.datapoints = data
        # FIX: np.bool was a deprecated alias of the builtin bool
        # (removed in NumPy 1.24); use bool directly.
        self.inner_bool = np.ones(self.datapoints.shape[0], dtype=bool)
        self.calc_inner_outer_indices()

    def calc_voro(self):
        """calculate voronoi diagram of the datapoints"""
        self.voro = Voronoi(self.datapoints)

    def calc_neighborlist(self):
        """retrieve neighborlists from voronoi diagram"""
        ridge_points = self.voro.ridge_points
        self.neighborlist = [[] for _ in range(self.datapoints.shape[0])]
        # Each ridge connects two points; record the neighborhood symmetrically
        # while avoiding duplicate entries.
        for j in range(len(ridge_points)):
            if ridge_points[j, 1] not in self.neighborlist[ridge_points[j, 0]]:
                self.neighborlist[ridge_points[j, 0]].append(ridge_points[j, 1])
            if ridge_points[j, 0] not in self.neighborlist[ridge_points[j, 1]]:
                self.neighborlist[ridge_points[j, 1]].append(ridge_points[j, 0])

    def calc_inner_outer_indices(self):
        """calculate indices to choose inner volume datapoints or outer volume datapoints"""
        self.indices = np.arange(0, self.datapoints.shape[0], dtype=np.int32)
        self.outsider_indices = self.indices[np.invert(self.inner_bool)]
        self.insider_indices = self.indices[self.inner_bool]

    def set_inner_volume(self, volume):
        """define the inner volume

        format of the volume:
        volume=[[x_min,x_max],[y_min,y_max],[z_min,z_max]]
        """
        x_min, x_max = volume[0]
        y_min, y_max = volume[1]
        z_min, z_max = volume[2]
        # A point is inside when all three coordinates fall within the box.
        bool_matrix_min = self.datapoints >= [x_min, y_min, z_min]
        bool_matrix_max = self.datapoints <= [x_max, y_max, z_max]
        bool_matrix = np.logical_and(bool_matrix_min, bool_matrix_max)
        self.inner_bool = np.all(bool_matrix, axis=1)
        self.calc_inner_outer_indices()

    def set_inner_bool_vec(self, bool_vec):
        """define inner volume with a customized array of booleans

        length of bool_vec needs to be the number of rows in datapoints
        """
        self.inner_bool = bool_vec
        self.calc_inner_outer_indices()

    def calc_convex_hulls(self):
        """calculate the convex hulls for all datapoints"""
        regions = self.voro.regions
        point_region = self.voro.point_region
        vertices = self.voro.vertices
        voro_points_list = [vertices[regions[point_region[i]]] for i in self.indices]
        # "QJ" joggles the input so Qhull handles degenerate cells.
        if self.p is not None:
            self.conv_hulls = self.p.map(partial(ConvexHull, qhull_options="QJ"), voro_points_list, chunksize=400)
        else:
            self.conv_hulls = [ConvexHull(voro_points_list[i], qhull_options="QJ") for i in self.indices]

    def calc_voro_area_angles(self):
        """calculate voronoi facet areas and normal vectors"""
        voro_area_angles = []
        for hull in self.conv_hulls:
            voro_area_angle = calc.calc_voro_area_angle(hull.simplices.shape[0],
                                                        hull.equations[:, 0:3],
                                                        hull.simplices, hull.points)
            voro_area_angles.append(voro_area_angle)
        return voro_area_angles

    def calc_qlm_array(self):
        """calculate qlm from minkowski structure metric

        Description in https://doi.org/10.1103/PhysRevE.96.011301
        """
        self.calc_voro()
        self.calc_neighborlist()
        self.calc_convex_hulls()
        self.voro_area_angles = self.calc_voro_area_angles()
        self.total_areas = [hull.area for hull in self.conv_hulls]
        self.voro_vols = [hull.volume for hull in self.conv_hulls]
        # Total number of qlm components over all chosen l (sum of 2l+1).
        len_array = 0
        for i in range(self.L_VEC.shape[0]):
            len_array += (2 * self.L_VEC[i] + 1)
        self.qlm_arrays = np.zeros((len(self.total_areas), len_array), dtype=np.complex128)
        for i in range(len(self.total_areas)):
            self.qlm_arrays[i, :] = calc.calc_msm_qlm(len_array,
                                                      self.L_VEC,
                                                      self.voro_area_angles[i][:, 2],
                                                      self.voro_area_angles[i][:, 1],
                                                      self.total_areas[i],
                                                      self.voro_area_angles[i][:, 0])

    def calc_struct_order(self):
        """calculate the structural order for every particle

        Description in https://doi.org/10.1103/PhysRevE.96.011301
        """
        si_l = 6  # this should only make sense with l=6, so its hardcoded
        # FIX: np.bool -> bool (deprecated alias, removed in NumPy 1.24).
        self.solid_bool = np.zeros(self.datapoints.shape[0], dtype=bool)
        self.struct_order = np.zeros(self.datapoints.shape[0], dtype=np.float64)
        for i in self.insider_indices:
            voro_neighbors = np.array(self.neighborlist[i], dtype=np.int64)
            qlm_array_neighbors = self.qlm_arrays[voro_neighbors][:, self.idx_qlm[si_l]]
            num_neighbors = len(self.neighborlist[i])
            si = calc.calc_si(6, self.qlm_arrays[i, self.idx_qlm[si_l]], num_neighbors, qlm_array_neighbors)
            self.solid_bool[i] = (si >= self.solid_thresh)
            self.struct_order[i] = si
        # Solid particles: inside the inner volume AND above the threshold.
        self.solid_indices = self.indices[np.logical_and(self.inner_bool, self.solid_bool)]

    def calc_num_neigh(self):
        """calculate the number of neighbors for all solid particles"""
        self.signature['N'] = [len(self.neighborlist[i]) for i in self.solid_indices]

    def calc_msm(self):
        """calculate ql from minkowski structure metric for all solid particles

        Description in https://doi.org/10.1103/PhysRevE.96.011301
        """
        ql_array = calc.calc_qls_from_qlm_arrays(self.L_VEC, self.qlm_arrays[self.solid_indices]).transpose()
        for l in self.L_VEC:
            self.signature['q{:d}'.format(l)] = ql_array[self.L_VEC == l][0]
        wigner_arr, m_arr, count_arr = calc.calc_wigner3j_general(self.L_VEC)
        wl_array = calc.calc_wls_from_qlm_arrays(self.L_VEC, self.qlm_arrays[self.solid_indices], wigner_arr, m_arr, count_arr).transpose()
        for l in self.L_VEC:
            if l % 2 == 0:  # odd number w_l are useless
                self.signature['w{:d}'.format(l)] = wl_array[self.L_VEC == l][0]

    def calc_bond_angles(self):
        """calculate bond angles for all solid particles

        definition in: https://doi.org/10.1103/PhysRevB.73.054104
        """
        bond_angles = calc.calc_bond_angles(self.solid_indices, self.neighborlist, self.datapoints)
        for dim in range(bond_angles.shape[1]):
            self.signature['ba{:d}'.format(dim)] = bond_angles[:, dim]

    def calc_hist_distances(self):
        """calculate histogram of normalized distances

        Modified from https://doi.org/10.1103/PhysRevE.96.011301
        """
        hist_distances = calc.calc_hist_distances(self.solid_indices, self.neighborlist, self.datapoints, self.voro_vols)
        for dim in range(hist_distances.shape[1]):
            self.signature['dist{:d}'.format(dim)] = hist_distances[:, dim]

    def calc_minkowski_eigvals(self):
        """calculate eigenvalues of rank 4 minkowski tensor for all solid particles

        Description in https://doi.org/10.1103/PhysRevE.85.030301
        """
        eigenvals_arr = np.zeros((self.solid_indices.shape[0], 6), dtype=np.float64)
        for idx in range(self.solid_indices.shape[0]):
            i = self.solid_indices[idx]
            eigenvals_arr[idx] = calc.calc_minkowski_eigenvalues(self.total_areas[i],
                                                                 self.voro_area_angles[i][:, 0],
                                                                 self.conv_hulls[i].equations[:, 0:3])
        for dim in range(eigenvals_arr.shape[1]):
            self.signature['zeta{:d}'.format(dim)] = eigenvals_arr[:, dim]

    def calc_signature(self):
        """Function to calculate the mixed crystal signature on the dataset

        Description in https://doi.org/10.1103/PhysRevE.96.011301 (with minor modifications)
        """
        self.signature = pd.DataFrame()
        self.calc_qlm_array()
        self.calc_struct_order()
        self.calc_num_neigh()
        self.calc_bond_angles()
        self.calc_msm()
        self.calc_minkowski_eigvals()
        self.calc_hist_distances()
import pandas as pd
from scipy.spatial import Voronoi, ConvexHull
import signature.calculations as calc
from functools import partial
class MixedCrystalSignature:
"""Class for calculation of the Mixed Crystal Signature
Description in https://doi.org/10.1103/PhysRevE.96.011301"""
L_VEC = np.array([4, 5, 6],dtype=np.int32) #Choose which l to use for calculation of qlm
MAX_L = np.max(L_VEC)
def __init__(self, solid_thresh=0.55, pool=None):
"""solid_thresh is a threshold between 0 (very disordered) and 1 (very crystalline)
pool is a pool from the multiprocessing module.
If no pool is provided, the calculation will be single-core"""
self.solid_thresh = solid_thresh
self.inner_bool = None
self.indices = None
self.outsider_indices = None
self.insider_indices = None
self.voro = None
self.neighborlist = None
self.conv_hulls = None
self.voro_vols = None
self.qlm_arrays = None
self.signature = pd.DataFrame()
self.datapoints = None
self.p = None
if pool is not None:
self.p = pool
self.len_qlm=0
self.idx_qlm=dict()
for i,l in enumerate(self.L_VEC):
self.idx_qlm[l]=np.arange(self.len_qlm,self.len_qlm+2*l+1,dtype=np.int32)
self.len_qlm += (2*self.L_VEC[i]+1)
def set_datapoints(self,data):
"""provide datapoints for signature calculation"""
self.datapoints=data
self.inner_bool=np.ones(self.datapoints.shape[0],dtype=np.bool)
self.calc_inner_outer_indices()
def calc_voro(self):
"""calculate voronoi diagram of the datapoints"""
self.voro=Voronoi(self.datapoints)
def calc_neighborlist(self):
"""retrieve neighborlists from voronoi diagram"""
ridge_points=self.voro.ridge_points
self.neighborlist=[[] for _ in range(self.datapoints.shape[0])]
for j in range(len(ridge_points)):
if ridge_points[j,1] not in self.neighborlist[ridge_points[j,0]]:
self.neighborlist[ridge_points[j,0]].append(ridge_points[j,1])
if ridge_points[j,0] not in self.neighborlist[ridge_points[j,1]]:
self.neighborlist[ridge_points[j,1]].append(ridge_points[j,0])
def calc_inner_outer_indices(self):
"""calculate indices to choose inner volume datapoints or outer volume datapoints"""
self.indices = np.arange(0, self.datapoints.shape[0], dtype=np.int32)
self.outsider_indices = self.indices[np.invert(self.inner_bool)]
self.insider_indices = self.indices[self.inner_bool]
def set_inner_volume(self,volume):
"""define the inner volume
format of the volume:
volume=[[x_min,xmax],[y_min,y_max],[z_min,z_max]]"""
x_min, x_max = volume[0]
y_min, y_max = volume[1]
z_min, z_max = volume[2]
bool_matrix_min = self.datapoints >= [x_min,y_min,z_min]
bool_matrix_max = self.datapoints <= [x_max,y_max,z_max]
bool_matrix=np.logical_and(bool_matrix_min,bool_matrix_max)
self.inner_bool=np.all(bool_matrix,axis=1)
self.calc_inner_outer_indices()
def set_inner_bool_vec(self,bool_vec):
"""define inner volume with a customized array of booleans
length of bool_vec needs to be the number of rows in datapoints"""
self.inner_bool=bool_vec
self.calc_inner_outer_indices()
def calc_convex_hulls(self):
"""calculate the convex hulls for all datapoints"""
regions=self.voro.regions
point_region=self.voro.point_region
vertices=self.voro.vertices
voro_points_list=[vertices[regions[point_region[i]]] for i in self.indices]
if self.p is not None:
self.conv_hulls= self.p.map(partial(ConvexHull,qhull_options="QJ"),voro_points_list,chunksize=400)
else:
self.conv_hulls=[ConvexHull(voro_points_list[i],qhull_options="QJ") for i in self.indices]
def calc_voro_area_angles(self):
"""calculate voronoi facet areas and normal vectors"""
voro_area_angles=[]
for hull in self.conv_hulls:
voro_area_angle=calc.calc_voro_area_angle(hull.simplices.shape[0],
hull.equations[:,0:3],
hull.simplices,hull.points)
voro_area_angles.append(voro_area_angle)
return voro_area_angles
def calc_qlm_array(self):
"""calculate qlm from minkowski structure metric
Description in https://doi.org/10.1103/PhysRevE.96.011301"""
self.calc_voro()
self.calc_neighborlist()
self.calc_convex_hulls()
self.voro_area_angles=self.calc_voro_area_angles()
self.total_areas=[hull.area for hull in self.conv_hulls]
self.voro_vols=[hull.volume for hull in self.conv_hulls]
len_array=0
for i in range(self.L_VEC.shape[0]):
len_array += (2*self.L_VEC[i]+1)
self.qlm_arrays=np.zeros((len(self.total_areas),len_array),dtype=np.complex128)
for i in range(len(self.total_areas)):
self.qlm_arrays[i,:]=calc.calc_msm_qlm(len_array,
self.L_VEC,
self.voro_area_angles[i][:,2],
self.voro_area_angles[i][:,1],
self.total_areas[i],
self.voro_area_angles[i][:,0])
def calc_struct_order(self):
"""calculate the structural order for every particle
Description in https://doi.org/10.1103/PhysRevE.96.011301"""
si_l=6 #this should only make sense with l=6, so its hardcoded
self.solid_bool=np.zeros(self.datapoints.shape[0],dtype=np.bool)
self.struct_order=np.zeros(self.datapoints.shape[0],dtype=np.float64)
for i in self.insider_indices:
voro_neighbors = np.array(self.neighborlist[i],dtype=np.int64)
qlm_array_neighbors = self.qlm_arrays[voro_neighbors][:,self.idx_qlm[si_l]]
num_neighbors=len(self.neighborlist[i])
si=calc.calc_si(6,self.qlm_arrays[i,self.idx_qlm[si_l]],num_neighbors,qlm_array_neighbors)
self.solid_bool[i]=(si>=self.solid_thresh)
self.struct_order[i]=si
self.solid_indices=self.indices[np.logical_and(self.inner_bool,self.solid_bool)]
def calc_num_neigh(self):
"""calculate the number of neighbors for all solid particles"""
self.signature['N']=[len(self.neighborlist[i]) for i in self.solid_indices]
def calc_msm(self):
"""calculate ql from minkowski structure metric for all solid particles
Description in https://doi.org/10.1103/PhysRevE.96.011301"""
ql_array=calc.calc_qls_from_qlm_arrays(self.L_VEC,self.qlm_arrays[self.solid_indices]).transpose()
for l in self.L_VEC:
self.signature['q{:d}'.format(l)]=ql_array[self.L_VEC==l][0]
wigner_arr,m_arr,count_arr=calc.calc_wigner3j_general(self.L_VEC)
wl_array=calc.calc_wls_from_qlm_arrays(self.L_VEC,self.qlm_arrays[self.solid_indices],wigner_arr,m_arr,count_arr).transpose()
for l in self.L_VEC:
if l%2==0: #odd number w_l are useless
self.signature['w{:d}'.format(l)]=wl_array[self.L_VEC==l][0]
def calc_bond_angles(self):
"""calculate bond angles for all solid particles
definition in: https://doi.org/10.1103/PhysRevB.73.054104"""
bond_angles=calc.calc_bond_angles(self.solid_indices,self.neighborlist,self.datapoints)
for dim in range(bond_angles.shape[1]):
self.signature['ba{:d}'.format(dim)]=bond_angles[:,dim]
def calc_hist_distances(self):
"""calculate histogram of normalized distances
Modified from https://doi.org/10.1103/PhysRevE.96.011301"""
hist_distances=calc.calc_hist_distances(self.solid_indices,self.neighborlist,self.datapoints,self.voro_vols)
for dim in range(hist_distances.shape[1]):
self.signature['dist{:d}'.format(dim)]=hist_distances[:,dim]
def calc_minkowski_eigvals(self):
"""calculate eigenvalues of rank 4 minkowski tensor for all solid particles
Description in https://doi.org/10.1103/PhysRevE.85.030301"""
eigenvals_arr=np.zeros((self.solid_indices.shape[0],6),dtype=np.float64)
for idx in range(self.solid_indices.shape[0]):
i=self.solid_indices[idx]
eigenvals_arr[idx]=calc.calc_minkowski_eigenvalues(self.total_areas[i],
self.voro_area_angles[i][:,0],
self.conv_hulls[i].equations[:,0:3])
for dim in range(eigenvals_arr.shape[1]):
self.signature['zeta{:d}'.format(dim)]=eigenvals_arr[:,dim]
def calc_signature(self):
"""Function to calculate the mixed crystal signature on the dataset
Description in https://doi.org/10.1103/PhysRevE.96.011301 (with minor modifications)"""
self.signature=pd.DataFrame()
self.calc_qlm_array()
self.calc_struct_order()
self.calc_num_neigh()
self.calc_bond_angles()
self.calc_msm()
self.calc_minkowski_eigvals()
self.calc_hist_distances() | 0.678859 | 0.306054 |
import socket
class UkbSession:
    """Client session speaking the UKB length-prefixed string protocol.

    Wire format of one message: one ASCII digit giving the length of the
    following length-string, then the length-string (the payload size in
    decimal), then that many payload bytes.
    """

    def __init__(self, port, server = "localhost"):
        self.buffer_size = 16384
        self.buffer = None
        # [a, b) is the not-yet-consumed window of self.buffer.
        self.a = 0
        self.b = 0
        try:
            self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.socket.connect((socket.gethostbyname(server), port))
        except OSError as err:
            # FIX: was a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit; name-resolution and socket
            # failures are all OSError subclasses. Chain the cause.
            raise OSError("Can not connect to server '{}:{}'".format(server, str(port))) from err

    def _read_packet(self):
        '''Read one packet (up to self.buffer_size bytes) into self.buffer
        and reset the consumption window. Raises OSError on EOF.'''
        self.buffer = self.socket.recv(self.buffer_size)
        if not self.buffer:
            raise OSError
        self.a = 0
        self.b = len(self.buffer)

    def _read_nstring(self, N):
        '''Read exactly N bytes, refilling self.buffer as needed.
        Returns the bytes read (the original docstring was inaccurate).'''
        mydata = b''
        while True:
            m = min(self.b - self.a, N)
            if m:
                mydata += self.buffer[self.a:self.a + m]
                self.a += m
                N -= m
            if N == 0:
                break
            self._read_packet()
        return mydata

    def _read_size(self):
        '''Read the size of the incoming string. Returns an int.'''
        # First byte is the digit count of the decimal size that follows.
        len_bytes = self._read_nstring(1)
        size = 0
        try:
            aux = self._read_nstring(int(len_bytes))
            size = int(aux)
        except ValueError:
            raise Exception('_read_size: Protocol error')
        return size

    def _write_data(self, data):
        '''Send data bytes, looping until everything is written.'''
        N = len(data)
        a = 0
        while a < N:
            n = self.socket.send(data[a:])
            if not n:
                raise OSError
            a += n

    def _write_size(self, N):
        '''Send an int following protocol.'''
        length_string = str(N)
        self._write_data(str(len(length_string)).encode('ascii'))  # first, one byte string with the length of the length-string
        self._write_data(length_string.encode('ascii'))  # then, the length-string as bytes

    def recv(self):
        '''Receive a string from the socket. Returns a utf-8 decoded string.'''
        if not self.socket:
            raise OSError('recv: no socket')
        data = b''
        try:
            l = self._read_size()
            if l == 0: return ''
            data = self._read_nstring(l)
        except OSError:
            raise OSError('recv: connection closed by peer')
        return data.decode('utf-8', errors='replace')

    def send(self, data):
        '''Send a string to the server, following ukb protocol.'''
        if not self.socket:
            raise OSError('send: no socket')
        if isinstance(data, str):
            data = data.encode('utf-8')
        l = len(data)
        if l == 0: return
        try:
            self._write_size(l)
            self._write_data(data)
        except OSError:
            raise OSError('send: connection closed by peer')

    def close(self):
        '''Close the underlying socket; safe to call more than once.'''
        if self.socket:
            self.socket.close()
            self.socket = None
import socket
class UkbSession:
def __init__(self, port, server = "localhost"):
self.buffer_size = 16384
self.buffer = None
self.a = 0
self.b = 0
try:
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect((socket.gethostbyname(server), port))
except:
raise OSError("Can not connect to server '{}:{}'".format(server, str(port)))
def _read_packet(self):
'''Reads a packet up to self.buffer_size and leaves it in self.buffer. Returns the number of bytes read.'''
self.buffer = self.socket.recv(self.buffer_size)
if not self.buffer:
raise OSError
self.a = 0
self.b = len(self.buffer)
def _read_nstring(self, N):
'''Tries to read up to N bytes from buffer. Return the number of bytes left.'''
mydata = b''
while True:
m = min(self.b - self.a, N)
if m:
mydata += self.buffer[self.a:self.a + m]
self.a += m
N -= m
if N == 0:
break
self._read_packet()
return mydata
def _read_size(self):
'''Read the size of the incoming string. Returns an int'''
len_bytes = self._read_nstring(1)
size = 0
try:
aux = self._read_nstring(int(len_bytes))
size = int(aux)
except ValueError:
raise Exception('_read_size: Protocol error')
return size
def _write_data(self, data):
'''Send data bytes'''
N = len(data)
a = 0
while a < N:
n = self.socket.send(data[a:])
if not n:
raise OSError
a += n
def _write_size(self, N):
'''Send an int following protocol.'''
length_string = str(N)
self._write_data(str(len(length_string)).encode('ascii')) # first, one byte string with the length of the length-string
self._write_data(length_string.encode('ascii')) # then, the length-string as bytes
def recv(self):
'''Receive a string from the socket. Returns a utf-8 encoded string'''
if not self.socket:
raise OSError('recv: no socket')
data = b''
try:
l = self._read_size()
if l == 0: return ''
data = self._read_nstring(l)
except OSError:
raise OSError('recv: connection closed by peer')
return data.decode('utf-8', errors='replace')
def send(self, data):
'''Send a string to the server, following ukb protocol'''
if not self.socket:
raise OSError('send: no socket')
if isinstance(data, str):
data = data.encode('utf-8')
l = len(data)
if l == 0: return
try:
self._write_size(l)
self._write_data(data)
except OSError:
raise OSError('send: connection closed by peer')
def close(self):
if self.socket:
self.socket.close()
self.socket = None | 0.372163 | 0.165728 |
from __future__ import absolute_import
# Import Salt libs
import salt.config
import salt.exceptions
import salt.utils.http
from salt.exceptions import SaltInvocationError
# Import 3rd-party libs
import salt.ext.six.moves.http_client
from salt.ext import six
from salt.ext.six.moves.urllib.parse import urljoin as _urljoin
# Name under which this execution module is exposed to Salt.
__virtualname__ = 'gitlab'
def __virtual__():
    # Unconditionally load the module under its virtual name.
    return __virtualname__
def _get_config():
    """Load the ``gitlab`` section from the Salt master configuration.

    Raises ``SaltInvocationError`` when the master configuration cannot
    be read; returns an empty dict when no ``gitlab`` section exists.
    """
    try:
        opts = salt.config.client_config('/etc/salt/master')
    except Exception as err:
        raise SaltInvocationError(
            'No GitLab configuration found: {0}'.format(err))
    return opts.get('gitlab', {})
def _http_request(method,
                  path,
                  data=None,
                  formdata=False,
                  formdata_fieldname=None,
                  stream=False,
                  streaming_callback=None,
                  verify_ssl=True,
                  cert=None):
    """
    Return the result of a query to GitLab API.

    Reads the API URL and private token from the master's ``gitlab``
    configuration and performs the request via ``salt.utils.http.query``.
    Raises ``SaltInvocationError`` when the URL or token is missing.
    """
    gitlab_config = _get_config()
    api_url = gitlab_config.get('api_url')
    if not api_url:
        raise SaltInvocationError('No GitLab API URL found')
    token = gitlab_config.get('token')
    if not token:
        raise SaltInvocationError('No GitLab Token found')
    # DELETE responses carry no body worth decoding.
    decode = True
    if method == 'DELETE':
        decode = False
    ca_bundle = None
    ca_certs = gitlab_config.get('ca_certs', None)
    if ca_certs and verify_ssl:
        ca_bundle = ca_certs
    url = _urljoin(api_url, '/api/v4' + six.text_type(path))
    # FIX: removed stray `log.warning(url)` debug call -- `log` is never
    # defined or imported in this module and raised NameError on every call.
    headers = {'PRIVATE-TOKEN': token}
    if method != 'POST':
        headers['Content-Type'] = 'application/json'
    response = salt.utils.http.query(url,
                                     method,
                                     ca_bundle=ca_bundle,
                                     data=data,
                                     decode=decode,
                                     decode_type='auto',
                                     formdata=formdata,
                                     formdata_fieldname=formdata_fieldname,
                                     header_dict=headers,
                                     status=True,
                                     stream=stream,
                                     streaming_callback=streaming_callback,
                                     text=True,
                                     opts=__opts__,
                                     )
    return response
def http_delete(path, **kwargs):
    """
    Make a DELETE request to the Gitlab server.

    Thin wrapper around ``_http_request``; returns its raw response dict.
    """
    return _http_request('DELETE', path, **kwargs)
def http_get(path, **kwargs):
    """
    Send a GET request to GitLab API.

    Streaming responses are returned untouched; otherwise the decoded
    body is returned and non-200 statuses raise ``SaltInvocationError``.
    """
    response = _http_request('GET', path, **kwargs)
    if kwargs.get('stream', False):
        return response
    if response.get('status', None) == salt.ext.six.moves.http_client.OK:
        return response['dict']
    raise SaltInvocationError(response.get('error'))
def http_post(path, data=None, **kwargs):
    """
    Send a POST request to GitLab API.

    Raises ``SaltInvocationError`` unless the server answers 201 Created;
    returns the decoded body (empty dict when absent).
    """
    response = _http_request('POST', path, data=data, **kwargs)
    status = response.get('status', None)
    if status != salt.ext.six.moves.http_client.CREATED:
        raise SaltInvocationError(response.get('error'))
    return response.get('dict', {})
def http_put(path, data=None, **kwargs):
    """
    Send a PUT request to GitLab API.

    Thin wrapper around ``_http_request``; returns its raw response dict.
    """
    return _http_request('PUT', path, data=data, **kwargs)
# Import Salt libs
import salt.config
import salt.exceptions
import salt.utils.http
from salt.exceptions import SaltInvocationError
# Import 3rd-party libs
import salt.ext.six.moves.http_client
from salt.ext import six
from salt.ext.six.moves.urllib.parse import urljoin as _urljoin
__virtualname__ = 'gitlab'
def __virtual__():
return __virtualname__
def _get_config():
"""
Retrieves and return the GitLab's configuration.
"""
try:
master_opts = salt.config.client_config('/etc/salt/master')
except Exception as err:
raise SaltInvocationError(
'No GitLab configuration found: {0}'.format(err))
return master_opts.get('gitlab', {})
def _http_request(method,
path,
data=None,
formdata=False,
formdata_fieldname=None,
stream=False,
streaming_callback=None,
verify_ssl=True,
cert=None):
"""
Return the result of a query to GitLab API.
"""
gitlab_config = _get_config()
api_url = gitlab_config.get('api_url')
if not api_url:
raise SaltInvocationError('No GitLab API URL found')
token = gitlab_config.get('token')
if not token:
raise SaltInvocationError('No GitLab Token found')
decode = True
if method == 'DELETE':
decode = False
ca_bundle = None
ca_certs = gitlab_config.get('ca_certs', None)
if ca_certs and verify_ssl == True:
ca_bundle = ca_certs
url = _urljoin(api_url, '/api/v4' + six.text_type(path))
log.warning(url)
headers = {'PRIVATE-TOKEN': token}
if method != 'POST':
headers['Content-Type'] = 'application/json'
response = salt.utils.http.query(url,
method,
ca_bundle=ca_bundle,
data=data,
decode=decode,
decode_type='auto',
formdata=formdata,
formdata_fieldname=formdata_fieldname,
header_dict=headers,
status=True,
stream=stream,
streaming_callback=streaming_callback,
text=True,
opts=__opts__,
)
return response
def http_delete(path, **kwargs):
"""
Make a DELETE request to the Gitlab server.
"""
return _http_request('DELETE', path, **kwargs)
def http_get(path, **kwargs):
"""
Send a GET request to GitLab API.
"""
response = _http_request('GET', path, **kwargs)
streamed = kwargs.get('stream', False)
if streamed:
return response
if response.get('status', None) != salt.ext.six.moves.http_client.OK:
raise SaltInvocationError(response.get('error'))
return response['dict']
def http_post(path, data=None, **kwargs):
"""
Send a POST request to GitLab API.
"""
response = _http_request('POST',
path,
data=data,
**kwargs)
if response.get('status', None) != salt.ext.six.moves.http_client.CREATED:
raise SaltInvocationError(response.get('error'))
return response.get('dict', {})
def http_put(path, data=None, **kwargs):
"""
Send a PUT request to GitLab API.
"""
return _http_request('PUT', path, data=data, **kwargs) | 0.612541 | 0.066146 |
import asyncio
import dataclasses
import enum
from typing import List, Iterable, Optional
import dataclasses_json
from ... import json_rpc
from . import async_server_connection
class ServerNotInitializedError(json_rpc.JSONRPCException):
    # JSON-RPC error raised when a request arrives before initialization.
    def error_code(self) -> int:
        return -32002
class RequestCancelledError(json_rpc.JSONRPCException):
    # JSON-RPC error reported for a request that was cancelled.
    def error_code(self) -> int:
        return -32800
async def _read_headers(input_channel: async_server_connection.TextReader) -> List[str]:
    """Collect header lines until the blank ``\\r\\n`` separator line is seen."""
    collected: List[str] = []
    while True:
        line = await input_channel.read_until("\r\n")
        if line == "\r\n":
            return collected
        collected.append(line)
def _get_content_length(headers: Iterable[str]) -> int:
try:
for header in headers:
parts = [part.strip().lower() for part in header.split(":", maxsplit=1)]
if len(parts) <= 1:
continue
if parts[0] == "content-length":
return int(parts[1])
raise json_rpc.ParseError(f"Failed to find content length header from {parts}")
except ValueError as error:
raise json_rpc.ParseError(f"Cannot parse content length into integer: {error}")
async def read_json_rpc(
    input_channel: async_server_connection.TextReader,
) -> json_rpc.Request:
    """
    Asynchronously read a JSON-RPC request from the given input channel.
    May raise `json_rpc.ParseError`, `json_rpc.InvalidRequestError` and
    `json_rpc.InvalidParameterError`.
    """
    try:
        received_headers = await _read_headers(input_channel)
        body = await input_channel.read_exactly(_get_content_length(received_headers))
        return json_rpc.Request.from_string(body)
    except asyncio.IncompleteReadError as error:
        raise json_rpc.ParseError(str(error)) from error
async def write_json_rpc(
    output_channel: async_server_connection.TextWriter, response: json_rpc.JSONRPC
) -> None:
    """
    Asynchronously write a JSON-RPC response to the given output channel.
    """
    body = response.serialize()
    message = f"Content-Length: {len(body)}\r\n\r\n{body}"
    await output_channel.write(message)
class SerializationSafeIntEnum(enum.IntEnum):
    """IntEnum whose ``repr()`` is the bare integer value, keeping it
    serialization-friendly."""

    def __repr__(self) -> str:
        # FIX: the original defined `__repr` (missing trailing underscores),
        # which is privately name-mangled and never invoked by repr().
        return str(self.value)
class DiagnosticTag(SerializationSafeIntEnum):
    # LSP DiagnosticTag constants.
    UNNECESSARY = 1
    DEPRECATED = 2
class TextDocumentSyncKind(SerializationSafeIntEnum):
    # LSP TextDocumentSyncKind: how document changes are synced to the server.
    NONE = 0
    FULL = 1
    INCREMENTAL = 2
@dataclasses_json.dataclass_json(
    letter_case=dataclasses_json.LetterCase.CAMEL,
    undefined=dataclasses_json.Undefined.EXCLUDE,
)
@dataclasses.dataclass(frozen=True)
class Info:
    """LSP ``clientInfo``/``serverInfo`` payload: a name plus optional version."""

    name: str
    version: Optional[str] = None
@dataclasses_json.dataclass_json(
    letter_case=dataclasses_json.LetterCase.CAMEL,
    undefined=dataclasses_json.Undefined.EXCLUDE,
)
@dataclasses.dataclass(frozen=True)
class TextDocumentSyncClientCapabilities:
    """Client capabilities for text-document synchronization."""

    did_save: bool = False
@dataclasses_json.dataclass_json(
    letter_case=dataclasses_json.LetterCase.CAMEL,
    undefined=dataclasses_json.Undefined.EXCLUDE,
)
@dataclasses.dataclass(frozen=True)
class PublishDiagnosticsClientTagSupport:
    """Diagnostic tags the client understands in ``publishDiagnostics``."""

    value_set: List[DiagnosticTag] = dataclasses.field(default_factory=list)
@dataclasses_json.dataclass_json(
    letter_case=dataclasses_json.LetterCase.CAMEL,
    undefined=dataclasses_json.Undefined.EXCLUDE,
)
@dataclasses.dataclass(frozen=True)
class PublishDiagnosticsClientCapabilities:
    """Client capabilities for the ``textDocument/publishDiagnostics`` notification."""

    related_information: bool = False
    tag_support: Optional[PublishDiagnosticsClientTagSupport] = None
    version_support: bool = False
@dataclasses_json.dataclass_json(
    letter_case=dataclasses_json.LetterCase.CAMEL,
    undefined=dataclasses_json.Undefined.EXCLUDE,
)
@dataclasses.dataclass(frozen=True)
class TextDocumentClientCapabilities:
    """Text-document-specific client capabilities."""

    synchronization: Optional[TextDocumentSyncClientCapabilities] = None
    publish_diagnostics: Optional[PublishDiagnosticsClientCapabilities] = None
@dataclasses_json.dataclass_json(
    letter_case=dataclasses_json.LetterCase.CAMEL,
    undefined=dataclasses_json.Undefined.EXCLUDE,
)
@dataclasses.dataclass(frozen=True)
class ClientCapabilities:
    """Top-level client capabilities sent with the ``initialize`` request."""

    text_document: Optional[TextDocumentClientCapabilities] = None
@dataclasses_json.dataclass_json(
    letter_case=dataclasses_json.LetterCase.CAMEL,
    undefined=dataclasses_json.Undefined.EXCLUDE,
)
@dataclasses.dataclass(frozen=True)
class SaveOptions:
    """Server options for ``textDocument/didSave`` notifications."""

    include_text: Optional[bool] = None
@dataclasses_json.dataclass_json(
    letter_case=dataclasses_json.LetterCase.CAMEL,
    undefined=dataclasses_json.Undefined.EXCLUDE,
)
@dataclasses.dataclass(frozen=True)
class TextDocumentSyncOptions:
    """Server-side text-document synchronization options."""

    open_close: bool = False
    change: TextDocumentSyncKind = TextDocumentSyncKind.NONE
    save: Optional[SaveOptions] = None
@dataclasses_json.dataclass_json(
    letter_case=dataclasses_json.LetterCase.CAMEL,
    undefined=dataclasses_json.Undefined.EXCLUDE,
)
@dataclasses.dataclass(frozen=True)
class ServerCapabilities:
    """Capabilities the server reports back in the ``initialize`` response."""

    text_document_sync: Optional[TextDocumentSyncOptions] = None
@dataclasses_json.dataclass_json(
    letter_case=dataclasses_json.LetterCase.CAMEL,
    undefined=dataclasses_json.Undefined.EXCLUDE,
)
@dataclasses.dataclass(frozen=True)
class InitializeParameters:
    """Parameters of the LSP ``initialize`` request."""

    capabilities: ClientCapabilities
    process_id: Optional[int] = None
    client_info: Optional[Info] = None

    @staticmethod
    def from_json_rpc_parameters(
        parameters: json_rpc.Parameters,
    ) -> "InitializeParameters":
        """Parse `initialize` parameters from a JSON-RPC request.

        Raises ``json_rpc.InvalidRequestError`` when the parameters are
        positional or fail schema validation.
        """
        if not isinstance(parameters, json_rpc.ByNameParameters):
            raise json_rpc.InvalidRequestError(
                "Parameters for initialize request must be passed by name"
            )
        try:
            # pyre-fixme[16]: Pyre doesn't understand `dataclasses_json`
            return InitializeParameters.schema().load(parameters.values)
        except (KeyError, ValueError, dataclasses_json.mm.ValidationError) as error:
            raise json_rpc.InvalidRequestError(str(error)) from error
@dataclasses_json.dataclass_json(
    letter_case=dataclasses_json.LetterCase.CAMEL,
    undefined=dataclasses_json.Undefined.EXCLUDE,
)
@dataclasses.dataclass(frozen=True)
class InitializeResult:
    """Result payload of the LSP ``initialize`` request."""

    capabilities: ServerCapabilities
    server_info: Optional[Info] = None
import asyncio
import dataclasses
import enum
from typing import List, Iterable, Optional
import dataclasses_json
from ... import json_rpc
from . import async_server_connection
class ServerNotInitializedError(json_rpc.JSONRPCException):
def error_code(self) -> int:
return -32002
class RequestCancelledError(json_rpc.JSONRPCException):
def error_code(self) -> int:
return -32800
async def _read_headers(input_channel: async_server_connection.TextReader) -> List[str]:
headers = []
header = await input_channel.read_until("\r\n")
while header != "\r\n":
headers.append(header)
header = await input_channel.read_until("\r\n")
return headers
def _get_content_length(headers: Iterable[str]) -> int:
try:
for header in headers:
parts = [part.strip().lower() for part in header.split(":", maxsplit=1)]
if len(parts) <= 1:
continue
if parts[0] == "content-length":
return int(parts[1])
raise json_rpc.ParseError(f"Failed to find content length header from {parts}")
except ValueError as error:
raise json_rpc.ParseError(f"Cannot parse content length into integer: {error}")
async def read_json_rpc(
input_channel: async_server_connection.TextReader,
) -> json_rpc.Request:
"""
Asynchronously read a JSON-RPC request from the given input channel.
May raise `json_rpc.ParseError`, `json_rpc.InvalidRequestError` and
`json_prc.InvalidParameterError`.
"""
try:
headers = await _read_headers(input_channel)
content_length = _get_content_length(headers)
payload = await input_channel.read_exactly(content_length)
return json_rpc.Request.from_string(payload)
except asyncio.IncompleteReadError as error:
raise json_rpc.ParseError(str(error)) from error
async def write_json_rpc(
output_channel: async_server_connection.TextWriter, response: json_rpc.JSONRPC
) -> None:
"""
Asynchronously write a JSON-RPC response to the given output channel.
"""
payload = response.serialize()
await output_channel.write(f"Content-Length: {len(payload)}\r\n\r\n{payload}")
class SerializationSafeIntEnum(enum.IntEnum):
def __repr(self) -> str:
return str(self.value)
class DiagnosticTag(SerializationSafeIntEnum):
UNNECESSARY = 1
DEPRECATED = 2
class TextDocumentSyncKind(SerializationSafeIntEnum):
NONE = 0
FULL = 1
INCREMENTAL = 2
@dataclasses_json.dataclass_json(
letter_case=dataclasses_json.LetterCase.CAMEL,
undefined=dataclasses_json.Undefined.EXCLUDE,
)
@dataclasses.dataclass(frozen=True)
class Info:
name: str
version: Optional[str] = None
@dataclasses_json.dataclass_json(
letter_case=dataclasses_json.LetterCase.CAMEL,
undefined=dataclasses_json.Undefined.EXCLUDE,
)
@dataclasses.dataclass(frozen=True)
class TextDocumentSyncClientCapabilities:
did_save: bool = False
@dataclasses_json.dataclass_json(
letter_case=dataclasses_json.LetterCase.CAMEL,
undefined=dataclasses_json.Undefined.EXCLUDE,
)
@dataclasses.dataclass(frozen=True)
class PublishDiagnosticsClientTagSupport:
value_set: List[DiagnosticTag] = dataclasses.field(default_factory=list)
@dataclasses_json.dataclass_json(
letter_case=dataclasses_json.LetterCase.CAMEL,
undefined=dataclasses_json.Undefined.EXCLUDE,
)
@dataclasses.dataclass(frozen=True)
class PublishDiagnosticsClientCapabilities:
related_information: bool = False
tag_support: Optional[PublishDiagnosticsClientTagSupport] = None
version_support: bool = False
@dataclasses_json.dataclass_json(
letter_case=dataclasses_json.LetterCase.CAMEL,
undefined=dataclasses_json.Undefined.EXCLUDE,
)
@dataclasses.dataclass(frozen=True)
class TextDocumentClientCapabilities:
synchronization: Optional[TextDocumentSyncClientCapabilities] = None
publish_diagnostics: Optional[PublishDiagnosticsClientCapabilities] = None
@dataclasses_json.dataclass_json(
letter_case=dataclasses_json.LetterCase.CAMEL,
undefined=dataclasses_json.Undefined.EXCLUDE,
)
@dataclasses.dataclass(frozen=True)
class ClientCapabilities:
text_document: Optional[TextDocumentClientCapabilities] = None
@dataclasses_json.dataclass_json(
letter_case=dataclasses_json.LetterCase.CAMEL,
undefined=dataclasses_json.Undefined.EXCLUDE,
)
@dataclasses.dataclass(frozen=True)
class SaveOptions:
include_text: Optional[bool] = None
@dataclasses_json.dataclass_json(
letter_case=dataclasses_json.LetterCase.CAMEL,
undefined=dataclasses_json.Undefined.EXCLUDE,
)
@dataclasses.dataclass(frozen=True)
class TextDocumentSyncOptions:
open_close: bool = False
change: TextDocumentSyncKind = TextDocumentSyncKind.NONE
save: Optional[SaveOptions] = None
@dataclasses_json.dataclass_json(
letter_case=dataclasses_json.LetterCase.CAMEL,
undefined=dataclasses_json.Undefined.EXCLUDE,
)
@dataclasses.dataclass(frozen=True)
class ServerCapabilities:
text_document_sync: Optional[TextDocumentSyncOptions] = None
@dataclasses_json.dataclass_json(
letter_case=dataclasses_json.LetterCase.CAMEL,
undefined=dataclasses_json.Undefined.EXCLUDE,
)
@dataclasses.dataclass(frozen=True)
class InitializeParameters:
capabilities: ClientCapabilities
process_id: Optional[int] = None
client_info: Optional[Info] = None
@staticmethod
def from_json_rpc_parameters(
parameters: json_rpc.Parameters,
) -> "InitializeParameters":
if not isinstance(parameters, json_rpc.ByNameParameters):
raise json_rpc.InvalidRequestError(
"Parameters for initialize request must be passed by name"
)
try:
# pyre-fixme[16]: Pyre doesn't understand `dataclasses_json`
return InitializeParameters.schema().load(parameters.values)
except (KeyError, ValueError, dataclasses_json.mm.ValidationError) as error:
raise json_rpc.InvalidRequestError(str(error)) from error
@dataclasses_json.dataclass_json(
letter_case=dataclasses_json.LetterCase.CAMEL,
undefined=dataclasses_json.Undefined.EXCLUDE,
)
@dataclasses.dataclass(frozen=True)
class InitializeResult:
capabilities: ServerCapabilities
server_info: Optional[Info] = None | 0.831109 | 0.13852 |
import tkinter as tk
import webbrowser as wb
from datetime import datetime, timedelta
from logging import getLogger, basicConfig, Formatter, WARNING
from logging.handlers import RotatingFileHandler
from operator import itemgetter
from os import getenv
from time import sleep
from schedule import every, run_pending
from win32com.client import Dispatch
__name__ = 'notification_system'
def logging(msg):
access = 'a'
filename = getenv('AppData') + r'\notifications.log'
size = 50 * 1024
basicConfig(level=WARNING)
file_handler = RotatingFileHandler(filename, access, maxBytes=size,
backupCount=2, encoding=None, delay=False)
file_format = Formatter('%(asctime)s - %(levelname)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
file_handler.setLevel(WARNING)
file_handler.setFormatter(file_format)
logger = getLogger(__name__)
logger.addHandler(file_handler)
logger.warning(msg)
sleep(1)
wb.open(filename)
def send_alert_email(site_id, priority):
outlook = Dispatch('outlook.application')
msg = outlook.CreateItem(0)
msg.to = '<EMAIL>'
msg.Subject = site_id + ' ' + priority
msg.HTMLBody = '<html><body><p>Time for another update!\nIt is important to' \
'SLA\'s and the customer\nthat an update be sent promptly!' \
'</p></body></html>'
try:
msg.Send()
except Exception:
logging('Problem sending email')
pass
def check_alarms():
for item in watching:
if item[1] <= datetime.now():
send_alert_email(item[0], item[2])
item[1] = item[1] + timedelta(minutes=30)
display_watched()
def run_schedule():
run_pending()
root.after(1000, run_schedule)
def add_site(_):
entry = []
site = site_entry.get()
time_down = time_entry.get()
priority = priority_choice.get()
alarm = ''
if site == '':
return
if time_down == '' or priority == '':
logging('Bad timer statement')
return
if time_down != '':
time_down = int(time_down)
site_choice.set('')
time_down_choice.set('')
site_entry.focus()
if priority == 'P1':
alarm = datetime.now() + timedelta(minutes=30 - time_down)
elif priority == 'P2':
alarm = datetime.now() + timedelta(minutes=240 - time_down)
elif priority == 'P3':
alarm = datetime.now() + timedelta(minutes=480 - time_down)
entry.append(site)
entry.append(alarm)
entry.append(priority)
watching.append(entry)
display_watched()
def display_watched():
global watching
site_list.delete(0, 'end')
watching = sorted(watching, key=itemgetter(1))
for item in watching:
site_list.insert('end', item[0] + ' - ' + item[1].strftime('%H:%M') + ' - ' + item[2])
def remove_alarm():
try:
selection = int(site_list.curselection()[0])
del watching[selection]
site_list.delete(selection)
except IndexError:
logging('Problem removing selection')
return
def focus_next(event):
event.widget.tk_focusNext().focus()
return ('break')
def change_mode(tog=[0]):
tog[0] = not tog[0]
frames = (entry_frame, list_frame)
widgets = (site_label, time_label, site_entry, time_entry, priority_c,
add_button, site_list, delete_button, priority_c['menu'],
change_button)
nbg = '#000000'
nfg = '#66FFFF'
dbg = '#FFFFFF'
dfg = '#000000'
if tog[0]:
root.option_add('*Background', dbg)
root.option_add('*Foreground', dfg)
root.configure(background=dbg, highlightbackground=dbg, highlightcolor=dfg)
for i in frames:
i.configure(background=dbg, highlightbackground=dbg, highlightcolor=dfg)
for i in widgets:
i.configure(background=dbg, foreground=dfg)
else:
root.option_add('*Background', nbg)
root.option_add('*Foreground', nfg)
root.configure(background=nbg, highlightbackground=nbg, highlightcolor=nfg)
for i in frames:
i.configure(background=nbg, highlightbackground=nbg, highlightcolor=nfg)
for i in widgets:
i.configure(background=nbg, foreground=nfg)
root = tk.Tk()
root.title('Notification Reminder')
root.resizable(False, False)
root.focusmodel('active')
root.geometry('310x230+200+200')
site_choice = tk.Variable(root)
priority_choice = tk.Variable(root)
time_down_choice = tk.Variable(root)
email_recipient = tk.Variable(root)
watching = []
entry_frame = tk.Frame(root)
site_label = tk.Label(entry_frame, text='Site:')
site_entry = tk.Entry(entry_frame, textvariable=site_choice, width=8)
time_label = tk.Label(entry_frame, text='Time Down:')
time_entry = tk.Entry(entry_frame, textvariable=time_down_choice, width=8)
priority_choice.set('Priority')
priority_c = tk.OptionMenu(entry_frame, priority_choice, 'P1', 'P2', 'P3')
priority_c['highlightthickness'] = 0
entry_frame.grid(row=0, column=0)
site_label.grid(row=0, column=0, sticky='e')
site_entry.grid(row=0, column=1, padx=5)
time_label.grid(row=0, column=2)
time_entry.grid(row=0, column=3, padx=5)
priority_c.grid(row=0, column=4, padx=5)
list_frame = tk.Frame(root)
site_list = tk.Listbox(list_frame, width=50)
add_button = tk.Button(list_frame, text='Add', borderwidth=0.5,
command=lambda: add_site(watching))
delete_button = tk.Button(list_frame, text='Remove', borderwidth=0.5,
command=remove_alarm)
change_button = tk.Button(list_frame, text='Change', borderwidth=0.5,
command=change_mode)
list_frame.grid(row=1, column=0)
site_list.grid(row=0, column=0, columnspan=3)
add_button.grid(row=2, column=0, padx=5, pady=5)
delete_button.grid(row=2, column=1, padx=5, pady=5)
change_button.grid(row=2, column=2, padx=5, pady=5)
every(30).seconds.do(check_alarms)
root.after(1000, run_schedule)
site_entry.bind('<Tab>', focus_next)
site_entry.bind('<Return>', add_site)
time_entry.bind('<Return>', add_site)
site_entry.focus()
root.mainloop() | notifReminder.py | import tkinter as tk
import webbrowser as wb
from datetime import datetime, timedelta
from logging import getLogger, basicConfig, Formatter, WARNING
from logging.handlers import RotatingFileHandler
from operator import itemgetter
from os import getenv
from time import sleep
from schedule import every, run_pending
from win32com.client import Dispatch
__name__ = 'notification_system'
def logging(msg):
access = 'a'
filename = getenv('AppData') + r'\notifications.log'
size = 50 * 1024
basicConfig(level=WARNING)
file_handler = RotatingFileHandler(filename, access, maxBytes=size,
backupCount=2, encoding=None, delay=False)
file_format = Formatter('%(asctime)s - %(levelname)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
file_handler.setLevel(WARNING)
file_handler.setFormatter(file_format)
logger = getLogger(__name__)
logger.addHandler(file_handler)
logger.warning(msg)
sleep(1)
wb.open(filename)
def send_alert_email(site_id, priority):
outlook = Dispatch('outlook.application')
msg = outlook.CreateItem(0)
msg.to = '<EMAIL>'
msg.Subject = site_id + ' ' + priority
msg.HTMLBody = '<html><body><p>Time for another update!\nIt is important to' \
'SLA\'s and the customer\nthat an update be sent promptly!' \
'</p></body></html>'
try:
msg.Send()
except Exception:
logging('Problem sending email')
pass
def check_alarms():
for item in watching:
if item[1] <= datetime.now():
send_alert_email(item[0], item[2])
item[1] = item[1] + timedelta(minutes=30)
display_watched()
def run_schedule():
run_pending()
root.after(1000, run_schedule)
def add_site(_):
entry = []
site = site_entry.get()
time_down = time_entry.get()
priority = priority_choice.get()
alarm = ''
if site == '':
return
if time_down == '' or priority == '':
logging('Bad timer statement')
return
if time_down != '':
time_down = int(time_down)
site_choice.set('')
time_down_choice.set('')
site_entry.focus()
if priority == 'P1':
alarm = datetime.now() + timedelta(minutes=30 - time_down)
elif priority == 'P2':
alarm = datetime.now() + timedelta(minutes=240 - time_down)
elif priority == 'P3':
alarm = datetime.now() + timedelta(minutes=480 - time_down)
entry.append(site)
entry.append(alarm)
entry.append(priority)
watching.append(entry)
display_watched()
def display_watched():
global watching
site_list.delete(0, 'end')
watching = sorted(watching, key=itemgetter(1))
for item in watching:
site_list.insert('end', item[0] + ' - ' + item[1].strftime('%H:%M') + ' - ' + item[2])
def remove_alarm():
try:
selection = int(site_list.curselection()[0])
del watching[selection]
site_list.delete(selection)
except IndexError:
logging('Problem removing selection')
return
def focus_next(event):
event.widget.tk_focusNext().focus()
return ('break')
def change_mode(tog=[0]):
tog[0] = not tog[0]
frames = (entry_frame, list_frame)
widgets = (site_label, time_label, site_entry, time_entry, priority_c,
add_button, site_list, delete_button, priority_c['menu'],
change_button)
nbg = '#000000'
nfg = '#66FFFF'
dbg = '#FFFFFF'
dfg = '#000000'
if tog[0]:
root.option_add('*Background', dbg)
root.option_add('*Foreground', dfg)
root.configure(background=dbg, highlightbackground=dbg, highlightcolor=dfg)
for i in frames:
i.configure(background=dbg, highlightbackground=dbg, highlightcolor=dfg)
for i in widgets:
i.configure(background=dbg, foreground=dfg)
else:
root.option_add('*Background', nbg)
root.option_add('*Foreground', nfg)
root.configure(background=nbg, highlightbackground=nbg, highlightcolor=nfg)
for i in frames:
i.configure(background=nbg, highlightbackground=nbg, highlightcolor=nfg)
for i in widgets:
i.configure(background=nbg, foreground=nfg)
root = tk.Tk()
root.title('Notification Reminder')
root.resizable(False, False)
root.focusmodel('active')
root.geometry('310x230+200+200')
site_choice = tk.Variable(root)
priority_choice = tk.Variable(root)
time_down_choice = tk.Variable(root)
email_recipient = tk.Variable(root)
watching = []
entry_frame = tk.Frame(root)
site_label = tk.Label(entry_frame, text='Site:')
site_entry = tk.Entry(entry_frame, textvariable=site_choice, width=8)
time_label = tk.Label(entry_frame, text='Time Down:')
time_entry = tk.Entry(entry_frame, textvariable=time_down_choice, width=8)
priority_choice.set('Priority')
priority_c = tk.OptionMenu(entry_frame, priority_choice, 'P1', 'P2', 'P3')
priority_c['highlightthickness'] = 0
entry_frame.grid(row=0, column=0)
site_label.grid(row=0, column=0, sticky='e')
site_entry.grid(row=0, column=1, padx=5)
time_label.grid(row=0, column=2)
time_entry.grid(row=0, column=3, padx=5)
priority_c.grid(row=0, column=4, padx=5)
list_frame = tk.Frame(root)
site_list = tk.Listbox(list_frame, width=50)
add_button = tk.Button(list_frame, text='Add', borderwidth=0.5,
command=lambda: add_site(watching))
delete_button = tk.Button(list_frame, text='Remove', borderwidth=0.5,
command=remove_alarm)
change_button = tk.Button(list_frame, text='Change', borderwidth=0.5,
command=change_mode)
list_frame.grid(row=1, column=0)
site_list.grid(row=0, column=0, columnspan=3)
add_button.grid(row=2, column=0, padx=5, pady=5)
delete_button.grid(row=2, column=1, padx=5, pady=5)
change_button.grid(row=2, column=2, padx=5, pady=5)
every(30).seconds.do(check_alarms)
root.after(1000, run_schedule)
site_entry.bind('<Tab>', focus_next)
site_entry.bind('<Return>', add_site)
time_entry.bind('<Return>', add_site)
site_entry.focus()
root.mainloop() | 0.225076 | 0.058453 |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap, BoundaryNorm
from collections import OrderedDict
from matplotlib.animation import FuncAnimation
from gym import utils
import sys
from six import StringIO, b
from IPython.display import HTML
COLORS = OrderedDict([
(b"W" , [160, 160, 160]),
(b"S" , [224, 224, 224]),
(b"E" , [224, 224, 224]),
(b"F" , [255, 0, 0]),
(b"H" , [20, 20, 20]),
(b"G" , [50, 255, 50]),
(b"P" , [51, 150, 255])
])
class Renderer:
"""
Rendering class for the gridworld environment. Three rendering
options are available; plot rendering, render and store(buffer)
and stdout. Plot rendering renders the grid whenever the render
method is called while buffer rendering stores the grid's rendered
image in the buffer for future animations. String(stdout) rendering
simply prints the grid or return the string at each call.
Args:
gridmap: Numpy array of char datatype of the grid map.
Methods:
visual_render: Directly renders the grid to the screen.
buffer_render: Renders the grid map and stores it.
animate: Animates the images in the buffer. Three modes
are available namely plot, js and html. Plot mode animates
and renders on the screen while js and html return js or
html files mostly for ipython.
analysis: Animates the value distribution and the greedy
policy constructed from the values in the buffer.
"""
def __init__(self, gridmap):
self.gridmap = gridmap
ncolors = len(COLORS)
tile_to_int = {c: i for i, c in zip(range(ncolors), COLORS.keys())}
self.background = np.array([tile_to_int[tile] for tile in self.gridmap.ravel()])
self.background = self.background.reshape(self.gridmap.shape)
bounds = np.linspace(0, len(COLORS), num=len(COLORS)+1)
self.norm = BoundaryNorm(bounds, len(COLORS))
self.cmap = ListedColormap([[v/255 for v in rgb] for rgb in COLORS.values()])
self.reset_buffer()
def visaul_render(self, state):
img = np.copy(self.background)
img[state] = 7
try:
self.plot.set_array(img)
except AttributeError:
self.reset_figure()
self.figure.canvas.draw()
self.figure.canvas.flush_events()
def buffer_render(self, state, info=None):
img = np.copy(self.background)
img[state] = 7
self.frames.append(img if info is None else (img, info))
def reset_figure(self):
plt.ion()
plt.axis('off')
self.figure, self.ax = plt.subplots()
self.plot = self.ax.imshow(self.background, cmap=self.cmap, norm=self.norm)
def reset_buffer(self):
self.frames = []
def animate(self, mode="js"):
plt.ioff()
heigth, width = self.background.shape
ratio = width/heigth
figure, ax = plt.subplots(figsize=(3*ratio,3))
im = plt.imshow(self.background, cmap=self.cmap, norm=self.norm, animated=True)
title = ax.text(0.5,0.90, "", bbox={'facecolor':'w', 'alpha':0.5, 'pad':5},
transform=ax.transAxes, ha="center")
ax.axis('off')
def update(i):
data = self.frames[i]
if isinstance(data, tuple):
img, text = data
title.set_text(text)
else:
img = data
im.set_array(img)
return im, title
ani = FuncAnimation(figure, update, frames=len(self.frames), interval=1000/60, blit=True, repeat=False)
if mode == "html":
return HTML(ani.to_html5_video())
elif mode == "js":
return HTML(ani.to_jshtml())
elif mode == "plot":
plt.show()
def string_render(self, state, mode):
outfile = StringIO() if mode == 'ansi' else sys.stdout
row, col = state
desc = self.gridmap.tolist()
desc = [[c.decode('utf-8') for c in line] for line in desc]
desc[row][col] = utils.colorize(desc[row][col], "red", highlight=True)
outfile.write("\n")
outfile.write("\n".join(''.join(line) for line in desc)+"\n")
if mode == 'ansi':
return outfile
def analysis(self, value_buffer, mode="notebook"):
plt.ioff()
heigth, width = self.background.shape
ratio = width/heigth
figure, ax = plt.subplots(figsize=(3*ratio,3))
X, Y = np.meshgrid(np.arange(heigth), np.arange(width))
ax.axis('off')
plt.imshow(self.background, cmap=self.cmap, norm=self.norm, animated=False)
im = plt.imshow(np.zeros(shape=self.background.shape), cmap="Blues", vmin=0, vmax=1, animated=True, alpha=0.5)
coords = np.argwhere(self.background != 0)
quiver = plt.quiver(coords[:, 1], coords[:, 0], *([np.ones(shape=X.shape)]*2))
arr_u = np.array([-1, 0, 1, 0])
arr_v = np.array([0, -1, 0, 1])
def update(i):
values = np.array([max(value_buffer[i][(x, y)]) for (x, y) in zip(X.ravel(), Y.ravel())])
actions = np.array([max(range(4), key=lambda a: value_buffer[i][(x, y)][a]) for (x, y) in coords])
quiver_u = arr_u[actions]
quiver_v = arr_v[actions]
quiver.set_UVC(quiver_u, quiver_v)
im.set_array(values.reshape(X.shape).transpose())
return im,
ani = FuncAnimation(figure, update, frames=len(value_buffer), interval=1000/60, blit=True, repeat=True)
if mode == "notebook":
return HTML(ani.to_jshtml())
elif mode == "plot":
plt.show()
def animater(buffer, mode="js"):
""" Animates the buffer for three modes.
"""
plt.ioff()
heigth, width, _ = buffer[0].shape
ratio = width/heigth
figure, ax = plt.subplots(figsize=(4*ratio,4))
im = plt.imshow(buffer[0])
ax.axis('off')
def update(i):
im.set_array(buffer[i])
return im,
ani = FuncAnimation(figure, update, frames=len(buffer), interval=1000/60, blit=True, repeat=False)
if mode == "html":
return HTML(ani.to_html5_video())
elif mode == "js":
return HTML(ani.to_jshtml())
elif mode == "plot":
plt.show()
def comparison(*log_name_pairs, texts=[[""]*3]):
""" Plots the given logs. There will be as many plots as
the length of the texts argument. Logs will be plotted on
top of each other so that they can be compared. For each
log, mean value is plotted and the area between the
+std and -std of the mean will be shaded.
"""
plt.ioff()
plt.close()
def plot_texts(title, xlabel, ylabel):
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
for i, (title, xlabel, ylabel) in enumerate(texts):
for logs, name in log_name_pairs:
smoothed_logs = np.stack([smoother([x[i] for x in log], 7) for log in logs])
std_logs = np.std(smoothed_logs, axis=0)
mean_logs = np.mean(smoothed_logs, axis=0)
max_logs = np.max(smoothed_logs, axis=0)
min_logs = np.min(smoothed_logs, axis=0)
plot_texts(title, xlabel, ylabel)
plt.plot(mean_logs, label=name)
plt.legend()
plt.fill_between(np.arange(len(mean_logs)), np.minimum(mean_logs+std_logs, max_logs), np.minimum(mean_logs-std_logs, min_logs), alpha=0.4)
plt.show()
def smoother(array, ws):
return np.array([sum(array[i:i+ws])/ws for i in range(len(array) - ws)]) | blg604ehw1/env/render.py | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap, BoundaryNorm
from collections import OrderedDict
from matplotlib.animation import FuncAnimation
from gym import utils
import sys
from six import StringIO, b
from IPython.display import HTML
COLORS = OrderedDict([
(b"W" , [160, 160, 160]),
(b"S" , [224, 224, 224]),
(b"E" , [224, 224, 224]),
(b"F" , [255, 0, 0]),
(b"H" , [20, 20, 20]),
(b"G" , [50, 255, 50]),
(b"P" , [51, 150, 255])
])
class Renderer:
"""
Rendering class for the gridworld environment. Three rendering
options are available; plot rendering, render and store(buffer)
and stdout. Plot rendering renders the grid whenever the render
method is called while buffer rendering stores the grid's rendered
image in the buffer for future animations. String(stdout) rendering
simply prints the grid or return the string at each call.
Args:
gridmap: Numpy array of char datatype of the grid map.
Methods:
visual_render: Directly renders the grid to the screen.
buffer_render: Renders the grid map and stores it.
animate: Animates the images in the buffer. Three modes
are available namely plot, js and html. Plot mode animates
and renders on the screen while js and html return js or
html files mostly for ipython.
analysis: Animates the value distribution and the greedy
policy constructed from the values in the buffer.
"""
def __init__(self, gridmap):
self.gridmap = gridmap
ncolors = len(COLORS)
tile_to_int = {c: i for i, c in zip(range(ncolors), COLORS.keys())}
self.background = np.array([tile_to_int[tile] for tile in self.gridmap.ravel()])
self.background = self.background.reshape(self.gridmap.shape)
bounds = np.linspace(0, len(COLORS), num=len(COLORS)+1)
self.norm = BoundaryNorm(bounds, len(COLORS))
self.cmap = ListedColormap([[v/255 for v in rgb] for rgb in COLORS.values()])
self.reset_buffer()
def visaul_render(self, state):
img = np.copy(self.background)
img[state] = 7
try:
self.plot.set_array(img)
except AttributeError:
self.reset_figure()
self.figure.canvas.draw()
self.figure.canvas.flush_events()
def buffer_render(self, state, info=None):
img = np.copy(self.background)
img[state] = 7
self.frames.append(img if info is None else (img, info))
def reset_figure(self):
plt.ion()
plt.axis('off')
self.figure, self.ax = plt.subplots()
self.plot = self.ax.imshow(self.background, cmap=self.cmap, norm=self.norm)
def reset_buffer(self):
self.frames = []
def animate(self, mode="js"):
plt.ioff()
heigth, width = self.background.shape
ratio = width/heigth
figure, ax = plt.subplots(figsize=(3*ratio,3))
im = plt.imshow(self.background, cmap=self.cmap, norm=self.norm, animated=True)
title = ax.text(0.5,0.90, "", bbox={'facecolor':'w', 'alpha':0.5, 'pad':5},
transform=ax.transAxes, ha="center")
ax.axis('off')
def update(i):
data = self.frames[i]
if isinstance(data, tuple):
img, text = data
title.set_text(text)
else:
img = data
im.set_array(img)
return im, title
ani = FuncAnimation(figure, update, frames=len(self.frames), interval=1000/60, blit=True, repeat=False)
if mode == "html":
return HTML(ani.to_html5_video())
elif mode == "js":
return HTML(ani.to_jshtml())
elif mode == "plot":
plt.show()
def string_render(self, state, mode):
outfile = StringIO() if mode == 'ansi' else sys.stdout
row, col = state
desc = self.gridmap.tolist()
desc = [[c.decode('utf-8') for c in line] for line in desc]
desc[row][col] = utils.colorize(desc[row][col], "red", highlight=True)
outfile.write("\n")
outfile.write("\n".join(''.join(line) for line in desc)+"\n")
if mode == 'ansi':
return outfile
def analysis(self, value_buffer, mode="notebook"):
plt.ioff()
heigth, width = self.background.shape
ratio = width/heigth
figure, ax = plt.subplots(figsize=(3*ratio,3))
X, Y = np.meshgrid(np.arange(heigth), np.arange(width))
ax.axis('off')
plt.imshow(self.background, cmap=self.cmap, norm=self.norm, animated=False)
im = plt.imshow(np.zeros(shape=self.background.shape), cmap="Blues", vmin=0, vmax=1, animated=True, alpha=0.5)
coords = np.argwhere(self.background != 0)
quiver = plt.quiver(coords[:, 1], coords[:, 0], *([np.ones(shape=X.shape)]*2))
arr_u = np.array([-1, 0, 1, 0])
arr_v = np.array([0, -1, 0, 1])
def update(i):
values = np.array([max(value_buffer[i][(x, y)]) for (x, y) in zip(X.ravel(), Y.ravel())])
actions = np.array([max(range(4), key=lambda a: value_buffer[i][(x, y)][a]) for (x, y) in coords])
quiver_u = arr_u[actions]
quiver_v = arr_v[actions]
quiver.set_UVC(quiver_u, quiver_v)
im.set_array(values.reshape(X.shape).transpose())
return im,
ani = FuncAnimation(figure, update, frames=len(value_buffer), interval=1000/60, blit=True, repeat=True)
if mode == "notebook":
return HTML(ani.to_jshtml())
elif mode == "plot":
plt.show()
def animater(buffer, mode="js"):
""" Animates the buffer for three modes.
"""
plt.ioff()
heigth, width, _ = buffer[0].shape
ratio = width/heigth
figure, ax = plt.subplots(figsize=(4*ratio,4))
im = plt.imshow(buffer[0])
ax.axis('off')
def update(i):
im.set_array(buffer[i])
return im,
ani = FuncAnimation(figure, update, frames=len(buffer), interval=1000/60, blit=True, repeat=False)
if mode == "html":
return HTML(ani.to_html5_video())
elif mode == "js":
return HTML(ani.to_jshtml())
elif mode == "plot":
plt.show()
def comparison(*log_name_pairs, texts=[[""]*3]):
""" Plots the given logs. There will be as many plots as
the length of the texts argument. Logs will be plotted on
top of each other so that they can be compared. For each
log, mean value is plotted and the area between the
+std and -std of the mean will be shaded.
"""
plt.ioff()
plt.close()
def plot_texts(title, xlabel, ylabel):
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
for i, (title, xlabel, ylabel) in enumerate(texts):
for logs, name in log_name_pairs:
smoothed_logs = np.stack([smoother([x[i] for x in log], 7) for log in logs])
std_logs = np.std(smoothed_logs, axis=0)
mean_logs = np.mean(smoothed_logs, axis=0)
max_logs = np.max(smoothed_logs, axis=0)
min_logs = np.min(smoothed_logs, axis=0)
plot_texts(title, xlabel, ylabel)
plt.plot(mean_logs, label=name)
plt.legend()
plt.fill_between(np.arange(len(mean_logs)), np.minimum(mean_logs+std_logs, max_logs), np.minimum(mean_logs-std_logs, min_logs), alpha=0.4)
plt.show()
def smoother(array, ws):
return np.array([sum(array[i:i+ws])/ws for i in range(len(array) - ws)]) | 0.606149 | 0.606615 |
import tensorflow as tf
import numpy as np
from sklearn.model_selection import train_test_split
import os
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision.transforms import ToTensor, Lambda, Compose
import matplotlib.pyplot as plt
import numpy as np
import random
import cv2 as cv
from model_torch import NeuralNetwork, test
import time
datasetFileName = "../master_dataset.npz"
#tfPath = 'tf/cv_image_tf_97'
tfPath = 'tf/cv_image_tf_512_99'
tfModel = tf.keras.models.load_model(tfPath)
trchPath = 'trch/cv_image_torch_93.pth'
model = NeuralNetwork()
model.load_state_dict(torch.load(trchPath))
# print(tfModel.summary())
# print(trchModel)
def importImageData(datasetFileName):
with np.load(datasetFileName, allow_pickle=True) as data:
dataImages = data['images']
dataLabels = data['labels']
dataLabelNames = data['labelnames']
desiredShape = (200, 200, 3)
N = len(dataImages)
shape = (N, desiredShape[0], desiredShape[1], desiredShape[2])
y = np.empty(shape, dtype='uint8')
for i in range(N):
y[i] = cv.resize(dataImages[i], [200, 200],
interpolation=cv.INTER_NEAREST)
dataImages = y
dataLabels = dataLabels.astype('uint8')
return dataImages, dataLabels
def tensorflowPredict(dataImage, dataLabels, i):
testImage = dataImage / 255.0
testLabel = dataLabels
tf_start_time = time.time()
predictions = tfModel.predict(testImage)
predictedLabel = np.argmax(predictions[i])
tf_infer_time = time.time()-tf_start_time
print(predictedLabel, testLabel[i], predictedLabel == testLabel[i])
print(
f'Predicted Class: {classes[predictedLabel]}\tActual Class: {classes[testLabel[i]]}')
testLoss, testAcc = tfModel.evaluate(testImage, testLabel, verbose=2)
print(f'\nTensorflow Test accuracy: {testAcc*100:.3f}%\n')
plt.figure()
imgRGB = testImage[i]
plt.imshow(imgRGB)
plt.xlabel(f'Predicted:{classes[predictedLabel]}')
plt.title('TensorFlow Prediction')
plt.grid(False)
plt.show()
return tf_infer_time
def pyTorchPredict(dataImages, dataLabels, i):
dataset = torch.tensor(dataImages)
all_data = []
for n in range(len(dataset)):
all_data.append([dataset[n], dataLabels[n]])
test_data = all_data
loss_fn = nn.CrossEntropyLoss()
model.eval()
x, y = test_data[i][0], test_data[i][1]
x1 = x
x = x.view(1, -1)
with torch.no_grad():
#test_loss, test_acc = test(test_dataloader, model, loss_fn)
trch_start_time = time.time()
pred = model(x.float())
predicted, actual = classes[pred[0].argmax(0).item()], classes[y]
pytorch_infer_time = time.time()-trch_start_time
print('Pytorch Result\n')
print(f'Predicted: "{predicted}", Actual: "{actual}"')
#print(f'PyTorch Test Accuracy: {test_acc:.3f}%')
img = x1.squeeze()
plt.title('PyTorch Prediction')
plt.imshow(img)
plt.xlabel(f"Predicted: {classes[y]}")
plt.show()
return pytorch_infer_time
if __name__ == '__main__':
i = 101
classes = ['afiq', 'azureen', 'gavin', 'goke', 'inamul',
'jincheng', 'mahmuda', 'numan', 'saseendran']
print('\n\n_________________________Result___________________________')
dataImages, dataLabels = importImageData(datasetFileName)
tf_time = tensorflowPredict(dataImages, dataLabels, i)
pt_time = pyTorchPredict(dataImages, dataLabels, i)
print(
f'\nTensorflow Predict Time:{tf_time}s\nPytorch Predict Time: {pt_time}s')
print('\n\n_________________________End_______________________________') | gavPrj/tf_vs_trch/cv_image_tensorFlow_vs_pytorch.py | import tensorflow as tf
import numpy as np
from sklearn.model_selection import train_test_split
import os
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision.transforms import ToTensor, Lambda, Compose
import matplotlib.pyplot as plt
import numpy as np
import random
import cv2 as cv
from model_torch import NeuralNetwork, test
import time
datasetFileName = "../master_dataset.npz"
#tfPath = 'tf/cv_image_tf_97'
tfPath = 'tf/cv_image_tf_512_99'
tfModel = tf.keras.models.load_model(tfPath)
trchPath = 'trch/cv_image_torch_93.pth'
model = NeuralNetwork()
model.load_state_dict(torch.load(trchPath))
# print(tfModel.summary())
# print(trchModel)
def importImageData(datasetFileName):
with np.load(datasetFileName, allow_pickle=True) as data:
dataImages = data['images']
dataLabels = data['labels']
dataLabelNames = data['labelnames']
desiredShape = (200, 200, 3)
N = len(dataImages)
shape = (N, desiredShape[0], desiredShape[1], desiredShape[2])
y = np.empty(shape, dtype='uint8')
for i in range(N):
y[i] = cv.resize(dataImages[i], [200, 200],
interpolation=cv.INTER_NEAREST)
dataImages = y
dataLabels = dataLabels.astype('uint8')
return dataImages, dataLabels
def tensorflowPredict(dataImage, dataLabels, i):
testImage = dataImage / 255.0
testLabel = dataLabels
tf_start_time = time.time()
predictions = tfModel.predict(testImage)
predictedLabel = np.argmax(predictions[i])
tf_infer_time = time.time()-tf_start_time
print(predictedLabel, testLabel[i], predictedLabel == testLabel[i])
print(
f'Predicted Class: {classes[predictedLabel]}\tActual Class: {classes[testLabel[i]]}')
testLoss, testAcc = tfModel.evaluate(testImage, testLabel, verbose=2)
print(f'\nTensorflow Test accuracy: {testAcc*100:.3f}%\n')
plt.figure()
imgRGB = testImage[i]
plt.imshow(imgRGB)
plt.xlabel(f'Predicted:{classes[predictedLabel]}')
plt.title('TensorFlow Prediction')
plt.grid(False)
plt.show()
return tf_infer_time
def pyTorchPredict(dataImages, dataLabels, i):
dataset = torch.tensor(dataImages)
all_data = []
for n in range(len(dataset)):
all_data.append([dataset[n], dataLabels[n]])
test_data = all_data
loss_fn = nn.CrossEntropyLoss()
model.eval()
x, y = test_data[i][0], test_data[i][1]
x1 = x
x = x.view(1, -1)
with torch.no_grad():
#test_loss, test_acc = test(test_dataloader, model, loss_fn)
trch_start_time = time.time()
pred = model(x.float())
predicted, actual = classes[pred[0].argmax(0).item()], classes[y]
pytorch_infer_time = time.time()-trch_start_time
print('Pytorch Result\n')
print(f'Predicted: "{predicted}", Actual: "{actual}"')
#print(f'PyTorch Test Accuracy: {test_acc:.3f}%')
img = x1.squeeze()
plt.title('PyTorch Prediction')
plt.imshow(img)
plt.xlabel(f"Predicted: {classes[y]}")
plt.show()
return pytorch_infer_time
if __name__ == '__main__':
i = 101
classes = ['afiq', 'azureen', 'gavin', 'goke', 'inamul',
'jincheng', 'mahmuda', 'numan', 'saseendran']
print('\n\n_________________________Result___________________________')
dataImages, dataLabels = importImageData(datasetFileName)
tf_time = tensorflowPredict(dataImages, dataLabels, i)
pt_time = pyTorchPredict(dataImages, dataLabels, i)
print(
f'\nTensorflow Predict Time:{tf_time}s\nPytorch Predict Time: {pt_time}s')
print('\n\n_________________________End_______________________________') | 0.406744 | 0.628806 |
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
from scipy import signal
class CoreFrame:
def __init__(self, core_width=3, core_height=3):
self.width = core_width
self.height = core_height
def random_mat(self):
return np.random.random((self.height, self.width))
def dferf(self):
return np.array([[-1 / 8, -1 / 8, -1 / 8], # 3x3 differential core, not related to frame size yet
[-1 / 8, 2, - 1 / 8], # Mathematics work needed
[-1 / 8, -1 / 8, -1 / 8]])
def edge_detect(self):
return np.array([[-1, -1, -1], # 3x3 edge_detect core,not related to frame size yet
[-1, 8, -1], # Mathematics work needed
[-1, -1, -1]])
def itgrf(self):
return np.array([[1 / 8, 1 / 8, 1 / 8], # 3x3 integral core,not related to frame size yet
[1 / 8, 0, 1 / 8], # Mathematics work needed
[1 / 8, 1 / 8, 1 / 8]])
def convolution(img, core, frame, discrete):
height, width, channel = img.shape
newimg = np.zeros((height + frame.height - 1, width + frame.width - 1, channel))
new_R = signal.convolve(img[:, :, 0], core) # OR signal.fftconvolve(img, core)
new_G = signal.convolve(img[:, :, 1], core)
new_B = signal.convolve(img[:, :, 2], core)
empchan = np.zeros((height + frame.height - 1, width + frame.width - 1))
if discrete:
imgB, imgG, imgR = newimg.copy(), newimg.copy(), newimg.copy()
imgR[:, :, 0], imgG[:, :, 0], imgB[:, :, 0] = new_R, empchan, empchan
imgR[:, :, 1], imgG[:, :, 1], imgB[:, :, 1] = empchan, new_G, empchan
imgR[:, :, 2], imgG[:, :, 2], imgB[:, :, 2] = empchan, empchan, new_B
newimg = np.concatenate((imgR,imgG,imgB), axis=1)
else:
newimg[:, :, 0] = new_R
newimg[:, :, 1] = new_G
newimg[:, :, 2] = new_B
return newimg
def main(name, save=False, discrete=True):
file_name = './resource/' + str(name) + '.png'
img = mpimg.imread(file_name)
frame = CoreFrame()
core = frame.edge_detect()
print('Convolution Core:', core)
newimg = convolution(img, core, frame,discrete)
print(newimg.shape)
H, W, C =newimg.shape
H, W = H/120, W/120
plt.figure(figsize=(W, H), dpi=120)
plt.axes([0., 0., 1., 1.], frameon=False, xticks=[], yticks=[])
plt.imshow(newimg)
plt.axis('off')
if save:
plt.savefig('./output/' + str(name) + '_processed' + '.png', transparent=True)
print("Picture saved as :"+'./output/' + str(name) + '_processed' + '.png')
plt.show()
if __name__ == '__main__':
main(5, save=True) | Convolution_test.py | import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
from scipy import signal
class CoreFrame:
def __init__(self, core_width=3, core_height=3):
self.width = core_width
self.height = core_height
def random_mat(self):
return np.random.random((self.height, self.width))
def dferf(self):
return np.array([[-1 / 8, -1 / 8, -1 / 8], # 3x3 differential core, not related to frame size yet
[-1 / 8, 2, - 1 / 8], # Mathematics work needed
[-1 / 8, -1 / 8, -1 / 8]])
def edge_detect(self):
return np.array([[-1, -1, -1], # 3x3 edge_detect core,not related to frame size yet
[-1, 8, -1], # Mathematics work needed
[-1, -1, -1]])
def itgrf(self):
return np.array([[1 / 8, 1 / 8, 1 / 8], # 3x3 integral core,not related to frame size yet
[1 / 8, 0, 1 / 8], # Mathematics work needed
[1 / 8, 1 / 8, 1 / 8]])
def convolution(img, core, frame, discrete):
height, width, channel = img.shape
newimg = np.zeros((height + frame.height - 1, width + frame.width - 1, channel))
new_R = signal.convolve(img[:, :, 0], core) # OR signal.fftconvolve(img, core)
new_G = signal.convolve(img[:, :, 1], core)
new_B = signal.convolve(img[:, :, 2], core)
empchan = np.zeros((height + frame.height - 1, width + frame.width - 1))
if discrete:
imgB, imgG, imgR = newimg.copy(), newimg.copy(), newimg.copy()
imgR[:, :, 0], imgG[:, :, 0], imgB[:, :, 0] = new_R, empchan, empchan
imgR[:, :, 1], imgG[:, :, 1], imgB[:, :, 1] = empchan, new_G, empchan
imgR[:, :, 2], imgG[:, :, 2], imgB[:, :, 2] = empchan, empchan, new_B
newimg = np.concatenate((imgR,imgG,imgB), axis=1)
else:
newimg[:, :, 0] = new_R
newimg[:, :, 1] = new_G
newimg[:, :, 2] = new_B
return newimg
def main(name, save=False, discrete=True):
file_name = './resource/' + str(name) + '.png'
img = mpimg.imread(file_name)
frame = CoreFrame()
core = frame.edge_detect()
print('Convolution Core:', core)
newimg = convolution(img, core, frame,discrete)
print(newimg.shape)
H, W, C =newimg.shape
H, W = H/120, W/120
plt.figure(figsize=(W, H), dpi=120)
plt.axes([0., 0., 1., 1.], frameon=False, xticks=[], yticks=[])
plt.imshow(newimg)
plt.axis('off')
if save:
plt.savefig('./output/' + str(name) + '_processed' + '.png', transparent=True)
print("Picture saved as :"+'./output/' + str(name) + '_processed' + '.png')
plt.show()
if __name__ == '__main__':
main(5, save=True) | 0.532425 | 0.418519 |
import pygame, sys, random, math, time
import constants
from buttons import draw_rect
from buttons import button_hover
from buttons import button_press
from buttons import text
# Randomly generates obstacles - draws them red and returns the coordinates of them
def random_fill(x, y, w, p):
obstacle = (x, y)
rand = random.randint(0, 50)
if rand < p:
pygame.draw.rect(surface, constants.RED, (x, y, w, w))
return obstacle
# draws in the correctly sized grid and calls random_fill() for obstacles
def draw(w, p, grid):
obst_list = []
x, y = 0, 0
for row in grid:
for col in row:
pygame.draw.rect(surface, constants.BLUE, (x, y, w, w), 1)
if x == 0 and y == 0:
pygame.draw.rect(surface, constants.GREEN, (x, y, w, w))
pass
elif x == 792 and y == 792 or x == 796 and y == 796 or x == constants.END_3X and y == constants.END_3Y:
continue
else:
val = random_fill(x, y, w, p)
if val is not None:
obst_list.append(val)
pygame.display.update()
x = x + w
y = y + w
x = 0
return obst_list
# straight line distance used for g
def distance(nx, ny, gx, gy):
g = math.sqrt((abs(gx - nx) ** 2) + (abs(gy - ny) ** 2))
return g # + h
# manhattan distance used for h
def manhattan(nx, ny, gx, gy):
h = math.sqrt(abs(nx - gx) + abs(ny - gy))
return h
# Generates all neighbors of the current node and removes based on if it is an obstacle, or if that node has been
# traveled to before. Applies heuristic to neighbors and travels based on minimum f score. Recursively calls itself
# and stores the path that it took for the repairing method.
def astar(x, y, blocked, end, w):
current = (x, y)
all_neighbors = [(x + w, y), (x, y + w), (x + w, y + w),
(x - w, y - w), (x - w, y), (x - w, y + w),
(x, y - w), (x + w, y - w)]
for i in blocked:
if i in all_neighbors:
all_neighbors.remove(i)
for i in constants.PATH:
if i in all_neighbors:
all_neighbors.remove(i)
neighbor_list1 = heuristic(all_neighbors, end)
try:
shortest = min(neighbor_list1, key=neighbor_list1.get)
constants.SUM += neighbor_list1.get(shortest)
for val, key in neighbor_list1.items():
if 0 <= val[0] < 800 and 0 <= val[1] < 800:
if val == shortest:
current = val
pygame.draw.rect(surface, constants.GREEN, (*current, w, w))
pygame.time.wait(1)
pygame.display.update()
constants.PATH_DIST.append(key)
try:
current_index = constants.PATH_DIST.index(key)
if constants.PATH_DIST[current_index] > constants.PATH_DIST[current_index - 3]:
if (constants.PATH_DIST[current_index] - constants.PATH_DIST[current_index - 3]) < 100:
blocked.append(current)
except IndexError:
continue
except ValueError:
pass
constants.PATH.append(current)
try:
if current != end:
astar(*current, blocked, end, w)
except RecursionError:
current_id = constants.PATH.index(current)
if current != constants.START and constants.PATH[current_id - 1] != constants.START:
blocked.append(current)
blocked.append(constants.PATH[current_id - 1])
# print("(R)")
return constants.SUM, constants.PATH
# Takes in neighbor list and using a dictionary, stores the coordinates and calculated f score. Returns dictionary.
def heuristic(neighbors, end):
neighbor_list = {}
counter = 0
if counter != len(neighbors):
for i in neighbors:
dist = distance(*i, *end) + (constants.INFLATION * manhattan(*i, *end)) # CONSTANT ENDING
neighbor_list[i] = dist
counter += 1
return neighbor_list
# Method to visually clear the path that was taken - clears up for next iteration.
def clear(path, w):
for i in path:
pygame.draw.rect(surface, constants.SEA_GREEN, (*i, w, w))
# iterates based on a decrementing W0, decremented inflation e is applied to the heuristic
def repairing(path_sum, blocked, path, end, w):
start_time = time.time()
while constants.W0 > 0:
clear(path, w)
pygame.draw.rect(surface, constants.GREEN, (*end, w, w))
pygame.display.update()
constants.PATH.clear()
sum_next = astar(*constants.START, blocked, end, w)
half_val = math.floor(sum_next[0] / 2)
if sum_next[0] < path_sum:
clear(path, w)
pygame.display.update()
elif half_val == math.floor(path_sum):
break
if constants.INFLATION >= 1:
constants.INFLATION -= 1
constants.W0 -= constants.W1
print("RUN TIME: %s seconds" % (time.time() - start_time))
# called based on button press
def choice(w, end, p, grid):
start_time = time.time()
constants.OBSTACLES = draw(w, p, grid)
print("GRID GENERATION: %s seconds" % (time.time() - start_time))
traveled = astar(*constants.START, constants.OBSTACLES, end, w)
repairing(traveled[0], constants.OBSTACLES, traveled[1], end, w)
pygame.display.update()
# main function
def main():
surface.fill(constants.BLACK)
text()
while constants.END is False:
for event in pygame.event.get():
if event.type == pygame.MOUSEBUTTONDOWN:
button_press(event, surface)
if event.type == pygame.QUIT:
sys.exit()
draw_rect(surface)
button_hover(surface)
pygame.init()
surface = pygame.display.set_mode((constants.WIDTH + 200, constants.HEIGHT))
main() | visualization.py |
import pygame, sys, random, math, time
import constants
from buttons import draw_rect
from buttons import button_hover
from buttons import button_press
from buttons import text
# Randomly generates obstacles - draws them red and returns the coordinates of them
def random_fill(x, y, w, p):
obstacle = (x, y)
rand = random.randint(0, 50)
if rand < p:
pygame.draw.rect(surface, constants.RED, (x, y, w, w))
return obstacle
# draws in the correctly sized grid and calls random_fill() for obstacles
def draw(w, p, grid):
obst_list = []
x, y = 0, 0
for row in grid:
for col in row:
pygame.draw.rect(surface, constants.BLUE, (x, y, w, w), 1)
if x == 0 and y == 0:
pygame.draw.rect(surface, constants.GREEN, (x, y, w, w))
pass
elif x == 792 and y == 792 or x == 796 and y == 796 or x == constants.END_3X and y == constants.END_3Y:
continue
else:
val = random_fill(x, y, w, p)
if val is not None:
obst_list.append(val)
pygame.display.update()
x = x + w
y = y + w
x = 0
return obst_list
# straight line distance used for g
def distance(nx, ny, gx, gy):
g = math.sqrt((abs(gx - nx) ** 2) + (abs(gy - ny) ** 2))
return g # + h
# manhattan distance used for h
def manhattan(nx, ny, gx, gy):
h = math.sqrt(abs(nx - gx) + abs(ny - gy))
return h
# Generates all neighbors of the current node and removes based on if it is an obstacle, or if that node has been
# traveled to before. Applies heuristic to neighbors and travels based on minimum f score. Recursively calls itself
# and stores the path that it took for the repairing method.
def astar(x, y, blocked, end, w):
current = (x, y)
all_neighbors = [(x + w, y), (x, y + w), (x + w, y + w),
(x - w, y - w), (x - w, y), (x - w, y + w),
(x, y - w), (x + w, y - w)]
for i in blocked:
if i in all_neighbors:
all_neighbors.remove(i)
for i in constants.PATH:
if i in all_neighbors:
all_neighbors.remove(i)
neighbor_list1 = heuristic(all_neighbors, end)
try:
shortest = min(neighbor_list1, key=neighbor_list1.get)
constants.SUM += neighbor_list1.get(shortest)
for val, key in neighbor_list1.items():
if 0 <= val[0] < 800 and 0 <= val[1] < 800:
if val == shortest:
current = val
pygame.draw.rect(surface, constants.GREEN, (*current, w, w))
pygame.time.wait(1)
pygame.display.update()
constants.PATH_DIST.append(key)
try:
current_index = constants.PATH_DIST.index(key)
if constants.PATH_DIST[current_index] > constants.PATH_DIST[current_index - 3]:
if (constants.PATH_DIST[current_index] - constants.PATH_DIST[current_index - 3]) < 100:
blocked.append(current)
except IndexError:
continue
except ValueError:
pass
constants.PATH.append(current)
try:
if current != end:
astar(*current, blocked, end, w)
except RecursionError:
current_id = constants.PATH.index(current)
if current != constants.START and constants.PATH[current_id - 1] != constants.START:
blocked.append(current)
blocked.append(constants.PATH[current_id - 1])
# print("(R)")
return constants.SUM, constants.PATH
# Takes in neighbor list and using a dictionary, stores the coordinates and calculated f score. Returns dictionary.
def heuristic(neighbors, end):
neighbor_list = {}
counter = 0
if counter != len(neighbors):
for i in neighbors:
dist = distance(*i, *end) + (constants.INFLATION * manhattan(*i, *end)) # CONSTANT ENDING
neighbor_list[i] = dist
counter += 1
return neighbor_list
# Method to visually clear the path that was taken - clears up for next iteration.
def clear(path, w):
for i in path:
pygame.draw.rect(surface, constants.SEA_GREEN, (*i, w, w))
# iterates based on a decrementing W0, decremented inflation e is applied to the heuristic
def repairing(path_sum, blocked, path, end, w):
start_time = time.time()
while constants.W0 > 0:
clear(path, w)
pygame.draw.rect(surface, constants.GREEN, (*end, w, w))
pygame.display.update()
constants.PATH.clear()
sum_next = astar(*constants.START, blocked, end, w)
half_val = math.floor(sum_next[0] / 2)
if sum_next[0] < path_sum:
clear(path, w)
pygame.display.update()
elif half_val == math.floor(path_sum):
break
if constants.INFLATION >= 1:
constants.INFLATION -= 1
constants.W0 -= constants.W1
print("RUN TIME: %s seconds" % (time.time() - start_time))
# called based on button press
def choice(w, end, p, grid):
start_time = time.time()
constants.OBSTACLES = draw(w, p, grid)
print("GRID GENERATION: %s seconds" % (time.time() - start_time))
traveled = astar(*constants.START, constants.OBSTACLES, end, w)
repairing(traveled[0], constants.OBSTACLES, traveled[1], end, w)
pygame.display.update()
# main function
def main():
surface.fill(constants.BLACK)
text()
while constants.END is False:
for event in pygame.event.get():
if event.type == pygame.MOUSEBUTTONDOWN:
button_press(event, surface)
if event.type == pygame.QUIT:
sys.exit()
draw_rect(surface)
button_hover(surface)
pygame.init()
surface = pygame.display.set_mode((constants.WIDTH + 200, constants.HEIGHT))
main() | 0.278257 | 0.38578 |
from django.db import models
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
import datetime as dt
class Hood(models.Model):
name = models.CharField(max_length=200)
location = models.CharField(max_length=200)
image = models.ImageField(upload_to='images/',default='a.jpg')
police = models.IntegerField(null=True,blank=True)
health = models.IntegerField(null=True,blank=True)
def __str__(self):
return self.name
def create_hood(self):
self.save()
def delete_hood(self):
self.delete()
@classmethod
def find_hood(cls, hood_id):
return cls.objects.filter(id=hood_id)
@classmethod
def search_hood(cls,search):
hoods = cls.objects.filter(name__icontains=search)
return hoods
class Meta:
ordering = ["-pk"]
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE, related_name='profile')
profile_picture = models.ImageField(upload_to='images/', default='default.jpg')
hood = models.ForeignKey(Hood, on_delete=models.CASCADE, related_name='user_hood',null=True, )
def __str__(self):
return self.user.username
@receiver(post_save, sender=User)
def save_user(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
def delete_user(self):
self.delete()
class Business(models.Model):
name = models.CharField(max_length=200)
hood = models.ForeignKey(Hood, on_delete=models.CASCADE, related_name='hood_business',null=True)
user = models.ForeignKey(Profile, on_delete=models.CASCADE, related_name="business")
business_email=models.EmailField(max_length=100, blank=True)
def __str__(self):
return self.name
def create_business(self):
self.save()
def delete_business(self):
self.delete()
@classmethod
def search_business(cls, name):
return cls.objects.filter(name__icontains=name).all()
class Post(models.Model):
title = models.CharField(max_length=200)
hood = models.ForeignKey(Hood, on_delete=models.CASCADE, related_name='hood_post',null=True)
post = models.TextField()
date = models.DateTimeField(auto_now_add=True, blank=True)
def __str__(self):
return self.title
def create_post(self):
self.save()
def delete_post(self):
self.delete()
class Meta:
ordering = ["-pk"] | app/models.py | from django.db import models
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
import datetime as dt
class Hood(models.Model):
name = models.CharField(max_length=200)
location = models.CharField(max_length=200)
image = models.ImageField(upload_to='images/',default='a.jpg')
police = models.IntegerField(null=True,blank=True)
health = models.IntegerField(null=True,blank=True)
def __str__(self):
return self.name
def create_hood(self):
self.save()
def delete_hood(self):
self.delete()
@classmethod
def find_hood(cls, hood_id):
return cls.objects.filter(id=hood_id)
@classmethod
def search_hood(cls,search):
hoods = cls.objects.filter(name__icontains=search)
return hoods
class Meta:
ordering = ["-pk"]
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE, related_name='profile')
profile_picture = models.ImageField(upload_to='images/', default='default.jpg')
hood = models.ForeignKey(Hood, on_delete=models.CASCADE, related_name='user_hood',null=True, )
def __str__(self):
return self.user.username
@receiver(post_save, sender=User)
def save_user(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
def delete_user(self):
self.delete()
class Business(models.Model):
name = models.CharField(max_length=200)
hood = models.ForeignKey(Hood, on_delete=models.CASCADE, related_name='hood_business',null=True)
user = models.ForeignKey(Profile, on_delete=models.CASCADE, related_name="business")
business_email=models.EmailField(max_length=100, blank=True)
def __str__(self):
return self.name
def create_business(self):
self.save()
def delete_business(self):
self.delete()
@classmethod
def search_business(cls, name):
return cls.objects.filter(name__icontains=name).all()
class Post(models.Model):
title = models.CharField(max_length=200)
hood = models.ForeignKey(Hood, on_delete=models.CASCADE, related_name='hood_post',null=True)
post = models.TextField()
date = models.DateTimeField(auto_now_add=True, blank=True)
def __str__(self):
return self.title
def create_post(self):
self.save()
def delete_post(self):
self.delete()
class Meta:
ordering = ["-pk"] | 0.526343 | 0.108095 |
from __future__ import annotations
import enum
import logging
import ssl
from collections.abc import Sequence
from dataclasses import dataclass, field
from datetime import datetime
from pathlib import Path
from types import TracebackType
from typing import Any
import aiohttp
from dateutil.parser import parse
from neuro_logging import trace
from yarl import URL
from .config import KubeClientAuthType, KubeConfig
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class Metadata:
name: str
created_at: datetime
labels: dict[str, str] = field(default_factory=dict)
@classmethod
def from_payload(cls, payload: dict[str, Any]) -> Metadata:
return cls(
name=payload["name"],
created_at=parse(payload["creationTimestamp"]),
labels=payload.get("labels", {}),
)
@dataclass(frozen=True)
class Node:
metadata: Metadata
@classmethod
def from_payload(cls, payload: dict[str, Any]) -> Node:
return cls(metadata=Metadata.from_payload(payload["metadata"]))
class PodPhase(str, enum.Enum):
PENDING = "Pending"
RUNNING = "Running"
SUCCEEDED = "Succeeded"
FAILED = "Failed"
UNKNOWN = "Unknown"
@dataclass(frozen=True)
class PodStatus:
phase: PodPhase
@classmethod
def from_payload(cls, payload: dict[str, Any]) -> PodStatus:
return cls(phase=PodPhase(payload.get("phase", "Unknown")))
@dataclass(frozen=True)
class Resources:
cpu_m: int = 0
memory_mb: int = 0
gpu: int = 0
@classmethod
def from_payload(cls, payload: dict[str, Any]) -> Resources:
return cls(
cpu_m=cls._parse_cpu_m(payload.get("cpu", "0")),
memory_mb=cls._parse_memory_mb(payload.get("memory", "0Mi")),
gpu=int(payload.get("nvidia.com/gpu", 0)),
)
@classmethod
def _parse_cpu_m(cls, value: str) -> int:
if value.endswith("m"):
return int(value[:-1])
return int(float(value) * 1000)
@classmethod
def _parse_memory_mb(cls, value: str) -> int:
if value.endswith("Gi"):
return int(value[:-2]) * 1024
if value.endswith("Mi"):
return int(value[:-2])
raise ValueError("Memory unit is not supported")
@dataclass(frozen=True)
class Container:
name: str
resource_requests: Resources = field(default_factory=Resources)
@classmethod
def from_payload(cls, payload: dict[str, Any]) -> Container:
return cls(
name=payload["name"],
resource_requests=Resources.from_payload(
payload.get("resources", {}).get("requests", {})
),
)
@dataclass(frozen=True)
class Pod:
metadata: Metadata
status: PodStatus
containers: Sequence[Container]
@classmethod
def from_payload(cls, payload: dict[str, Any]) -> Pod:
return cls(
metadata=Metadata.from_payload(payload["metadata"]),
status=PodStatus.from_payload(payload.get("status", {})),
containers=[
Container.from_payload(c) for c in payload["spec"]["containers"]
],
)
class KubeClient:
def __init__(
self,
config: KubeConfig,
trace_configs: list[aiohttp.TraceConfig] | None = None,
) -> None:
self._config = config
self._trace_configs = trace_configs
self._client: aiohttp.ClientSession | None = None
def _create_ssl_context(self) -> ssl.SSLContext | None:
if self._config.url.scheme != "https":
return None
ssl_context = ssl.create_default_context(
cafile=self._config.cert_authority_path,
cadata=self._config.cert_authority_data_pem,
)
if self._config.auth_type == KubeClientAuthType.CERTIFICATE:
ssl_context.load_cert_chain(
self._config.client_cert_path, # type: ignore
self._config.client_key_path,
)
return ssl_context
async def __aenter__(self) -> "KubeClient":
self._client = await self._create_http_client()
return self
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc_value: BaseException | None,
traceback: TracebackType | None,
) -> None:
await self.aclose()
async def _create_http_client(self) -> aiohttp.ClientSession:
connector = aiohttp.TCPConnector(
limit=self._config.conn_pool_size, ssl=self._create_ssl_context()
)
if self._config.auth_type == KubeClientAuthType.TOKEN:
token = self._config.token
if not token:
assert self._config.token_path is not None
token = Path(self._config.token_path).read_text()
headers = {"Authorization": "Bearer " + token}
else:
headers = {}
timeout = aiohttp.ClientTimeout(
connect=self._config.conn_timeout_s, total=self._config.read_timeout_s
)
return aiohttp.ClientSession(
connector=connector,
timeout=timeout,
headers=headers,
trace_configs=self._trace_configs,
)
async def aclose(self) -> None:
assert self._client
await self._client.close()
def _get_pods_url(self, namespace: str) -> URL:
if namespace:
return self._config.url / "api/v1/namespaces" / namespace / "pods"
return self._config.url / "api/v1/pods"
@trace
async def get_node(self, name: str) -> Node:
assert self._client
async with self._client.get(
self._config.url / "api/v1/nodes" / name
) as response:
response.raise_for_status()
payload = await response.json()
assert payload["kind"] == "Node"
return Node.from_payload(payload)
@trace
async def get_pods(
self, namespace: str = "", field_selector: str = "", label_selector: str = ""
) -> Sequence[Pod]:
assert self._client
params: dict[str, str] = {}
if field_selector:
params["fieldSelector"] = field_selector
if label_selector:
params["labelSelector"] = label_selector
async with self._client.get(
self._get_pods_url(namespace), params=params or None
) as response:
response.raise_for_status()
payload = await response.json()
assert payload["kind"] == "PodList"
return [Pod.from_payload(i) for i in payload["items"]] | platform_reports/kube_client.py | from __future__ import annotations
import enum
import logging
import ssl
from collections.abc import Sequence
from dataclasses import dataclass, field
from datetime import datetime
from pathlib import Path
from types import TracebackType
from typing import Any
import aiohttp
from dateutil.parser import parse
from neuro_logging import trace
from yarl import URL
from .config import KubeClientAuthType, KubeConfig
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class Metadata:
name: str
created_at: datetime
labels: dict[str, str] = field(default_factory=dict)
@classmethod
def from_payload(cls, payload: dict[str, Any]) -> Metadata:
return cls(
name=payload["name"],
created_at=parse(payload["creationTimestamp"]),
labels=payload.get("labels", {}),
)
@dataclass(frozen=True)
class Node:
metadata: Metadata
@classmethod
def from_payload(cls, payload: dict[str, Any]) -> Node:
return cls(metadata=Metadata.from_payload(payload["metadata"]))
class PodPhase(str, enum.Enum):
PENDING = "Pending"
RUNNING = "Running"
SUCCEEDED = "Succeeded"
FAILED = "Failed"
UNKNOWN = "Unknown"
@dataclass(frozen=True)
class PodStatus:
phase: PodPhase
@classmethod
def from_payload(cls, payload: dict[str, Any]) -> PodStatus:
return cls(phase=PodPhase(payload.get("phase", "Unknown")))
@dataclass(frozen=True)
class Resources:
cpu_m: int = 0
memory_mb: int = 0
gpu: int = 0
@classmethod
def from_payload(cls, payload: dict[str, Any]) -> Resources:
return cls(
cpu_m=cls._parse_cpu_m(payload.get("cpu", "0")),
memory_mb=cls._parse_memory_mb(payload.get("memory", "0Mi")),
gpu=int(payload.get("nvidia.com/gpu", 0)),
)
@classmethod
def _parse_cpu_m(cls, value: str) -> int:
if value.endswith("m"):
return int(value[:-1])
return int(float(value) * 1000)
@classmethod
def _parse_memory_mb(cls, value: str) -> int:
if value.endswith("Gi"):
return int(value[:-2]) * 1024
if value.endswith("Mi"):
return int(value[:-2])
raise ValueError("Memory unit is not supported")
@dataclass(frozen=True)
class Container:
name: str
resource_requests: Resources = field(default_factory=Resources)
@classmethod
def from_payload(cls, payload: dict[str, Any]) -> Container:
return cls(
name=payload["name"],
resource_requests=Resources.from_payload(
payload.get("resources", {}).get("requests", {})
),
)
@dataclass(frozen=True)
class Pod:
metadata: Metadata
status: PodStatus
containers: Sequence[Container]
@classmethod
def from_payload(cls, payload: dict[str, Any]) -> Pod:
return cls(
metadata=Metadata.from_payload(payload["metadata"]),
status=PodStatus.from_payload(payload.get("status", {})),
containers=[
Container.from_payload(c) for c in payload["spec"]["containers"]
],
)
class KubeClient:
def __init__(
self,
config: KubeConfig,
trace_configs: list[aiohttp.TraceConfig] | None = None,
) -> None:
self._config = config
self._trace_configs = trace_configs
self._client: aiohttp.ClientSession | None = None
def _create_ssl_context(self) -> ssl.SSLContext | None:
if self._config.url.scheme != "https":
return None
ssl_context = ssl.create_default_context(
cafile=self._config.cert_authority_path,
cadata=self._config.cert_authority_data_pem,
)
if self._config.auth_type == KubeClientAuthType.CERTIFICATE:
ssl_context.load_cert_chain(
self._config.client_cert_path, # type: ignore
self._config.client_key_path,
)
return ssl_context
async def __aenter__(self) -> "KubeClient":
self._client = await self._create_http_client()
return self
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc_value: BaseException | None,
traceback: TracebackType | None,
) -> None:
await self.aclose()
async def _create_http_client(self) -> aiohttp.ClientSession:
connector = aiohttp.TCPConnector(
limit=self._config.conn_pool_size, ssl=self._create_ssl_context()
)
if self._config.auth_type == KubeClientAuthType.TOKEN:
token = self._config.token
if not token:
assert self._config.token_path is not None
token = Path(self._config.token_path).read_text()
headers = {"Authorization": "Bearer " + token}
else:
headers = {}
timeout = aiohttp.ClientTimeout(
connect=self._config.conn_timeout_s, total=self._config.read_timeout_s
)
return aiohttp.ClientSession(
connector=connector,
timeout=timeout,
headers=headers,
trace_configs=self._trace_configs,
)
async def aclose(self) -> None:
assert self._client
await self._client.close()
def _get_pods_url(self, namespace: str) -> URL:
if namespace:
return self._config.url / "api/v1/namespaces" / namespace / "pods"
return self._config.url / "api/v1/pods"
@trace
async def get_node(self, name: str) -> Node:
assert self._client
async with self._client.get(
self._config.url / "api/v1/nodes" / name
) as response:
response.raise_for_status()
payload = await response.json()
assert payload["kind"] == "Node"
return Node.from_payload(payload)
@trace
async def get_pods(
self, namespace: str = "", field_selector: str = "", label_selector: str = ""
) -> Sequence[Pod]:
assert self._client
params: dict[str, str] = {}
if field_selector:
params["fieldSelector"] = field_selector
if label_selector:
params["labelSelector"] = label_selector
async with self._client.get(
self._get_pods_url(namespace), params=params or None
) as response:
response.raise_for_status()
payload = await response.json()
assert payload["kind"] == "PodList"
return [Pod.from_payload(i) for i in payload["items"]] | 0.801625 | 0.129788 |
import os
from os.path import join as pjoin
import collections
import glob
import numpy as np
import scipy.io as io
import matplotlib.pyplot as plt
from PIL import Image
from tqdm import tqdm
from torch.utils import data
from pysmg.data.transforms import default_transforms
class PascalVOC(data.Dataset):
    """Data loader for the Pascal VOC semantic segmentation dataset.

    Annotations from both the original VOC data (which consist of RGB images
    in which colours map to specific classes) and the SBD (Berkely) dataset
    (where annotations are stored as .mat files) are converted into a common
    `label_mask` format. Under this format, each mask is an (M,N) array of
    integer values from 0 to 21, where 0 represents the background class.
    The label masks are stored in a new folder, called `pre_encoded`, which
    is added as a subdirectory of the `SegmentationClass` folder in the
    original Pascal VOC data layout.

    A total of five data splits are provided for working with the VOC data:
        train: The original VOC 2012 training data - 1464 images
        val: The original VOC 2012 validation data - 1449 images
        trainval: The combination of `train` and `val` - 2913 images
        train_aug: The unique images present in both the train split and
            training images from SBD: - 8829 images (the unique members
            of the result of combining lists of length 1464 and 8498)
        train_aug_val: The original VOC 2012 validation data minus the images
            present in `train_aug` (This is done with the same logic as
            the validation set used in FCN PAMI paper, but with VOC 2012
            rather than VOC 2011) - 904 images
    """

    def __init__(
        self,
        root,
        sbd_path=None,
        split="train_aug",
        is_transform=False,
        img_size="same",
        augmentations=None,
        normalize_mean=(0.485, 0.456, 0.406),
        normalize_std=(0.229, 0.224, 0.225),
    ):
        # NOTE: the normalisation defaults are tuples; the previous list
        # defaults were shared mutable default arguments.
        self.root = root
        self.sbd_path = sbd_path
        self.split = split
        self.is_transform = is_transform
        self.augmentations = augmentations
        self.n_classes = 21
        self.files = collections.defaultdict(list)
        self.img_size = img_size if isinstance(img_size, tuple) else (img_size, img_size)
        self.normalize = (normalize_mean, normalize_std)
        # Read the three base split lists shipped with VOC.  `split_name`
        # intentionally does not reuse the `split` parameter name (the old
        # code shadowed it), and the file handle is closed deterministically.
        for split_name in ["train", "val", "trainval"]:
            path = pjoin(self.root, "ImageSets", "Segmentation", split_name + ".txt")
            with open(path, "r") as list_file:
                self.files[split_name] = [line.rstrip() for line in list_file]
        self.setup_annotations()

    def __len__(self):
        """Number of samples in the active split."""
        return len(self.files[self.split])

    def __getitem__(self, index):
        """Return the (image, label) pair at *index* in the active split."""
        im_name = self.files[self.split][index]
        im_path = pjoin(self.root, "JPEGImages", im_name + ".jpg")
        lbl_path = pjoin(self.root, "SegmentationClass", "pre_encoded", im_name + ".png")
        im = Image.open(im_path)
        lbl = Image.open(lbl_path)
        if self.augmentations is not None:
            im, lbl = self.augmentations(im, lbl)
        if self.is_transform:
            im, lbl = self.transform(im, lbl)
        return im, lbl

    def transform(self, img, lbl):
        """Apply default tensor transforms; map the 255 'void' label to background."""
        img, lbl = default_transforms(
            img,
            lbl,
            normalize=self.normalize,
            size=self.img_size
        )
        lbl[lbl == 255] = 0
        return img, lbl

    def get_pascal_labels(self):
        """Return the (21, 3) RGB colour map of the Pascal VOC classes.

        BUG FIX: this method was referenced by `decode_segmap` but missing
        from the class, so decoding raised AttributeError.
        """
        return np.asarray(
            [
                [0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0],
                [0, 0, 128], [128, 0, 128], [0, 128, 128], [128, 128, 128],
                [64, 0, 0], [192, 0, 0], [64, 128, 0], [192, 128, 0],
                [64, 0, 128], [192, 0, 128], [64, 128, 128], [192, 128, 128],
                [0, 64, 0], [128, 64, 0], [0, 192, 0], [128, 192, 0],
                [0, 64, 128],
            ]
        )

    def decode_segmap(self, label_mask, plot=False):
        """Decode segmentation class labels into a color image

        Args:
            label_mask (np.ndarray): an (M,N) array of integer values denoting
                the class label at each spatial location.
            plot (bool, optional): whether to show the resulting color image
                in a figure.
        Returns:
            (np.ndarray, optional): the resulting decoded color image.
        """
        label_colours = self.get_pascal_labels()
        r = label_mask.copy()
        g = label_mask.copy()
        b = label_mask.copy()
        for ll in range(0, self.n_classes):
            r[label_mask == ll] = label_colours[ll, 0]
            g[label_mask == ll] = label_colours[ll, 1]
            b[label_mask == ll] = label_colours[ll, 2]
        rgb = np.zeros((label_mask.shape[0], label_mask.shape[1], 3))
        rgb[:, :, 0] = r / 255.0
        rgb[:, :, 1] = g / 255.0
        rgb[:, :, 2] = b / 255.0
        if plot:
            plt.imshow(rgb)
            plt.show()
        else:
            return rgb

    def setup_annotations(self):
        """Sets up Berkley annotations by adding image indices to the
        `train_aug` split and pre-encode all segmentation labels into the
        common label_mask format (if this has not already been done). This
        function also defines the `train_aug` and `train_aug_val` data splits
        according to the description in the class docstring
        """
        sbd_path = self.sbd_path
        if sbd_path is None:
            # Previously this failed later with an opaque TypeError in pjoin.
            raise ValueError("sbd_path is required to build the train_aug splits")
        target_path = pjoin(self.root, "SegmentationClass", "pre_encoded")
        os.makedirs(target_path, exist_ok=True)
        path = pjoin(sbd_path, "dataset", "train.txt")
        with open(path, "r") as list_file:
            sbd_train_list = [line.rstrip() for line in list_file]
        train_aug = self.files["train"] + sbd_train_list
        # Keep unique elements while preserving first-seen order.
        train_aug = [train_aug[i] for i in sorted(np.unique(train_aug, return_index=True)[1])]
        self.files["train_aug"] = train_aug
        set_diff = set(self.files["val"]) - set(train_aug)  # remove overlap
        self.files["train_aug_val"] = list(set_diff)
        pre_encoded = glob.glob(pjoin(target_path, "*.png"))
        expected = np.unique(self.files["train_aug"] + self.files["val"]).size
        if len(pre_encoded) != expected:
            print("Pre-encoding segmentation masks...")
            for ii in tqdm(sbd_train_list):
                lbl_path = pjoin(sbd_path, "dataset", "cls", ii + ".mat")
                # Renamed from `data`: the old local shadowed torch.utils.data.
                mat = io.loadmat(lbl_path)
                lbl = mat["GTcls"][0]["Segmentation"][0].astype(np.int32)
                Image.fromarray(lbl).save(pjoin(target_path, ii + ".png"))
            for ii in tqdm(self.files["trainval"]):
                fname = ii + ".png"
                lbl_path = pjoin(self.root, "SegmentationClass", fname)
                lbl = np.array(Image.open(lbl_path))
                lbl[lbl == 255] = 0  # void boundary pixels -> background
                Image.fromarray(lbl).save(pjoin(target_path, fname))
        assert expected == 9733, "unexpected dataset sizes"
from os.path import join as pjoin
import collections
import glob
import numpy as np
import scipy.io as io
import matplotlib.pyplot as plt
from PIL import Image
from tqdm import tqdm
from torch.utils import data
from pysmg.data.transforms import default_transforms
class PascalVOC(data.Dataset):
    """Data loader for the Pascal VOC semantic segmentation dataset.

    Annotations from both the original VOC data (which consist of RGB images
    in which colours map to specific classes) and the SBD (Berkely) dataset
    (where annotations are stored as .mat files) are converted into a common
    `label_mask` format. Under this format, each mask is an (M,N) array of
    integer values from 0 to 21, where 0 represents the background class.
    The label masks are stored in a new folder, called `pre_encoded`, which
    is added as a subdirectory of the `SegmentationClass` folder in the
    original Pascal VOC data layout.

    A total of five data splits are provided for working with the VOC data:
        train: The original VOC 2012 training data - 1464 images
        val: The original VOC 2012 validation data - 1449 images
        trainval: The combination of `train` and `val` - 2913 images
        train_aug: The unique images present in both the train split and
            training images from SBD: - 8829 images (the unique members
            of the result of combining lists of length 1464 and 8498)
        train_aug_val: The original VOC 2012 validation data minus the images
            present in `train_aug` (This is done with the same logic as
            the validation set used in FCN PAMI paper, but with VOC 2012
            rather than VOC 2011) - 904 images
    """

    def __init__(
        self,
        root,
        sbd_path=None,
        split="train_aug",
        is_transform=False,
        img_size="same",
        augmentations=None,
        normalize_mean=(0.485, 0.456, 0.406),
        normalize_std=(0.229, 0.224, 0.225),
    ):
        # NOTE: the normalisation defaults are tuples; the previous list
        # defaults were shared mutable default arguments.
        self.root = root
        self.sbd_path = sbd_path
        self.split = split
        self.is_transform = is_transform
        self.augmentations = augmentations
        self.n_classes = 21
        self.files = collections.defaultdict(list)
        self.img_size = img_size if isinstance(img_size, tuple) else (img_size, img_size)
        self.normalize = (normalize_mean, normalize_std)
        # Read the three base split lists shipped with VOC.  `split_name`
        # intentionally does not reuse the `split` parameter name (the old
        # code shadowed it), and the file handle is closed deterministically.
        for split_name in ["train", "val", "trainval"]:
            path = pjoin(self.root, "ImageSets", "Segmentation", split_name + ".txt")
            with open(path, "r") as list_file:
                self.files[split_name] = [line.rstrip() for line in list_file]
        self.setup_annotations()

    def __len__(self):
        """Number of samples in the active split."""
        return len(self.files[self.split])

    def __getitem__(self, index):
        """Return the (image, label) pair at *index* in the active split."""
        im_name = self.files[self.split][index]
        im_path = pjoin(self.root, "JPEGImages", im_name + ".jpg")
        lbl_path = pjoin(self.root, "SegmentationClass", "pre_encoded", im_name + ".png")
        im = Image.open(im_path)
        lbl = Image.open(lbl_path)
        if self.augmentations is not None:
            im, lbl = self.augmentations(im, lbl)
        if self.is_transform:
            im, lbl = self.transform(im, lbl)
        return im, lbl

    def transform(self, img, lbl):
        """Apply default tensor transforms; map the 255 'void' label to background."""
        img, lbl = default_transforms(
            img,
            lbl,
            normalize=self.normalize,
            size=self.img_size
        )
        lbl[lbl == 255] = 0
        return img, lbl

    def get_pascal_labels(self):
        """Return the (21, 3) RGB colour map of the Pascal VOC classes.

        BUG FIX: this method was referenced by `decode_segmap` but missing
        from the class, so decoding raised AttributeError.
        """
        return np.asarray(
            [
                [0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0],
                [0, 0, 128], [128, 0, 128], [0, 128, 128], [128, 128, 128],
                [64, 0, 0], [192, 0, 0], [64, 128, 0], [192, 128, 0],
                [64, 0, 128], [192, 0, 128], [64, 128, 128], [192, 128, 128],
                [0, 64, 0], [128, 64, 0], [0, 192, 0], [128, 192, 0],
                [0, 64, 128],
            ]
        )

    def decode_segmap(self, label_mask, plot=False):
        """Decode segmentation class labels into a color image

        Args:
            label_mask (np.ndarray): an (M,N) array of integer values denoting
                the class label at each spatial location.
            plot (bool, optional): whether to show the resulting color image
                in a figure.
        Returns:
            (np.ndarray, optional): the resulting decoded color image.
        """
        label_colours = self.get_pascal_labels()
        r = label_mask.copy()
        g = label_mask.copy()
        b = label_mask.copy()
        for ll in range(0, self.n_classes):
            r[label_mask == ll] = label_colours[ll, 0]
            g[label_mask == ll] = label_colours[ll, 1]
            b[label_mask == ll] = label_colours[ll, 2]
        rgb = np.zeros((label_mask.shape[0], label_mask.shape[1], 3))
        rgb[:, :, 0] = r / 255.0
        rgb[:, :, 1] = g / 255.0
        rgb[:, :, 2] = b / 255.0
        if plot:
            plt.imshow(rgb)
            plt.show()
        else:
            return rgb

    def setup_annotations(self):
        """Sets up Berkley annotations by adding image indices to the
        `train_aug` split and pre-encode all segmentation labels into the
        common label_mask format (if this has not already been done). This
        function also defines the `train_aug` and `train_aug_val` data splits
        according to the description in the class docstring
        """
        sbd_path = self.sbd_path
        if sbd_path is None:
            # Previously this failed later with an opaque TypeError in pjoin.
            raise ValueError("sbd_path is required to build the train_aug splits")
        target_path = pjoin(self.root, "SegmentationClass", "pre_encoded")
        os.makedirs(target_path, exist_ok=True)
        path = pjoin(sbd_path, "dataset", "train.txt")
        with open(path, "r") as list_file:
            sbd_train_list = [line.rstrip() for line in list_file]
        train_aug = self.files["train"] + sbd_train_list
        # Keep unique elements while preserving first-seen order.
        train_aug = [train_aug[i] for i in sorted(np.unique(train_aug, return_index=True)[1])]
        self.files["train_aug"] = train_aug
        set_diff = set(self.files["val"]) - set(train_aug)  # remove overlap
        self.files["train_aug_val"] = list(set_diff)
        pre_encoded = glob.glob(pjoin(target_path, "*.png"))
        expected = np.unique(self.files["train_aug"] + self.files["val"]).size
        if len(pre_encoded) != expected:
            print("Pre-encoding segmentation masks...")
            for ii in tqdm(sbd_train_list):
                lbl_path = pjoin(sbd_path, "dataset", "cls", ii + ".mat")
                # Renamed from `data`: the old local shadowed torch.utils.data.
                mat = io.loadmat(lbl_path)
                lbl = mat["GTcls"][0]["Segmentation"][0].astype(np.int32)
                Image.fromarray(lbl).save(pjoin(target_path, ii + ".png"))
            for ii in tqdm(self.files["trainval"]):
                fname = ii + ".png"
                lbl_path = pjoin(self.root, "SegmentationClass", fname)
                lbl = np.array(Image.open(lbl_path))
                lbl[lbl == 255] = 0  # void boundary pixels -> background
                Image.fromarray(lbl).save(pjoin(target_path, fname))
        assert expected == 9733, "unexpected dataset sizes"
import os
import json
import argparse
from dateutil.parser import parse
from datetime import date, datetime, timedelta
from github import Github, BadCredentialsException
# Directory name for this tool's configuration (resolved against
# $XDG_CONFIG_HOME or ~/.config by get_config_path()).
SCRIPT_FOLDER = 'git-tools'
# JSON file storing the GitHub access token and the tracked repository names.
CONFIG_FILENAME = 'daily-report.json'
# Command-line interface definition.  Exactly one action is expected per
# invocation; with no action flags the script produces the daily report.
parser = argparse.ArgumentParser(
    description="Show daily activity on GitHub and (optionally) send it via e-mail.",
    epilog="Find more information at https://digitalduke.github.io/git-tools/"
)
parser.add_argument(
    '--version',
    action='version',
    version='GitHub daily report version 1.0'
)
# Report date; defaults to the current day.
parser.add_argument(
    '--date',
    action='store',
    default='today',
    metavar="YYYY-MM-DD",
    type=str,
    dest='date',
    help='date in ISO 8601 format, for example, 2018-10-16, or today if omitted'
)
# Credential management: store or remove the GitHub access token.
parser.add_argument(
    '--store-token',
    action='store',
    default=None,
    metavar="TOKEN",
    type=str,
    dest='store_token',
    help='save GitHub access token into configuration file'
)
parser.add_argument(
    '--remove-token',
    action='store_true',
    dest='remove_token',
    help='remove GitHub access token from configuration file'
)
# Repository list management: add, remove, or list tracked repositories.
parser.add_argument(
    '--store-repository',
    action='store',
    default=None,
    metavar="REPOSITORY",
    type=str,
    dest='store_repository',
    help='save repository name into configuration file'
)
parser.add_argument(
    '--remove-repository',
    action='store',
    default=None,
    metavar="REPOSITORY",
    type=str,
    dest='remove_repository',
    help='remove repository name from configuration file'
)
parser.add_argument(
    '--list-repositories',
    action='store_true',
    dest='list_repositories',
    help='list repositories stored in configuration file'
)
def get_config_path():
    """Return the configuration directory, honouring XDG_CONFIG_HOME over HOME.

    Returns an empty string when neither environment variable is set.
    """
    xdg = os.environ.get('XDG_CONFIG_HOME', '')
    if xdg:
        return os.path.join(xdg, SCRIPT_FOLDER)
    home = os.environ.get('HOME', '')
    if home:
        return os.path.join(home, '.config', SCRIPT_FOLDER)
    return ""
def get_config_file_full_path():
    """Return the full path of the daily-report configuration file."""
    directory = get_config_path()
    return os.path.join(directory, CONFIG_FILENAME)
def get_options():
    """Load options from the configuration file.

    Returns an empty dict (after printing a diagnostic) when the file is
    missing or contains invalid JSON, instead of raising.
    """
    options = {}
    try:
        with open(get_config_file_full_path()) as config_file:
            options = json.load(config_file)
    except FileNotFoundError:
        print("Error while reading options: file does not exist.")
    except json.JSONDecodeError:
        # BUG FIX: a corrupt config file previously crashed the script.
        print("Error while reading options: file is not valid JSON.")
    return options
def save_options(options):
    """Persist *options* as JSON, creating the config directory if needed."""
    os.makedirs(get_config_path(), exist_ok=True)
    path = get_config_file_full_path()
    with open(path, 'w') as config_file:
        json.dump(options, config_file)
def run():
    """Entry point: parse CLI arguments and execute the selected action.

    Config-management flags (token / repository) update the config file and
    exit; otherwise the daily report for the requested date is printed.
    """
    args = parser.parse_args()
    options = get_options()
    if args.store_token:
        options.update(token=args.store_token)
        save_options(options)
        print("Token successfully stored in config file.")
    elif args.remove_token:
        options.update(token="")
        save_options(options)
        print("Token successfully removed from config file.")
    elif args.store_repository:
        repositories = list(options.get('repositories', ''))
        new_repo = args.store_repository
        if new_repo not in repositories:
            repositories.append(new_repo)
            options.update(repositories=repositories)
            save_options(options)
            print("Repository %s successfully stored in config file." % new_repo)
        else:
            print("Repository %s already in config file." % new_repo)
    elif args.remove_repository:
        repositories = list(options.get('repositories', ''))
        repo_name = args.remove_repository
        if repo_name in repositories:
            repositories.remove(repo_name)
            options.update(repositories=repositories)
            save_options(options)
            print("Repository %s successfully removed from config file." % repo_name)
        else:
            print("Repository %s not in config file." % repo_name)
    elif args.list_repositories:
        repositories = list(options.get('repositories', ''))
        for repo_name in repositories:
            print(repo_name)
    else:
        # Report window: [date_since 00:00, next day 00:00].
        if args.date == 'today':
            date_since = date.today()
        else:
            date_since = parse(args.date)
        date_since = datetime.combine(date_since, datetime.min.time())
        date_until = datetime.combine(date_since.date() + timedelta(days=1), datetime.min.time())
        github = Github(options.get('token', ''))
        user = github.get_user()
        # The closed-issues report is user-level, not repository-level;
        # print it once (previously it was duplicated for every repository).
        try:
            closed_issues = user.get_issues(state="closed", since=date_since)
            print("List of daily closed issues")
            for issue in closed_issues:
                if date_since <= issue.closed_at <= date_until:
                    print(issue.number, issue.html_url, issue.title)
            print()
        except Exception:
            print("Can't get list of closed issues.")
        repositories = list(options.get('repositories', ''))
        for repository in repositories:
            try:
                repo = github.get_repo(repository)
            except Exception:
                print("Can't get repo %s" % repository)
                # BUG FIX: previously execution fell through and used the
                # unbound `repo`, raising NameError on the first failure.
                continue
            try:
                commits = repo.get_commits(since=date_since, author=user)
                print("List of daily commits in repo \"%s\" in PR which already closed" % repository)
                for commit in commits:
                    print(commit.sha[:7], commit.html_url, commit.commit.message)
                print()
            except Exception:
                print("Can't get list of commits in PR which already closed.")
            try:
                pulls = repo.get_pulls(state="open", base="master")
                if pulls:
                    print("List of daily commits in repo \"%s\" in PR which don't closed" % repository)
                    for pr in pulls:
                        print_pr_number = True
                        for commit in pr.get_commits():
                            # BUG FIX: commits pushed without a linked GitHub
                            # account have committer == None.
                            if commit.committer is None or commit.committer.login != user.login:
                                continue
                            if date_since <= commit.commit.author.date <= date_until:
                                if print_pr_number:
                                    print("PR#%s %s %s" % (pr.number, pr.html_url, pr.title))
                                    print_pr_number = False
                                print(commit.sha[:7], commit.html_url, commit.commit.message)
            except Exception:
                print("Can't get list of commits in opened PR.")
    print("\r\nDone.")
# Script entry point: generate the report when executed directly.
if __name__ == "__main__":
    run()
import json
import argparse
from dateutil.parser import parse
from datetime import date, datetime, timedelta
from github import Github, BadCredentialsException
# Directory name for this tool's configuration (resolved against
# $XDG_CONFIG_HOME or ~/.config by get_config_path()).
SCRIPT_FOLDER = 'git-tools'
# JSON file storing the GitHub access token and the tracked repository names.
CONFIG_FILENAME = 'daily-report.json'
# Command-line interface definition.  Exactly one action is expected per
# invocation; with no action flags the script produces the daily report.
parser = argparse.ArgumentParser(
    description="Show daily activity on GitHub and (optionally) send it via e-mail.",
    epilog="Find more information at https://digitalduke.github.io/git-tools/"
)
parser.add_argument(
    '--version',
    action='version',
    version='GitHub daily report version 1.0'
)
# Report date; defaults to the current day.
parser.add_argument(
    '--date',
    action='store',
    default='today',
    metavar="YYYY-MM-DD",
    type=str,
    dest='date',
    help='date in ISO 8601 format, for example, 2018-10-16, or today if omitted'
)
# Credential management: store or remove the GitHub access token.
parser.add_argument(
    '--store-token',
    action='store',
    default=None,
    metavar="TOKEN",
    type=str,
    dest='store_token',
    help='save GitHub access token into configuration file'
)
parser.add_argument(
    '--remove-token',
    action='store_true',
    dest='remove_token',
    help='remove GitHub access token from configuration file'
)
# Repository list management: add, remove, or list tracked repositories.
parser.add_argument(
    '--store-repository',
    action='store',
    default=None,
    metavar="REPOSITORY",
    type=str,
    dest='store_repository',
    help='save repository name into configuration file'
)
parser.add_argument(
    '--remove-repository',
    action='store',
    default=None,
    metavar="REPOSITORY",
    type=str,
    dest='remove_repository',
    help='remove repository name from configuration file'
)
parser.add_argument(
    '--list-repositories',
    action='store_true',
    dest='list_repositories',
    help='list repositories stored in configuration file'
)
def get_config_path():
    """Return the configuration directory, honouring XDG_CONFIG_HOME over HOME.

    Returns an empty string when neither environment variable is set.
    """
    xdg = os.environ.get('XDG_CONFIG_HOME', '')
    if xdg:
        return os.path.join(xdg, SCRIPT_FOLDER)
    home = os.environ.get('HOME', '')
    if home:
        return os.path.join(home, '.config', SCRIPT_FOLDER)
    return ""
def get_config_file_full_path():
    """Return the full path of the daily-report configuration file."""
    directory = get_config_path()
    return os.path.join(directory, CONFIG_FILENAME)
def get_options():
    """Load options from the configuration file.

    Returns an empty dict (after printing a diagnostic) when the file is
    missing or contains invalid JSON, instead of raising.
    """
    options = {}
    try:
        with open(get_config_file_full_path()) as config_file:
            options = json.load(config_file)
    except FileNotFoundError:
        print("Error while reading options: file does not exist.")
    except json.JSONDecodeError:
        # BUG FIX: a corrupt config file previously crashed the script.
        print("Error while reading options: file is not valid JSON.")
    return options
def save_options(options):
    """Persist *options* as JSON, creating the config directory if needed."""
    os.makedirs(get_config_path(), exist_ok=True)
    path = get_config_file_full_path()
    with open(path, 'w') as config_file:
        json.dump(options, config_file)
def run():
    """Entry point: parse CLI arguments and execute the selected action.

    Config-management flags (token / repository) update the config file and
    exit; otherwise the daily report for the requested date is printed.
    """
    args = parser.parse_args()
    options = get_options()
    if args.store_token:
        options.update(token=args.store_token)
        save_options(options)
        print("Token successfully stored in config file.")
    elif args.remove_token:
        options.update(token="")
        save_options(options)
        print("Token successfully removed from config file.")
    elif args.store_repository:
        repositories = list(options.get('repositories', ''))
        new_repo = args.store_repository
        if new_repo not in repositories:
            repositories.append(new_repo)
            options.update(repositories=repositories)
            save_options(options)
            print("Repository %s successfully stored in config file." % new_repo)
        else:
            print("Repository %s already in config file." % new_repo)
    elif args.remove_repository:
        repositories = list(options.get('repositories', ''))
        repo_name = args.remove_repository
        if repo_name in repositories:
            repositories.remove(repo_name)
            options.update(repositories=repositories)
            save_options(options)
            print("Repository %s successfully removed from config file." % repo_name)
        else:
            print("Repository %s not in config file." % repo_name)
    elif args.list_repositories:
        repositories = list(options.get('repositories', ''))
        for repo_name in repositories:
            print(repo_name)
    else:
        # Report window: [date_since 00:00, next day 00:00].
        if args.date == 'today':
            date_since = date.today()
        else:
            date_since = parse(args.date)
        date_since = datetime.combine(date_since, datetime.min.time())
        date_until = datetime.combine(date_since.date() + timedelta(days=1), datetime.min.time())
        github = Github(options.get('token', ''))
        user = github.get_user()
        # The closed-issues report is user-level, not repository-level;
        # print it once (previously it was duplicated for every repository).
        try:
            closed_issues = user.get_issues(state="closed", since=date_since)
            print("List of daily closed issues")
            for issue in closed_issues:
                if date_since <= issue.closed_at <= date_until:
                    print(issue.number, issue.html_url, issue.title)
            print()
        except Exception:
            print("Can't get list of closed issues.")
        repositories = list(options.get('repositories', ''))
        for repository in repositories:
            try:
                repo = github.get_repo(repository)
            except Exception:
                print("Can't get repo %s" % repository)
                # BUG FIX: previously execution fell through and used the
                # unbound `repo`, raising NameError on the first failure.
                continue
            try:
                commits = repo.get_commits(since=date_since, author=user)
                print("List of daily commits in repo \"%s\" in PR which already closed" % repository)
                for commit in commits:
                    print(commit.sha[:7], commit.html_url, commit.commit.message)
                print()
            except Exception:
                print("Can't get list of commits in PR which already closed.")
            try:
                pulls = repo.get_pulls(state="open", base="master")
                if pulls:
                    print("List of daily commits in repo \"%s\" in PR which don't closed" % repository)
                    for pr in pulls:
                        print_pr_number = True
                        for commit in pr.get_commits():
                            # BUG FIX: commits pushed without a linked GitHub
                            # account have committer == None.
                            if commit.committer is None or commit.committer.login != user.login:
                                continue
                            if date_since <= commit.commit.author.date <= date_until:
                                if print_pr_number:
                                    print("PR#%s %s %s" % (pr.number, pr.html_url, pr.title))
                                    print_pr_number = False
                                print(commit.sha[:7], commit.html_url, commit.commit.message)
            except Exception:
                print("Can't get list of commits in opened PR.")
    print("\r\nDone.")
# Script entry point: generate the report when executed directly.
if __name__ == "__main__":
    run()
import torch
import argparse
import torchvision
import torch.nn as nn
from torch.autograd import Function
from model.base_model import Base_Model
class CIBHash(Base_Model):
    """Contrastive Information Bottleneck hashing model (CIBHash).

    A frozen VGG-16 trunk extracts 4096-d features; a small trainable
    encoder maps them to code probabilities which are binarised by a sign
    layer with a straight-through gradient estimator.
    """

    def __init__(self, hparams):
        super().__init__(hparams=hparams)

    def define_parameters(self):
        """Build the frozen VGG-16 feature extractor and the trainable encoder."""
        self.vgg = torchvision.models.vgg16(pretrained=True)
        # Drop the final classification layer; keep the 4096-d fc7 features.
        self.vgg.classifier = nn.Sequential(*list(self.vgg.classifier.children())[:6])
        for param in self.vgg.parameters():
            param.requires_grad = False
        self.encoder = nn.Sequential(
            nn.Linear(4096, 1024),
            nn.ReLU(),
            nn.Linear(1024, self.hparams.encode_length),
        )
        self.criterion = NtXentLoss(self.hparams.batch_size, self.hparams.temperature)

    def _embed(self, img):
        """Run a batch through the frozen VGG trunk and return code probabilities.

        Factored out of forward/encode_discrete, which previously repeated
        this pipeline three times.
        """
        feat = self.vgg.features(img)
        feat = feat.view(feat.size(0), -1)
        feat = self.vgg.classifier(feat)
        return torch.sigmoid(self.encoder(feat))

    def forward(self, imgi, imgj, device):
        """Compute the contrastive + weighted KL loss for two augmented views.

        Returns a dict with 'loss', 'contra_loss' and 'kl_loss'.
        """
        prob_i = self._embed(imgi)
        z_i = hash_layer(prob_i - 0.5)
        prob_j = self._embed(imgj)
        z_j = hash_layer(prob_j - 0.5)
        # Symmetrised KL between the two Bernoulli code distributions.
        kl_loss = (self.compute_kl(prob_i, prob_j) + self.compute_kl(prob_j, prob_i)) / 2
        contra_loss = self.criterion(z_i, z_j, device)
        loss = contra_loss + self.hparams.weight * kl_loss
        return {'loss': loss, 'contra_loss': contra_loss, 'kl_loss': kl_loss}

    def encode_discrete(self, x):
        """Return binary hash codes (sign of centred probabilities) for *x*."""
        return hash_layer(self._embed(x) - 0.5)

    def compute_kl(self, prob, prob_v):
        """Mean element-wise KL(Bernoulli(prob) || Bernoulli(prob_v)).

        `prob_v` is detached so gradients flow only through `prob`.
        """
        prob_v = prob_v.detach()
        kl = prob * (torch.log(prob + 1e-8) - torch.log(prob_v + 1e-8)) \
            + (1 - prob) * (torch.log(1 - prob + 1e-8) - torch.log(1 - prob_v + 1e-8))
        return torch.mean(torch.sum(kl, dim=1))

    def configure_optimizers(self):
        """Optimise only the encoder head; the VGG trunk stays frozen."""
        return torch.optim.Adam([{'params': self.encoder.parameters()}], lr=self.hparams.lr)

    def get_hparams_grid(self):
        """Hyper-parameter search grid used by the base model's tuner."""
        grid = Base_Model.get_general_hparams_grid()
        grid.update({
            'temperature': [0.2, 0.3, 0.4],
            'weight': [0.001, 0.005, 0.0005, 0.0001, 0.00005, 0.00001],
        })
        return grid

    @staticmethod
    def get_model_specific_argparser():
        """Extend the base argparser with CIBHash-specific options."""
        parser = Base_Model.get_general_argparser()
        # BUG FIX: "%(default)d" truncated the float default 0.3 to "0"
        # in --help output; use %(default)s.
        parser.add_argument("-t", "--temperature", default=0.3, type=float,
                            help="Temperature [%(default)s]")
        parser.add_argument('-w', "--weight", default=0.001, type=float,
                            help='weight of I(x,z) [%(default)f]')
        return parser
class hash(Function):
    """Sign binarisation with a straight-through gradient estimator."""

    @staticmethod
    def forward(ctx, input):
        # Binarise to {-1, 0, +1}; nothing needs saving for backward.
        return torch.sign(input)

    @staticmethod
    def backward(ctx, grad_output):
        # Straight-through estimator: pass the upstream gradient unchanged.
        return grad_output
def hash_layer(input):
    """Apply the sign-binarisation autograd function (straight-through grad)."""
    return hash.apply(input)
class NtXentLoss(nn.Module):
    """Normalised-temperature cross-entropy (NT-Xent) contrastive loss.

    Negatives are not sampled explicitly: for each positive pair, the other
    2(N - 1) augmented examples in the minibatch serve as negatives.
    """

    def __init__(self, batch_size, temperature):
        super(NtXentLoss, self).__init__()
        self.temperature = temperature
        self.similarityF = nn.CosineSimilarity(dim=2)
        self.criterion = nn.CrossEntropyLoss(reduction='sum')

    def mask_correlated_samples(self, batch_size):
        """Boolean (2B, 2B) mask: True on negative pairs, False on the
        diagonal and on the positive (i, i+B) / (i+B, i) pairs."""
        N = 2 * batch_size
        mask = torch.ones((N, N), dtype=bool).fill_diagonal_(0)
        for i in range(batch_size):
            mask[i, batch_size + i] = 0
            mask[batch_size + i, i] = 0
        return mask

    def forward(self, z_i, z_j, device):
        """Return the NT-Xent loss for two batches of paired embeddings."""
        batch_size = z_i.shape[0]
        N = 2 * batch_size
        # Pairwise cosine similarity of all 2B embeddings, scaled by T.
        stacked = torch.cat((z_i, z_j), dim=0)
        sim = self.similarityF(stacked.unsqueeze(1), stacked.unsqueeze(0)) / self.temperature
        # The two off-diagonals at offset +/-B hold the positive pairs.
        pos = torch.cat((torch.diag(sim, batch_size), torch.diag(sim, -batch_size)), dim=0)
        positive_samples = pos.view(N, 1)
        negative_samples = sim[self.mask_correlated_samples(batch_size)].view(N, -1)
        # Column 0 holds each row's positive, so every target label is 0.
        logits = torch.cat((positive_samples, negative_samples), dim=1)
        labels = torch.zeros(N).to(device).long()
        return self.criterion(logits, labels) / N
import argparse
import torchvision
import torch.nn as nn
from torch.autograd import Function
from model.base_model import Base_Model
class CIBHash(Base_Model):
    """Contrastive Information Bottleneck hashing model (CIBHash).

    A frozen VGG-16 trunk extracts 4096-d features; a small trainable
    encoder maps them to code probabilities which are binarised by a sign
    layer with a straight-through gradient estimator.
    """

    def __init__(self, hparams):
        super().__init__(hparams=hparams)

    def define_parameters(self):
        """Build the frozen VGG-16 feature extractor and the trainable encoder."""
        self.vgg = torchvision.models.vgg16(pretrained=True)
        # Drop the final classification layer; keep the 4096-d fc7 features.
        self.vgg.classifier = nn.Sequential(*list(self.vgg.classifier.children())[:6])
        for param in self.vgg.parameters():
            param.requires_grad = False
        self.encoder = nn.Sequential(
            nn.Linear(4096, 1024),
            nn.ReLU(),
            nn.Linear(1024, self.hparams.encode_length),
        )
        self.criterion = NtXentLoss(self.hparams.batch_size, self.hparams.temperature)

    def _embed(self, img):
        """Run a batch through the frozen VGG trunk and return code probabilities.

        Factored out of forward/encode_discrete, which previously repeated
        this pipeline three times.
        """
        feat = self.vgg.features(img)
        feat = feat.view(feat.size(0), -1)
        feat = self.vgg.classifier(feat)
        return torch.sigmoid(self.encoder(feat))

    def forward(self, imgi, imgj, device):
        """Compute the contrastive + weighted KL loss for two augmented views.

        Returns a dict with 'loss', 'contra_loss' and 'kl_loss'.
        """
        prob_i = self._embed(imgi)
        z_i = hash_layer(prob_i - 0.5)
        prob_j = self._embed(imgj)
        z_j = hash_layer(prob_j - 0.5)
        # Symmetrised KL between the two Bernoulli code distributions.
        kl_loss = (self.compute_kl(prob_i, prob_j) + self.compute_kl(prob_j, prob_i)) / 2
        contra_loss = self.criterion(z_i, z_j, device)
        loss = contra_loss + self.hparams.weight * kl_loss
        return {'loss': loss, 'contra_loss': contra_loss, 'kl_loss': kl_loss}

    def encode_discrete(self, x):
        """Return binary hash codes (sign of centred probabilities) for *x*."""
        return hash_layer(self._embed(x) - 0.5)

    def compute_kl(self, prob, prob_v):
        """Mean element-wise KL(Bernoulli(prob) || Bernoulli(prob_v)).

        `prob_v` is detached so gradients flow only through `prob`.
        """
        prob_v = prob_v.detach()
        kl = prob * (torch.log(prob + 1e-8) - torch.log(prob_v + 1e-8)) \
            + (1 - prob) * (torch.log(1 - prob + 1e-8) - torch.log(1 - prob_v + 1e-8))
        return torch.mean(torch.sum(kl, dim=1))

    def configure_optimizers(self):
        """Optimise only the encoder head; the VGG trunk stays frozen."""
        return torch.optim.Adam([{'params': self.encoder.parameters()}], lr=self.hparams.lr)

    def get_hparams_grid(self):
        """Hyper-parameter search grid used by the base model's tuner."""
        grid = Base_Model.get_general_hparams_grid()
        grid.update({
            'temperature': [0.2, 0.3, 0.4],
            'weight': [0.001, 0.005, 0.0005, 0.0001, 0.00005, 0.00001],
        })
        return grid

    @staticmethod
    def get_model_specific_argparser():
        """Extend the base argparser with CIBHash-specific options."""
        parser = Base_Model.get_general_argparser()
        # BUG FIX: "%(default)d" truncated the float default 0.3 to "0"
        # in --help output; use %(default)s.
        parser.add_argument("-t", "--temperature", default=0.3, type=float,
                            help="Temperature [%(default)s]")
        parser.add_argument('-w', "--weight", default=0.001, type=float,
                            help='weight of I(x,z) [%(default)f]')
        return parser
class hash(Function):
    """Sign binarisation with a straight-through gradient estimator."""

    @staticmethod
    def forward(ctx, input):
        # Binarise to {-1, 0, +1}; nothing needs saving for backward.
        return torch.sign(input)

    @staticmethod
    def backward(ctx, grad_output):
        # Straight-through estimator: pass the upstream gradient unchanged.
        return grad_output
def hash_layer(input):
    """Apply the sign-binarisation autograd function (straight-through grad)."""
    return hash.apply(input)
class NtXentLoss(nn.Module):
    """Normalised-temperature cross-entropy (NT-Xent) contrastive loss.

    Negatives are not sampled explicitly: for each positive pair, the other
    2(N - 1) augmented examples in the minibatch serve as negatives.
    """

    def __init__(self, batch_size, temperature):
        super(NtXentLoss, self).__init__()
        self.temperature = temperature
        self.similarityF = nn.CosineSimilarity(dim=2)
        self.criterion = nn.CrossEntropyLoss(reduction='sum')

    def mask_correlated_samples(self, batch_size):
        """Boolean (2B, 2B) mask: True on negative pairs, False on the
        diagonal and on the positive (i, i+B) / (i+B, i) pairs."""
        N = 2 * batch_size
        mask = torch.ones((N, N), dtype=bool).fill_diagonal_(0)
        for i in range(batch_size):
            mask[i, batch_size + i] = 0
            mask[batch_size + i, i] = 0
        return mask

    def forward(self, z_i, z_j, device):
        """Return the NT-Xent loss for two batches of paired embeddings."""
        batch_size = z_i.shape[0]
        N = 2 * batch_size
        # Pairwise cosine similarity of all 2B embeddings, scaled by T.
        stacked = torch.cat((z_i, z_j), dim=0)
        sim = self.similarityF(stacked.unsqueeze(1), stacked.unsqueeze(0)) / self.temperature
        # The two off-diagonals at offset +/-B hold the positive pairs.
        pos = torch.cat((torch.diag(sim, batch_size), torch.diag(sim, -batch_size)), dim=0)
        positive_samples = pos.view(N, 1)
        negative_samples = sim[self.mask_correlated_samples(batch_size)].view(N, -1)
        # Column 0 holds each row's positive, so every target label is 0.
        logits = torch.cat((positive_samples, negative_samples), dim=1)
        labels = torch.zeros(N).to(device).long()
        return self.criterion(logits, labels) / N
from __future__ import with_statement
from __future__ import absolute_import
from __future__ import print_function
from collections import defaultdict
#typing
import pytest
import numpy
from allennlp.data import Token, Vocabulary
from allennlp.data.fields import TextField
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenCharactersIndexer, TokenIndexer
from allennlp.common.testing import AllenNlpTestCase
from allennlp.common.checks import ConfigurationError
from allennlp.common.util import pad_sequence_to_length
class DictReturningTokenIndexer(TokenIndexer):
    u"""
    A stub TokenIndexer that returns multiple arrays of different lengths.
    """
    def count_vocab_items(self, token, counter):
        # Stub: this indexer contributes nothing to the vocabulary counts.
        pass

    def tokens_to_indices(self, tokens, vocabulary, index_name):  # pylint: disable=unused-argument
        # Two keys of different lengths: word ids framed by sentinel values,
        # plus a fixed-length extra array.
        word_ids = [vocabulary.get_token_index(token.text, u'words') for token in tokens]
        return {
            u"token_ids": [10, 15] + word_ids + [25],
            u"additional_key": [22, 29],
        }

    def get_padding_token(self):
        return 0

    def get_padding_lengths(self, token):  # pylint: disable=unused-argument
        return {}

    def pad_token_sequence(self,
                           tokens,
                           desired_num_tokens,
                           padding_lengths):  # pylint: disable=unused-argument
        # Pad each keyed array independently to its requested length.
        return {key: pad_sequence_to_length(val, desired_num_tokens[key])
                for key, val in list(tokens.items())}

    def get_keys(self, index_name):
        # pylint: disable=unused-argument,no-self-use
        return [u"token_ids", u"additional_key"]
class TestTextField(AllenNlpTestCase):
def setUp(self):
self.vocab = Vocabulary()
self.vocab.add_token_to_namespace(u"sentence", namespace=u'words')
self.vocab.add_token_to_namespace(u"A", namespace=u'words')
self.vocab.add_token_to_namespace(u"A", namespace=u'characters')
self.vocab.add_token_to_namespace(u"s", namespace=u'characters')
self.vocab.add_token_to_namespace(u"e", namespace=u'characters')
self.vocab.add_token_to_namespace(u"n", namespace=u'characters')
self.vocab.add_token_to_namespace(u"t", namespace=u'characters')
self.vocab.add_token_to_namespace(u"c", namespace=u'characters')
super(TestTextField, self).setUp()
def test_field_counts_vocab_items_correctly(self):
field = TextField([Token(t) for t in [u"This", u"is", u"a", u"sentence", u"."]],
token_indexers={u"words": SingleIdTokenIndexer(u"words")})
namespace_token_counts = defaultdict(lambda: defaultdict(int))
field.count_vocab_items(namespace_token_counts)
assert namespace_token_counts[u"words"][u"This"] == 1
assert namespace_token_counts[u"words"][u"is"] == 1
assert namespace_token_counts[u"words"][u"a"] == 1
assert namespace_token_counts[u"words"][u"sentence"] == 1
assert namespace_token_counts[u"words"][u"."] == 1
assert list(namespace_token_counts.keys()) == [u"words"]
field = TextField([Token(t) for t in [u"This", u"is", u"a", u"sentence", u"."]],
token_indexers={u"characters": TokenCharactersIndexer(u"characters")})
namespace_token_counts = defaultdict(lambda: defaultdict(int))
field.count_vocab_items(namespace_token_counts)
assert namespace_token_counts[u"characters"][u"T"] == 1
assert namespace_token_counts[u"characters"][u"h"] == 1
assert namespace_token_counts[u"characters"][u"i"] == 2
assert namespace_token_counts[u"characters"][u"s"] == 3
assert namespace_token_counts[u"characters"][u"a"] == 1
assert namespace_token_counts[u"characters"][u"e"] == 3
assert namespace_token_counts[u"characters"][u"n"] == 2
assert namespace_token_counts[u"characters"][u"t"] == 1
assert namespace_token_counts[u"characters"][u"c"] == 1
assert namespace_token_counts[u"characters"][u"."] == 1
assert list(namespace_token_counts.keys()) == [u"characters"]
field = TextField([Token(t) for t in [u"This", u"is", u"a", u"sentence", u"."]],
token_indexers={u"words": SingleIdTokenIndexer(u"words"),
u"characters": TokenCharactersIndexer(u"characters")})
namespace_token_counts = defaultdict(lambda: defaultdict(int))
field.count_vocab_items(namespace_token_counts)
assert namespace_token_counts[u"characters"][u"T"] == 1
assert namespace_token_counts[u"characters"][u"h"] == 1
assert namespace_token_counts[u"characters"][u"i"] == 2
assert namespace_token_counts[u"characters"][u"s"] == 3
assert namespace_token_counts[u"characters"][u"a"] == 1
assert namespace_token_counts[u"characters"][u"e"] == 3
assert namespace_token_counts[u"characters"][u"n"] == 2
assert namespace_token_counts[u"characters"][u"t"] == 1
assert namespace_token_counts[u"characters"][u"c"] == 1
assert namespace_token_counts[u"characters"][u"."] == 1
assert namespace_token_counts[u"words"][u"This"] == 1
assert namespace_token_counts[u"words"][u"is"] == 1
assert namespace_token_counts[u"words"][u"a"] == 1
assert namespace_token_counts[u"words"][u"sentence"] == 1
assert namespace_token_counts[u"words"][u"."] == 1
assert set(namespace_token_counts.keys()) == set([u"words", u"characters"])
def test_index_converts_field_correctly(self):
vocab = Vocabulary()
sentence_index = vocab.add_token_to_namespace(u"sentence", namespace=u'words')
capital_a_index = vocab.add_token_to_namespace(u"A", namespace=u'words')
capital_a_char_index = vocab.add_token_to_namespace(u"A", namespace=u'characters')
s_index = vocab.add_token_to_namespace(u"s", namespace=u'characters')
e_index = vocab.add_token_to_namespace(u"e", namespace=u'characters')
n_index = vocab.add_token_to_namespace(u"n", namespace=u'characters')
t_index = vocab.add_token_to_namespace(u"t", namespace=u'characters')
c_index = vocab.add_token_to_namespace(u"c", namespace=u'characters')
field = TextField([Token(t) for t in [u"A", u"sentence"]],
{u"words": SingleIdTokenIndexer(namespace=u"words")})
field.index(vocab)
# pylint: disable=protected-access
assert field._indexed_tokens[u"words"] == [capital_a_index, sentence_index]
field1 = TextField([Token(t) for t in [u"A", u"sentence"]],
{u"characters": TokenCharactersIndexer(namespace=u"characters")})
field1.index(vocab)
assert field1._indexed_tokens[u"characters"] == [[capital_a_char_index],
[s_index, e_index, n_index, t_index,
e_index, n_index, c_index, e_index]]
field2 = TextField([Token(t) for t in [u"A", u"sentence"]],
token_indexers={u"words": SingleIdTokenIndexer(namespace=u"words"),
u"characters": TokenCharactersIndexer(namespace=u"characters")})
field2.index(vocab)
assert field2._indexed_tokens[u"words"] == [capital_a_index, sentence_index]
assert field2._indexed_tokens[u"characters"] == [[capital_a_char_index],
[s_index, e_index, n_index, t_index,
e_index, n_index, c_index, e_index]]
# pylint: enable=protected-access
def test_get_padding_lengths_raises_if_no_indexed_tokens(self):
field = TextField([Token(t) for t in [u"This", u"is", u"a", u"sentence", u"."]],
token_indexers={u"words": SingleIdTokenIndexer(u"words")})
with pytest.raises(ConfigurationError):
field.get_padding_lengths()
def test_padding_lengths_are_computed_correctly(self):
field = TextField([Token(t) for t in [u"This", u"is", u"a", u"sentence", u"."]],
token_indexers={u"words": SingleIdTokenIndexer(u"words")})
field.index(self.vocab)
padding_lengths = field.get_padding_lengths()
assert padding_lengths == {u"num_tokens": 5}
field = TextField([Token(t) for t in [u"This", u"is", u"a", u"sentence", u"."]],
token_indexers={u"characters": TokenCharactersIndexer(u"characters")})
field.index(self.vocab)
padding_lengths = field.get_padding_lengths()
assert padding_lengths == {u"num_tokens": 5, u"num_token_characters": 8}
field = TextField([Token(t) for t in [u"This", u"is", u"a", u"sentence", u"."]],
token_indexers={u"characters": TokenCharactersIndexer(u"characters"),
u"words": SingleIdTokenIndexer(u"words")})
field.index(self.vocab)
padding_lengths = field.get_padding_lengths()
assert padding_lengths == {u"num_tokens": 5, u"num_token_characters": 8}
def test_as_tensor_handles_words(self):
field = TextField([Token(t) for t in [u"This", u"is", u"a", u"sentence", u"."]],
token_indexers={u"words": SingleIdTokenIndexer(u"words")})
field.index(self.vocab)
padding_lengths = field.get_padding_lengths()
tensor_dict = field.as_tensor(padding_lengths)
numpy.testing.assert_array_almost_equal(tensor_dict[u"words"].detach().cpu().numpy(),
numpy.array([1, 1, 1, 2, 1]))
def test_as_tensor_handles_longer_lengths(self):
field = TextField([Token(t) for t in [u"This", u"is", u"a", u"sentence", u"."]],
token_indexers={u"words": SingleIdTokenIndexer(u"words")})
field.index(self.vocab)
padding_lengths = field.get_padding_lengths()
padding_lengths[u"num_tokens"] = 10
tensor_dict = field.as_tensor(padding_lengths)
numpy.testing.assert_array_almost_equal(tensor_dict[u"words"].detach().cpu().numpy(),
numpy.array([1, 1, 1, 2, 1, 0, 0, 0, 0, 0]))
def test_as_tensor_handles_characters(self):
field = TextField([Token(t) for t in [u"This", u"is", u"a", u"sentence", u"."]],
token_indexers={u"characters": TokenCharactersIndexer(u"characters")})
field.index(self.vocab)
padding_lengths = field.get_padding_lengths()
tensor_dict = field.as_tensor(padding_lengths)
expected_character_array = numpy.array([[1, 1, 1, 3, 0, 0, 0, 0],
[1, 3, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0],
[3, 4, 5, 6, 4, 5, 7, 4],
[1, 0, 0, 0, 0, 0, 0, 0]])
numpy.testing.assert_array_almost_equal(tensor_dict[u"characters"].detach().cpu().numpy(),
expected_character_array)
def test_as_tensor_handles_words_and_characters_with_longer_lengths(self):
field = TextField([Token(t) for t in [u"a", u"sentence", u"."]],
token_indexers={u"words": SingleIdTokenIndexer(u"words"),
u"characters": TokenCharactersIndexer(u"characters")})
field.index(self.vocab)
padding_lengths = field.get_padding_lengths()
padding_lengths[u"num_tokens"] = 5
padding_lengths[u"num_token_characters"] = 10
tensor_dict = field.as_tensor(padding_lengths)
numpy.testing.assert_array_almost_equal(tensor_dict[u"words"].detach().cpu().numpy(),
numpy.array([1, 2, 1, 0, 0]))
numpy.testing.assert_array_almost_equal(tensor_dict[u"characters"].detach().cpu().numpy(),
numpy.array([[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[3, 4, 5, 6, 4, 5, 7, 4, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]))
def test_printing_doesnt_crash(self):
field = TextField([Token(t) for t in [u"A", u"sentence"]],
{u"words": SingleIdTokenIndexer(namespace=u"words")})
print(field)
def test_token_embedder_returns_dict(self):
field = TextField([Token(t) for t in [u"A", u"sentence"]],
token_indexers={u"field_with_dict": DictReturningTokenIndexer(),
u"words": SingleIdTokenIndexer(u"words"),
u"characters": TokenCharactersIndexer(u"characters")})
field.index(self.vocab)
padding_lengths = field.get_padding_lengths()
assert padding_lengths == {
u'token_ids': 5,
u'additional_key': 2,
u'words': 2,
u'characters': 2,
u'num_token_characters': 8
}
padding_lengths[u'token_ids'] = 7
padding_lengths[u'additional_key'] = 3
padding_lengths[u'words'] = 4
padding_lengths[u'characters'] = 4
tensors = field.as_tensor(padding_lengths)
assert list(tensors[u'token_ids'].shape) == [7]
assert list(tensors[u'additional_key'].shape) == [3]
assert list(tensors[u'words'].shape) == [4]
assert list(tensors[u'characters'].shape) == [4, 8] | pymagnitude/third_party/allennlp/tests/data/fields/text_field_test.py |
from __future__ import with_statement
from __future__ import absolute_import
from __future__ import print_function
from collections import defaultdict
#typing
import pytest
import numpy
from allennlp.data import Token, Vocabulary
from allennlp.data.fields import TextField
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenCharactersIndexer, TokenIndexer
from allennlp.common.testing import AllenNlpTestCase
from allennlp.common.checks import ConfigurationError
from allennlp.common.util import pad_sequence_to_length
class DictReturningTokenIndexer(TokenIndexer):
u"""
A stub TokenIndexer that returns multiple arrays of different lengths.
"""
def count_vocab_items(self, token , counter ):
pass
def tokens_to_indices(self, tokens ,
vocabulary ,
index_name ) : # pylint: disable=unused-argument
return {
u"token_ids": [10, 15] +\
[vocabulary.get_token_index(token.text, u'words') for token in tokens] +\
[25],
u"additional_key": [22, 29]
}
def get_padding_token(self) :
return 0
def get_padding_lengths(self, token ) : # pylint: disable=unused-argument
return {}
def pad_token_sequence(self,
tokens ,
desired_num_tokens ,
padding_lengths ) : # pylint: disable=unused-argument
return dict((key, pad_sequence_to_length(val, desired_num_tokens[key])) for key, val in list(tokens.items()))
def get_keys(self, index_name ) :
# pylint: disable=unused-argument,no-self-use
return [u"token_ids", u"additional_key"]
class TestTextField(AllenNlpTestCase):
def setUp(self):
self.vocab = Vocabulary()
self.vocab.add_token_to_namespace(u"sentence", namespace=u'words')
self.vocab.add_token_to_namespace(u"A", namespace=u'words')
self.vocab.add_token_to_namespace(u"A", namespace=u'characters')
self.vocab.add_token_to_namespace(u"s", namespace=u'characters')
self.vocab.add_token_to_namespace(u"e", namespace=u'characters')
self.vocab.add_token_to_namespace(u"n", namespace=u'characters')
self.vocab.add_token_to_namespace(u"t", namespace=u'characters')
self.vocab.add_token_to_namespace(u"c", namespace=u'characters')
super(TestTextField, self).setUp()
def test_field_counts_vocab_items_correctly(self):
field = TextField([Token(t) for t in [u"This", u"is", u"a", u"sentence", u"."]],
token_indexers={u"words": SingleIdTokenIndexer(u"words")})
namespace_token_counts = defaultdict(lambda: defaultdict(int))
field.count_vocab_items(namespace_token_counts)
assert namespace_token_counts[u"words"][u"This"] == 1
assert namespace_token_counts[u"words"][u"is"] == 1
assert namespace_token_counts[u"words"][u"a"] == 1
assert namespace_token_counts[u"words"][u"sentence"] == 1
assert namespace_token_counts[u"words"][u"."] == 1
assert list(namespace_token_counts.keys()) == [u"words"]
field = TextField([Token(t) for t in [u"This", u"is", u"a", u"sentence", u"."]],
token_indexers={u"characters": TokenCharactersIndexer(u"characters")})
namespace_token_counts = defaultdict(lambda: defaultdict(int))
field.count_vocab_items(namespace_token_counts)
assert namespace_token_counts[u"characters"][u"T"] == 1
assert namespace_token_counts[u"characters"][u"h"] == 1
assert namespace_token_counts[u"characters"][u"i"] == 2
assert namespace_token_counts[u"characters"][u"s"] == 3
assert namespace_token_counts[u"characters"][u"a"] == 1
assert namespace_token_counts[u"characters"][u"e"] == 3
assert namespace_token_counts[u"characters"][u"n"] == 2
assert namespace_token_counts[u"characters"][u"t"] == 1
assert namespace_token_counts[u"characters"][u"c"] == 1
assert namespace_token_counts[u"characters"][u"."] == 1
assert list(namespace_token_counts.keys()) == [u"characters"]
field = TextField([Token(t) for t in [u"This", u"is", u"a", u"sentence", u"."]],
token_indexers={u"words": SingleIdTokenIndexer(u"words"),
u"characters": TokenCharactersIndexer(u"characters")})
namespace_token_counts = defaultdict(lambda: defaultdict(int))
field.count_vocab_items(namespace_token_counts)
assert namespace_token_counts[u"characters"][u"T"] == 1
assert namespace_token_counts[u"characters"][u"h"] == 1
assert namespace_token_counts[u"characters"][u"i"] == 2
assert namespace_token_counts[u"characters"][u"s"] == 3
assert namespace_token_counts[u"characters"][u"a"] == 1
assert namespace_token_counts[u"characters"][u"e"] == 3
assert namespace_token_counts[u"characters"][u"n"] == 2
assert namespace_token_counts[u"characters"][u"t"] == 1
assert namespace_token_counts[u"characters"][u"c"] == 1
assert namespace_token_counts[u"characters"][u"."] == 1
assert namespace_token_counts[u"words"][u"This"] == 1
assert namespace_token_counts[u"words"][u"is"] == 1
assert namespace_token_counts[u"words"][u"a"] == 1
assert namespace_token_counts[u"words"][u"sentence"] == 1
assert namespace_token_counts[u"words"][u"."] == 1
assert set(namespace_token_counts.keys()) == set([u"words", u"characters"])
def test_index_converts_field_correctly(self):
vocab = Vocabulary()
sentence_index = vocab.add_token_to_namespace(u"sentence", namespace=u'words')
capital_a_index = vocab.add_token_to_namespace(u"A", namespace=u'words')
capital_a_char_index = vocab.add_token_to_namespace(u"A", namespace=u'characters')
s_index = vocab.add_token_to_namespace(u"s", namespace=u'characters')
e_index = vocab.add_token_to_namespace(u"e", namespace=u'characters')
n_index = vocab.add_token_to_namespace(u"n", namespace=u'characters')
t_index = vocab.add_token_to_namespace(u"t", namespace=u'characters')
c_index = vocab.add_token_to_namespace(u"c", namespace=u'characters')
field = TextField([Token(t) for t in [u"A", u"sentence"]],
{u"words": SingleIdTokenIndexer(namespace=u"words")})
field.index(vocab)
# pylint: disable=protected-access
assert field._indexed_tokens[u"words"] == [capital_a_index, sentence_index]
field1 = TextField([Token(t) for t in [u"A", u"sentence"]],
{u"characters": TokenCharactersIndexer(namespace=u"characters")})
field1.index(vocab)
assert field1._indexed_tokens[u"characters"] == [[capital_a_char_index],
[s_index, e_index, n_index, t_index,
e_index, n_index, c_index, e_index]]
field2 = TextField([Token(t) for t in [u"A", u"sentence"]],
token_indexers={u"words": SingleIdTokenIndexer(namespace=u"words"),
u"characters": TokenCharactersIndexer(namespace=u"characters")})
field2.index(vocab)
assert field2._indexed_tokens[u"words"] == [capital_a_index, sentence_index]
assert field2._indexed_tokens[u"characters"] == [[capital_a_char_index],
[s_index, e_index, n_index, t_index,
e_index, n_index, c_index, e_index]]
# pylint: enable=protected-access
def test_get_padding_lengths_raises_if_no_indexed_tokens(self):
field = TextField([Token(t) for t in [u"This", u"is", u"a", u"sentence", u"."]],
token_indexers={u"words": SingleIdTokenIndexer(u"words")})
with pytest.raises(ConfigurationError):
field.get_padding_lengths()
def test_padding_lengths_are_computed_correctly(self):
field = TextField([Token(t) for t in [u"This", u"is", u"a", u"sentence", u"."]],
token_indexers={u"words": SingleIdTokenIndexer(u"words")})
field.index(self.vocab)
padding_lengths = field.get_padding_lengths()
assert padding_lengths == {u"num_tokens": 5}
field = TextField([Token(t) for t in [u"This", u"is", u"a", u"sentence", u"."]],
token_indexers={u"characters": TokenCharactersIndexer(u"characters")})
field.index(self.vocab)
padding_lengths = field.get_padding_lengths()
assert padding_lengths == {u"num_tokens": 5, u"num_token_characters": 8}
field = TextField([Token(t) for t in [u"This", u"is", u"a", u"sentence", u"."]],
token_indexers={u"characters": TokenCharactersIndexer(u"characters"),
u"words": SingleIdTokenIndexer(u"words")})
field.index(self.vocab)
padding_lengths = field.get_padding_lengths()
assert padding_lengths == {u"num_tokens": 5, u"num_token_characters": 8}
def test_as_tensor_handles_words(self):
field = TextField([Token(t) for t in [u"This", u"is", u"a", u"sentence", u"."]],
token_indexers={u"words": SingleIdTokenIndexer(u"words")})
field.index(self.vocab)
padding_lengths = field.get_padding_lengths()
tensor_dict = field.as_tensor(padding_lengths)
numpy.testing.assert_array_almost_equal(tensor_dict[u"words"].detach().cpu().numpy(),
numpy.array([1, 1, 1, 2, 1]))
def test_as_tensor_handles_longer_lengths(self):
field = TextField([Token(t) for t in [u"This", u"is", u"a", u"sentence", u"."]],
token_indexers={u"words": SingleIdTokenIndexer(u"words")})
field.index(self.vocab)
padding_lengths = field.get_padding_lengths()
padding_lengths[u"num_tokens"] = 10
tensor_dict = field.as_tensor(padding_lengths)
numpy.testing.assert_array_almost_equal(tensor_dict[u"words"].detach().cpu().numpy(),
numpy.array([1, 1, 1, 2, 1, 0, 0, 0, 0, 0]))
def test_as_tensor_handles_characters(self):
field = TextField([Token(t) for t in [u"This", u"is", u"a", u"sentence", u"."]],
token_indexers={u"characters": TokenCharactersIndexer(u"characters")})
field.index(self.vocab)
padding_lengths = field.get_padding_lengths()
tensor_dict = field.as_tensor(padding_lengths)
expected_character_array = numpy.array([[1, 1, 1, 3, 0, 0, 0, 0],
[1, 3, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0],
[3, 4, 5, 6, 4, 5, 7, 4],
[1, 0, 0, 0, 0, 0, 0, 0]])
numpy.testing.assert_array_almost_equal(tensor_dict[u"characters"].detach().cpu().numpy(),
expected_character_array)
def test_as_tensor_handles_words_and_characters_with_longer_lengths(self):
field = TextField([Token(t) for t in [u"a", u"sentence", u"."]],
token_indexers={u"words": SingleIdTokenIndexer(u"words"),
u"characters": TokenCharactersIndexer(u"characters")})
field.index(self.vocab)
padding_lengths = field.get_padding_lengths()
padding_lengths[u"num_tokens"] = 5
padding_lengths[u"num_token_characters"] = 10
tensor_dict = field.as_tensor(padding_lengths)
numpy.testing.assert_array_almost_equal(tensor_dict[u"words"].detach().cpu().numpy(),
numpy.array([1, 2, 1, 0, 0]))
numpy.testing.assert_array_almost_equal(tensor_dict[u"characters"].detach().cpu().numpy(),
numpy.array([[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[3, 4, 5, 6, 4, 5, 7, 4, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]))
def test_printing_doesnt_crash(self):
field = TextField([Token(t) for t in [u"A", u"sentence"]],
{u"words": SingleIdTokenIndexer(namespace=u"words")})
print(field)
def test_token_embedder_returns_dict(self):
field = TextField([Token(t) for t in [u"A", u"sentence"]],
token_indexers={u"field_with_dict": DictReturningTokenIndexer(),
u"words": SingleIdTokenIndexer(u"words"),
u"characters": TokenCharactersIndexer(u"characters")})
field.index(self.vocab)
padding_lengths = field.get_padding_lengths()
assert padding_lengths == {
u'token_ids': 5,
u'additional_key': 2,
u'words': 2,
u'characters': 2,
u'num_token_characters': 8
}
padding_lengths[u'token_ids'] = 7
padding_lengths[u'additional_key'] = 3
padding_lengths[u'words'] = 4
padding_lengths[u'characters'] = 4
tensors = field.as_tensor(padding_lengths)
assert list(tensors[u'token_ids'].shape) == [7]
assert list(tensors[u'additional_key'].shape) == [3]
assert list(tensors[u'words'].shape) == [4]
assert list(tensors[u'characters'].shape) == [4, 8] | 0.713232 | 0.292693 |
import os
import sys
import shutil
import errno
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import glob
import scipy as sp
import scipy.stats
import csv
import logging
sns.set(style="darkgrid")
def delete_dirs(path_to_dir):
_logger = logging.getLogger(__name__)
episode_dirs = glob.glob(os.path.join(path_to_dir) + '/episode_*/')
try:
for episode_dir in episode_dirs:
shutil.rmtree(episode_dir)
except:
_logger.critical("Can't delete directory - %s" % str(episode_dir))
sys.exit()
def create_dir(path_to_dir):
_logger = logging.getLogger(__name__)
if not os.path.isdir(path_to_dir):
try:
os.makedirs(path_to_dir)
except:
_logger.critical("Can't create directory - %s" % str(path_to_dir))
sys.exit()
return path_to_dir
def copy_file(src, dest):
_logger = logging.getLogger(__name__)
try:
shutil.copy(src, dest)
# eg. src and dest are the same file
except shutil.Error as e:
_logger.critical("Can't copy file - %s" % str(e))
sys.exit()
# eg. source or destination doesn't exist
except IOError as e:
_logger.critical("Can't copy file - %s" % str(e))
sys.exit()
def write_stats_file(path_to_file, *args):
_logger = logging.getLogger(__name__)
flags = os.O_CREAT | os.O_EXCL | os.O_WRONLY
# creating new line for file
line = ''
for arg in args:
if type(arg) is list:
for elem in arg:
line += str(elem) + ','
else:
line += str(arg) + ','
line = line[:-1] + '\n'
# write to file
try:
file_handle = os.open(path_to_file, flags)
except OSError as e:
if e.errno == errno.EEXIST: # Failed as the file already exists.
with open(path_to_file, 'a+') as f:
f.write(line)
else: # Something unexpected went wrong so reraise the exception.
_logger.critical("Can't write stats file - %s " % str(e))
sys.exit()
else: # No exception, so the file must have been created successfully.
with os.fdopen(file_handle, 'w') as file_obj:
# Using `os.fdopen` converts the handle to an object that acts
# like a regular Python file object, and the `with` context
# manager means the file will be automatically closed when
# we're done with it.
file_obj.write(line)
def mean_confidence_interval(my_list, confidence=0.95):
my_array = 1.0 * np.array(my_list)
array_mean, array_se = np.mean(my_array), scipy.stats.sem(my_array)
margin = array_se * sp.stats.t._ppf((1 + confidence) / 2.,
len(my_array) - 1)
return array_mean, array_mean - margin, array_mean + margin
def summarize_runs_results(path_to_dir):
_logger = logging.getLogger(__name__)
run_dirs = glob.glob(os.path.join(path_to_dir) + '/*/')
run_files = [os.path.join(run_dir, 'stats_run.csv')
for run_dir in run_dirs]
df = pd.concat((pd.read_csv(run_file) for run_file in run_files))
steps = df.groupby(['episode'])['steps_mean']
steps = list(steps)
reward = df.groupby(['episode'])['reward_mean']
reward = list(reward)
effort = df.groupby(['episode'])['step_count']
effort = list(effort)
summary = []
for episode in range(0, len(reward)):
step_mean, step_lower, step_upper = \
mean_confidence_interval(steps[episode][1])
reward_mean, reward_lower, reward_upper = \
mean_confidence_interval(reward[episode][1])
effort_mean, effort_lower, effort_upper = \
mean_confidence_interval(effort[episode][1])
summary.append([int(steps[episode][0]),
step_mean, step_lower, step_upper,
reward_mean, reward_lower, reward_upper,
effort_mean, effort_lower, effort_upper])
header = ['episode', 'steps_mean', 'steps_lower', 'steps_upper',
'reward_mean', 'reward_lower', 'reward_upper',
'effort_mean', 'effort_lower', 'effort_upper']
try:
with open(os.path.join(path_to_dir, 'stats_task.csv'), 'w') \
as csvfile:
writer = csv.writer(csvfile,
dialect='excel',
quoting=csv.QUOTE_NONNUMERIC)
writer.writerow(header)
for data in summary:
writer.writerow(data)
except IOError as e:
_logger.critical("Can't write stats file - %s " % str(e))
sys.exit()
def summarize_runs_policy_choice(path_to_dir, kind='probs'):
_logger = logging.getLogger(__name__)
run_dirs = glob.glob(os.path.join(path_to_dir) + '/*/')
policy_files = [os.path.join(run_dir, 'stats_policy_' + kind + '.csv')
for run_dir in run_dirs]
df = pd.concat((pd.read_csv(policy_usage_file)
for policy_usage_file in policy_files))
policies = list(df)
policies = [policy for policy in policies if 'episode' not in policy]
for policy in policies:
usage = df.groupby(['episode'])[policy]
usage = list(usage)
summary = []
for episode in range(0, len(usage)):
mean_value, lower_value, upper_value = \
mean_confidence_interval(usage[episode][1])
summary.append([int(usage[episode][0]),
mean_value, lower_value, upper_value])
header = ['episode', 'mean', 'lower', 'upper']
try:
with open(os.path.join(path_to_dir,
kind + '_'+str(policy)+'.csv'),
'w') as csvfile:
writer = csv.writer(csvfile,
dialect='excel',
quoting=csv.QUOTE_NONNUMERIC)
writer.writerow(header)
for data in summary:
writer.writerow(data)
except IOError as e:
_logger.critical("Can't write stats file - %s " % str(e))
sys.exit()
def plot_run(path_to_dir):
df = pd.read_csv(os.path.join(path_to_dir, 'stats_run.csv'))
# print(df)
for column in df.columns:
plt.figure(figsize=(10, 4), dpi=80)
plt.plot(df['episode'], df[column],
label=column, color='blue', linewidth=2.0)
plt.ylabel(column, fontsize=20, fontweight='bold')
plt.xlabel('episodes', fontsize=20, fontweight='bold')
plt.legend()
plt.savefig(os.path.join(path_to_dir, 'plot_' + str(column) + '.png'),
bbox_inches='tight')
plt.close('all')
def plot_runs(path_to_dir):
run_dirs = glob.glob(os.path.join(path_to_dir) + '/*/')
dfs = []
for run_dir in run_dirs:
dfs.append(pd.read_csv(os.path.join(run_dir, 'stats_run.csv')))
for column in dfs[0].columns:
plt.figure(figsize=(10, 4), dpi=80)
run_count = 1
for df in dfs:
plt.plot(df['episode'], df[column],
label=column+'_'+str(run_count), linewidth=2.0)
run_count += 1
plt.ylabel(column, fontsize=20, fontweight='bold')
plt.xlabel('episodes', fontsize=20, fontweight='bold')
plt.legend()
plt.savefig(os.path.join(path_to_dir, 'plot_' + str(column) + '.png'),
bbox_inches='tight')
plt.close('all')
def plot_task(path_to_dir):
df = pd.read_csv(os.path.join(path_to_dir, 'stats_task.csv'))
factors = ['steps', 'reward', 'effort']
colors = ['blue', 'green', 'red']
for factor, color in zip(factors, colors):
plt.figure(figsize=(10, 4), dpi=80)
if factor == 'steps':
df[factor + '_mean'] = df[factor + '_mean'].clip(0.0, 100.0)
df[factor + '_lower'] = df[factor + '_lower'].clip(0.0, 100.0)
df[factor + '_upper'] = df[factor + '_upper'].clip(0.0, 100.0)
if factor == 'reward':
df[factor + '_mean'] = df[factor + '_mean'].clip(0.0, 1.0)
df[factor + '_lower'] = df[factor + '_lower'].clip(0.0, 1.0)
df[factor + '_upper'] = df[factor + '_upper'].clip(0.0, 1.0)
plt.plot(df['episode'], df[factor + '_mean'],
label=factor+'_mean', color=color, linewidth=2.0)
plt.plot(df['episode'], df[factor + '_lower'],
label=factor+'_lower', color=color,
alpha=0.2, linewidth=1.0)
plt.plot(df['episode'], df[factor + '_upper'],
label=factor+'_upper', color=color,
alpha=0.2, linewidth=1.0)
plt.fill_between(df['episode'], df[factor + '_mean'],
df[factor + '_lower'],
facecolor=color, alpha=0.2)
plt.fill_between(df['episode'], df[factor + '_mean'],
df[factor + '_upper'],
facecolor=color, alpha=0.2)
plt.ylabel(factor, fontsize=20, fontweight='bold')
plt.xlabel('episodes', fontsize=20, fontweight='bold')
plt.legend(fontsize=14)
plt.savefig(os.path.join(path_to_dir, 'plot_' + str(factor) + '.png'),
bbox_inches='tight')
plt.close('all')
def plot_policy_choice(path_to_dir, kind='probs'):
if kind == 'probs':
ylabel = 'policy probability [%]'
elif kind == 'absolute':
ylabel = 'policy mean [steps]'
elif kind == 'W':
ylabel = 'Reuse gain [gain per episode]'
elif kind == 'W_mean':
ylabel = 'Average Reuse gain []'
elif kind == 'U':
ylabel = 'policy usage [count]'
elif kind == 'P':
ylabel = 'policy probability [%]'
else:
pass
df = pd.read_csv(os.path.join(path_to_dir,
'stats_policy_' + kind + '.csv'))
plt.figure(figsize=(10, 4), dpi=80)
df.plot(x='episode')
plt.ylabel(ylabel, fontsize=20, fontweight='bold')
plt.xlabel('episodes', fontsize=20, fontweight='bold')
plt.legend(fontsize=14)
plt.savefig(os.path.join(path_to_dir, 'plot_policy_' + kind + '.png'),
bbox_inches='tight')
plt.close('all')
def plot_policy_choice_summary(path_to_dir, kind='probs'):
limit_lower = 0
if kind == 'probs':
ylabel = 'policy probability [%]'
skip = 6
limit_upper = 1.0
elif kind == 'absolute':
ylabel = 'policy mean [steps]'
skip = 9
limit_upper = 100.0
elif kind == 'W':
ylabel = 'Reuse gain [gain per episode]'
skip = 2
limit_upper = 1.0
elif kind == 'W_mean':
ylabel = 'Average Reuse gain []'
skip = 7
limit_upper = 1.0
elif kind == 'U':
ylabel = 'policy usage [count]'
skip = 2
limit_upper = 1000.0
elif kind == 'P':
ylabel = 'policy probability [%]'
skip = 2
limit_upper = 1.0
else:
pass
policy_files = glob.glob(
os.path.join(path_to_dir) + '/' + kind + '_*.csv')
colors = ['red', 'green', 'blue', 'yellow', 'black', 'brown', 'orange']
plt.figure(figsize=(10, 4), dpi=80)
color_count = 0
for policy_file in policy_files:
df = pd.read_csv(policy_file)
policy_name = policy_file.split('/')
policy_name = policy_name[-1].split('.')
policy_name = policy_name[0][skip:]
df['mean'] = df['mean'].clip(limit_lower, limit_upper)
df['lower'] = df['lower'].clip(limit_lower, limit_upper)
df['upper'] = df['upper'].clip(limit_lower, limit_upper)
plt.plot(df['episode'], df['mean'],
label=policy_name, color=colors[color_count], linewidth=2.0)
plt.plot(df['episode'], df['lower'],
label='_nolegend_', color=colors[color_count],
alpha=0.2, linewidth=1.0)
plt.plot(df['episode'], df['upper'],
label='_nolegend_', color=colors[color_count],
alpha=0.2, linewidth=1.0)
plt.fill_between(df['episode'], df['mean'],
df['lower'],
facecolor=colors[color_count], alpha=0.2)
plt.fill_between(df['episode'], df['mean'],
df['upper'],
facecolor=colors[color_count], alpha=0.2)
color_count += 1
plt.ylabel(ylabel, fontsize=20, fontweight='bold')
plt.xlabel('episodes', fontsize=20, fontweight='bold')
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
# plt.xlim(0, 1000)
plt.legend(fontsize=14, loc='upper left')
plt.savefig(os.path.join(path_to_dir, 'plot_policy_' + kind + '.png'),
bbox_inches='tight')
plt.close('all') | helper.py | import os
import sys
import shutil
import errno
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import glob
import scipy as sp
import scipy.stats
import csv
import logging
sns.set(style="darkgrid")
def delete_dirs(path_to_dir):
_logger = logging.getLogger(__name__)
episode_dirs = glob.glob(os.path.join(path_to_dir) + '/episode_*/')
try:
for episode_dir in episode_dirs:
shutil.rmtree(episode_dir)
except:
_logger.critical("Can't delete directory - %s" % str(episode_dir))
sys.exit()
def create_dir(path_to_dir):
_logger = logging.getLogger(__name__)
if not os.path.isdir(path_to_dir):
try:
os.makedirs(path_to_dir)
except:
_logger.critical("Can't create directory - %s" % str(path_to_dir))
sys.exit()
return path_to_dir
def copy_file(src, dest):
_logger = logging.getLogger(__name__)
try:
shutil.copy(src, dest)
# eg. src and dest are the same file
except shutil.Error as e:
_logger.critical("Can't copy file - %s" % str(e))
sys.exit()
# eg. source or destination doesn't exist
except IOError as e:
_logger.critical("Can't copy file - %s" % str(e))
sys.exit()
def write_stats_file(path_to_file, *args):
_logger = logging.getLogger(__name__)
flags = os.O_CREAT | os.O_EXCL | os.O_WRONLY
# creating new line for file
line = ''
for arg in args:
if type(arg) is list:
for elem in arg:
line += str(elem) + ','
else:
line += str(arg) + ','
line = line[:-1] + '\n'
# write to file
try:
file_handle = os.open(path_to_file, flags)
except OSError as e:
if e.errno == errno.EEXIST: # Failed as the file already exists.
with open(path_to_file, 'a+') as f:
f.write(line)
else: # Something unexpected went wrong so reraise the exception.
_logger.critical("Can't write stats file - %s " % str(e))
sys.exit()
else: # No exception, so the file must have been created successfully.
with os.fdopen(file_handle, 'w') as file_obj:
# Using `os.fdopen` converts the handle to an object that acts
# like a regular Python file object, and the `with` context
# manager means the file will be automatically closed when
# we're done with it.
file_obj.write(line)
def mean_confidence_interval(my_list, confidence=0.95):
my_array = 1.0 * np.array(my_list)
array_mean, array_se = np.mean(my_array), scipy.stats.sem(my_array)
margin = array_se * sp.stats.t._ppf((1 + confidence) / 2.,
len(my_array) - 1)
return array_mean, array_mean - margin, array_mean + margin
def summarize_runs_results(path_to_dir):
_logger = logging.getLogger(__name__)
run_dirs = glob.glob(os.path.join(path_to_dir) + '/*/')
run_files = [os.path.join(run_dir, 'stats_run.csv')
for run_dir in run_dirs]
df = pd.concat((pd.read_csv(run_file) for run_file in run_files))
steps = df.groupby(['episode'])['steps_mean']
steps = list(steps)
reward = df.groupby(['episode'])['reward_mean']
reward = list(reward)
effort = df.groupby(['episode'])['step_count']
effort = list(effort)
summary = []
for episode in range(0, len(reward)):
step_mean, step_lower, step_upper = \
mean_confidence_interval(steps[episode][1])
reward_mean, reward_lower, reward_upper = \
mean_confidence_interval(reward[episode][1])
effort_mean, effort_lower, effort_upper = \
mean_confidence_interval(effort[episode][1])
summary.append([int(steps[episode][0]),
step_mean, step_lower, step_upper,
reward_mean, reward_lower, reward_upper,
effort_mean, effort_lower, effort_upper])
header = ['episode', 'steps_mean', 'steps_lower', 'steps_upper',
'reward_mean', 'reward_lower', 'reward_upper',
'effort_mean', 'effort_lower', 'effort_upper']
try:
with open(os.path.join(path_to_dir, 'stats_task.csv'), 'w') \
as csvfile:
writer = csv.writer(csvfile,
dialect='excel',
quoting=csv.QUOTE_NONNUMERIC)
writer.writerow(header)
for data in summary:
writer.writerow(data)
except IOError as e:
_logger.critical("Can't write stats file - %s " % str(e))
sys.exit()
def summarize_runs_policy_choice(path_to_dir, kind='probs'):
_logger = logging.getLogger(__name__)
run_dirs = glob.glob(os.path.join(path_to_dir) + '/*/')
policy_files = [os.path.join(run_dir, 'stats_policy_' + kind + '.csv')
for run_dir in run_dirs]
df = pd.concat((pd.read_csv(policy_usage_file)
for policy_usage_file in policy_files))
policies = list(df)
policies = [policy for policy in policies if 'episode' not in policy]
for policy in policies:
usage = df.groupby(['episode'])[policy]
usage = list(usage)
summary = []
for episode in range(0, len(usage)):
mean_value, lower_value, upper_value = \
mean_confidence_interval(usage[episode][1])
summary.append([int(usage[episode][0]),
mean_value, lower_value, upper_value])
header = ['episode', 'mean', 'lower', 'upper']
try:
with open(os.path.join(path_to_dir,
kind + '_'+str(policy)+'.csv'),
'w') as csvfile:
writer = csv.writer(csvfile,
dialect='excel',
quoting=csv.QUOTE_NONNUMERIC)
writer.writerow(header)
for data in summary:
writer.writerow(data)
except IOError as e:
_logger.critical("Can't write stats file - %s " % str(e))
sys.exit()
def plot_run(path_to_dir):
df = pd.read_csv(os.path.join(path_to_dir, 'stats_run.csv'))
# print(df)
for column in df.columns:
plt.figure(figsize=(10, 4), dpi=80)
plt.plot(df['episode'], df[column],
label=column, color='blue', linewidth=2.0)
plt.ylabel(column, fontsize=20, fontweight='bold')
plt.xlabel('episodes', fontsize=20, fontweight='bold')
plt.legend()
plt.savefig(os.path.join(path_to_dir, 'plot_' + str(column) + '.png'),
bbox_inches='tight')
plt.close('all')
def plot_runs(path_to_dir):
run_dirs = glob.glob(os.path.join(path_to_dir) + '/*/')
dfs = []
for run_dir in run_dirs:
dfs.append(pd.read_csv(os.path.join(run_dir, 'stats_run.csv')))
for column in dfs[0].columns:
plt.figure(figsize=(10, 4), dpi=80)
run_count = 1
for df in dfs:
plt.plot(df['episode'], df[column],
label=column+'_'+str(run_count), linewidth=2.0)
run_count += 1
plt.ylabel(column, fontsize=20, fontweight='bold')
plt.xlabel('episodes', fontsize=20, fontweight='bold')
plt.legend()
plt.savefig(os.path.join(path_to_dir, 'plot_' + str(column) + '.png'),
bbox_inches='tight')
plt.close('all')
def plot_task(path_to_dir):
df = pd.read_csv(os.path.join(path_to_dir, 'stats_task.csv'))
factors = ['steps', 'reward', 'effort']
colors = ['blue', 'green', 'red']
for factor, color in zip(factors, colors):
plt.figure(figsize=(10, 4), dpi=80)
if factor == 'steps':
df[factor + '_mean'] = df[factor + '_mean'].clip(0.0, 100.0)
df[factor + '_lower'] = df[factor + '_lower'].clip(0.0, 100.0)
df[factor + '_upper'] = df[factor + '_upper'].clip(0.0, 100.0)
if factor == 'reward':
df[factor + '_mean'] = df[factor + '_mean'].clip(0.0, 1.0)
df[factor + '_lower'] = df[factor + '_lower'].clip(0.0, 1.0)
df[factor + '_upper'] = df[factor + '_upper'].clip(0.0, 1.0)
plt.plot(df['episode'], df[factor + '_mean'],
label=factor+'_mean', color=color, linewidth=2.0)
plt.plot(df['episode'], df[factor + '_lower'],
label=factor+'_lower', color=color,
alpha=0.2, linewidth=1.0)
plt.plot(df['episode'], df[factor + '_upper'],
label=factor+'_upper', color=color,
alpha=0.2, linewidth=1.0)
plt.fill_between(df['episode'], df[factor + '_mean'],
df[factor + '_lower'],
facecolor=color, alpha=0.2)
plt.fill_between(df['episode'], df[factor + '_mean'],
df[factor + '_upper'],
facecolor=color, alpha=0.2)
plt.ylabel(factor, fontsize=20, fontweight='bold')
plt.xlabel('episodes', fontsize=20, fontweight='bold')
plt.legend(fontsize=14)
plt.savefig(os.path.join(path_to_dir, 'plot_' + str(factor) + '.png'),
bbox_inches='tight')
plt.close('all')
def plot_policy_choice(path_to_dir, kind='probs'):
if kind == 'probs':
ylabel = 'policy probability [%]'
elif kind == 'absolute':
ylabel = 'policy mean [steps]'
elif kind == 'W':
ylabel = 'Reuse gain [gain per episode]'
elif kind == 'W_mean':
ylabel = 'Average Reuse gain []'
elif kind == 'U':
ylabel = 'policy usage [count]'
elif kind == 'P':
ylabel = 'policy probability [%]'
else:
pass
df = pd.read_csv(os.path.join(path_to_dir,
'stats_policy_' + kind + '.csv'))
plt.figure(figsize=(10, 4), dpi=80)
df.plot(x='episode')
plt.ylabel(ylabel, fontsize=20, fontweight='bold')
plt.xlabel('episodes', fontsize=20, fontweight='bold')
plt.legend(fontsize=14)
plt.savefig(os.path.join(path_to_dir, 'plot_policy_' + kind + '.png'),
bbox_inches='tight')
plt.close('all')
def plot_policy_choice_summary(path_to_dir, kind='probs'):
limit_lower = 0
if kind == 'probs':
ylabel = 'policy probability [%]'
skip = 6
limit_upper = 1.0
elif kind == 'absolute':
ylabel = 'policy mean [steps]'
skip = 9
limit_upper = 100.0
elif kind == 'W':
ylabel = 'Reuse gain [gain per episode]'
skip = 2
limit_upper = 1.0
elif kind == 'W_mean':
ylabel = 'Average Reuse gain []'
skip = 7
limit_upper = 1.0
elif kind == 'U':
ylabel = 'policy usage [count]'
skip = 2
limit_upper = 1000.0
elif kind == 'P':
ylabel = 'policy probability [%]'
skip = 2
limit_upper = 1.0
else:
pass
policy_files = glob.glob(
os.path.join(path_to_dir) + '/' + kind + '_*.csv')
colors = ['red', 'green', 'blue', 'yellow', 'black', 'brown', 'orange']
plt.figure(figsize=(10, 4), dpi=80)
color_count = 0
for policy_file in policy_files:
df = pd.read_csv(policy_file)
policy_name = policy_file.split('/')
policy_name = policy_name[-1].split('.')
policy_name = policy_name[0][skip:]
df['mean'] = df['mean'].clip(limit_lower, limit_upper)
df['lower'] = df['lower'].clip(limit_lower, limit_upper)
df['upper'] = df['upper'].clip(limit_lower, limit_upper)
plt.plot(df['episode'], df['mean'],
label=policy_name, color=colors[color_count], linewidth=2.0)
plt.plot(df['episode'], df['lower'],
label='_nolegend_', color=colors[color_count],
alpha=0.2, linewidth=1.0)
plt.plot(df['episode'], df['upper'],
label='_nolegend_', color=colors[color_count],
alpha=0.2, linewidth=1.0)
plt.fill_between(df['episode'], df['mean'],
df['lower'],
facecolor=colors[color_count], alpha=0.2)
plt.fill_between(df['episode'], df['mean'],
df['upper'],
facecolor=colors[color_count], alpha=0.2)
color_count += 1
plt.ylabel(ylabel, fontsize=20, fontweight='bold')
plt.xlabel('episodes', fontsize=20, fontweight='bold')
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
# plt.xlim(0, 1000)
plt.legend(fontsize=14, loc='upper left')
plt.savefig(os.path.join(path_to_dir, 'plot_policy_' + kind + '.png'),
bbox_inches='tight')
plt.close('all') | 0.228931 | 0.164483 |
from time import sleep
import logging
from flaky import flaky
import pytest
from swimpy.routes import ROUTES
from swimpy.model.message import Ping, Ack, PingReq, Alive
from swimpy.model.node import Node
from swimpy.runtime import Runtime
from swimpy.util import send_message
LOGGER = logging.getLogger(__name__)
@pytest.mark.timeout(10)
@pytest.mark.integration()
def test_runtime_responds_to_ping():
n1 = Node(node_id='node-1', addr='127.0.0.1', port=1338)
r = Runtime(routes=ROUTES, node=n1)
try:
r.start()
sleep(1)
assert r.is_alive()
ping = Ping(seqno=55, node=n1)
ack = send_message(n1.addr, n1.port, ping, reply_cls=Ack)[0]
# Make sure the sequence numbers match
assert ack.seqno == ping.seqno
finally:
r.stop()
@pytest.mark.timeout(10)
@pytest.mark.integration()
def test_runtime_responds_to_pingreq():
n1 = Node(node_id='node-1', addr='127.0.0.1', port=9000)
r1 = Runtime(routes=ROUTES, node=n1)
n2 = Node(node_id='node-2', addr='127.0.0.1', port=9001)
r2 = Runtime(routes=ROUTES, node=n2)
try:
LOGGER.info('Starting node1')
r1.start()
LOGGER.info('Starting node2')
r2.start()
sleep(1)
assert r1.is_alive()
assert r2.is_alive()
LOGGER.info('node1 and node2 are alive')
# Send a ping-req to node-1 for node-2 and wait for an ack
pingreq = PingReq(seqno=101, node=n1, target_node=n2)
ack = send_message(n1.addr, n1.port, pingreq, reply_cls=Ack)[0]
# Make sure the sequence numbers match
assert ack.seqno == pingreq.seqno
finally:
r1.stop()
r2.stop()
@flaky
@pytest.mark.timeout(15)
@pytest.mark.parametrize('num_nodes,deadline', [
(3, 1),
(12, 7),
])
@pytest.mark.integration()
def test_join(num_nodes, deadline):
"""
Test that we're able to join <num_nodes> into a cluster within <deadline> secs
This *usually* passes, but the flaky decorator will retry in the improbable
case it does fail
"""
nodes = {}
runtimes = {}
port = 10090
for i in xrange(num_nodes):
node_id = 'node-{}'.format(i)
nodes[node_id] = Node(node_id=node_id, addr='127.0.0.1', port=port + i)
runtimes[node_id] = Runtime(routes=ROUTES, node=nodes[node_id])
try:
for runtime in runtimes.values():
runtime.start()
sleep(1)
for node_id, runtime in runtimes.iteritems():
assert runtime.is_alive()
LOGGER.info('{} is alive'.format(node_id))
node_ids = nodes.keys()
for i, node_id in enumerate(node_ids[:-1]):
next_node_id = node_ids[i + 1]
alive = Alive(node=nodes[next_node_id], sender=nodes[next_node_id])
node = nodes[node_id]
send_message(node.addr, node.port, alive)
LOGGER.info('Sleeping for {} seconds'.format(deadline))
sleep(deadline)
for node_id in nodes:
for runtime in runtimes.values():
LOGGER.info('checking if {} is in runtime {}'.format(node_id, runtime.nodes.keys()))
assert node_id in runtime.nodes.keys() # .keys() gives us better debug output
finally:
LOGGER.info('Shutting down runtimes')
for runtime in runtimes.values():
runtime.stop()
@pytest.mark.timeout(15)
@pytest.mark.integration()
def test_join_with_seed_nodes():
# Create three swimpy Runtime objects
n1 = Node(node_id='node-1', addr='127.0.0.1', port=9900)
r1 = Runtime(routes=ROUTES, node=n1)
# Configure a list of seed nodes to send JOINs to on startup
n2 = Node(node_id='node-2', addr='127.0.0.1', port=9901)
r2 = Runtime(routes=ROUTES, node=n2, seed_nodes=[('127.0.0.1', 9900)])
n3 = Node(node_id='node-3', addr='127.0.0.1', port=9902)
r3 = Runtime(routes=ROUTES, node=n3, seed_nodes=[('127.0.0.1', 9901)])
try:
r1.start()
sleep(1)
r2.start()
sleep(1)
r3.start()
sleep(1)
for runtime in [r1, r2, r3]:
nodes_dict = runtime.nodes
LOGGER.info('Checking {} for all three nodes'.format(runtime))
assert sorted(nodes_dict) == ['node-1', 'node-2', 'node-3']
except Exception as e:
LOGGER.exception(e)
finally:
try:
r1.stop()
r2.stop()
r3.stop()
except Exception as e:
LOGGER.exception(e)
raise | test/test_integ.py | from time import sleep
import logging
from flaky import flaky
import pytest
from swimpy.routes import ROUTES
from swimpy.model.message import Ping, Ack, PingReq, Alive
from swimpy.model.node import Node
from swimpy.runtime import Runtime
from swimpy.util import send_message
LOGGER = logging.getLogger(__name__)
@pytest.mark.timeout(10)
@pytest.mark.integration()
def test_runtime_responds_to_ping():
n1 = Node(node_id='node-1', addr='127.0.0.1', port=1338)
r = Runtime(routes=ROUTES, node=n1)
try:
r.start()
sleep(1)
assert r.is_alive()
ping = Ping(seqno=55, node=n1)
ack = send_message(n1.addr, n1.port, ping, reply_cls=Ack)[0]
# Make sure the sequence numbers match
assert ack.seqno == ping.seqno
finally:
r.stop()
@pytest.mark.timeout(10)
@pytest.mark.integration()
def test_runtime_responds_to_pingreq():
n1 = Node(node_id='node-1', addr='127.0.0.1', port=9000)
r1 = Runtime(routes=ROUTES, node=n1)
n2 = Node(node_id='node-2', addr='127.0.0.1', port=9001)
r2 = Runtime(routes=ROUTES, node=n2)
try:
LOGGER.info('Starting node1')
r1.start()
LOGGER.info('Starting node2')
r2.start()
sleep(1)
assert r1.is_alive()
assert r2.is_alive()
LOGGER.info('node1 and node2 are alive')
# Send a ping-req to node-1 for node-2 and wait for an ack
pingreq = PingReq(seqno=101, node=n1, target_node=n2)
ack = send_message(n1.addr, n1.port, pingreq, reply_cls=Ack)[0]
# Make sure the sequence numbers match
assert ack.seqno == pingreq.seqno
finally:
r1.stop()
r2.stop()
@flaky
@pytest.mark.timeout(15)
@pytest.mark.parametrize('num_nodes,deadline', [
(3, 1),
(12, 7),
])
@pytest.mark.integration()
def test_join(num_nodes, deadline):
"""
Test that we're able to join <num_nodes> into a cluster within <deadline> secs
This *usually* passes, but the flaky decorator will retry in the improbable
case it does fail
"""
nodes = {}
runtimes = {}
port = 10090
for i in xrange(num_nodes):
node_id = 'node-{}'.format(i)
nodes[node_id] = Node(node_id=node_id, addr='127.0.0.1', port=port + i)
runtimes[node_id] = Runtime(routes=ROUTES, node=nodes[node_id])
try:
for runtime in runtimes.values():
runtime.start()
sleep(1)
for node_id, runtime in runtimes.iteritems():
assert runtime.is_alive()
LOGGER.info('{} is alive'.format(node_id))
node_ids = nodes.keys()
for i, node_id in enumerate(node_ids[:-1]):
next_node_id = node_ids[i + 1]
alive = Alive(node=nodes[next_node_id], sender=nodes[next_node_id])
node = nodes[node_id]
send_message(node.addr, node.port, alive)
LOGGER.info('Sleeping for {} seconds'.format(deadline))
sleep(deadline)
for node_id in nodes:
for runtime in runtimes.values():
LOGGER.info('checking if {} is in runtime {}'.format(node_id, runtime.nodes.keys()))
assert node_id in runtime.nodes.keys() # .keys() gives us better debug output
finally:
LOGGER.info('Shutting down runtimes')
for runtime in runtimes.values():
runtime.stop()
@pytest.mark.timeout(15)
@pytest.mark.integration()
def test_join_with_seed_nodes():
# Create three swimpy Runtime objects
n1 = Node(node_id='node-1', addr='127.0.0.1', port=9900)
r1 = Runtime(routes=ROUTES, node=n1)
# Configure a list of seed nodes to send JOINs to on startup
n2 = Node(node_id='node-2', addr='127.0.0.1', port=9901)
r2 = Runtime(routes=ROUTES, node=n2, seed_nodes=[('127.0.0.1', 9900)])
n3 = Node(node_id='node-3', addr='127.0.0.1', port=9902)
r3 = Runtime(routes=ROUTES, node=n3, seed_nodes=[('127.0.0.1', 9901)])
try:
r1.start()
sleep(1)
r2.start()
sleep(1)
r3.start()
sleep(1)
for runtime in [r1, r2, r3]:
nodes_dict = runtime.nodes
LOGGER.info('Checking {} for all three nodes'.format(runtime))
assert sorted(nodes_dict) == ['node-1', 'node-2', 'node-3']
except Exception as e:
LOGGER.exception(e)
finally:
try:
r1.stop()
r2.stop()
r3.stop()
except Exception as e:
LOGGER.exception(e)
raise | 0.418697 | 0.33482 |
from ....core.log.log import Log
from ....core.comm.comm_utils import CommUtils
from ....applications.garrus.image_control.image_control_messages import ImageData
from ....applications.garrus.image_control.image_control_messages import ImageModeSelect
from ...garrus.image_control.image_control_messages import COLOR
from ...garrus.image_control.image_control_messages import MONOCHROME
from ..comm.comm_ctxt import CommCtxt
from .core.workspace_base import WorkspaceBase
from tkinter import ttk
from tkinter import Canvas
from tkinter import TOP
from tkinter import BOTTOM
from tkinter import PhotoImage
from tkinter import Label
from PIL import Image
from PIL import ImageTk
from time import time
import numpy as np
import cv2
class WsImage(WorkspaceBase):
def __init__(self, parent_frame, ws_controller, ws_resolution):
WorkspaceBase.__init__(self, parent_frame, ws_controller, ws_resolution)
self.active = False
self.rendering = False
self.color_mode = COLOR
self.image_label = Label(self)
self.image_label.pack(side = TOP)
self.image = None
self.image_mode_button = ttk.Button(self, text = "Monochrome", command = lambda : self.toggle_image_mode())
self.image_mode_button.pack(side = BOTTOM)
self.hex_row_buffer = []
self.hex_row_buffer_size = 0
@staticmethod
def get_id():
return "Image"
def toggle_image_mode(self):
if self.color_mode == COLOR:
self.color_mode = MONOCHROME
self.image_mode_button.configure(text = "Color")
else:
self.color_mode = COLOR
self.image_mode_button.configure(text = "Monochrome")
msg = ImageModeSelect(color_mode=self.color_mode)
CommCtxt.get_comm_if().send_message(msg)
def refresh(self):
if True == self.rendering:
Log.log("Returning due to rendering")
return
if self.active and False == self.rendering:
self.rendering = True
msg = CommCtxt.get_comm_if().get_message(ImageData.get_msg_id())
if None != msg:
now = time()
Log.log("Age from creation to sending: " + str(msg.msg_send_time - msg.msg_create_time))
Log.log("Age from sending to now: " + str(now - msg.msg_send_time))
Log.log("Age from receiving to now: " + str(now - msg.msg_receive_time))
Log.log("Total age: " + str(now - msg.msg_create_time))
self.show_image(msg.resolution, msg.color_mode, msg.image_data)
self.rendering = False
self.after(200, self.refresh)
def show_image(self, resolution, color_mode, image):
#Log.log("enter show_image")
to_show = None
if COLOR == color_mode:
to_show = np.frombuffer(image, dtype=np.uint8).reshape((resolution[1], resolution[0], 3))
elif MONOCHROME == color_mode:
to_show = np.frombuffer(image, dtype=np.uint8).reshape((resolution[1], resolution[0]))
to_show = Image.fromarray(to_show)
self.image = ImageTk.PhotoImage(to_show)
self.image_label.configure(image=self.image)
#Log.log("exit show_image")
def activate(self):
self.active = True
self.after(0, self.refresh)
def deactivate(self):
self.active = False
while self.rendering:
Log.log("Waiting...")
msg = ImageModeSelect(color_mode=COLOR)
CommCtxt.get_comm_if().send_message(msg) | python/robot_controller/applications/gui/workspace/ws_image.py | from ....core.log.log import Log
from ....core.comm.comm_utils import CommUtils
from ....applications.garrus.image_control.image_control_messages import ImageData
from ....applications.garrus.image_control.image_control_messages import ImageModeSelect
from ...garrus.image_control.image_control_messages import COLOR
from ...garrus.image_control.image_control_messages import MONOCHROME
from ..comm.comm_ctxt import CommCtxt
from .core.workspace_base import WorkspaceBase
from tkinter import ttk
from tkinter import Canvas
from tkinter import TOP
from tkinter import BOTTOM
from tkinter import PhotoImage
from tkinter import Label
from PIL import Image
from PIL import ImageTk
from time import time
import numpy as np
import cv2
class WsImage(WorkspaceBase):
def __init__(self, parent_frame, ws_controller, ws_resolution):
WorkspaceBase.__init__(self, parent_frame, ws_controller, ws_resolution)
self.active = False
self.rendering = False
self.color_mode = COLOR
self.image_label = Label(self)
self.image_label.pack(side = TOP)
self.image = None
self.image_mode_button = ttk.Button(self, text = "Monochrome", command = lambda : self.toggle_image_mode())
self.image_mode_button.pack(side = BOTTOM)
self.hex_row_buffer = []
self.hex_row_buffer_size = 0
@staticmethod
def get_id():
return "Image"
def toggle_image_mode(self):
if self.color_mode == COLOR:
self.color_mode = MONOCHROME
self.image_mode_button.configure(text = "Color")
else:
self.color_mode = COLOR
self.image_mode_button.configure(text = "Monochrome")
msg = ImageModeSelect(color_mode=self.color_mode)
CommCtxt.get_comm_if().send_message(msg)
def refresh(self):
if True == self.rendering:
Log.log("Returning due to rendering")
return
if self.active and False == self.rendering:
self.rendering = True
msg = CommCtxt.get_comm_if().get_message(ImageData.get_msg_id())
if None != msg:
now = time()
Log.log("Age from creation to sending: " + str(msg.msg_send_time - msg.msg_create_time))
Log.log("Age from sending to now: " + str(now - msg.msg_send_time))
Log.log("Age from receiving to now: " + str(now - msg.msg_receive_time))
Log.log("Total age: " + str(now - msg.msg_create_time))
self.show_image(msg.resolution, msg.color_mode, msg.image_data)
self.rendering = False
self.after(200, self.refresh)
def show_image(self, resolution, color_mode, image):
#Log.log("enter show_image")
to_show = None
if COLOR == color_mode:
to_show = np.frombuffer(image, dtype=np.uint8).reshape((resolution[1], resolution[0], 3))
elif MONOCHROME == color_mode:
to_show = np.frombuffer(image, dtype=np.uint8).reshape((resolution[1], resolution[0]))
to_show = Image.fromarray(to_show)
self.image = ImageTk.PhotoImage(to_show)
self.image_label.configure(image=self.image)
#Log.log("exit show_image")
def activate(self):
self.active = True
self.after(0, self.refresh)
def deactivate(self):
self.active = False
while self.rendering:
Log.log("Waiting...")
msg = ImageModeSelect(color_mode=COLOR)
CommCtxt.get_comm_if().send_message(msg) | 0.486088 | 0.107531 |
"""test resolution of dotted names
"""
import unittest
class Test_resolve(unittest.TestCase):
def _callFUT(self, *args, **kw):
from zope.dottedname.resolve import resolve
return resolve(*args, **kw)
def test_no_dots_non_importable(self):
self.assertRaises(ImportError,
self._callFUT, '_non_importable_module_')
def test_no_dots(self):
self.assertTrue(self._callFUT('unittest') is unittest)
def test_module_attr_nonesuch(self):
self.assertRaises(ImportError, self._callFUT,'unittest.nonesuch')
def test_module_attr(self):
self.assertTrue(
self._callFUT('unittest.TestCase') is unittest.TestCase)
def test_submodule(self):
from zope import dottedname
self.assertTrue(
self._callFUT('zope.dottedname') is dottedname)
def test_submodule_not_yet_imported(self):
import sys
import zope.dottedname
try:
del sys.modules['zope.dottedname.example']
except KeyError:
pass
try:
del zope.dottedname.example
except AttributeError:
pass
found = self._callFUT('zope.dottedname.example')
self.assertTrue(found is sys.modules['zope.dottedname.example'])
def test_submodule_attr(self):
from zope.dottedname.resolve import resolve
self.assertTrue(
self._callFUT('zope.dottedname.resolve.resolve') is resolve)
def test_relative_no_module(self):
self.assertRaises(ValueError, self._callFUT,'.resolve')
def test_relative_w_module(self):
from zope.dottedname.resolve import resolve
self.assertTrue(
self._callFUT('.resolve.resolve', 'zope.dottedname') is resolve)
def test_relative_w_module_multiple_dots(self):
from zope.dottedname import resolve
self.assertTrue(
self._callFUT('..resolve', 'zope.dottedname.tests') is resolve)
def test_suite():
return unittest.TestSuite((
unittest.makeSuite(Test_resolve),
)) | ven2/lib/python2.7/site-packages/zope/dottedname/tests.py | """test resolution of dotted names
"""
import unittest
class Test_resolve(unittest.TestCase):
def _callFUT(self, *args, **kw):
from zope.dottedname.resolve import resolve
return resolve(*args, **kw)
def test_no_dots_non_importable(self):
self.assertRaises(ImportError,
self._callFUT, '_non_importable_module_')
def test_no_dots(self):
self.assertTrue(self._callFUT('unittest') is unittest)
def test_module_attr_nonesuch(self):
self.assertRaises(ImportError, self._callFUT,'unittest.nonesuch')
def test_module_attr(self):
self.assertTrue(
self._callFUT('unittest.TestCase') is unittest.TestCase)
def test_submodule(self):
from zope import dottedname
self.assertTrue(
self._callFUT('zope.dottedname') is dottedname)
def test_submodule_not_yet_imported(self):
import sys
import zope.dottedname
try:
del sys.modules['zope.dottedname.example']
except KeyError:
pass
try:
del zope.dottedname.example
except AttributeError:
pass
found = self._callFUT('zope.dottedname.example')
self.assertTrue(found is sys.modules['zope.dottedname.example'])
def test_submodule_attr(self):
from zope.dottedname.resolve import resolve
self.assertTrue(
self._callFUT('zope.dottedname.resolve.resolve') is resolve)
def test_relative_no_module(self):
self.assertRaises(ValueError, self._callFUT,'.resolve')
def test_relative_w_module(self):
from zope.dottedname.resolve import resolve
self.assertTrue(
self._callFUT('.resolve.resolve', 'zope.dottedname') is resolve)
def test_relative_w_module_multiple_dots(self):
from zope.dottedname import resolve
self.assertTrue(
self._callFUT('..resolve', 'zope.dottedname.tests') is resolve)
def test_suite():
return unittest.TestSuite((
unittest.makeSuite(Test_resolve),
)) | 0.516839 | 0.576363 |
import torch
import os
import numpy as np
import cv2
from PIL import Image
from csr_model import csr_network
import torchvision.transforms.functional as TF
import matplotlib.pyplot as plt
def csr_retouch(path_to_model_state, path_to_old_images, path_to_new_images):
cuda = torch.cuda.is_available()
Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
network = csr_network()
network.load_state_dict(torch.load(
path_to_model_state, map_location=torch.device('cpu')))
network.eval()
# img = image_file_to_tensor(image_path)
# result = network(img)
items = os.listdir(path_to_old_images)
for item in items:
if item.endswith(".jpg"):
load_path = os.path.join(path_to_old_images, item)
save_path = os.path.join(path_to_new_images, item)
image = Image.open(load_path)
image = TF.to_tensor(image).type(Tensor)
image = image.unsqueeze(0)
result = network(image)
result = result.squeeze().mul_(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy()
im = Image.fromarray(result)
im.save(save_path, quality=95)
return 1
'''
def image_file_to_tensor(image_path):
items = os.listdir(image_path)
img = Image.open(os.path.join(image_path, items[0])).convert("RGB")
width, height = img.size
# images = torch.zeros(len(items), 3, height, width)
images = torch.zeros(1, 3, height, width, requires_grad=False)
index = 0
for item in items:
if item.endswith(".jpg"):
load_path = os.path.join(image_path, item)
image = Image.open(load_path).convert("RGB")
image = TF.to_tensor(image).type(torch.FloatTensor)
images[index, :, :, :] = image
index += 1
if index >= 1:
break
return images
'''
def main():
csr_retouch("../../model_parameter/csrnet.pth", ".../image_folder", ".../save_folder)
if __name__ == "__main__":
main() | image_retouching/csrnet/csr_eval.py | import torch
import os
import numpy as np
import cv2
from PIL import Image
from csr_model import csr_network
import torchvision.transforms.functional as TF
import matplotlib.pyplot as plt
def csr_retouch(path_to_model_state, path_to_old_images, path_to_new_images):
cuda = torch.cuda.is_available()
Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
network = csr_network()
network.load_state_dict(torch.load(
path_to_model_state, map_location=torch.device('cpu')))
network.eval()
# img = image_file_to_tensor(image_path)
# result = network(img)
items = os.listdir(path_to_old_images)
for item in items:
if item.endswith(".jpg"):
load_path = os.path.join(path_to_old_images, item)
save_path = os.path.join(path_to_new_images, item)
image = Image.open(load_path)
image = TF.to_tensor(image).type(Tensor)
image = image.unsqueeze(0)
result = network(image)
result = result.squeeze().mul_(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy()
im = Image.fromarray(result)
im.save(save_path, quality=95)
return 1
'''
def image_file_to_tensor(image_path):
items = os.listdir(image_path)
img = Image.open(os.path.join(image_path, items[0])).convert("RGB")
width, height = img.size
# images = torch.zeros(len(items), 3, height, width)
images = torch.zeros(1, 3, height, width, requires_grad=False)
index = 0
for item in items:
if item.endswith(".jpg"):
load_path = os.path.join(image_path, item)
image = Image.open(load_path).convert("RGB")
image = TF.to_tensor(image).type(torch.FloatTensor)
images[index, :, :, :] = image
index += 1
if index >= 1:
break
return images
'''
def main():
csr_retouch("../../model_parameter/csrnet.pth", ".../image_folder", ".../save_folder)
if __name__ == "__main__":
main() | 0.42656 | 0.471162 |
from .. import db, flask_bcryp
from ..config import key
from app.main.model.tokens import Token
import jwt
import datetime
class User(db.Model):
"""User Model for storing user related details consider user as LMS"""
__tablename__ = 'user'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
public_id = db.Column(db.String(99), unique=True)
username = db.Column(db.String(49), unique=True)
password_hash = db.Column(db.String(99))
email = db.Column(db.String(255), unique=True, nullable=False)
registered_on = db.Column(db.DateTime, nullable=False)
max_connection = db.Column(db.Integer, nullable=False, default=1)
connected = db.Column(db.Integer, default=0)
@property
def password(self):
raise AttributeError('password: write-only field')
@password.setter
def password(self, password):
self.password_hash = <PASSWORD>.generate_password_hash(password).decode('utf8')
def check_password(self, password):
return flask_bcryp.check_password_hash(self.password_hash, password)
def __repr__(self):
return "<User '{}'>".format(self.username)
def encode_auth_token(self, user_id):
"""
Generate the Auth Token
:param face_id:
:return: string
"""
try:
payload = {
'exp': datetime.datetime.utcnow() + datetime.timedelta(days=1, seconds=1),
'iat': datetime.datetime.utcnow(),
'sub': user_id
}
return jwt.encode(
payload,
key,
algorithm="HS256"
)
except Exception as e:
return e
@staticmethod
def decode_auth_token(auth_token):
"""
Decode the auth token
:param auth_token:
:return: integer|string
"""
try:
payload = jwt.decode(auth_token, key)
is_token = Token.check_token(auth_token)
if is_token:
return 'Token blacklisted, please log in again.'
else:
return payload['sub']
except jwt.ExpiredSignatureError:
return 'Signature expired. Please log in again.'
except jwt.InvalidTokenError:
return 'Invalid token. Please log in again.' | app/main/model/user.py |
from .. import db, flask_bcryp
from ..config import key
from app.main.model.tokens import Token
import jwt
import datetime
class User(db.Model):
    """User model for storing user-related details (a user acts as an LMS)."""
    __tablename__ = 'user'

    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    public_id = db.Column(db.String(99), unique=True)
    username = db.Column(db.String(49), unique=True)
    password_hash = db.Column(db.String(99))
    email = db.Column(db.String(255), unique=True, nullable=False)
    registered_on = db.Column(db.DateTime, nullable=False)
    max_connection = db.Column(db.Integer, nullable=False, default=1)
    connected = db.Column(db.Integer, default=0)

    @property
    def password(self):
        # The plain-text password is never stored, so reading it back is an error.
        raise AttributeError('password: write-only field')

    @password.setter
    def password(self, password):
        # BUG FIX: the original line contained an unexpanded "<PASSWORD>"
        # placeholder instead of the bcrypt helper, which is a SyntaxError.
        self.password_hash = flask_bcryp.generate_password_hash(password).decode('utf8')

    def check_password(self, password):
        """Return True if *password* matches the stored bcrypt hash."""
        return flask_bcryp.check_password_hash(self.password_hash, password)

    def __repr__(self):
        return "<User '{}'>".format(self.username)

    def encode_auth_token(self, user_id):
        """
        Generate the auth token.

        :param user_id: identifier stored in the ``sub`` claim
        :return: encoded JWT on success, or the raised exception object
        """
        try:
            payload = {
                # Token is valid for one day (plus a second of slack).
                'exp': datetime.datetime.utcnow() + datetime.timedelta(days=1, seconds=1),
                'iat': datetime.datetime.utcnow(),
                'sub': user_id
            }
            return jwt.encode(
                payload,
                key,
                algorithm="HS256"
            )
        except Exception as e:
            # NOTE(review): returning the exception (instead of raising it)
            # preserves the original contract relied upon by callers.
            return e

    @staticmethod
    def decode_auth_token(auth_token):
        """
        Decode the auth token.

        :param auth_token: JWT produced by :meth:`encode_auth_token`
        :return: the user id (``sub`` claim) on success, or an error string
        """
        try:
            # PyJWT >= 2.0 requires ``algorithms`` explicitly; pinning it
            # also prevents algorithm-confusion attacks.
            payload = jwt.decode(auth_token, key, algorithms=["HS256"])
            if Token.check_token(auth_token):
                return 'Token blacklisted, please log in again.'
            return payload['sub']
        except jwt.ExpiredSignatureError:
            return 'Signature expired. Please log in again.'
        except jwt.InvalidTokenError:
            return 'Invalid token. Please log in again.'
from enum import Enum
from typing import Tuple, Union
import geopandas as gpd
import triangle
from .common import (
FloatArray,
IntArray,
check_geodataframe,
invalid_option,
repr,
separate,
to_ugrid,
)
from .triangle_geometry import collect_geometry, polygon_holes
class DelaunayAlgorithm(Enum):
    # Each value is the command-line switch passed to Triangle to select the
    # Delaunay triangulation algorithm; the empty string selects the default.
    DIVIDE_AND_CONQUER = ""
    INCREMENTAL = "i"
    SWEEPLINE = "F"
class TriangleMesher:
    """
    Wrapper for the python bindings to Triangle. This class must be initialized
    with a geopandas GeoDataFrame containing at least one polygon, and a column
    named ``"cellsize"``.

    Optionally, multiple polygons with different cell sizes can be included in
    the geodataframe. These can be used to achieve local mesh refinement.

    Linestrings and points may also be included. The segments of linestrings
    will be directly forced into the triangulation. Points can also be forced
    into the triangulation. The cell size values associated with these
    geometries will not be used.

    Triangle cannot automatically resolve overlapping polygons, or points
    located exactly on segments. During initialization, the geometries of
    the geodataframe are checked:

    * Polygons should not have any overlap with each other.
    * Linestrings should not intersect each other.
    * Every linestring should be fully contained by a single polygon;
      a linestring may not intersect two or more polygons.
    * Linestrings and points should not "touch" / be located on
      polygon borders.
    * Holes in polygons are fully supported, but they must not contain
      any linestrings or points.

    If such cases are detected, the initialization will error.

    For more details on Triangle, see:
    https://www.cs.cmu.edu/~quake/triangle.defs.html
    """

    def __init__(self, gdf: gpd.GeoDataFrame) -> None:
        check_geodataframe(gdf)
        polygons, linestrings, points = separate(gdf)
        self.vertices, self.segments, self.regions = collect_geometry(
            polygons, linestrings, points
        )
        self.holes = polygon_holes(polygons)
        # Default meshing parameters; each one is validated by its setter.
        self.minimum_angle = 20.0
        self.conforming_delaunay = True
        self.suppress_exact_arithmetic = False
        self.maximum_steiner_points = None
        self.delaunay_algorithm = DelaunayAlgorithm.DIVIDE_AND_CONQUER
        self.consistency_check = False

    def __repr__(self):
        # ``repr`` here is the shared helper imported from .common, which
        # shadows the builtin of the same name (so this does not recurse).
        return repr(self)

    @property
    def minimum_angle(self) -> float:
        """
        Minimum allowed angle for any triangle in the mesh.
        See:
        https://www.cs.cmu.edu/~quake/triangle.q.html
        """
        return self._minimum_angle

    @minimum_angle.setter
    def minimum_angle(self, value: float):
        if not isinstance(value, float):
            raise TypeError("minimum angle must be a float")
        # Triangle's refinement is only guaranteed to terminate for minimum
        # angles below roughly 34 degrees.
        if value >= 34.0 or value <= 0.0:
            raise ValueError("minimum_angle should fall in the interval: (0.0, 34.0)")
        self._minimum_angle = value

    @property
    def conforming_delaunay(self) -> bool:
        """
        Conforming Delaunay: use this switch if you want all triangles in the
        mesh to be Delaunay, and not just constrained Delaunay; or if you want
        to ensure that all Voronoi vertices lie within the triangulation.
        """
        return self._conforming_delaunay

    @conforming_delaunay.setter
    def conforming_delaunay(self, value: bool):
        if not isinstance(value, bool):
            raise TypeError("conforming_delaunay must be a bool")
        self._conforming_delaunay = value

    @property
    def suppress_exact_arithmetic(self) -> bool:
        """
        Suppresses exact arithmetic.
        See:
        https://www.cs.cmu.edu/~quake/triangle.exact.html
        """
        return self._suppress_exact_arithmetic

    @suppress_exact_arithmetic.setter
    def suppress_exact_arithmetic(self, value: bool):
        if not isinstance(value, bool):
            raise TypeError("suppress_exact_arithmetic must be a bool")
        self._suppress_exact_arithmetic = value

    @property
    def maximum_steiner_points(self) -> Union[int, None]:
        """
        Specifies the maximum number of added Steiner points; ``None`` means
        unlimited.
        See:
        https://www.cs.cmu.edu/~quake/triangle.S.html
        """
        return self._maximum_steiner_points

    @maximum_steiner_points.setter
    def maximum_steiner_points(self, value: Union[int, None]):
        if not isinstance(value, (int, type(None))):
            raise TypeError("maximum_steiner_points must be an int or None")
        self._maximum_steiner_points = value

    @property
    def delaunay_algorithm(self) -> DelaunayAlgorithm:
        """
        ``DelaunayAlgorithm.DIVIDE_AND_CONQUER``: Default algorithm.

        ``DelaunayAlgorithm.INCREMENTAL``: Uses the incremental algorithm for
        Delaunay triangulation, rather than the divide-and-conquer algorithm.

        ``DelaunayAlgorithm.SWEEPLINE``: Uses Steven Fortune's sweepline
        algorithm for Delaunay triangulation, rather than the
        divide-and-conquer algorithm.
        """
        return self._delaunay_algorithm

    @delaunay_algorithm.setter
    def delaunay_algorithm(self, value: DelaunayAlgorithm):
        if value not in DelaunayAlgorithm:
            raise ValueError(invalid_option(value, DelaunayAlgorithm))
        self._delaunay_algorithm = value

    @property
    def consistency_check(self) -> bool:
        """
        Check the consistency of the final mesh. Uses exact arithmetic for
        checking, even if ``suppress_exact_arithmetic`` is set to ``True``.
        Useful if you suspect Triangle is buggy.
        """
        return self._consistency_check

    @consistency_check.setter
    def consistency_check(self, value: bool):
        # BUG FIX: the original tested isinstance(value, int), which accepted
        # arbitrary integers while the error message demands a bool.
        if not isinstance(value, bool):
            raise TypeError("consistency_check must be a bool")
        self._consistency_check = value

    def generate(self) -> Tuple[FloatArray, IntArray]:
        """
        Generate a mesh of triangles.

        Returns
        -------
        vertices: np.ndarray of floats with shape ``(n_vertex, 2)``
        triangles: np.ndarray of integers with shape ``(n_triangle, 3)``
        """
        # Assemble Triangle's command-line style switch string:
        # p = planar straight line graph, q = quality (minimum angle),
        # a = area constraints, D = conforming Delaunay, X = suppress exact
        # arithmetic, S = Steiner point cap, C = consistency check.
        options = (
            "p"
            f"q{self._minimum_angle}"
            "a"
            f"{'D' if self._conforming_delaunay else ''}"
            f"{'X' if self._suppress_exact_arithmetic else ''}"
            f"{'S' + str(self._maximum_steiner_points) if self._maximum_steiner_points is not None else ''}"
            f"{self._delaunay_algorithm.value}"
            f"{'C' if self._consistency_check else ''}"
        )
        tri = {"vertices": self.vertices, "segments": self.segments}
        if self.holes is not None:
            tri["holes"] = self.holes
        if len(self.regions) > 0:
            tri["regions"] = self.regions
        result = triangle.triangulate(tri=tri, opts=options)
        return result["vertices"], result["triangles"]

    def generate_ugrid(self) -> "xugrid.Ugrid2d":  # type: ignore # noqa
        """Generate the mesh and wrap it as a xugrid Ugrid2d object."""
        return to_ugrid(*self.generate())
from typing import Tuple, Union
import geopandas as gpd
import triangle
from .common import (
FloatArray,
IntArray,
check_geodataframe,
invalid_option,
repr,
separate,
to_ugrid,
)
from .triangle_geometry import collect_geometry, polygon_holes
class DelaunayAlgorithm(Enum):
    # Each value is the command-line switch passed to Triangle to select the
    # Delaunay triangulation algorithm; the empty string selects the default.
    DIVIDE_AND_CONQUER = ""
    INCREMENTAL = "i"
    SWEEPLINE = "F"
class TriangleMesher:
    """
    Wrapper for the python bindings to Triangle. This class must be initialized
    with a geopandas GeoDataFrame containing at least one polygon, and a column
    named ``"cellsize"``.

    Optionally, multiple polygons with different cell sizes can be included in
    the geodataframe. These can be used to achieve local mesh refinement.

    Linestrings and points may also be included. The segments of linestrings
    will be directly forced into the triangulation. Points can also be forced
    into the triangulation. The cell size values associated with these
    geometries will not be used.

    Triangle cannot automatically resolve overlapping polygons, or points
    located exactly on segments. During initialization, the geometries of
    the geodataframe are checked:

    * Polygons should not have any overlap with each other.
    * Linestrings should not intersect each other.
    * Every linestring should be fully contained by a single polygon;
      a linestring may not intersect two or more polygons.
    * Linestrings and points should not "touch" / be located on
      polygon borders.
    * Holes in polygons are fully supported, but they must not contain
      any linestrings or points.

    If such cases are detected, the initialization will error.

    For more details on Triangle, see:
    https://www.cs.cmu.edu/~quake/triangle.defs.html
    """

    def __init__(self, gdf: gpd.GeoDataFrame) -> None:
        check_geodataframe(gdf)
        polygons, linestrings, points = separate(gdf)
        self.vertices, self.segments, self.regions = collect_geometry(
            polygons, linestrings, points
        )
        self.holes = polygon_holes(polygons)
        # Default meshing parameters; each one is validated by its setter.
        self.minimum_angle = 20.0
        self.conforming_delaunay = True
        self.suppress_exact_arithmetic = False
        self.maximum_steiner_points = None
        self.delaunay_algorithm = DelaunayAlgorithm.DIVIDE_AND_CONQUER
        self.consistency_check = False

    def __repr__(self):
        # ``repr`` here is the shared helper imported from .common, which
        # shadows the builtin of the same name (so this does not recurse).
        return repr(self)

    @property
    def minimum_angle(self) -> float:
        """
        Minimum allowed angle for any triangle in the mesh.
        See:
        https://www.cs.cmu.edu/~quake/triangle.q.html
        """
        return self._minimum_angle

    @minimum_angle.setter
    def minimum_angle(self, value: float):
        if not isinstance(value, float):
            raise TypeError("minimum angle must be a float")
        # Triangle's refinement is only guaranteed to terminate for minimum
        # angles below roughly 34 degrees.
        if value >= 34.0 or value <= 0.0:
            raise ValueError("minimum_angle should fall in the interval: (0.0, 34.0)")
        self._minimum_angle = value

    @property
    def conforming_delaunay(self) -> bool:
        """
        Conforming Delaunay: use this switch if you want all triangles in the
        mesh to be Delaunay, and not just constrained Delaunay; or if you want
        to ensure that all Voronoi vertices lie within the triangulation.
        """
        return self._conforming_delaunay

    @conforming_delaunay.setter
    def conforming_delaunay(self, value: bool):
        if not isinstance(value, bool):
            raise TypeError("conforming_delaunay must be a bool")
        self._conforming_delaunay = value

    @property
    def suppress_exact_arithmetic(self) -> bool:
        """
        Suppresses exact arithmetic.
        See:
        https://www.cs.cmu.edu/~quake/triangle.exact.html
        """
        return self._suppress_exact_arithmetic

    @suppress_exact_arithmetic.setter
    def suppress_exact_arithmetic(self, value: bool):
        if not isinstance(value, bool):
            raise TypeError("suppress_exact_arithmetic must be a bool")
        self._suppress_exact_arithmetic = value

    @property
    def maximum_steiner_points(self) -> Union[int, None]:
        """
        Specifies the maximum number of added Steiner points; ``None`` means
        unlimited.
        See:
        https://www.cs.cmu.edu/~quake/triangle.S.html
        """
        return self._maximum_steiner_points

    @maximum_steiner_points.setter
    def maximum_steiner_points(self, value: Union[int, None]):
        if not isinstance(value, (int, type(None))):
            raise TypeError("maximum_steiner_points must be an int or None")
        self._maximum_steiner_points = value

    @property
    def delaunay_algorithm(self) -> DelaunayAlgorithm:
        """
        ``DelaunayAlgorithm.DIVIDE_AND_CONQUER``: Default algorithm.

        ``DelaunayAlgorithm.INCREMENTAL``: Uses the incremental algorithm for
        Delaunay triangulation, rather than the divide-and-conquer algorithm.

        ``DelaunayAlgorithm.SWEEPLINE``: Uses Steven Fortune's sweepline
        algorithm for Delaunay triangulation, rather than the
        divide-and-conquer algorithm.
        """
        return self._delaunay_algorithm

    @delaunay_algorithm.setter
    def delaunay_algorithm(self, value: DelaunayAlgorithm):
        if value not in DelaunayAlgorithm:
            raise ValueError(invalid_option(value, DelaunayAlgorithm))
        self._delaunay_algorithm = value

    @property
    def consistency_check(self) -> bool:
        """
        Check the consistency of the final mesh. Uses exact arithmetic for
        checking, even if ``suppress_exact_arithmetic`` is set to ``True``.
        Useful if you suspect Triangle is buggy.
        """
        return self._consistency_check

    @consistency_check.setter
    def consistency_check(self, value: bool):
        # BUG FIX: the original tested isinstance(value, int), which accepted
        # arbitrary integers while the error message demands a bool.
        if not isinstance(value, bool):
            raise TypeError("consistency_check must be a bool")
        self._consistency_check = value

    def generate(self) -> Tuple[FloatArray, IntArray]:
        """
        Generate a mesh of triangles.

        Returns
        -------
        vertices: np.ndarray of floats with shape ``(n_vertex, 2)``
        triangles: np.ndarray of integers with shape ``(n_triangle, 3)``
        """
        # Assemble Triangle's command-line style switch string:
        # p = planar straight line graph, q = quality (minimum angle),
        # a = area constraints, D = conforming Delaunay, X = suppress exact
        # arithmetic, S = Steiner point cap, C = consistency check.
        options = (
            "p"
            f"q{self._minimum_angle}"
            "a"
            f"{'D' if self._conforming_delaunay else ''}"
            f"{'X' if self._suppress_exact_arithmetic else ''}"
            f"{'S' + str(self._maximum_steiner_points) if self._maximum_steiner_points is not None else ''}"
            f"{self._delaunay_algorithm.value}"
            f"{'C' if self._consistency_check else ''}"
        )
        tri = {"vertices": self.vertices, "segments": self.segments}
        if self.holes is not None:
            tri["holes"] = self.holes
        if len(self.regions) > 0:
            tri["regions"] = self.regions
        result = triangle.triangulate(tri=tri, opts=options)
        return result["vertices"], result["triangles"]

    def generate_ugrid(self) -> "xugrid.Ugrid2d":  # type: ignore # noqa
        """Generate the mesh and wrap it as a xugrid Ugrid2d object."""
        return to_ugrid(*self.generate())
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.utils.data.dataset import Dataset
from torch.utils.data import DataLoader
import numpy as np
import utils
def format_data(data):
    """Return *data* as a 2-D array, reshaping 1-D input to a column vector.

    Arrays that are already 2-D (or higher) are returned unchanged.

    :param data: a numpy array
    :raises TypeError: if *data* has no ``shape``/``reshape`` attribute
        (i.e. it is not a numpy array)
    """
    try:
        # A 1-D array of n samples becomes an (n, 1) column so downstream
        # code can always rely on data.shape[1] existing.
        return data.reshape((data.shape[0], 1)) if len(data.shape) == 1 else data
    except AttributeError as e:
        # BUG FIX: the original printed a message and called exit(0), which
        # reports *success* to the OS and hides the failure from callers.
        raise TypeError('data is not a numpy object, format_data failed!') from e
class CustomDatasetFromCSV(Dataset):
    """Dataset backed by a CSV file whose first column holds the labels."""

    def __init__(self, csv_path, transform=None):
        """
        args:
            csv_path(string): path of the data csv file
            transform: pytorch transforms for transforms and tensor conversion
        """
        # BUG FIX: pandas was never imported at module level, so the original
        # raised NameError on 'pd'; import it locally where it is needed.
        import pandas as pd
        self.data = pd.read_csv(csv_path)
        # First column is the label; the remaining columns are features.
        self.labels = np.asarray(self.data.iloc[:, 0])
        self.transform = transform
        self.data_len = len(self.data.index)

    def __getitem__(self, index):
        single_label = self.labels[index]
        single_data = np.asarray(self.data.iloc[index][1:]).astype(np.float32)
        if self.transform is not None:
            single_data = self.transform(single_data)
        data_as_tensor = torch.from_numpy(single_data)
        return (data_as_tensor, single_label)

    def __len__(self):
        return self.data_len
class CustomDatasetFromTxt(Dataset):
    """Dataset backed by whitespace-delimited .x/.y files under ./data/<app>/."""

    def __init__(self, app_name, train=True, transform=None):
        """
        args:
            app_name(string): such as 'blackscholes','fft','inversek2j',
                              'jmeint','jpeg','kmeans','sobel'
            train(bool): load the train split when True, else the test split
            transform: pytorch transforms for transforms and tensor conversion
        """
        # Idiom fix: test truthiness instead of comparing "== True", and
        # build the shared path prefix once instead of four times.
        split = 'train' if train else 'test'
        base = './data/' + app_name + '/' + split
        self.x = np.loadtxt(base + '.x', dtype=np.float32)
        self.y = np.loadtxt(base + '.y', dtype=np.float32)
        self.transform = transform
        # Ensure both arrays are 2-D so shape[1] below is always defined.
        self.x = format_data(self.x)
        self.y = format_data(self.y)
        self.input_size = self.x.shape[1]
        self.out_size = self.y.shape[1]
        self.data_len = self.x.shape[0]

    def __getitem__(self, index):
        x_index = self.x[index]
        y_index = self.y[index]
        if self.transform is not None:
            x_index = self.transform(x_index)
        x_index_tensor = torch.from_numpy(x_index)
        y_index_tensor = torch.from_numpy(y_index)
        return (x_index_tensor, y_index_tensor)

    def __len__(self):
        return self.data_len

    def input_out_size(self):
        """Return [input feature count, output feature count]."""
        return [self.input_size, self.out_size]
import torch.nn as nn
from torch.autograd import Variable
from torch.utils.data.dataset import Dataset
from torch.utils.data import DataLoader
import numpy as np
import utils
def format_data(data):
    """Return *data* as a 2-D array, reshaping 1-D input to a column vector.

    Arrays that are already 2-D (or higher) are returned unchanged.

    :param data: a numpy array
    :raises TypeError: if *data* has no ``shape``/``reshape`` attribute
        (i.e. it is not a numpy array)
    """
    try:
        # A 1-D array of n samples becomes an (n, 1) column so downstream
        # code can always rely on data.shape[1] existing.
        return data.reshape((data.shape[0], 1)) if len(data.shape) == 1 else data
    except AttributeError as e:
        # BUG FIX: the original printed a message and called exit(0), which
        # reports *success* to the OS and hides the failure from callers.
        raise TypeError('data is not a numpy object, format_data failed!') from e
class CustomDatasetFromCSV(Dataset):
    """Dataset backed by a CSV file whose first column holds the labels."""

    def __init__(self, csv_path, transform=None):
        """
        args:
            csv_path(string): path of the data csv file
            transform: pytorch transforms for transforms and tensor conversion
        """
        # BUG FIX: pandas was never imported at module level, so the original
        # raised NameError on 'pd'; import it locally where it is needed.
        import pandas as pd
        self.data = pd.read_csv(csv_path)
        # First column is the label; the remaining columns are features.
        self.labels = np.asarray(self.data.iloc[:, 0])
        self.transform = transform
        self.data_len = len(self.data.index)

    def __getitem__(self, index):
        single_label = self.labels[index]
        single_data = np.asarray(self.data.iloc[index][1:]).astype(np.float32)
        if self.transform is not None:
            single_data = self.transform(single_data)
        data_as_tensor = torch.from_numpy(single_data)
        return (data_as_tensor, single_label)

    def __len__(self):
        return self.data_len
class CustomDatasetFromTxt(Dataset):
    """Dataset backed by whitespace-delimited .x/.y files under ./data/<app>/."""

    def __init__(self, app_name, train=True, transform=None):
        """
        args:
            app_name(string): such as 'blackscholes','fft','inversek2j',
                              'jmeint','jpeg','kmeans','sobel'
            train(bool): load the train split when True, else the test split
            transform: pytorch transforms for transforms and tensor conversion
        """
        # Idiom fix: test truthiness instead of comparing "== True", and
        # build the shared path prefix once instead of four times.
        split = 'train' if train else 'test'
        base = './data/' + app_name + '/' + split
        self.x = np.loadtxt(base + '.x', dtype=np.float32)
        self.y = np.loadtxt(base + '.y', dtype=np.float32)
        self.transform = transform
        # Ensure both arrays are 2-D so shape[1] below is always defined.
        self.x = format_data(self.x)
        self.y = format_data(self.y)
        self.input_size = self.x.shape[1]
        self.out_size = self.y.shape[1]
        self.data_len = self.x.shape[0]

    def __getitem__(self, index):
        x_index = self.x[index]
        y_index = self.y[index]
        if self.transform is not None:
            x_index = self.transform(x_index)
        x_index_tensor = torch.from_numpy(x_index)
        y_index_tensor = torch.from_numpy(y_index)
        return (x_index_tensor, y_index_tensor)

    def __len__(self):
        return self.data_len

    def input_out_size(self):
        """Return [input feature count, output feature count]."""
        return [self.input_size, self.out_size]
import os
import sys
from collections import OrderedDict
import ProbabilisticParser.common.tokens as tok
import pycrfsuite
# Load the trained CRF model once at import time; all parsing functions in
# this module share this module-level tagger.
try:
    TAGGER = pycrfsuite.Tagger()
    TAGGER.open(tok.MODEL_PATH + tok.MODEL_FILE)
    print('Using model from', tok.MODEL_PATH + tok.MODEL_FILE)
except IOError:
    # Without the model file the module is unusable, so bail out immediately.
    print('ERROR: cannot find the CRF model file', tok.MODEL_FILE, 'from', tok.MODEL_PATH)
    sys.exit(-9)
def _parse(raw_string):
    """
    Private function to parse strings using a trained model.
    Should not be called directly, but rather using parse and other functions.

    :param raw_string: input string to parse
    :type raw_string: str

    :return: a tuple of (tokens, labels)
    :rtype: tuple
    """
    tokens = tok.tokenize(raw_string)
    if not tokens:
        # BUG FIX: the original returned a bare [] here, which made every
        # caller's "tokens, tags = _parse(...)" unpacking raise ValueError
        # on empty input. Return an empty pair instead.
        return [], []
    features = tok.tokens2features(tokens)
    tags = TAGGER.tag(features)
    return tokens, tags
def parse(raw_string):
    """
    Parse the given input string using a trained model.

    :param raw_string: input string to parse
    :type raw_string: str

    :return: a list of (token, label) pairs
    :rtype: list
    """
    parsed_tokens, parsed_labels = _parse(raw_string)
    return [(token, label) for token, label in zip(parsed_tokens, parsed_labels)]
def parse_with_marginal_probability(raw_string):
    """
    Parse the given input string using a trained model.
    Returns a list of tokens, labels, and marginal probabilities.

    :param raw_string: input string to parse
    :type raw_string: str

    :return: a list of (token, label, marginal probability) triples
    :rtype: list
    """
    tokens, tags = _parse(raw_string)
    triples = []
    for position, (token, label) in enumerate(zip(tokens, tags)):
        triples.append((token, label, TAGGER.marginal(label, position)))
    return triples
def parse_with_probabilities(raw_string):
    """
    Parse the given input string using a trained model.
    Returns a dictionary with the tokens, labels, per-label marginal
    probabilities and the probability of the whole label sequence.

    :param raw_string: input string to parse
    :type raw_string: str

    :return: a dictionary holding the results
    :rtype: OrderedDict
    """
    tokens, tags = _parse(raw_string)
    marginals = []
    for position, label in enumerate(tags):
        marginals.append(TAGGER.marginal(label, position))
    # NOTE: the key name "marginal_probabilites" (sic) is part of the public
    # contract of this function and is kept as-is.
    return OrderedDict(tokens=tokens,
                       tags=tags,
                       marginal_probabilites=marginals,
                       sequence_probability=TAGGER.probability(tags))
def tag(raw_string):
    """
    Parse the given input string using a trained model and return an ordered
    dictionary mapping each label to the joined string of its tokens.
    Unlike the parse function this produces one complete component per label:
    multiple tokens sharing a label are collapsed into a single
    space-separated string, stripped of stray separators.

    :param raw_string: input string to parse and label
    :type raw_string: str

    :return: a dictionary of labels to joined token strings
    :rtype: OrderedDict
    """
    grouped = OrderedDict()
    for word, label in parse(raw_string):
        if label not in grouped:
            grouped[label] = []
        grouped[label].append(word)
    for label in grouped:
        grouped[label] = ' '.join(grouped[label]).strip(' ,;')
    return grouped
def test(raw_string='ONS LIMITED FLAT 1 12 OXFORD STREET STREET ST1 2FW', verbose=False):
    """
    A simple test to check that the calling mechanism from Python gives the same
    results as if CRFsuite were called directly from the command line. Requires
    a compiled version of the CRFsuite.

    :param raw_string: input string to test
    :type raw_string: str
    :param verbose: additional debugging output
    :type verbose: bool

    :return: None
    """
    print('Input string:', raw_string)
    print('Python Results:', tag(raw_string))
    tokens = tok.tokenize(raw_string)
    features = tok.tokens2features(tokens)
    if verbose:
        print('features:', features)
    tags = TAGGER.tag(features)
    print('Inferred tags:', tags)
    print('Probability of the sequence:', round(TAGGER.probability(tags), 6))
    # The expected values below are tied to one specific trained model file.
    assert round(TAGGER.probability(tags), 6) == 0.992256, 'Sequence probability not correct'
    results = [0.999999, 0.999999, 0.999846, 0.993642, 0.999728, 1., 1., 0.998874, 1., 1.]
    for i, tg in enumerate(tags):
        prob = round(TAGGER.marginal(tg, i), 6)
        print('Marginal probability of', tg, 'in position', i, 'is', prob)
        assert prob == results[i], 'Marginal Probability of a Label not correct'
    if verbose:
        print(TAGGER.info().transitions)
        print(TAGGER.info().state_features)
        print(TAGGER.info().attributes)
    # store the ItemSequence temporarily
    tmp = pycrfsuite.ItemSequence(features)
    # BUG FIX: the file handle was previously opened without a context manager
    # (leaked on any exception), and '\:' is an invalid escape sequence; the
    # explicit '\\:' produces the identical string without the warning.
    with open('training/test.txt', 'w') as fh:
        for i, tg in enumerate(tags):
            fh.write(tg + '\t')
            items = tmp.items()[i]
            for item in sorted(items):
                itemtext = str(item)
                fh.write(itemtext.replace(':', '\\:') + ':' + str(items[item]) + '\t')
            fh.write('\n')
    # command line call to the C code to test the output
    print('\nCRFsuite call results:')
    os.system('crfsuite tag -pit -m training/addressCRF.crfsuite training/test.txt')
def test_county(raw_string='7a to 10c <NAME>, Exeter, Devon, EX2 6GA, ENGLAND'):
    """Print the raw input and the tagged result for a county-bearing address."""
    print('Input string:', raw_string)
    tagged_result = tag(raw_string)
    print('Python Results:', tagged_result)
if __name__ == "__main__":
tag(' long street new milton hampshirer po14 ')
test_county() | DataScience/ProbabilisticParser/parser.py | import os
import sys
from collections import OrderedDict
import ProbabilisticParser.common.tokens as tok
import pycrfsuite
# Load the trained CRF model once at import time; all parsing functions in
# this module share this module-level tagger.
try:
    TAGGER = pycrfsuite.Tagger()
    TAGGER.open(tok.MODEL_PATH + tok.MODEL_FILE)
    print('Using model from', tok.MODEL_PATH + tok.MODEL_FILE)
except IOError:
    # Without the model file the module is unusable, so bail out immediately.
    print('ERROR: cannot find the CRF model file', tok.MODEL_FILE, 'from', tok.MODEL_PATH)
    sys.exit(-9)
def _parse(raw_string):
    """
    Private function to parse strings using a trained model.
    Should not be called directly, but rather using parse and other functions.

    :param raw_string: input string to parse
    :type raw_string: str

    :return: a tuple of (tokens, labels)
    :rtype: tuple
    """
    tokens = tok.tokenize(raw_string)
    if not tokens:
        # BUG FIX: the original returned a bare [] here, which made every
        # caller's "tokens, tags = _parse(...)" unpacking raise ValueError
        # on empty input. Return an empty pair instead.
        return [], []
    features = tok.tokens2features(tokens)
    tags = TAGGER.tag(features)
    return tokens, tags
def parse(raw_string):
    """
    Parse the given input string using a trained model.

    :param raw_string: input string to parse
    :type raw_string: str

    :return: a list of (token, label) pairs
    :rtype: list
    """
    parsed_tokens, parsed_labels = _parse(raw_string)
    return [(token, label) for token, label in zip(parsed_tokens, parsed_labels)]
def parse_with_marginal_probability(raw_string):
    """
    Parse the given input string using a trained model.
    Returns a list of tokens, labels, and marginal probabilities.

    :param raw_string: input string to parse
    :type raw_string: str

    :return: a list of (token, label, marginal probability) triples
    :rtype: list
    """
    tokens, tags = _parse(raw_string)
    triples = []
    for position, (token, label) in enumerate(zip(tokens, tags)):
        triples.append((token, label, TAGGER.marginal(label, position)))
    return triples
def parse_with_probabilities(raw_string):
    """
    Parse the given input string using a trained model.
    Returns a dictionary with the tokens, labels, per-label marginal
    probabilities and the probability of the whole label sequence.

    :param raw_string: input string to parse
    :type raw_string: str

    :return: a dictionary holding the results
    :rtype: OrderedDict
    """
    tokens, tags = _parse(raw_string)
    marginals = []
    for position, label in enumerate(tags):
        marginals.append(TAGGER.marginal(label, position))
    # NOTE: the key name "marginal_probabilites" (sic) is part of the public
    # contract of this function and is kept as-is.
    return OrderedDict(tokens=tokens,
                       tags=tags,
                       marginal_probabilites=marginals,
                       sequence_probability=TAGGER.probability(tags))
def tag(raw_string):
    """
    Parse the given input string using a trained model and return an ordered
    dictionary mapping each label to the joined string of its tokens.
    Unlike the parse function this produces one complete component per label:
    multiple tokens sharing a label are collapsed into a single
    space-separated string, stripped of stray separators.

    :param raw_string: input string to parse and label
    :type raw_string: str

    :return: a dictionary of labels to joined token strings
    :rtype: OrderedDict
    """
    grouped = OrderedDict()
    for word, label in parse(raw_string):
        if label not in grouped:
            grouped[label] = []
        grouped[label].append(word)
    for label in grouped:
        grouped[label] = ' '.join(grouped[label]).strip(' ,;')
    return grouped
def test(raw_string='ONS LIMITED FLAT 1 12 OXFORD STREET STREET ST1 2FW', verbose=False):
    """
    A simple test to check that the calling mechanism from Python gives the same
    results as if CRFsuite were called directly from the command line. Requires
    a compiled version of the CRFsuite.

    :param raw_string: input string to test
    :type raw_string: str
    :param verbose: additional debugging output
    :type verbose: bool

    :return: None
    """
    print('Input string:', raw_string)
    print('Python Results:', tag(raw_string))
    tokens = tok.tokenize(raw_string)
    features = tok.tokens2features(tokens)
    if verbose:
        print('features:', features)
    tags = TAGGER.tag(features)
    print('Inferred tags:', tags)
    print('Probability of the sequence:', round(TAGGER.probability(tags), 6))
    # The expected values below are tied to one specific trained model file.
    assert round(TAGGER.probability(tags), 6) == 0.992256, 'Sequence probability not correct'
    results = [0.999999, 0.999999, 0.999846, 0.993642, 0.999728, 1., 1., 0.998874, 1., 1.]
    for i, tg in enumerate(tags):
        prob = round(TAGGER.marginal(tg, i), 6)
        print('Marginal probability of', tg, 'in position', i, 'is', prob)
        assert prob == results[i], 'Marginal Probability of a Label not correct'
    if verbose:
        print(TAGGER.info().transitions)
        print(TAGGER.info().state_features)
        print(TAGGER.info().attributes)
    # store the ItemSequence temporarily
    tmp = pycrfsuite.ItemSequence(features)
    # BUG FIX: the file handle was previously opened without a context manager
    # (leaked on any exception), and '\:' is an invalid escape sequence; the
    # explicit '\\:' produces the identical string without the warning.
    with open('training/test.txt', 'w') as fh:
        for i, tg in enumerate(tags):
            fh.write(tg + '\t')
            items = tmp.items()[i]
            for item in sorted(items):
                itemtext = str(item)
                fh.write(itemtext.replace(':', '\\:') + ':' + str(items[item]) + '\t')
            fh.write('\n')
    # command line call to the C code to test the output
    print('\nCRFsuite call results:')
    os.system('crfsuite tag -pit -m training/addressCRF.crfsuite training/test.txt')
def test_county(raw_string='7a to 10c <NAME>, Exeter, Devon, EX2 6GA, ENGLAND'):
    """Print the raw input and the tagged result for a county-bearing address."""
    print('Input string:', raw_string)
    tagged_result = tag(raw_string)
    print('Python Results:', tagged_result)
if __name__ == "__main__":
tag(' long street new milton hampshirer po14 ')
test_county() | 0.624064 | 0.677167 |
import re
import pandas as pd
import numpy as np
# NLTK corpora required by this script; downloads are cached after the
# first run.
import nltk
nltk.download('stopwords')
# BUG FIX: word_tokenize() requires the 'punkt' tokenizer models as well;
# without this download the script crashed on the first tokenize call.
nltk.download('punkt')
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
stop_words = set(stopwords.words('english'))
from nltk.stem import PorterStemmer
pst = PorterStemmer()

df = pd.read_csv('datasets/original dataset.csv')

# Removing data of videos which are deleted
deleted_videos = df[df['video_error_or_removed'] == True].index
df = df.drop(df.index[deleted_videos])

# Removing duplicated videos
df = df.drop_duplicates(subset='video_id', keep='first')

# Text Columns
def preprocess_str(col_name):
    """Lower-case *col_name* in place and strip non-alphanumeric characters."""
    df[col_name] = df.apply(
        lambda row: re.sub(r'[^0-9A-Za-z\s]', '', row[col_name]).lower(),
        axis=1
    )

def text_cols():
    """Keep the original text columns and store cleaned versions alongside."""
    # Video title
    df['original_title'] = df['title']
    preprocess_str('title')
    # Channel title
    df['original_channel_title'] = df['channel_title']
    preprocess_str('channel_title')
    # Description: drop URLs first, then strip punctuation and lower-case.
    df['original_description'] = df['description']
    df['description'] = df['description'].fillna('')
    df['description'] = df.apply(
        lambda row: re.sub(r'[^0-9A-Za-z\s]', '', re.sub(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', '', row['description'])).lower(),
        axis=1
    )

# Tags
def tags_preprocess(row):
    """Clean, tokenize, stem and de-duplicate one row's tag string."""
    tags = re.sub(r'"', '', re.sub(r'\|[^a-zA-Z]+\|', '|', row['tags']))
    # BUG FIX: in the original character class the sequence ")-_" formed a
    # range from ')' (0x29) to '_' (0x5F) that silently matched digits and
    # upper-case letters too. Moving '-' to the end makes it a literal dash.
    tags = re.sub(r'[!@#$%^&*()_+={}\[\]|\\"\';:.,<>/?`~-]', ' ', tags)
    tags = word_tokenize(tags)
    final_tags = ''
    for tag in tags:
        stemed_tag = pst.stem(tag.lower())
        if stemed_tag not in final_tags and not any([word for word in stop_words if stemed_tag in word]):
            final_tags += stemed_tag + ' '
    return final_tags[:-1]

def tags():
    """Normalise the tags column."""
    df.loc[df['tags'] == '[none]', ['tags']] = ''  # Some videos have no tags
    # BUG FIX: the original called df['tags'].fillna('') without assigning
    # the result, so NaN tags survived and broke tags_preprocess.
    df['tags'] = df['tags'].fillna('')
    df['tags'] = df.apply(tags_preprocess, axis=1)

# Dates
def dates():
    """Normalise trending-date separators and split out the publish date."""
    df['trending_date'] = df.apply(lambda row: row['trending_date'].replace('.', '-'), axis=1)  # Trending Date
    df['publish_date'] = df.apply(lambda row: row['publish_time'].split('T')[0], axis=1)  # Publish Date

# Calling functions to preprocess data
text_cols()
tags()
dates()

# Comments count
df['total_comments'] = df['comment_count']

# Rearranging Columns
# And ignoring unnecessary Columns
df = df[['title', 'video_id', 'channel_title', 'views', 'likes', 'dislikes', 'total_comments', 'description', 'tags', 'publish_date', 'trending_date', 'thumbnail_link', 'original_title', 'original_channel_title', 'original_description']]

# Saving
df.to_csv('datasets/preprocessed.csv', index=False)
print('Preprocessing is done!\nPreprocessed data is saved to datasets/preprocessed.csv\nNow run the server.')
import re
import pandas as pd
import numpy as np
# NLTK corpora required by this script; downloads are cached after the
# first run.
import nltk
nltk.download('stopwords')
# BUG FIX: word_tokenize() requires the 'punkt' tokenizer models as well;
# without this download the script crashed on the first tokenize call.
nltk.download('punkt')
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
stop_words = set(stopwords.words('english'))
from nltk.stem import PorterStemmer
pst = PorterStemmer()

df = pd.read_csv('datasets/original dataset.csv')

# Removing data of videos which are deleted
deleted_videos = df[df['video_error_or_removed'] == True].index
df = df.drop(df.index[deleted_videos])

# Removing duplicated videos
df = df.drop_duplicates(subset='video_id', keep='first')

# Text Columns
def preprocess_str(col_name):
    """Lower-case *col_name* in place and strip non-alphanumeric characters."""
    df[col_name] = df.apply(
        lambda row: re.sub(r'[^0-9A-Za-z\s]', '', row[col_name]).lower(),
        axis=1
    )

def text_cols():
    """Keep the original text columns and store cleaned versions alongside."""
    # Video title
    df['original_title'] = df['title']
    preprocess_str('title')
    # Channel title
    df['original_channel_title'] = df['channel_title']
    preprocess_str('channel_title')
    # Description: drop URLs first, then strip punctuation and lower-case.
    df['original_description'] = df['description']
    df['description'] = df['description'].fillna('')
    df['description'] = df.apply(
        lambda row: re.sub(r'[^0-9A-Za-z\s]', '', re.sub(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', '', row['description'])).lower(),
        axis=1
    )

# Tags
def tags_preprocess(row):
    """Clean, tokenize, stem and de-duplicate one row's tag string."""
    tags = re.sub(r'"', '', re.sub(r'\|[^a-zA-Z]+\|', '|', row['tags']))
    # BUG FIX: in the original character class the sequence ")-_" formed a
    # range from ')' (0x29) to '_' (0x5F) that silently matched digits and
    # upper-case letters too. Moving '-' to the end makes it a literal dash.
    tags = re.sub(r'[!@#$%^&*()_+={}\[\]|\\"\';:.,<>/?`~-]', ' ', tags)
    tags = word_tokenize(tags)
    final_tags = ''
    for tag in tags:
        stemed_tag = pst.stem(tag.lower())
        if stemed_tag not in final_tags and not any([word for word in stop_words if stemed_tag in word]):
            final_tags += stemed_tag + ' '
    return final_tags[:-1]

def tags():
    """Normalise the tags column."""
    df.loc[df['tags'] == '[none]', ['tags']] = ''  # Some videos have no tags
    # BUG FIX: the original called df['tags'].fillna('') without assigning
    # the result, so NaN tags survived and broke tags_preprocess.
    df['tags'] = df['tags'].fillna('')
    df['tags'] = df.apply(tags_preprocess, axis=1)

# Dates
def dates():
    """Normalise trending-date separators and split out the publish date."""
    df['trending_date'] = df.apply(lambda row: row['trending_date'].replace('.', '-'), axis=1)  # Trending Date
    df['publish_date'] = df.apply(lambda row: row['publish_time'].split('T')[0], axis=1)  # Publish Date

# Calling functions to preprocess data
text_cols()
tags()
dates()

# Comments count
df['total_comments'] = df['comment_count']

# Rearranging Columns
# And ignoring unnecessary Columns
df = df[['title', 'video_id', 'channel_title', 'views', 'likes', 'dislikes', 'total_comments', 'description', 'tags', 'publish_date', 'trending_date', 'thumbnail_link', 'original_title', 'original_channel_title', 'original_description']]

# Saving
df.to_csv('datasets/preprocessed.csv', index=False)
print('Preprocessing is done!\nPreprocessed data is saved to datasets/preprocessed.csv\nNow run the server.')
from flask import redirect, url_for, g, current_app, render_template, request
from maintain_frontend.decorators import requires_permission
from maintain_frontend.constants.permissions import Permissions
from maintain_frontend.send_payment_link.validation.payment_reason_validator import PaymentReasonValidator
def register_routes(bp):
    """Attach the GET/POST '/payment-for' handlers to the blueprint."""
    for http_method, view in (('GET', get_payment_for), ('POST', post_payment_for)):
        bp.add_url_rule('/payment-for', view_func=view, methods=[http_method])
@requires_permission([Permissions.add_lon])
def get_payment_for():
    """Render the 'payment for' choice page.

    Redirects back to the start of the send-payment-link journey when the
    session carries no payment-link state (e.g. a deep link into the flow).
    """
    current_app.logger.info('Endpoint called')
    if g.session.send_payment_link_info is None:
        current_app.logger.info('Redirecting to: %s', url_for("send_payment_link.send_payment_link"))
        return redirect(url_for("send_payment_link.send_payment_link"))
    # FIX: log message previously had an unbalanced quote ("'payment_for.html").
    current_app.logger.info("Displaying page 'payment_for.html'")
    return render_template('payment_for.html',
                           request_body=None,
                           submit_url=url_for("send_payment_link.post_payment_for"))
@requires_permission([Permissions.add_lon])
def post_payment_for():
payment_for = request.form.get('payment_for')
current_app.logger.info("Endpoint called with payment for '{}'".format(payment_for))
validator = PaymentReasonValidator.validate(payment_for)
if validator.errors:
current_app.logger.warning("Validation errors found")
return render_template(
'payment_for.html',
validation_errors=validator.errors,
validation_summary_heading=validator.summary_heading_text,
error_heading_message=validator.summary_heading_text,
request_body=request.form,
submit_url=url_for("send_payment_link.post_payment_for")
), 400
if payment_for == 'lon':
return redirect(url_for("send_payment_link.get_enter_email"))
else:
# TODO(official_search) replace with functional code for an official search, in a later story
return redirect(url_for("home.get")) | maintain_frontend/send_payment_link/payment_for.py | from flask import redirect, url_for, g, current_app, render_template, request
from maintain_frontend.decorators import requires_permission
from maintain_frontend.constants.permissions import Permissions
from maintain_frontend.send_payment_link.validation.payment_reason_validator import PaymentReasonValidator
def register_routes(bp):
    """Attach the GET/POST '/payment-for' handlers to the blueprint."""
    for http_method, view in (('GET', get_payment_for), ('POST', post_payment_for)):
        bp.add_url_rule('/payment-for', view_func=view, methods=[http_method])
@requires_permission([Permissions.add_lon])
def get_payment_for():
    """Render the 'payment for' choice page.

    Redirects back to the start of the send-payment-link journey when the
    session carries no payment-link state (e.g. a deep link into the flow).
    """
    current_app.logger.info('Endpoint called')
    if g.session.send_payment_link_info is None:
        current_app.logger.info('Redirecting to: %s', url_for("send_payment_link.send_payment_link"))
        return redirect(url_for("send_payment_link.send_payment_link"))
    # FIX: log message previously had an unbalanced quote ("'payment_for.html").
    current_app.logger.info("Displaying page 'payment_for.html'")
    return render_template('payment_for.html',
                           request_body=None,
                           submit_url=url_for("send_payment_link.post_payment_for"))
@requires_permission([Permissions.add_lon])
def post_payment_for():
payment_for = request.form.get('payment_for')
current_app.logger.info("Endpoint called with payment for '{}'".format(payment_for))
validator = PaymentReasonValidator.validate(payment_for)
if validator.errors:
current_app.logger.warning("Validation errors found")
return render_template(
'payment_for.html',
validation_errors=validator.errors,
validation_summary_heading=validator.summary_heading_text,
error_heading_message=validator.summary_heading_text,
request_body=request.form,
submit_url=url_for("send_payment_link.post_payment_for")
), 400
if payment_for == 'lon':
return redirect(url_for("send_payment_link.get_enter_email"))
else:
# TODO(official_search) replace with functional code for an official search, in a later story
return redirect(url_for("home.get")) | 0.474875 | 0.046313 |
import json
import os
import pickle
from ._utils import (
check_file,
load_graph,
check_available,
generate_session,
sentencepiece_tokenizer_bert,
sentencepiece_tokenizer_xlnet,
)
from .._models._sklearn_model import BINARY_BAYES, MULTICLASS_BAYES
from .._models._bert_model import MULTICLASS_BERT, BINARY_BERT
from .._models._xlnet_model import MULTICLASS_XLNET, BINARY_XLNET
from .._transformer._bert import bert_num_layers
def multinomial(path, s3_path, class_name, label, validate = True):
    """Load a pickled multinomial naive-bayes model and its vectorizer.

    ``path``/``s3_path`` map artifact locations, ``class_name`` is used in
    error messages and cache paths, ``label`` selects binary vs multiclass
    wrapping (more than two labels -> multiclass).
    """
    if validate:
        check_file(path['multinomial'], s3_path['multinomial'])
    else:
        if not check_available(path['multinomial']):
            raise Exception(
                '%s/multinomial is not available, please `validate = True`'
                % (class_name)
            )
    try:
        with open(path['multinomial']['model'], 'rb') as fopen:
            multinomial = pickle.load(fopen)
        with open(path['multinomial']['vector'], 'rb') as fopen:
            vectorize = pickle.load(fopen)
    except Exception as e:
        # FIX: was a bare `except:` that also trapped KeyboardInterrupt /
        # SystemExit and discarded the original error; chain the cause.
        raise Exception(
            "model corrupted due to some reasons, please run malaya.clear_cache('%s/multinomial') and try again"
            % (class_name)
        ) from e
    from ..stem import _classification_textcleaning_stemmer

    selected_class = MULTICLASS_BAYES if len(label) > 2 else BINARY_BAYES
    return selected_class(
        multinomial = multinomial,
        label = label,
        vectorize = vectorize,
        cleaning = _classification_textcleaning_stemmer,
    )
def transformer(
path,
s3_path,
class_name,
label,
model = 'bert',
size = 'base',
validate = True,
):
if validate:
check_file(path[model][size], s3_path[model][size])
else:
if not check_available(path[model][size]):
raise Exception(
'%s/%s/%s is not available, please `validate = True`'
% (class_name, model, size)
)
try:
g = load_graph(path[model][size]['model'])
except:
raise Exception(
"model corrupted due to some reasons, please run malaya.clear_cache('%s/%s/%s') and try again"
% (class_name, model, size)
)
if len(label) > 2 or class_name == 'relevancy':
if model in ['albert', 'bert']:
selected_class = MULTICLASS_BERT
if model in ['xlnet']:
selected_class = MULTICLASS_XLNET
else:
if model in ['albert', 'bert']:
selected_class = BINARY_BERT
if model in ['xlnet']:
selected_class = BINARY_XLNET
if model in ['albert', 'bert']:
if model == 'bert':
from .._transformer._bert import _extract_attention_weights_import
if model == 'albert':
from .._transformer._albert import _extract_attention_weights_import
tokenizer, cls, sep = sentencepiece_tokenizer_bert(
path[model][size]['tokenizer'], path[model][size]['vocab']
)
return selected_class(
X = g.get_tensor_by_name('import/Placeholder:0'),
segment_ids = None,
input_masks = None,
logits = g.get_tensor_by_name('import/logits:0'),
logits_seq = g.get_tensor_by_name('import/logits_seq:0'),
sess = generate_session(graph = g),
tokenizer = tokenizer,
label = label,
cls = cls,
sep = sep,
attns = _extract_attention_weights_import(bert_num_layers[size], g),
class_name = class_name,
)
if model in ['xlnet']:
from .._transformer._xlnet import _extract_attention_weights_import
tokenizer = sentencepiece_tokenizer_xlnet(
path[model][size]['tokenizer']
)
return selected_class(
X = g.get_tensor_by_name('import/Placeholder:0'),
segment_ids = g.get_tensor_by_name('import/Placeholder_1:0'),
input_masks = g.get_tensor_by_name('import/Placeholder_2:0'),
logits = g.get_tensor_by_name('import/logits:0'),
logits_seq = g.get_tensor_by_name('import/logits_seq:0'),
sess = generate_session(graph = g),
tokenizer = tokenizer,
label = label,
attns = _extract_attention_weights_import(g),
class_name = class_name,
) | malaya/_utils/_softmax_class.py | import json
import os
import pickle
from ._utils import (
check_file,
load_graph,
check_available,
generate_session,
sentencepiece_tokenizer_bert,
sentencepiece_tokenizer_xlnet,
)
from .._models._sklearn_model import BINARY_BAYES, MULTICLASS_BAYES
from .._models._bert_model import MULTICLASS_BERT, BINARY_BERT
from .._models._xlnet_model import MULTICLASS_XLNET, BINARY_XLNET
from .._transformer._bert import bert_num_layers
def multinomial(path, s3_path, class_name, label, validate = True):
    """Load a pickled multinomial naive-bayes model and its vectorizer.

    ``path``/``s3_path`` map artifact locations, ``class_name`` is used in
    error messages and cache paths, ``label`` selects binary vs multiclass
    wrapping (more than two labels -> multiclass).
    """
    if validate:
        check_file(path['multinomial'], s3_path['multinomial'])
    else:
        if not check_available(path['multinomial']):
            raise Exception(
                '%s/multinomial is not available, please `validate = True`'
                % (class_name)
            )
    try:
        with open(path['multinomial']['model'], 'rb') as fopen:
            multinomial = pickle.load(fopen)
        with open(path['multinomial']['vector'], 'rb') as fopen:
            vectorize = pickle.load(fopen)
    except Exception as e:
        # FIX: was a bare `except:` that also trapped KeyboardInterrupt /
        # SystemExit and discarded the original error; chain the cause.
        raise Exception(
            "model corrupted due to some reasons, please run malaya.clear_cache('%s/multinomial') and try again"
            % (class_name)
        ) from e
    from ..stem import _classification_textcleaning_stemmer

    selected_class = MULTICLASS_BAYES if len(label) > 2 else BINARY_BAYES
    return selected_class(
        multinomial = multinomial,
        label = label,
        vectorize = vectorize,
        cleaning = _classification_textcleaning_stemmer,
    )
def transformer(
path,
s3_path,
class_name,
label,
model = 'bert',
size = 'base',
validate = True,
):
if validate:
check_file(path[model][size], s3_path[model][size])
else:
if not check_available(path[model][size]):
raise Exception(
'%s/%s/%s is not available, please `validate = True`'
% (class_name, model, size)
)
try:
g = load_graph(path[model][size]['model'])
except:
raise Exception(
"model corrupted due to some reasons, please run malaya.clear_cache('%s/%s/%s') and try again"
% (class_name, model, size)
)
if len(label) > 2 or class_name == 'relevancy':
if model in ['albert', 'bert']:
selected_class = MULTICLASS_BERT
if model in ['xlnet']:
selected_class = MULTICLASS_XLNET
else:
if model in ['albert', 'bert']:
selected_class = BINARY_BERT
if model in ['xlnet']:
selected_class = BINARY_XLNET
if model in ['albert', 'bert']:
if model == 'bert':
from .._transformer._bert import _extract_attention_weights_import
if model == 'albert':
from .._transformer._albert import _extract_attention_weights_import
tokenizer, cls, sep = sentencepiece_tokenizer_bert(
path[model][size]['tokenizer'], path[model][size]['vocab']
)
return selected_class(
X = g.get_tensor_by_name('import/Placeholder:0'),
segment_ids = None,
input_masks = None,
logits = g.get_tensor_by_name('import/logits:0'),
logits_seq = g.get_tensor_by_name('import/logits_seq:0'),
sess = generate_session(graph = g),
tokenizer = tokenizer,
label = label,
cls = cls,
sep = sep,
attns = _extract_attention_weights_import(bert_num_layers[size], g),
class_name = class_name,
)
if model in ['xlnet']:
from .._transformer._xlnet import _extract_attention_weights_import
tokenizer = sentencepiece_tokenizer_xlnet(
path[model][size]['tokenizer']
)
return selected_class(
X = g.get_tensor_by_name('import/Placeholder:0'),
segment_ids = g.get_tensor_by_name('import/Placeholder_1:0'),
input_masks = g.get_tensor_by_name('import/Placeholder_2:0'),
logits = g.get_tensor_by_name('import/logits:0'),
logits_seq = g.get_tensor_by_name('import/logits_seq:0'),
sess = generate_session(graph = g),
tokenizer = tokenizer,
label = label,
attns = _extract_attention_weights_import(g),
class_name = class_name,
) | 0.361165 | 0.131814 |
import sys
import argparse
import logging
import time
from contextlib import closing
from dae.gpf_instance.gpf_instance import GPFInstance
from dae.backends.impala.impala_variants import ImpalaVariants
logger = logging.getLogger("impala_tables_stats")
def parse_cli_arguments(argv, gpf_instance):
    """Parse command-line options for the stats tool.

    *gpf_instance* is accepted for interface symmetry with sibling tools
    but is not consulted here.
    """
    parser = argparse.ArgumentParser(
        description="loading study parquet files in impala db",
        conflict_handler="resolve",
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument('--verbose', '-V', action='count', default=0)
    parser.add_argument(
        "--studies",
        type=str,
        metavar="<studies IDs>",
        help="comma separated list of study IDs",
    )
    return parser.parse_args(argv)
def variants_region_bins(study_backend):
    """Return the distinct ``region_bin`` partition values of the study's
    variants table."""
    impala = study_backend._impala_helpers
    region_bins = []
    with closing(impala.connection()) as connection:
        with connection.cursor() as cursor:
            q = f"SELECT DISTINCT(region_bin) FROM " \
                f"{study_backend.db}.{study_backend.variants_table}"
            # FIX: use the module logger instead of bare print() calls,
            # consistent with the rest of this tool.
            logger.debug("region bins query: %s", q)
            cursor.execute(q)
            for row in cursor:
                region_bins.append(row[0])
    logger.debug("collected region bins: %s", region_bins)
    return region_bins
def variants_compute_stats(study_backend, region_bin=None):
    """COMPUTE (INCREMENTAL) STATS on the variants table.

    When *region_bin* is given, only that partition is refreshed.
    """
    table = f"{study_backend.db}.{study_backend.variants_table}"
    if region_bin is None:
        q = f"COMPUTE STATS {table}"
    else:
        q = (
            f"COMPUTE INCREMENTAL STATS {table} "
            f"PARTITION (region_bin='{region_bin}')"
        )
    impala = study_backend._impala_helpers
    with closing(impala.connection()) as connection:
        with connection.cursor() as cursor:
            logger.info(f"compute stats for variants table: {q}")
            cursor.execute(q)
def summary_variants_compute_stats(study_backend, region_bin=None):
    """COMPUTE (INCREMENTAL) STATS on the summary variants table.

    When *region_bin* is given, only that partition is refreshed.
    """
    table = f"{study_backend.db}.{study_backend.summary_variants_table}"
    if region_bin is None:
        q = f"COMPUTE STATS {table}"
    else:
        q = (
            f"COMPUTE INCREMENTAL STATS {table} "
            f"PARTITION (region_bin='{region_bin}')"
        )
    impala = study_backend._impala_helpers
    with closing(impala.connection()) as connection:
        with connection.cursor() as cursor:
            # FIX: the log message said "variants table" (copy-paste);
            # this function targets the *summary* variants table.
            logger.info(f"compute stats for summary variants table: {q}")
            cursor.execute(q)
def pedigree_compute_stats(study_backend):
    """COMPUTE STATS on the study's pedigree table."""
    query = (
        f"COMPUTE STATS "
        f"{study_backend.db}.{study_backend.pedigree_table}"
    )
    impala = study_backend._impala_helpers
    with closing(impala.connection()) as connection:
        with connection.cursor() as cursor:
            logger.info(f"compute stats for pedigree table: {query}")
            cursor.execute(query)
def main(argv=None, gpf_instance=None):
    """Entry point: compute Impala table stats for the selected studies.

    *argv* defaults to ``sys.argv[1:]`` evaluated at call time.  (The old
    default ``argv=sys.argv[1:]`` was frozen at import time, ignoring any
    later mutation of ``sys.argv``.)
    """
    if argv is None:
        argv = sys.argv[1:]
    if gpf_instance is None:
        gpf_instance = GPFInstance()

    argv = parse_cli_arguments(argv, gpf_instance)

    if argv.verbose == 1:
        logging.basicConfig(level=logging.WARNING)
    elif argv.verbose == 2:
        logging.basicConfig(level=logging.INFO)
    elif argv.verbose >= 3:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.ERROR)
    logging.getLogger("impala").setLevel(logging.WARNING)

    # No --studies: process every non-group genotype study.
    if argv.studies is None:
        study_ids = [
            gd.study_id
            for gd in gpf_instance.get_all_genotype_data() if not gd.is_group]
    else:
        study_ids = [sid.strip() for sid in argv.studies.split(",")]

    logger.info(f"computing table stats for studies: {study_ids}")

    for study_id in study_ids:
        study = gpf_instance.get_genotype_data(study_id)
        assert study.study_id == study_id

        study_backend = study._backend
        if not isinstance(study_backend, ImpalaVariants):
            logger.info(f"not an impala study: {study_id}; skipping...")
            continue

        pedigree_compute_stats(study_backend)

        if study_backend.variants_table is None:
            continue

        if "region_bin" not in study_backend.schema:
            # Unpartitioned table: one full COMPUTE STATS.
            variants_compute_stats(study_backend, region_bin=None)
            if study_backend.has_summary_variants_table:
                summary_variants_compute_stats(study_backend, region_bin=None)
        else:
            # Partitioned table: incremental stats per region bin.
            region_bins = variants_region_bins(study_backend)
            logger.info(
                f"processing {len(region_bins)} region bins; {region_bins}")
            for index, region_bin in enumerate(region_bins):
                start = time.time()
                variants_compute_stats(study_backend, region_bin)
                if study_backend.has_summary_variants_table:
                    summary_variants_compute_stats(study_backend, region_bin)
                elapsed = time.time() - start
                logger.info(
                    # index + 1 so progress reads 1..N instead of 0..N-1
                    f"computing stats {index + 1}/{len(region_bins)} "
                    f"for {study_backend.db}.{study_backend.variants_table}; "
                    f"{elapsed:.2f} secs")
if __name__ == "__main__":
main(sys.argv[1:]) | dae/dae/tools/impala_tables_stats.py | import sys
import argparse
import logging
import time
from contextlib import closing
from dae.gpf_instance.gpf_instance import GPFInstance
from dae.backends.impala.impala_variants import ImpalaVariants
logger = logging.getLogger("impala_tables_stats")
def parse_cli_arguments(argv, gpf_instance):
    """Parse command-line options for the stats tool.

    *gpf_instance* is accepted for interface symmetry with sibling tools
    but is not consulted here.
    """
    parser = argparse.ArgumentParser(
        description="loading study parquet files in impala db",
        conflict_handler="resolve",
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument('--verbose', '-V', action='count', default=0)
    parser.add_argument(
        "--studies",
        type=str,
        metavar="<studies IDs>",
        help="comma separated list of study IDs",
    )
    return parser.parse_args(argv)
def variants_region_bins(study_backend):
    """Return the distinct ``region_bin`` partition values of the study's
    variants table."""
    impala = study_backend._impala_helpers
    region_bins = []
    with closing(impala.connection()) as connection:
        with connection.cursor() as cursor:
            q = f"SELECT DISTINCT(region_bin) FROM " \
                f"{study_backend.db}.{study_backend.variants_table}"
            # FIX: use the module logger instead of bare print() calls,
            # consistent with the rest of this tool.
            logger.debug("region bins query: %s", q)
            cursor.execute(q)
            for row in cursor:
                region_bins.append(row[0])
    logger.debug("collected region bins: %s", region_bins)
    return region_bins
def variants_compute_stats(study_backend, region_bin=None):
    """COMPUTE (INCREMENTAL) STATS on the variants table.

    When *region_bin* is given, only that partition is refreshed.
    """
    table = f"{study_backend.db}.{study_backend.variants_table}"
    if region_bin is None:
        q = f"COMPUTE STATS {table}"
    else:
        q = (
            f"COMPUTE INCREMENTAL STATS {table} "
            f"PARTITION (region_bin='{region_bin}')"
        )
    impala = study_backend._impala_helpers
    with closing(impala.connection()) as connection:
        with connection.cursor() as cursor:
            logger.info(f"compute stats for variants table: {q}")
            cursor.execute(q)
def summary_variants_compute_stats(study_backend, region_bin=None):
    """COMPUTE (INCREMENTAL) STATS on the summary variants table.

    When *region_bin* is given, only that partition is refreshed.
    """
    table = f"{study_backend.db}.{study_backend.summary_variants_table}"
    if region_bin is None:
        q = f"COMPUTE STATS {table}"
    else:
        q = (
            f"COMPUTE INCREMENTAL STATS {table} "
            f"PARTITION (region_bin='{region_bin}')"
        )
    impala = study_backend._impala_helpers
    with closing(impala.connection()) as connection:
        with connection.cursor() as cursor:
            # FIX: the log message said "variants table" (copy-paste);
            # this function targets the *summary* variants table.
            logger.info(f"compute stats for summary variants table: {q}")
            cursor.execute(q)
def pedigree_compute_stats(study_backend):
    """COMPUTE STATS on the study's pedigree table."""
    query = (
        f"COMPUTE STATS "
        f"{study_backend.db}.{study_backend.pedigree_table}"
    )
    impala = study_backend._impala_helpers
    with closing(impala.connection()) as connection:
        with connection.cursor() as cursor:
            logger.info(f"compute stats for pedigree table: {query}")
            cursor.execute(query)
def main(argv=None, gpf_instance=None):
    """Entry point: compute Impala table stats for the selected studies.

    *argv* defaults to ``sys.argv[1:]`` evaluated at call time.  (The old
    default ``argv=sys.argv[1:]`` was frozen at import time, ignoring any
    later mutation of ``sys.argv``.)
    """
    if argv is None:
        argv = sys.argv[1:]
    if gpf_instance is None:
        gpf_instance = GPFInstance()

    argv = parse_cli_arguments(argv, gpf_instance)

    if argv.verbose == 1:
        logging.basicConfig(level=logging.WARNING)
    elif argv.verbose == 2:
        logging.basicConfig(level=logging.INFO)
    elif argv.verbose >= 3:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.ERROR)
    logging.getLogger("impala").setLevel(logging.WARNING)

    # No --studies: process every non-group genotype study.
    if argv.studies is None:
        study_ids = [
            gd.study_id
            for gd in gpf_instance.get_all_genotype_data() if not gd.is_group]
    else:
        study_ids = [sid.strip() for sid in argv.studies.split(",")]

    logger.info(f"computing table stats for studies: {study_ids}")

    for study_id in study_ids:
        study = gpf_instance.get_genotype_data(study_id)
        assert study.study_id == study_id

        study_backend = study._backend
        if not isinstance(study_backend, ImpalaVariants):
            logger.info(f"not an impala study: {study_id}; skipping...")
            continue

        pedigree_compute_stats(study_backend)

        if study_backend.variants_table is None:
            continue

        if "region_bin" not in study_backend.schema:
            # Unpartitioned table: one full COMPUTE STATS.
            variants_compute_stats(study_backend, region_bin=None)
            if study_backend.has_summary_variants_table:
                summary_variants_compute_stats(study_backend, region_bin=None)
        else:
            # Partitioned table: incremental stats per region bin.
            region_bins = variants_region_bins(study_backend)
            logger.info(
                f"processing {len(region_bins)} region bins; {region_bins}")
            for index, region_bin in enumerate(region_bins):
                start = time.time()
                variants_compute_stats(study_backend, region_bin)
                if study_backend.has_summary_variants_table:
                    summary_variants_compute_stats(study_backend, region_bin)
                elapsed = time.time() - start
                logger.info(
                    # index + 1 so progress reads 1..N instead of 0..N-1
                    f"computing stats {index + 1}/{len(region_bins)} "
                    f"for {study_backend.db}.{study_backend.variants_table}; "
                    f"{elapsed:.2f} secs")
if __name__ == "__main__":
main(sys.argv[1:]) | 0.321141 | 0.125146 |
import sqlite3
import datetime
import logging
from typing import Union
from sqlite3 import Error
# Child of the package-level 'marble_match' logger; handlers and levels are
# configured by the application entry point.
logger = logging.getLogger('marble_match.' + __name__)
def replace_char_list(_old: str, _replacement: list, _replace: str = '?') -> str:
    """Substitute occurrences of *_replace* in *_old*, left to right, with
    the str() of successive elements of *_replacement*.

    Used only to render parameterised SQL for debug logging; extra
    placeholders are left untouched.
    """
    rendered = _old
    for value in _replacement:
        rendered = rendered.replace(_replace, str(value), 1)
    return rendered
def create_con(path: str):
    """Open a SQLite connection with declared-type and column-name parsing
    enabled; re-raises the sqlite3.Error on failure."""
    logger.debug(f'create_connection: {path}')
    detect = sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES
    try:
        con = sqlite3.connect(path, detect_types=detect)
    except Error as e:
        logger.error(f'Failed to create connection: {e}')
        raise e
    logger.debug(f'connection created: {con}')
    return con
def create_user(connection: sqlite3.Connection, player_id: Union[int, None],
                uuid: int, nickname: str, marbles: int, server_id: int,
                wins: int = 0, loses: int = 0) -> int:
    """Insert a row into ``users``; return its rowid, or 0 on failure."""
    logger.debug(f'create_user: {player_id}, {uuid}, {nickname}, {marbles}, {server_id}, {wins}, {loses}')
    sql = "INSERT INTO users VALUES (?, ?, ?, ?, ?, ?, ?)"
    params = [player_id, uuid, nickname, marbles, server_id, wins, loses]
    try:
        cursor = connection.cursor()
        cursor.execute(sql, params)
        connection.commit()
    except Error as e:
        logger.error(f'There was an error inserting a user into users: {e}')
        return 0
    logger.debug(replace_char_list(sql, params))
    logger.debug(f'lastrowid: {cursor.lastrowid}')
    return cursor.lastrowid
# TODO Update references to new parameters
def create_match(connection: sqlite3.Connection, match_id: Union[int, None], amount: int,
                 participant1: int, participant2: int, active: int = 0, accepted: int = 0,
                 game: str = 'melee', format: str = 'Bo3') -> int:
    """Insert a row into ``matches``; return its rowid, or 0 on failure."""
    logger.debug(f'create_match: {match_id}, {amount}, {participant1}, {participant2}, {active}, {accepted},'
                 f'{game}, {format}')
    sql = "INSERT INTO matches VALUES (?, ?, ?, ?, ?, ?, ?, ?)"
    # Column order: id, amount, active, participant1, participant2, accepted, game, format.
    params = [match_id, amount, active, participant1, participant2, accepted, game, format]
    try:
        cursor = connection.cursor()
        cursor.execute(sql, params)
        connection.commit()
    except Error as e:
        logger.error(f'There was an error inserting a match into matches: {e}')
        return 0
    logger.debug(replace_char_list(sql, params))
    logger.debug(f'lastrowid: {cursor.lastrowid}')
    return cursor.lastrowid
def create_bet(connection: sqlite3.Connection, bet_id: Union[int, None], amount: int, match_id: int, better_id: int,
               participant1: int) -> int:
    """Insert a row into ``bets`` and deduct the stake from the better's
    marble balance; return the bet rowid, or 0 on failure."""
    logger.debug(f'create_bet: {bet_id}, {amount}, {match_id}, {better_id}, {participant1}')
    sql = "INSERT INTO bets VALUES (?, ?, ?, ?, ?)"
    params = [bet_id, amount, match_id, better_id, participant1]
    try:
        cursor = connection.cursor()
        cursor.execute(sql, params)
        connection.commit()
        logger.debug(replace_char_list(sql, params))
        logger.debug(f'lastrowid: {cursor.lastrowid}')
        # The stake leaves the better's balance as soon as the bet is placed.
        subtract_marbles(connection, better_id, amount)
        logger.debug(f'subtract_marbles called')
        return cursor.lastrowid
    except Error as e:
        logger.error(f'There was an error inserting a bet into bets: {e}')
        return 0
# TODO Update references with new parameters
def create_match_history(connection: sqlite3.Connection, match_id: Union[int, None], amount: int,
                         participant1: int, participant2: int, winner_id: int,
                         time: Union[datetime.datetime, None] = None,
                         game: str = 'melee', format: str = 'Bo3') -> int:
    """Insert a completed match into ``matches_history``; return its rowid,
    or 0 on failure.

    *time* defaults to the current UTC time at call time.  (FIX: the old
    default ``datetime.datetime.utcnow()`` was evaluated once at import,
    stamping every match with module-load time.)
    """
    if time is None:
        time = datetime.datetime.utcnow()
    logger.debug(f'create_match_history: {match_id}, {amount}, {participant1}, {participant2}, {winner_id}, {time},'
                 f'{game}, {format}')
    query = "INSERT INTO matches_history VALUES (?, ?, ?, ?, ?, ?, ?, ?)"
    query_param = [match_id, amount, participant1, participant2, winner_id, time, game, format]
    try:
        cur = connection.cursor()
        cur.execute(query, query_param)
        connection.commit()
        logger.debug(replace_char_list(query, query_param))
        logger.debug(f'lastrowid: {cur.lastrowid}')
        return cur.lastrowid
    except Error as e:
        logger.error(f'There was an error inserting a match into match_history: {e}')
        return 0
def create_bet_history(connection: sqlite3.Connection, bet_id: Union[int, None], amount: int, match_id: int,
                       better_id: int, participant1: int, winner_id: int,
                       time: Union[datetime.datetime, None] = None) -> int:
    """Insert a settled bet into ``bets_history``; return its rowid, or 0 on
    failure.

    *time* defaults to the current UTC time at call time.  (FIX: the old
    default ``datetime.datetime.utcnow()`` was evaluated once at import.)
    """
    if time is None:
        time = datetime.datetime.utcnow()
    logger.debug(f'create_bet_history: '
                 f'{bet_id}, {amount}, {match_id}, {better_id}, {participant1}, {winner_id}, {time}')
    query = "INSERT INTO bets_history VALUES (?, ?, ?, ?, ?, ?, ?)"
    query_param = [bet_id, amount, match_id, better_id, participant1, winner_id, time]
    try:
        cur = connection.cursor()
        cur.execute(query, query_param)
        connection.commit()
        logger.debug(replace_char_list(query, query_param))
        logger.debug(f'lastrowid: {cur.lastrowid}')
        return cur.lastrowid
    except Error as e:
        logger.error(f'There was an error inserting a bet into bet_history: {e}')
        return 0
def create_friendly(connection: sqlite3.Connection, player_id: int,
                    time: Union[datetime.datetime, None] = None):
    """Insert a row into ``friendly``; return its rowid, or 0 on failure.

    *time* defaults to the current UTC time at call time.  (FIX: the old
    default ``datetime.datetime.utcnow()`` was evaluated once at import.)
    """
    if time is None:
        time = datetime.datetime.utcnow()
    # FIX: debug tag previously said 'create_user' (copy-paste).
    logger.debug(f'create_friendly: {player_id}, {time}')
    query = "INSERT INTO friendly VALUES (?, ?)"
    query_param = [player_id, time]
    try:
        cur = connection.cursor()
        cur.execute(query, query_param)
        connection.commit()
        logger.debug(replace_char_list(query, query_param))
        logger.debug(f'lastrowid: {cur.lastrowid}')
        return cur.lastrowid
    except Error as e:
        logger.error(f'There was an error inserting a friendly into friendly: {e}')
        return 0
def update_friendly(connection: sqlite3.Connection, player_id: int,
                    time: Union[datetime.datetime, None] = None) -> bool:
    """Update a player's ``friendly.last_used`` timestamp; True on success.

    *time* defaults to the current UTC time at call time.  (FIX: the old
    default ``datetime.datetime.utcnow()`` was evaluated once at import.)
    """
    if time is None:
        time = datetime.datetime.utcnow()
    logger.debug(f'update_friendly: {player_id}, {time}')
    query = "UPDATE friendly SET last_used = ? WHERE id = ?"
    query_param = [time, player_id]
    try:
        cur = connection.cursor()
        cur.execute(query, query_param)
        connection.commit()
        logger.debug(replace_char_list(query, query_param))
        logger.debug(f'lastrowid: {cur.lastrowid}')
        return True
    except Error as e:
        logger.error(f'There was an error updating last_used in friendly: {e}')
        return False
def update_match_activity(connection: sqlite3.Connection, match_id: int, active: int = 1) -> bool:
    """Set the ``active`` flag of a match; True on success."""
    logger.debug(f'update_match_activity: {match_id}, {active}')
    sql = "UPDATE matches SET active = ? WHERE id = ?"
    params = [active, match_id]
    try:
        cursor = connection.cursor()
        cursor.execute(sql, params)
        connection.commit()
    except Error as e:
        logger.error(f'There was an error updating activity in matches: {e}')
        return False
    logger.debug(replace_char_list(sql, params))
    logger.debug(f'lastrowid: {cursor.lastrowid}')
    return True


def update_match_accepted(connection: sqlite3.Connection, match_id: int, accepted: int = 1) -> bool:
    """Set the ``accepted`` flag of a match; True on success."""
    logger.debug(f'update_match_accepted: {match_id}, {accepted}')
    sql = "UPDATE matches SET accepted = ? WHERE id = ?"
    params = [accepted, match_id]
    try:
        cursor = connection.cursor()
        cursor.execute(sql, params)
        connection.commit()
    except Error as e:
        logger.error(f'There was an error updating accepted in matches: {e}')
        return False
    logger.debug(replace_char_list(sql, params))
    logger.debug(f'lastrowid: {cursor.lastrowid}')
    return True
def update_marble_count(connection: sqlite3.Connection, player_id: int, marbles: int) -> bool:
    """Set a player's marble balance to an absolute value; True on success."""
    logger.debug(f'update_marble_count: {player_id}, {marbles}')
    sql = "UPDATE users SET marbles = ? WHERE id = ?"
    params = [marbles, player_id]
    try:
        cursor = connection.cursor()
        cursor.execute(sql, params)
        connection.commit()
    except Error as e:
        logger.error(f'There was an error updating marbles in user: {e}')
        return False
    logger.debug(replace_char_list(sql, params))
    logger.debug(f'lastrowid: {cursor.lastrowid}')
    return True
def update_player_nickname(connection: sqlite3.Connection, player_id: int, nickname: str) -> bool:
    """Set a player's nickname; True on success."""
    logger.debug(f'update_player_nickname: {player_id}, {nickname}')
    query = "UPDATE users SET nickname = ? WHERE id = ?"
    query_param = [nickname, player_id]
    try:
        cur = connection.cursor()
        cur.execute(query, query_param)
        connection.commit()
        logger.debug(replace_char_list(query, query_param))
        logger.debug(f'lastrowid: {cur.lastrowid}')
        return True
    except Error as e:
        # FIX: the error message previously said "updating wins" (copy-paste).
        logger.error(f'There was an error updating nickname in user: {e}')
        return False
def update_player_wins(connection: sqlite3.Connection, player_id: int, wins: int) -> bool:
    """Set a player's win count to an absolute value; True on success."""
    logger.debug(f'update_player_wins: {player_id}, {wins}')
    sql = "UPDATE users SET wins = ? WHERE id = ?"
    params = [wins, player_id]
    try:
        cursor = connection.cursor()
        cursor.execute(sql, params)
        connection.commit()
    except Error as e:
        logger.error(f'There was an error updating wins in user: {e}')
        return False
    logger.debug(replace_char_list(sql, params))
    logger.debug(f'lastrowid: {cursor.lastrowid}')
    return True


def update_player_loses(connection: sqlite3.Connection, player_id: int, loses: int) -> bool:
    """Set a player's loss count to an absolute value; True on success."""
    logger.debug(f'update_player_loses: {player_id}, {loses}')
    sql = "UPDATE users SET loses = ? WHERE id = ?"
    params = [loses, player_id]
    try:
        cursor = connection.cursor()
        cursor.execute(sql, params)
        connection.commit()
    except Error as e:
        logger.error(f'There was an error updating loses in user: {e}')
        return False
    logger.debug(replace_char_list(sql, params))
    logger.debug(f'lastrowid: {cursor.lastrowid}')
    return True
def update_bet(connection: sqlite3.Connection, bet_id: int, player_id: int, amount: int) -> bool:
    """Change the amount and backed participant of an existing bet; True on
    success."""
    logger.debug(f'update_bet: {bet_id}, {player_id}, {amount}')
    sql = "UPDATE bets SET amount=?, participant1=? WHERE id=?"
    params = [amount, player_id, bet_id]
    try:
        cursor = connection.cursor()
        cursor.execute(sql, params)
        connection.commit()
    except Error as e:
        logger.error(f'There was an error updating bet in bets: {e}')
        return False
    logger.debug(replace_char_list(sql, params))
    logger.debug(f'lastrowid: {cursor.lastrowid}')
    return True
def get_friendly_last_used(connection: sqlite3.Connection, player_id: int) -> Union[datetime.datetime, int]:
    """Return the ``last_used`` timestamp for *player_id*, or 0 when the
    player has no row or the query fails."""
    logger.debug(f'get_friendly_last_used: {player_id}')
    sql = "SELECT * FROM friendly WHERE id=?"
    params = [player_id]
    try:
        cursor = connection.cursor()
        cursor.execute(sql, params)
        row = cursor.fetchone()
    except Error as e:
        logger.error(f'There was an error selecting a friendly from friendly: {e}')
        return 0
    logger.debug(replace_char_list(sql, params))
    logger.debug(f'lastrowid: {cursor.lastrowid}')
    logger.debug(f'results: {row}')
    # Row layout: (id, last_used).
    return row[1] if row is not None else 0
def get_match_info_by_id(connection: sqlite3.Connection, match_id: int) -> Union[tuple, int]:
    """Return the ``matches`` row for *match_id*, or 0 when missing or on
    error."""
    logger.debug(f'get_match_info_by_id: {match_id}')
    sql = "SELECT * FROM matches WHERE id=?"
    params = [match_id]
    try:
        cursor = connection.cursor()
        cursor.execute(sql, params)
        row = cursor.fetchone()
    except Error as e:
        logger.error(f'There was an error selecting a match from matches: {e}')
        return 0
    logger.debug(replace_char_list(sql, params))
    logger.debug(f'lastrowid: {cursor.lastrowid}')
    logger.debug(f'results: {row}')
    return row if row is not None else 0
# TODO Update function to use player2_id for simplified functions
def get_match_info_all(connection: sqlite3.Connection, player_id: int):
    """Return every ``matches`` row in which *player_id* participates
    (either side), or 0 on error."""
    logger.debug(f'get_match_info_all: {player_id}')
    sql = "SELECT * FROM matches WHERE participant1=? OR participant2=?"
    params = [player_id, player_id]
    try:
        cursor = connection.cursor()
        cursor.execute(sql, params)
        rows = cursor.fetchall()
    except Error as e:
        logger.error(f'There was an error selecting all matches from matches: {e}')
        return 0
    logger.debug(replace_char_list(sql, params))
    logger.debug(f'lastrowid: {cursor.lastrowid}')
    logger.debug(f'results: {rows}')
    # fetchall() never returns None, so rows (possibly []) is returned as-is.
    return rows if rows is not None else 0
def get_match_history_info(connection: sqlite3.Connection, match_id: int) -> Union[tuple, int]:
    """Return the ``matches_history`` row for *match_id*, or 0 when missing
    or on error."""
    logger.debug(f'get_match_history_info: {match_id}')
    sql = "SELECT * FROM matches_history WHERE id=?"
    params = [match_id]
    try:
        cursor = connection.cursor()
        cursor.execute(sql, params)
        row = cursor.fetchone()
    except Error as e:
        logger.error(f'There was an error selecting matches from match_history: {e}')
        return 0
    logger.debug(replace_char_list(sql, params))
    logger.debug(f'lastrowid: {cursor.lastrowid}')
    logger.debug(f'results: {row}')
    return row if row is not None else 0
# TODO Update function to use player2_id for simplified functions
def get_match_history_info_all(connection: sqlite3.Connection, player_id: int, player2_id: int = None):
    """Return every ``matches_history`` row involving *player_id*, or 0 on
    error.

    *player2_id* is currently unused (see the TODO above); it is kept for
    forward compatibility of the call signature.
    """
    logger.debug(f'get_match_history_info_all: {player_id}')
    sql = "SELECT * FROM matches_history WHERE participant1=? OR participant2=?"
    params = [player_id, player_id]
    try:
        cursor = connection.cursor()
        cursor.execute(sql, params)
        rows = cursor.fetchall()
    except Error as e:
        logger.error(f'There was an error selecting all matches from match_history: {e}')
        return 0
    logger.debug(replace_char_list(sql, params))
    logger.debug(f'lastrowid: {cursor.lastrowid}')
    logger.debug(f'results: {rows}')
    return rows if rows is not None else 0
def get_player_id(connection: sqlite3.Connection, uuid: int, server_id: int) -> int:
    """Look up a player's internal id by (uuid, server_id).

    Returns the id column (first column of the users row), or 0 when the
    player is not found or the query fails.
    """
    logger.debug(f'get_player_id: {uuid}, {server_id}')
    query = "SELECT * FROM users WHERE uuid=? AND server_id=?"
    query_param = [uuid, server_id]
    try:
        cur = connection.cursor()
        cur.execute(query, query_param)
        results = cur.fetchone()
        logger.debug(replace_char_list(query, query_param))
        logger.debug(f'lastrowid: {cur.lastrowid}')
        logger.debug(f'results: {results}')
        if results is not None:
            return results[0]
        else:
            return 0
    except Error as e:
        logger.error(f'There was an error selecting player_id in users: {e}')
        return 0
def get_player_id_by_username(connection: sqlite3.Connection, nickname: str) -> int:
    """Look up a player's internal id by nickname.

    Returns the id column (first column of the users row), or 0 when no user
    has that nickname or the query fails.
    """
    logger.debug(f'get_player_id_by_username: {nickname}')
    query = "SELECT * FROM users WHERE nickname=?"
    query_param = [nickname]
    try:
        cur = connection.cursor()
        cur.execute(query, query_param)
        results = cur.fetchone()
        logger.debug(replace_char_list(query, query_param))
        logger.debug(f'lastrowid: {cur.lastrowid}')
        logger.debug(f'results: {results}')
        if results is not None:
            return results[0]
        else:
            return 0
    except Error as e:
        logger.error(f'There was an error selecting player_id in users: {e}')
        return 0
def get_player_info(connection: sqlite3.Connection, player_id: int) -> Union[tuple, int]:
    """Fetch the full users row for player_id.

    Returns the row tuple, or 0 when the player is not found or the query
    fails — callers must check before indexing the result.
    """
    logger.debug(f'get_player_info: {player_id}')
    query = "SELECT * FROM users WHERE id=?"
    query_param = [player_id]
    try:
        cur = connection.cursor()
        cur.execute(query, query_param)
        results = cur.fetchone()
        logger.debug(replace_char_list(query, query_param))
        logger.debug(f'lastrowid: {cur.lastrowid}')
        logger.debug(f'results: {results}')
        if results is not None:
            return results
        else:
            return 0
    except Error as e:
        logger.error(f'There was an error selecting player_info in users: {e}')
        return 0
def get_player_wins(connection: sqlite3.Connection, player_id: int) -> int:
    """Return the win count for player_id, or 0 if the player is unknown."""
    logger.debug(f'get_player_wins: {player_id}')
    player_info = get_player_info(connection, player_id)
    # get_player_info returns the int 0 on a miss or query error; indexing
    # that would raise TypeError, so guard before taking column 4 (wins).
    if not player_info:
        return 0
    return player_info[4]
def get_player_loses(connection: sqlite3.Connection, player_id: int) -> int:
    """Return the loss count for player_id, or 0 if the player is unknown."""
    logger.debug(f'get_player_loses: {player_id}')
    player_info = get_player_info(connection, player_id)
    # get_player_info returns the int 0 on a miss or query error; indexing
    # that would raise TypeError, so guard before taking column 5 (loses).
    if not player_info:
        return 0
    return player_info[5]
def get_player_info_all_by_server(connection: sqlite3.Connection, server_id: int):
    """Fetch every users row registered under server_id.

    Returns a list of row tuples (possibly empty), or 0 on query error.
    """
    logger.debug(f'get_player_info_all_by_server: {server_id}')
    query = "SELECT * FROM users WHERE server_id=?"
    query_param = [server_id]
    try:
        cur = connection.cursor()
        cur.execute(query, query_param)
        results = cur.fetchall()
        logger.debug(replace_char_list(query, query_param))
        logger.debug(f'lastrowid: {cur.lastrowid}')
        logger.debug(f'results: {results}')
        # fetchall() returns a list (never None), so the 0 fallback below is
        # effectively only reachable through the except path.
        if results is not None:
            return results
        else:
            return 0
    except Error as e:
        logger.error(f'There was an error selecting all player_info in users: {e}')
        return 0
def get_marble_count(connection: sqlite3.Connection, player_id: int) -> int:
    """Return the marble balance (users column 3) for player_id.

    Returns 0 when the player is not found or the query fails.
    """
    logger.debug(f'get_marble_count: {player_id}')
    query = "SELECT * FROM users WHERE id=?"
    query_param = [player_id]
    try:
        cur = connection.cursor()
        cur.execute(query, query_param)
        results = cur.fetchone()
        logger.debug(replace_char_list(query, query_param))
        logger.debug(f'lastrowid: {cur.lastrowid}')
        logger.debug(f'results: {results}')
        if results is not None:
            # Column 3 is `marbles` (see the create_user insert order).
            return results[3]
        else:
            return 0
    except Error as e:
        logger.error(f'There was an error selecting marbles from users: {e}')
        return 0
def get_bet_info(connection: sqlite3.Connection, bet_id: int) -> Union[tuple, int]:
    """Fetch the bets row with the given id.

    Returns the row tuple, or 0 when no row matches or the query fails.
    """
    logger.debug(f'get_bet_info: {bet_id}')
    query = "SELECT * FROM bets WHERE id=?"
    query_param = [bet_id]
    try:
        cur = connection.cursor()
        cur.execute(query, query_param)
        results = cur.fetchone()
        logger.debug(replace_char_list(query, query_param))
        logger.debug(f'lastrowid: {cur.lastrowid}')
        logger.debug(f'results: {results}')
        if results is not None:
            return results
        else:
            return 0
    except Error as e:
        logger.error(f'There was an error selecting bet_info from bets: {e}')
        return 0
def get_bet_info_all(connection: sqlite3.Connection, player_id: int):
    """Fetch every bets row placed by player_id (better_id column).

    Returns a list of row tuples (possibly empty), or 0 on query error.
    """
    logger.debug(f'get_bet_info_all: {player_id}')
    query = "SELECT * FROM bets WHERE better_id=?"
    query_param = [player_id]
    try:
        cur = connection.cursor()
        cur.execute(query, query_param)
        results = cur.fetchall()
        logger.debug(replace_char_list(query, query_param))
        logger.debug(f'lastrowid: {cur.lastrowid}')
        logger.debug(f'results: {results}')
        if results is not None:
            return results
        else:
            return 0
    except Error as e:
        logger.error(f'There was an error selecting all bet_info from bets: {e}')
        return 0
def get_bet_info_match_all(connection: sqlite3.Connection, match_id: int):
    """Fetch every bets row placed on match_id.

    Returns a list of row tuples (possibly empty), or 0 on query error.
    """
    # Log label fixed: this previously logged as 'get_bet_info_all', making
    # traces indistinguishable from the better_id variant.
    logger.debug(f'get_bet_info_match_all: {match_id}')
    query = "SELECT * FROM bets WHERE match_id=?"
    query_param = [match_id]
    try:
        cur = connection.cursor()
        cur.execute(query, query_param)
        results = cur.fetchall()
        logger.debug(replace_char_list(query, query_param))
        logger.debug(f'lastrowid: {cur.lastrowid}')
        logger.debug(f'results: {results}')
        if results is not None:
            return results
        else:
            return 0
    except Error as e:
        logger.error(f'There was an error selecting all bet_info from bets: {e}')
        return 0
def get_bet_history_info(connection: sqlite3.Connection, bet_id: int) -> Union[tuple, int]:
    """Fetch the bets_history row with the given id.

    Returns the row tuple, or 0 when no row matches or the query fails.
    """
    logger.debug(f'get_bet_history_info: {bet_id}')
    query = "SELECT * FROM bets_history WHERE id=?"
    query_param = [bet_id]
    try:
        cur = connection.cursor()
        cur.execute(query, query_param)
        results = cur.fetchone()
        logger.debug(replace_char_list(query, query_param))
        logger.debug(f'lastrowid: {cur.lastrowid}')
        logger.debug(f'results: {results}')
        if results is not None:
            return results
        else:
            return 0
    except Error as e:
        logger.error(f'There was an error selecting bet_history_info from bets_history: {e}')
        return 0
def get_bet_history_info_all(connection: sqlite3.Connection, better_id: int):
    """Fetch every bets_history row placed by better_id.

    Returns a list of row tuples (possibly empty), or 0 on query error.
    """
    logger.debug(f'get_bet_history_info_all: {better_id}')
    query = "SELECT * FROM bets_history WHERE better_id=?"
    query_param = [better_id]
    try:
        cur = connection.cursor()
        cur.execute(query, query_param)
        results = cur.fetchall()
        logger.debug(replace_char_list(query, query_param))
        logger.debug(f'lastrowid: {cur.lastrowid}')
        logger.debug(f'results: {results}')
        if results is not None:
            return results
        else:
            return 0
    except Error as e:
        logger.error(f'There was an error selecting all bet_history_info from bet_history: {e}')
        return 0
def find_match_by_player_id(connection: sqlite3.Connection, player_id: int) -> int:
    """Return the id of the first active matches row involving player_id.

    Returns 0 when the player has no match or the query fails.
    """
    logger.debug(f'find_match_by_player_id: {player_id}')
    query = "SELECT * FROM matches WHERE participant1=? OR participant2=?"
    query_param = [player_id, player_id]
    try:
        cur = connection.cursor()
        cur.execute(query, query_param)
        # fetchone: only the first matching row's id is returned.
        results = cur.fetchone()
        logger.debug(replace_char_list(query, query_param))
        logger.debug(f'lastrowid: {cur.lastrowid}')
        logger.debug(f'results: {results}')
        if results is not None:
            return results[0]
        else:
            return 0
    except Error as e:
        logger.error(f'There was an error selecting match_id from matches: {e}')
        return 0
def find_bet(connection: sqlite3.Connection, match_id: int, better_id: int) -> int:
    """Return the id of the bet better_id placed on match_id.

    Returns 0 when no such bet exists or the query fails.
    """
    logger.debug(f'find_bet: {match_id}, {better_id}')
    query = "SELECT * FROM bets WHERE match_id=? AND better_id=?"
    query_param = [match_id, better_id]
    try:
        cur = connection.cursor()
        cur.execute(query, query_param)
        results = cur.fetchone()
        logger.debug(replace_char_list(query, query_param))
        logger.debug(f'lastrowid: {cur.lastrowid}')
        logger.debug(f'results: {results}')
        if results is not None:
            return results[0]
        else:
            return 0
    except Error as e:
        logger.error(f'There was an error selecting bet from bets: {e}')
        return 0
def delete_match(connection: sqlite3.Connection, match_id: int) -> bool:
    """Delete the matches row with the given id; True on success."""
    logger.debug(f'delete_match: {match_id}')
    query = "DELETE FROM matches WHERE id=?"
    query_param = [match_id]
    try:
        cursor = connection.cursor()
        cursor.execute(query, query_param)
        connection.commit()
        logger.debug(replace_char_list(query, query_param))
        logger.debug(f'lastrowid: {cursor.lastrowid}')
        logger.debug(f'match deleted')
        return True
    except Error as e:
        logger.error(f'There was an error deleting match from matches: {e}')
        return False
def delete_bet(connection: sqlite3.Connection, bet_id: int) -> bool:
    """Delete the bets row with the given id; True on success.

    Note: does not refund marbles — see delete_bet_by_match_id for that.
    """
    logger.debug(f'delete_bet: {bet_id}')
    query = "DELETE FROM bets WHERE id=?"
    query_param = [bet_id]
    try:
        cur = connection.cursor()
        cur.execute(query, query_param)
        connection.commit()
        logger.debug(replace_char_list(query, query_param))
        logger.debug(f'lastrowid: {cur.lastrowid}')
        logger.debug(f'bet deleted')
        return True
    except Error as e:
        logger.error(f'There was an error deleting bet from bets: {e}')
        return False
def delete_bet_by_match_id(connection: sqlite3.Connection, match_id: int):
    """Refund and delete every bet placed on match_id."""
    logger.debug(f'delete_bet_by_match_id: {match_id}')
    bets = get_bet_info_match_all(connection, match_id)
    # get_bet_info_match_all returns the int 0 on a query error; iterating
    # that would raise TypeError, so bail out when there is nothing to do.
    if not bets:
        return
    for bet in bets:
        # Refund the wager: bet[3] is better_id, bet[1] is the amount
        # (see the create_bet insert order).
        add_marbles(connection, bet[3], bet[1])
        delete_bet(connection, bet[0])
def add_marbles(connection: sqlite3.Connection, player_id: int, marbles: int) -> bool:
    """Add marbles to player_id's balance; True if the update succeeded."""
    logger.debug(f'add_marbles: {player_id}, {marbles}')
    old_marbles = get_marble_count(connection, player_id)
    return update_marble_count(connection, player_id, old_marbles + marbles)
def add_player_win(connection: sqlite3.Connection, player_id: int, wins: int) -> bool:
    """Increment player_id's win count by wins; True on success."""
    logger.debug(f'add_player_win: {player_id}, {wins}')
    player_wins = get_player_wins(connection, player_id)
    return update_player_wins(connection, player_id, player_wins+wins)
def add_player_loses(connection: sqlite3.Connection, player_id: int, loses: int) -> bool:
    """Increment player_id's loss count by loses; True on success."""
    logger.debug(f'add_player_loses: {player_id}, {loses}')
    player_loses = get_player_loses(connection, player_id)
    return update_player_loses(connection, player_id, player_loses+loses)
def subtract_marbles(connection: sqlite3.Connection, player_id: int, marbles: int) -> bool:
    """Remove marbles from player_id's balance, clamping the result at zero."""
    logger.debug(f'subtract_marbles: {player_id}, {marbles}')
    current = get_marble_count(connection, player_id)
    # Balances never go negative: floor the new total at 0.
    new_total = max(current - marbles, 0)
    return update_marble_count(connection, player_id, new_total)
def transfer_marbles(connection: sqlite3.Connection, player_id1: int, player_id2: int, marbles: int) -> bool:
    """Move marbles from player_id1 to player_id2.

    Returns False when the sender cannot cover the amount or either update
    fails; True only when both the debit and the credit succeed.
    """
    logger.debug(f'transfer_marbles: {player_id1}, {player_id2}, {marbles}')
    if get_marble_count(connection, player_id1) < marbles:
        return False
    # Short-circuit: the credit is only attempted when the debit succeeded.
    return subtract_marbles(connection, player_id1, marbles) and add_marbles(connection, player_id2, marbles)
def is_bet_win(connection: sqlite3.Connection, bet_id: int, winner_id: int) -> bool:
    """Return True when the bet backed the match winner."""
    logger.debug(f'is_bet_win: {bet_id}, {winner_id}')
    bet_info = get_bet_info(connection, bet_id)
    logger.debug(f'bet_info: {bet_info}')
    # get_bet_info returns the int 0 when the bet is missing or the query
    # fails; indexing that would raise TypeError.
    if not bet_info:
        return False
    # Column 4 is participant1: the participant the bet was placed on.
    return bet_info[4] == winner_id

# NOTE(review): stray import retained from the original mangled line; the
# second copy of this module begins here.
import sqlite3
import datetime
import logging
from typing import Union
from sqlite3 import Error
logger = logging.getLogger('marble_match.' + __name__)
def replace_char_list(_old: str, _replacement: list, _replace: str = '?') -> str:
    """Substitute each placeholder in _old, left to right, with the
    string form of the corresponding value in _replacement.

    Used to render parameterized SQL for debug logging.
    """
    rendered = _old
    for value in _replacement:
        rendered = rendered.replace(_replace, str(value), 1)
    return rendered
def create_con(path: str) -> sqlite3.Connection:
    """Open (or create) the sqlite database at path and return a connection.

    PARSE_DECLTYPES | PARSE_COLNAMES enables round-tripping of declared
    column types such as timestamps. Re-raises the sqlite3 Error on failure.
    """
    logger.debug(f'create_connection: {path}')
    try:
        con = sqlite3.connect(path, detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES)
        logger.debug(f'connection created: {con}')
        return con
    except Error as e:
        logger.error(f'Failed to create connection: {e}')
        raise e
def create_user(connection: sqlite3.Connection, player_id: Union[int, None],
                uuid: int, nickname: str, marbles: int, server_id: int,
                wins: int = 0, loses: int = 0) -> int:
    """Insert a row into users and return its rowid (0 on failure).

    player_id may be None to let sqlite assign the id. Column order is
    positional: (id, uuid, nickname, marbles, server_id, wins, loses).
    """
    logger.debug(f'create_user: {player_id}, {uuid}, {nickname}, {marbles}, {server_id}, {wins}, {loses}')
    query = "INSERT INTO users VALUES (?, ?, ?, ?, ?, ?, ?)"
    query_param = [player_id, uuid, nickname, marbles, server_id, wins, loses]
    try:
        cur = connection.cursor()
        cur.execute(query, query_param)
        connection.commit()
        logger.debug(replace_char_list(query, query_param))
        logger.debug(f'lastrowid: {cur.lastrowid}')
        return cur.lastrowid
    except Error as e:
        logger.error(f'There was an error inserting a user into users: {e}')
        return 0
# TODO Update references to new parameters
def create_match(connection: sqlite3.Connection, match_id: Union[int, None], amount: int,
                 participant1: int, participant2: int, active: int = 0, accepted: int = 0,
                 game: str = 'melee', format: str = 'Bo3') -> int:
    """Insert a row into matches and return its rowid (0 on failure).

    match_id may be None to let sqlite assign the id. Note the insert order
    places `active` third, between amount and the participants. `format`
    shadows the builtin but is part of the public signature.
    """
    logger.debug(f'create_match: {match_id}, {amount}, {participant1}, {participant2}, {active}, {accepted},'
                 f'{game}, {format}')
    query = "INSERT INTO matches VALUES (?, ?, ?, ?, ?, ?, ?, ?)"
    query_param = [match_id, amount, active, participant1, participant2, accepted, game, format]
    try:
        cur = connection.cursor()
        cur.execute(query, query_param)
        connection.commit()
        logger.debug(replace_char_list(query, query_param))
        logger.debug(f'lastrowid: {cur.lastrowid}')
        return cur.lastrowid
    except Error as e:
        logger.error(f'There was an error inserting a match into matches: {e}')
        return 0
def create_bet(connection: sqlite3.Connection, bet_id: Union[int, None], amount: int, match_id: int, better_id: int,
               participant1: int) -> int:
    """Insert a row into bets and return its rowid (0 on failure).

    Side effect: on a successful insert the wagered amount is immediately
    deducted from the better's marble balance via subtract_marbles.
    Column order: (id, amount, match_id, better_id, participant1).
    """
    logger.debug(f'create_bet: {bet_id}, {amount}, {match_id}, {better_id}, {participant1}')
    query = "INSERT INTO bets VALUES (?, ?, ?, ?, ?)"
    query_param = [bet_id, amount, match_id, better_id, participant1]
    try:
        cur = connection.cursor()
        cur.execute(query, query_param)
        connection.commit()
        logger.debug(replace_char_list(query, query_param))
        logger.debug(f'lastrowid: {cur.lastrowid}')
        subtract_marbles(connection, better_id, amount)
        logger.debug(f'subtract_marbles called')
        return cur.lastrowid
    except Error as e:
        logger.error(f'There was an error inserting a bet into bets: {e}')
        return 0
# TODO Update references with new parameters
def create_match_history(connection: sqlite3.Connection, match_id: Union[int, None], amount: int,
                         participant1: int, participant2: int, winner_id: int,
                         time: Union[datetime.datetime, None] = None,
                         game: str = 'melee', format: str = 'Bo3') -> int:
    """Insert a row into matches_history and return its rowid (0 on failure).

    time defaults to the current UTC time *at call time*. The old default
    `time=datetime.datetime.utcnow()` was evaluated once at import, stamping
    every defaulted call with the module-load timestamp.
    """
    if time is None:
        time = datetime.datetime.utcnow()
    logger.debug(f'create_match_history: {match_id}, {amount}, {participant1}, {participant2}, {winner_id}, {time},'
                 f'{game}, {format}')
    query = "INSERT INTO matches_history VALUES (?, ?, ?, ?, ?, ?, ?, ?)"
    query_param = [match_id, amount, participant1, participant2, winner_id, time, game, format]
    try:
        cur = connection.cursor()
        cur.execute(query, query_param)
        connection.commit()
        logger.debug(replace_char_list(query, query_param))
        logger.debug(f'lastrowid: {cur.lastrowid}')
        return cur.lastrowid
    except Error as e:
        logger.error(f'There was an error inserting a match into match_history: {e}')
        return 0
def create_bet_history(connection: sqlite3.Connection, bet_id: Union[int, None], amount: int, match_id: int,
                       better_id: int, participant1: int, winner_id: int,
                       time: Union[datetime.datetime, None] = None) -> int:
    """Insert a row into bets_history and return its rowid (0 on failure).

    time defaults to the current UTC time *at call time* — the previous
    `utcnow()` default was evaluated once at import and therefore frozen.
    """
    if time is None:
        time = datetime.datetime.utcnow()
    logger.debug(f'create_bet_history: '
                 f'{bet_id}, {amount}, {match_id}, {better_id}, {participant1}, {winner_id}, {time}')
    query = "INSERT INTO bets_history VALUES (?, ?, ?, ?, ?, ?, ?)"
    query_param = [bet_id, amount, match_id, better_id, participant1, winner_id, time]
    try:
        cur = connection.cursor()
        cur.execute(query, query_param)
        connection.commit()
        logger.debug(replace_char_list(query, query_param))
        logger.debug(f'lastrowid: {cur.lastrowid}')
        return cur.lastrowid
    except Error as e:
        logger.error(f'There was an error inserting a bet into bet_history: {e}')
        return 0
def create_friendly(connection: sqlite3.Connection, player_id: int,
                    time: Union[datetime.datetime, None] = None) -> int:
    """Insert a friendly-cooldown row for player_id; returns its rowid (0 on failure).

    time defaults to the current UTC time *at call time* — the previous
    `utcnow()` default was evaluated once at import and therefore frozen.
    """
    if time is None:
        time = datetime.datetime.utcnow()
    logger.debug(f'create_user: {player_id}, {time}')
    query = "INSERT INTO friendly VALUES (?, ?)"
    query_param = [player_id, time]
    try:
        cur = connection.cursor()
        cur.execute(query, query_param)
        connection.commit()
        logger.debug(replace_char_list(query, query_param))
        logger.debug(f'lastrowid: {cur.lastrowid}')
        return cur.lastrowid
    except Error as e:
        logger.error(f'There was an error inserting a friendly into friendly: {e}')
        return 0
def update_friendly(connection: sqlite3.Connection, player_id: int,
                    time: Union[datetime.datetime, None] = None) -> bool:
    """Set the friendly cooldown's last_used for player_id; True on success.

    time defaults to the current UTC time *at call time* — the previous
    `utcnow()` default was evaluated once at import and therefore frozen.
    """
    if time is None:
        time = datetime.datetime.utcnow()
    logger.debug(f'update_friendly: {player_id}, {time}')
    query = "UPDATE friendly SET last_used = ? WHERE id = ?"
    query_param = [time, player_id]
    try:
        cur = connection.cursor()
        cur.execute(query, query_param)
        connection.commit()
        logger.debug(replace_char_list(query, query_param))
        logger.debug(f'lastrowid: {cur.lastrowid}')
        return True
    except Error as e:
        logger.error(f'There was an error updating last_used in friendly: {e}')
        return False
def update_match_activity(connection: sqlite3.Connection, match_id: int, active: int = 1) -> bool:
    """Set the matches.active flag for match_id; True on success."""
    logger.debug(f'update_match_activity: {match_id}, {active}')
    query = "UPDATE matches SET active = ? WHERE id = ?"
    query_param = [active, match_id]
    try:
        cur = connection.cursor()
        cur.execute(query, query_param)
        connection.commit()
        logger.debug(replace_char_list(query, query_param))
        logger.debug(f'lastrowid: {cur.lastrowid}')
        return True
    except Error as e:
        logger.error(f'There was an error updating activity in matches: {e}')
        return False
def update_match_accepted(connection: sqlite3.Connection, match_id: int, accepted: int = 1) -> bool:
    """Set the matches.accepted flag for match_id; True on success."""
    logger.debug(f'update_match_accepted: {match_id}, {accepted}')
    query = "UPDATE matches SET accepted = ? WHERE id = ?"
    query_param = [accepted, match_id]
    try:
        cur = connection.cursor()
        cur.execute(query, query_param)
        connection.commit()
        logger.debug(replace_char_list(query, query_param))
        logger.debug(f'lastrowid: {cur.lastrowid}')
        return True
    except Error as e:
        logger.error(f'There was an error updating accepted in matches: {e}')
        return False
def update_marble_count(connection: sqlite3.Connection, player_id: int, marbles: int) -> bool:
    """Overwrite player_id's marble balance with marbles; True on success."""
    logger.debug(f'update_marble_count: {player_id}, {marbles}')
    query = "UPDATE users SET marbles = ? WHERE id = ?"
    query_param = [marbles, player_id]
    try:
        cur = connection.cursor()
        cur.execute(query, query_param)
        connection.commit()
        logger.debug(replace_char_list(query, query_param))
        logger.debug(f'lastrowid: {cur.lastrowid}')
        return True
    except Error as e:
        logger.error(f'There was an error updating marbles in user: {e}')
        return False
def update_player_nickname(connection: sqlite3.Connection, player_id: int, nickname: str) -> bool:
    """Overwrite player_id's nickname; True on success."""
    logger.debug(f'update_player_nickname: {player_id}, {nickname}')
    query = "UPDATE users SET nickname = ? WHERE id = ?"
    query_param = [nickname, player_id]
    try:
        cur = connection.cursor()
        cur.execute(query, query_param)
        connection.commit()
        logger.debug(replace_char_list(query, query_param))
        logger.debug(f'lastrowid: {cur.lastrowid}')
        return True
    except Error as e:
        # Error message fixed: it previously said "updating wins", copied
        # from update_player_wins.
        logger.error(f'There was an error updating nickname in user: {e}')
        return False
def update_player_wins(connection: sqlite3.Connection, player_id: int, wins: int) -> bool:
    """Overwrite player_id's win count with wins; True on success."""
    logger.debug(f'update_player_wins: {player_id}, {wins}')
    query = "UPDATE users SET wins = ? WHERE id = ?"
    query_param = [wins, player_id]
    try:
        cur = connection.cursor()
        cur.execute(query, query_param)
        connection.commit()
        logger.debug(replace_char_list(query, query_param))
        logger.debug(f'lastrowid: {cur.lastrowid}')
        return True
    except Error as e:
        logger.error(f'There was an error updating wins in user: {e}')
        return False
def update_player_loses(connection: sqlite3.Connection, player_id: int, loses: int) -> bool:
    """Overwrite player_id's loss count with loses; True on success."""
    logger.debug(f'update_player_loses: {player_id}, {loses}')
    query = "UPDATE users SET loses = ? WHERE id = ?"
    query_param = [loses, player_id]
    try:
        cur = connection.cursor()
        cur.execute(query, query_param)
        connection.commit()
        logger.debug(replace_char_list(query, query_param))
        logger.debug(f'lastrowid: {cur.lastrowid}')
        return True
    except Error as e:
        logger.error(f'There was an error updating loses in user: {e}')
        return False
def update_bet(connection: sqlite3.Connection, bet_id: int, player_id: int, amount: int) -> bool:
    """Change a bet's wagered amount and backed participant; True on success.

    Note: does not adjust the better's marble balance for the new amount.
    """
    logger.debug(f'update_bet: {bet_id}, {player_id}, {amount}')
    query = "UPDATE bets SET amount=?, participant1=? WHERE id=?"
    query_param = [amount, player_id, bet_id]
    try:
        cur = connection.cursor()
        cur.execute(query, query_param)
        connection.commit()
        logger.debug(replace_char_list(query, query_param))
        logger.debug(f'lastrowid: {cur.lastrowid}')
        return True
    except Error as e:
        logger.error(f'There was an error updating bet in bets: {e}')
        return False
def get_friendly_last_used(connection: sqlite3.Connection, player_id: int) -> Union[datetime.datetime, int]:
    """Return the last_used timestamp of player_id's friendly cooldown.

    Returns 0 when no row exists or the query fails. The timestamp comes
    back as datetime because the connection is opened with PARSE_DECLTYPES.
    """
    logger.debug(f'get_friendly_last_used: {player_id}')
    query = "SELECT * FROM friendly WHERE id=?"
    query_param = [player_id]
    try:
        cur = connection.cursor()
        cur.execute(query, query_param)
        results = cur.fetchone()
        logger.debug(replace_char_list(query, query_param))
        logger.debug(f'lastrowid: {cur.lastrowid}')
        logger.debug(f'results: {results}')
        if results is not None:
            # Column 1 is last_used (see create_friendly's insert order).
            return results[1]
        else:
            return 0
    except Error as e:
        logger.error(f'There was an error selecting a friendly from friendly: {e}')
        return 0
def get_match_info_by_id(connection: sqlite3.Connection, match_id: int) -> Union[tuple, int]:
    """Fetch the matches row with the given id.

    Returns the row tuple, or 0 when no row matches or the query fails.
    """
    logger.debug(f'get_match_info_by_id: {match_id}')
    query = "SELECT * FROM matches WHERE id=?"
    query_param = [match_id]
    try:
        cur = connection.cursor()
        cur.execute(query, query_param)
        results = cur.fetchone()
        logger.debug(replace_char_list(query, query_param))
        logger.debug(f'lastrowid: {cur.lastrowid}')
        logger.debug(f'results: {results}')
        if results is not None:
            return results
        else:
            return 0
    except Error as e:
        logger.error(f'There was an error selecting a match from matches: {e}')
        return 0
def get_match_info_all(connection: sqlite3.Connection, player_id: int, player2_id: int = None):
    """Fetch every matches row involving player_id.

    When player2_id is given, only matches between the two players (in either
    participant slot) are returned — this implements the old TODO.
    Returns a list of row tuples (possibly empty), or 0 on query error.
    """
    logger.debug(f'get_match_info_all: {player_id}, {player2_id}')
    if player2_id is None:
        query = "SELECT * FROM matches WHERE participant1=? OR participant2=?"
        query_param = [player_id, player_id]
    else:
        query = ("SELECT * FROM matches WHERE (participant1=? AND participant2=?)"
                 " OR (participant1=? AND participant2=?)")
        query_param = [player_id, player2_id, player2_id, player_id]
    try:
        cur = connection.cursor()
        cur.execute(query, query_param)
        results = cur.fetchall()
        logger.debug(replace_char_list(query, query_param))
        logger.debug(f'lastrowid: {cur.lastrowid}')
        logger.debug(f'results: {results}')
        if results is not None:
            return results
        else:
            return 0
    except Error as e:
        logger.error(f'There was an error selecting all matches from matches: {e}')
        return 0
def get_match_history_info(connection: sqlite3.Connection, match_id: int) -> Union[tuple, int]:
    """Fetch the matches_history row with the given id.

    Returns the full row tuple, or 0 when no row matches or the query fails.
    """
    logger.debug(f'get_match_history_info: {match_id}')
    query = "SELECT * FROM matches_history WHERE id=?"
    query_param = [match_id]
    try:
        cur = connection.cursor()
        cur.execute(query, query_param)
        results = cur.fetchone()
        # Log the query with placeholders substituted, for traceability.
        logger.debug(replace_char_list(query, query_param))
        logger.debug(f'lastrowid: {cur.lastrowid}')
        logger.debug(f'results: {results}')
        if results is not None:
            return results
        else:
            return 0
    except Error as e:
        logger.error(f'There was an error selecting matches from match_history: {e}')
        return 0
def get_match_history_info_all(connection: sqlite3.Connection, player_id: int, player2_id: int = None):
    """Fetch every matches_history row involving player_id.

    When player2_id is given, only matches between the two players (in either
    participant slot) are returned; previously the parameter was accepted but
    silently ignored. Returns a list of row tuples (possibly empty), or 0 on
    query error.
    """
    logger.debug(f'get_match_history_info_all: {player_id}, {player2_id}')
    if player2_id is None:
        query = "SELECT * FROM matches_history WHERE participant1=? OR participant2=?"
        query_param = [player_id, player_id]
    else:
        query = ("SELECT * FROM matches_history WHERE (participant1=? AND participant2=?)"
                 " OR (participant1=? AND participant2=?)")
        query_param = [player_id, player2_id, player2_id, player_id]
    try:
        cur = connection.cursor()
        cur.execute(query, query_param)
        results = cur.fetchall()
        logger.debug(replace_char_list(query, query_param))
        logger.debug(f'lastrowid: {cur.lastrowid}')
        logger.debug(f'results: {results}')
        if results is not None:
            return results
        else:
            return 0
    except Error as e:
        logger.error(f'There was an error selecting all matches from match_history: {e}')
        return 0
def get_player_id(connection: sqlite3.Connection, uuid: int, server_id: int) -> int:
    """Look up a player's internal id by (uuid, server_id).

    Returns the id column (first column of the users row), or 0 when the
    player is not found or the query fails.
    """
    logger.debug(f'get_player_id: {uuid}, {server_id}')
    query = "SELECT * FROM users WHERE uuid=? AND server_id=?"
    query_param = [uuid, server_id]
    try:
        cur = connection.cursor()
        cur.execute(query, query_param)
        results = cur.fetchone()
        logger.debug(replace_char_list(query, query_param))
        logger.debug(f'lastrowid: {cur.lastrowid}')
        logger.debug(f'results: {results}')
        if results is not None:
            return results[0]
        else:
            return 0
    except Error as e:
        logger.error(f'There was an error selecting player_id in users: {e}')
        return 0
def get_player_id_by_username(connection: sqlite3.Connection, nickname: str) -> int:
    """Look up a player's internal id by nickname.

    Returns the id column (first column of the users row), or 0 when no user
    has that nickname or the query fails.
    """
    logger.debug(f'get_player_id_by_username: {nickname}')
    query = "SELECT * FROM users WHERE nickname=?"
    query_param = [nickname]
    try:
        cur = connection.cursor()
        cur.execute(query, query_param)
        results = cur.fetchone()
        logger.debug(replace_char_list(query, query_param))
        logger.debug(f'lastrowid: {cur.lastrowid}')
        logger.debug(f'results: {results}')
        if results is not None:
            return results[0]
        else:
            return 0
    except Error as e:
        logger.error(f'There was an error selecting player_id in users: {e}')
        return 0
def get_player_info(connection: sqlite3.Connection, player_id: int) -> Union[tuple, int]:
    """Fetch the full users row for player_id.

    Returns the row tuple, or 0 when the player is not found or the query
    fails — callers must check before indexing the result.
    """
    logger.debug(f'get_player_info: {player_id}')
    query = "SELECT * FROM users WHERE id=?"
    query_param = [player_id]
    try:
        cur = connection.cursor()
        cur.execute(query, query_param)
        results = cur.fetchone()
        logger.debug(replace_char_list(query, query_param))
        logger.debug(f'lastrowid: {cur.lastrowid}')
        logger.debug(f'results: {results}')
        if results is not None:
            return results
        else:
            return 0
    except Error as e:
        logger.error(f'There was an error selecting player_info in users: {e}')
        return 0
def get_player_wins(connection: sqlite3.Connection, player_id: int) -> int:
    """Return the win count for player_id, or 0 if the player is unknown."""
    logger.debug(f'get_player_wins: {player_id}')
    player_info = get_player_info(connection, player_id)
    # get_player_info returns the int 0 on a miss or query error; indexing
    # that would raise TypeError, so guard before taking column 4 (wins).
    if not player_info:
        return 0
    return player_info[4]
def get_player_loses(connection: sqlite3.Connection, player_id: int) -> int:
    """Return the loss count for player_id, or 0 if the player is unknown."""
    logger.debug(f'get_player_loses: {player_id}')
    player_info = get_player_info(connection, player_id)
    # get_player_info returns the int 0 on a miss or query error; indexing
    # that would raise TypeError, so guard before taking column 5 (loses).
    if not player_info:
        return 0
    return player_info[5]
def get_player_info_all_by_server(connection: sqlite3.Connection, server_id: int):
    """Fetch every users row registered under server_id.

    Returns a list of row tuples (possibly empty), or 0 on query error.
    """
    logger.debug(f'get_player_info_all_by_server: {server_id}')
    query = "SELECT * FROM users WHERE server_id=?"
    query_param = [server_id]
    try:
        cur = connection.cursor()
        cur.execute(query, query_param)
        results = cur.fetchall()
        logger.debug(replace_char_list(query, query_param))
        logger.debug(f'lastrowid: {cur.lastrowid}')
        logger.debug(f'results: {results}')
        # fetchall() returns a list (never None), so the 0 fallback below is
        # effectively only reachable through the except path.
        if results is not None:
            return results
        else:
            return 0
    except Error as e:
        logger.error(f'There was an error selecting all player_info in users: {e}')
        return 0
def get_marble_count(connection: sqlite3.Connection, player_id: int) -> int:
    """Return the marble balance (users column 3) for player_id.

    Returns 0 when the player is not found or the query fails.
    """
    logger.debug(f'get_marble_count: {player_id}')
    query = "SELECT * FROM users WHERE id=?"
    query_param = [player_id]
    try:
        cur = connection.cursor()
        cur.execute(query, query_param)
        results = cur.fetchone()
        logger.debug(replace_char_list(query, query_param))
        logger.debug(f'lastrowid: {cur.lastrowid}')
        logger.debug(f'results: {results}')
        if results is not None:
            # Column 3 is `marbles` (see the create_user insert order).
            return results[3]
        else:
            return 0
    except Error as e:
        logger.error(f'There was an error selecting marbles from users: {e}')
        return 0
def get_bet_info(connection: sqlite3.Connection, bet_id: int) -> Union[tuple, int]:
    """Fetch the bets row with the given id.

    Returns the row tuple, or 0 when no row matches or the query fails.
    """
    logger.debug(f'get_bet_info: {bet_id}')
    query = "SELECT * FROM bets WHERE id=?"
    query_param = [bet_id]
    try:
        cur = connection.cursor()
        cur.execute(query, query_param)
        results = cur.fetchone()
        logger.debug(replace_char_list(query, query_param))
        logger.debug(f'lastrowid: {cur.lastrowid}')
        logger.debug(f'results: {results}')
        if results is not None:
            return results
        else:
            return 0
    except Error as e:
        logger.error(f'There was an error selecting bet_info from bets: {e}')
        return 0
def get_bet_info_all(connection: sqlite3.Connection, player_id: int):
    """Fetch every bets row placed by player_id (better_id column).

    Returns a list of row tuples (possibly empty), or 0 on query error.
    """
    logger.debug(f'get_bet_info_all: {player_id}')
    query = "SELECT * FROM bets WHERE better_id=?"
    query_param = [player_id]
    try:
        cur = connection.cursor()
        cur.execute(query, query_param)
        results = cur.fetchall()
        logger.debug(replace_char_list(query, query_param))
        logger.debug(f'lastrowid: {cur.lastrowid}')
        logger.debug(f'results: {results}')
        if results is not None:
            return results
        else:
            return 0
    except Error as e:
        logger.error(f'There was an error selecting all bet_info from bets: {e}')
        return 0
def get_bet_info_match_all(connection: sqlite3.Connection, match_id: int):
    """Fetch every bets row placed on match_id.

    Returns a list of row tuples (possibly empty), or 0 on query error.
    """
    # Log label fixed: this previously logged as 'get_bet_info_all', making
    # traces indistinguishable from the better_id variant.
    logger.debug(f'get_bet_info_match_all: {match_id}')
    query = "SELECT * FROM bets WHERE match_id=?"
    query_param = [match_id]
    try:
        cur = connection.cursor()
        cur.execute(query, query_param)
        results = cur.fetchall()
        logger.debug(replace_char_list(query, query_param))
        logger.debug(f'lastrowid: {cur.lastrowid}')
        logger.debug(f'results: {results}')
        if results is not None:
            return results
        else:
            return 0
    except Error as e:
        logger.error(f'There was an error selecting all bet_info from bets: {e}')
        return 0
def get_bet_history_info(connection: sqlite3.Connection, bet_id: int) -> Union[tuple, int]:
    """Fetch the bets_history row with the given id.

    Returns the row tuple, or 0 when no row matches or the query fails.
    """
    logger.debug(f'get_bet_history_info: {bet_id}')
    query = "SELECT * FROM bets_history WHERE id=?"
    query_param = [bet_id]
    try:
        cur = connection.cursor()
        cur.execute(query, query_param)
        results = cur.fetchone()
        logger.debug(replace_char_list(query, query_param))
        logger.debug(f'lastrowid: {cur.lastrowid}')
        logger.debug(f'results: {results}')
        if results is not None:
            return results
        else:
            return 0
    except Error as e:
        logger.error(f'There was an error selecting bet_history_info from bets_history: {e}')
        return 0
def get_bet_history_info_all(connection: sqlite3.Connection, better_id: int):
    """Fetch every bets_history row placed by better_id.

    Returns a list of row tuples (possibly empty), or 0 on query error.
    """
    logger.debug(f'get_bet_history_info_all: {better_id}')
    query = "SELECT * FROM bets_history WHERE better_id=?"
    query_param = [better_id]
    try:
        cur = connection.cursor()
        cur.execute(query, query_param)
        results = cur.fetchall()
        logger.debug(replace_char_list(query, query_param))
        logger.debug(f'lastrowid: {cur.lastrowid}')
        logger.debug(f'results: {results}')
        if results is not None:
            return results
        else:
            return 0
    except Error as e:
        logger.error(f'There was an error selecting all bet_history_info from bet_history: {e}')
        return 0
def find_match_by_player_id(connection: sqlite3.Connection, player_id: int) -> int:
    """Return the id of the first active matches row involving player_id.

    Returns 0 when the player has no match or the query fails.
    """
    logger.debug(f'find_match_by_player_id: {player_id}')
    query = "SELECT * FROM matches WHERE participant1=? OR participant2=?"
    query_param = [player_id, player_id]
    try:
        cur = connection.cursor()
        cur.execute(query, query_param)
        # fetchone: only the first matching row's id is returned.
        results = cur.fetchone()
        logger.debug(replace_char_list(query, query_param))
        logger.debug(f'lastrowid: {cur.lastrowid}')
        logger.debug(f'results: {results}')
        if results is not None:
            return results[0]
        else:
            return 0
    except Error as e:
        logger.error(f'There was an error selecting match_id from matches: {e}')
        return 0
def find_bet(connection: sqlite3.Connection, match_id: int, better_id: int) -> int:
    """Return the id of the bet better_id placed on match_id.

    Returns 0 when no such bet exists or the query fails.
    """
    logger.debug(f'find_bet: {match_id}, {better_id}')
    query = "SELECT * FROM bets WHERE match_id=? AND better_id=?"
    query_param = [match_id, better_id]
    try:
        cur = connection.cursor()
        cur.execute(query, query_param)
        results = cur.fetchone()
        logger.debug(replace_char_list(query, query_param))
        logger.debug(f'lastrowid: {cur.lastrowid}')
        logger.debug(f'results: {results}')
        if results is not None:
            return results[0]
        else:
            return 0
    except Error as e:
        logger.error(f'There was an error selecting bet from bets: {e}')
        return 0
def delete_match(connection: sqlite3.Connection, match_id: int) -> bool:
    """Delete the matches row with the given id; True on success."""
    logger.debug(f'delete_match: {match_id}')
    query = "DELETE FROM matches WHERE id=?"
    query_param = [match_id]
    try:
        cursor = connection.cursor()
        cursor.execute(query, query_param)
        connection.commit()
        logger.debug(replace_char_list(query, query_param))
        logger.debug(f'lastrowid: {cursor.lastrowid}')
        logger.debug(f'match deleted')
        return True
    except Error as e:
        logger.error(f'There was an error deleting match from matches: {e}')
        return False
def delete_bet(connection: sqlite3.Connection, bet_id: int) -> bool:
    """Delete the bets row with the given id; True on success.

    Note: does not refund marbles — see delete_bet_by_match_id for that.
    """
    logger.debug(f'delete_bet: {bet_id}')
    query = "DELETE FROM bets WHERE id=?"
    query_param = [bet_id]
    try:
        cur = connection.cursor()
        cur.execute(query, query_param)
        connection.commit()
        logger.debug(replace_char_list(query, query_param))
        logger.debug(f'lastrowid: {cur.lastrowid}')
        logger.debug(f'bet deleted')
        return True
    except Error as e:
        logger.error(f'There was an error deleting bet from bets: {e}')
        return False
def delete_bet_by_match_id(connection: sqlite3.Connection, match_id: int):
    """Refund and delete every bet placed on match_id."""
    logger.debug(f'delete_bet_by_match_id: {match_id}')
    bets = get_bet_info_match_all(connection, match_id)
    # get_bet_info_match_all returns the int 0 on a query error; iterating
    # that would raise TypeError, so bail out when there is nothing to do.
    if not bets:
        return
    for bet in bets:
        # Refund the wager: bet[3] is better_id, bet[1] is the amount
        # (see the create_bet insert order).
        add_marbles(connection, bet[3], bet[1])
        delete_bet(connection, bet[0])
def add_marbles(connection: sqlite3.Connection, player_id: int, marbles: int) -> bool:
    """Add marbles to player_id's balance; True if the update succeeded."""
    logger.debug(f'add_marbles: {player_id}, {marbles}')
    old_marbles = get_marble_count(connection, player_id)
    return update_marble_count(connection, player_id, old_marbles + marbles)
def add_player_win(connection: sqlite3.Connection, player_id: int, wins: int) -> bool:
logger.debug(f'add_player_win: {player_id}, {wins}')
player_wins = get_player_wins(connection, player_id)
return update_player_wins(connection, player_id, player_wins+wins)
def add_player_loses(connection: sqlite3.Connection, player_id: int, loses: int) -> bool:
logger.debug(f'add_player_loses: {player_id}, {loses}')
player_loses = get_player_loses(connection, player_id)
return update_player_loses(connection, player_id, player_loses+loses)
def subtract_marbles(connection: sqlite3.Connection, player_id: int, marbles: int) -> bool:
logger.debug(f'subtract_marbles: {player_id}, {marbles}')
old_marbles = get_marble_count(connection, player_id)
if old_marbles - marbles < 0:
return update_marble_count(connection, player_id, 0)
return update_marble_count(connection, player_id, old_marbles - marbles)
def transfer_marbles(connection: sqlite3.Connection, player_id1: int, player_id2: int, marbles: int) -> bool:
logger.debug(f'transfer_marbles: {player_id1}, {player_id2}, {marbles}')
player_marbles1 = get_marble_count(connection, player_id1)
if player_marbles1 < marbles:
return False
if subtract_marbles(connection, player_id1, marbles) and add_marbles(connection, player_id2, marbles):
return True
return False
def is_bet_win(connection: sqlite3.Connection, bet_id: int, winner_id: int) -> bool:
logger.debug(f'is_bet_win: {bet_id}, {winner_id}')
bet_info = get_bet_info(connection, bet_id)
logger.debug(f'bet_info: {bet_info}')
if bet_info[4] == winner_id:
return True
else:
return False | 0.389082 | 0.189352 |
from django.urls import path
from django.conf.urls import url
from . import views
urlpatterns = [
# ONBOARDING
path('', views.splash, name='splash'),
# location bot
path('location_bot', views.location_bot, name='location_bot'),
path('bot_api', views.bot_api, name='bot_api'),
# email collector api
path('email_collector', views.email_collector, name='email_collector'),
path('brand_collector', views.brand_collector, name='brand_collector'),
path('signin', views.signin, name='signin'),
path('signup', views.splash, name='signup'),
path('signup_two', views.signup, name='signup_two'),
path('contact_us', views.contact_us, name='contact_us'),
path('activate_account', views.activate_account, name='activate_account'),
path('forgot_password', views.forgot_password, name='forgot_password'),
path('send_password_link', views.send_password_link, name='send_password_link'),
path('faq', views.faq, name='faq'),
path('signout', views.signout, name='signout'),
path('home', views.dashboard, name='home'),
path('hub_home', views.hub_dashboard, name='hub_home'),
path('wallet', views.wallet, name='wallet'),
path('profile', views.profile, name='profile'),
path('notification', views.notification, name='notification'),
path('scanner', views.scanner, name='scanner'),
path('activate/<otp_code>', views.activate, name='activate'),
path('change_password/<uid>', views.change_password, name='change_password'),
# path('test', views.test, name='test'),
# APIS
path('signup_api', views.signup_api, name='signup_api'),
path('hub_confirmation', views.hub_confirmation, name='hub_confirmation'),
path('confirmation_api', views.confirmation_api, name='confirmation_api'),
path('signin_api', views.signin_api, name='signin_api'),
path('update_account', views.update_account, name='update_account'),
path('contact_api', views.contact_api, name='contact_api'),
path('update_password', views.update_password, name='update_password'),
path('update_bio', views.update_bio, name='update_bio'),
path('send_activation_link_api', views.send_activation_link_api, name='send_activation_link_api'),
path('change_password_api', views.change_password_api, name='change_password_api'),
# path('send_coins', views.send_coins, name='send_coins'), # send coins with filter for existing recycled items
path('send_coins', views.send_coins_no_filter, name='send_coins'), # send coins with no filter
] | mvp/urls.py | from django.urls import path
from django.conf.urls import url
from . import views
urlpatterns = [
# ONBOARDING
path('', views.splash, name='splash'),
# location bot
path('location_bot', views.location_bot, name='location_bot'),
path('bot_api', views.bot_api, name='bot_api'),
# email collector api
path('email_collector', views.email_collector, name='email_collector'),
path('brand_collector', views.brand_collector, name='brand_collector'),
path('signin', views.signin, name='signin'),
path('signup', views.splash, name='signup'),
path('signup_two', views.signup, name='signup_two'),
path('contact_us', views.contact_us, name='contact_us'),
path('activate_account', views.activate_account, name='activate_account'),
path('forgot_password', views.forgot_password, name='forgot_password'),
path('send_password_link', views.send_password_link, name='send_password_link'),
path('faq', views.faq, name='faq'),
path('signout', views.signout, name='signout'),
path('home', views.dashboard, name='home'),
path('hub_home', views.hub_dashboard, name='hub_home'),
path('wallet', views.wallet, name='wallet'),
path('profile', views.profile, name='profile'),
path('notification', views.notification, name='notification'),
path('scanner', views.scanner, name='scanner'),
path('activate/<otp_code>', views.activate, name='activate'),
path('change_password/<uid>', views.change_password, name='change_password'),
# path('test', views.test, name='test'),
# APIS
path('signup_api', views.signup_api, name='signup_api'),
path('hub_confirmation', views.hub_confirmation, name='hub_confirmation'),
path('confirmation_api', views.confirmation_api, name='confirmation_api'),
path('signin_api', views.signin_api, name='signin_api'),
path('update_account', views.update_account, name='update_account'),
path('contact_api', views.contact_api, name='contact_api'),
path('update_password', views.update_password, name='update_password'),
path('update_bio', views.update_bio, name='update_bio'),
path('send_activation_link_api', views.send_activation_link_api, name='send_activation_link_api'),
path('change_password_api', views.change_password_api, name='change_password_api'),
# path('send_coins', views.send_coins, name='send_coins'), # send coins with filter for existing recycled items
path('send_coins', views.send_coins_no_filter, name='send_coins'), # send coins with no filter
] | 0.315525 | 0.080864 |
import os
import re
from glob import glob
from typing import Callable, Iterable
from uuid import uuid4
from dotenv import load_dotenv
from orgparse import loads
from orgparse.node import OrgBaseNode
load_dotenv()
ORG_DIRECTORY = os.getenv("ORG_DIRECTORY")
ORGZLY_CUSTOM_ID_FILE = os.getenv("ORGZLY_CUSTOM_ID_FILE")
# Global variables specifying what whe mean when we say directorypath, orgfile, linkname, ...
# ext4 allows every character except / and NULL to be part of a directory or filename
directorypath_regex = r"([^\[\]/]*/)+"
orgfile_regex = r"[^\[\]/]*\.org"
linkname_regex = r"[^\[\]]+"
linksearch_regex = r"[\*#]" + linkname_regex
generic_orglink_regex = r"[^\[\]]+"
def recursive_filter(condition: Callable[[OrgBaseNode], bool], root: Iterable[OrgBaseNode]) -> Iterable[OrgBaseNode]:
"""recursively trasvese all possible nodes from root and return only those for which
condition returns True
Args:
condition: condition which evaluates to true
nodes: nodes to be traversed
Yields each node with matches the condition
"""
for node in root:
if condition(node):
yield node
if node.children:
yield from recursive_filter(condition, node.children)
def get_children(parent: OrgBaseNode) -> Iterable[OrgBaseNode]:
if parent.children:
for node in parent.children:
yield node
if node.children:
yield from get_children(node)
# This dictionary maps each custom_id to their id (either existent id or newly generated id)
custom_to_id = {}
def add_id(node: OrgBaseNode) -> str:
"""add id if not exists to the str representation of an OrgBaseNode, using custom_to_id dict"""
if (node.properties.get("custom_id") in custom_to_id.keys()) and (
set(node.properties.keys()).intersection(set(("id", "ID", "iD", "Id"))) == set()
):
return re.sub(
r"(:custom_id: " + node.properties["custom_id"] + r")",
r"\1\n:ID: " + custom_to_id[node.properties["custom_id"]],
str(node),
)
else:
return str(node)
def substitute_customid_links(content: str) -> str:
# Substitute simple links [[#link]]
content = re.sub(r"\[\[#" + custom + r"\]\]", f"[[id:{uuid}][{custom}]]", content)
# Substitute links with names [[#link][name]]
content = re.sub(
r"\[\[#" + custom + r"\]\[(" + linkname_regex + r")\]\]",
"[[id:" + uuid + r"][\1]]",
content,
)
return content
def add_orgzly_flat_links(content: str) -> str:
"""Strips the directories out of file links to work with orgzly, flattening the directory structure in one big
directory so that orgzly can work with it
Also retains the previous links with \g<0> so that everything works as normal in emacs"""
# Substitute simple links [[file:folder1/folder2/my.org]] -> [[file:my.org]]
# Substitute links with names [[file:folder1/folder2/my.org][name]] ->[[file:my.org][name]]
content = re.sub(
r"\[\[file:"
+ directorypath_regex
+ r"("
+ orgfile_regex
+ r")\]((:?\["
+ linkname_regex
+ r"\])?)\]"
+ r"(?! \[\[file:\2\]\3\])", # Do not replace if already replaced
r"\g<0> [[file:\2]\3]",
content,
)
# Substitute simple links [[file:~/Documents/doc.pdf]] -> [[file:Documents/doc.pdf]]
# Substitute links with names [[file:~/Documents/doc.pdf][Name]] -> [[file:Documents/doc.pdf][Name]]
content = re.sub(
r"\[\[file:~/"
+ r"("
+ generic_orglink_regex
+ r")\]((:?\["
+ linkname_regex
+ r"\])?)\]"
+ r"(?! \[\[file:\1\]\2\])", # Do not replace if already replaced
r"\g<0> [[file:\1]\2]",
content,
)
return content
# First pass, create ID if not exists for each heading with custom_id
for path in glob(f"{ORG_DIRECTORY}/**/*.org", recursive=True):
with open(path, "r") as f:
root = loads(f.read())
custom_id = recursive_filter(lambda x: x.properties.get("custom_id") is not None, get_children(root))
for item in custom_id:
uuid = item.properties.get("ID", str(uuid4())) # Create id if not exists only
custom_to_id.update({item.properties["custom_id"]: uuid})
if ORGZLY_CUSTOM_ID_FILE is not None:
with open(ORGZLY_CUSTOM_ID_FILE, "w") as f:
for custom_id, regular_id in custom_to_id.items():
f.write(f"* [[id:{regular_id}][{custom_id}]]\n")
result = str(root[0]) + "\n" + "\n".join([add_orgzly_flat_links(add_id(x)) for x in root[1:]])
with open(path, "w") as f:
# Overwrite content
f.seek(0)
f.write(result)
# Second pass, substitute links with the custom_to_id mapping
for path in glob(f"{ORG_DIRECTORY}/**/*.org", recursive=True):
with open(path, "r") as f:
content = f.read()
for custom, uuid in custom_to_id.items():
content = substitute_customid_links(content) # TODO Try to do it node by node, seems faster
with open(path, "w") as f:
# Overwrite content
f.seek(0)
f.write(content) | link_translation.py | import os
import re
from glob import glob
from typing import Callable, Iterable
from uuid import uuid4
from dotenv import load_dotenv
from orgparse import loads
from orgparse.node import OrgBaseNode
load_dotenv()
ORG_DIRECTORY = os.getenv("ORG_DIRECTORY")
ORGZLY_CUSTOM_ID_FILE = os.getenv("ORGZLY_CUSTOM_ID_FILE")
# Global variables specifying what whe mean when we say directorypath, orgfile, linkname, ...
# ext4 allows every character except / and NULL to be part of a directory or filename
directorypath_regex = r"([^\[\]/]*/)+"
orgfile_regex = r"[^\[\]/]*\.org"
linkname_regex = r"[^\[\]]+"
linksearch_regex = r"[\*#]" + linkname_regex
generic_orglink_regex = r"[^\[\]]+"
def recursive_filter(condition: Callable[[OrgBaseNode], bool], root: Iterable[OrgBaseNode]) -> Iterable[OrgBaseNode]:
"""recursively trasvese all possible nodes from root and return only those for which
condition returns True
Args:
condition: condition which evaluates to true
nodes: nodes to be traversed
Yields each node with matches the condition
"""
for node in root:
if condition(node):
yield node
if node.children:
yield from recursive_filter(condition, node.children)
def get_children(parent: OrgBaseNode) -> Iterable[OrgBaseNode]:
if parent.children:
for node in parent.children:
yield node
if node.children:
yield from get_children(node)
# This dictionary maps each custom_id to their id (either existent id or newly generated id)
custom_to_id = {}
def add_id(node: OrgBaseNode) -> str:
"""add id if not exists to the str representation of an OrgBaseNode, using custom_to_id dict"""
if (node.properties.get("custom_id") in custom_to_id.keys()) and (
set(node.properties.keys()).intersection(set(("id", "ID", "iD", "Id"))) == set()
):
return re.sub(
r"(:custom_id: " + node.properties["custom_id"] + r")",
r"\1\n:ID: " + custom_to_id[node.properties["custom_id"]],
str(node),
)
else:
return str(node)
def substitute_customid_links(content: str) -> str:
# Substitute simple links [[#link]]
content = re.sub(r"\[\[#" + custom + r"\]\]", f"[[id:{uuid}][{custom}]]", content)
# Substitute links with names [[#link][name]]
content = re.sub(
r"\[\[#" + custom + r"\]\[(" + linkname_regex + r")\]\]",
"[[id:" + uuid + r"][\1]]",
content,
)
return content
def add_orgzly_flat_links(content: str) -> str:
"""Strips the directories out of file links to work with orgzly, flattening the directory structure in one big
directory so that orgzly can work with it
Also retains the previous links with \g<0> so that everything works as normal in emacs"""
# Substitute simple links [[file:folder1/folder2/my.org]] -> [[file:my.org]]
# Substitute links with names [[file:folder1/folder2/my.org][name]] ->[[file:my.org][name]]
content = re.sub(
r"\[\[file:"
+ directorypath_regex
+ r"("
+ orgfile_regex
+ r")\]((:?\["
+ linkname_regex
+ r"\])?)\]"
+ r"(?! \[\[file:\2\]\3\])", # Do not replace if already replaced
r"\g<0> [[file:\2]\3]",
content,
)
# Substitute simple links [[file:~/Documents/doc.pdf]] -> [[file:Documents/doc.pdf]]
# Substitute links with names [[file:~/Documents/doc.pdf][Name]] -> [[file:Documents/doc.pdf][Name]]
content = re.sub(
r"\[\[file:~/"
+ r"("
+ generic_orglink_regex
+ r")\]((:?\["
+ linkname_regex
+ r"\])?)\]"
+ r"(?! \[\[file:\1\]\2\])", # Do not replace if already replaced
r"\g<0> [[file:\1]\2]",
content,
)
return content
# First pass, create ID if not exists for each heading with custom_id
for path in glob(f"{ORG_DIRECTORY}/**/*.org", recursive=True):
with open(path, "r") as f:
root = loads(f.read())
custom_id = recursive_filter(lambda x: x.properties.get("custom_id") is not None, get_children(root))
for item in custom_id:
uuid = item.properties.get("ID", str(uuid4())) # Create id if not exists only
custom_to_id.update({item.properties["custom_id"]: uuid})
if ORGZLY_CUSTOM_ID_FILE is not None:
with open(ORGZLY_CUSTOM_ID_FILE, "w") as f:
for custom_id, regular_id in custom_to_id.items():
f.write(f"* [[id:{regular_id}][{custom_id}]]\n")
result = str(root[0]) + "\n" + "\n".join([add_orgzly_flat_links(add_id(x)) for x in root[1:]])
with open(path, "w") as f:
# Overwrite content
f.seek(0)
f.write(result)
# Second pass, substitute links with the custom_to_id mapping
for path in glob(f"{ORG_DIRECTORY}/**/*.org", recursive=True):
with open(path, "r") as f:
content = f.read()
for custom, uuid in custom_to_id.items():
content = substitute_customid_links(content) # TODO Try to do it node by node, seems faster
with open(path, "w") as f:
# Overwrite content
f.seek(0)
f.write(content) | 0.655226 | 0.192122 |
__author__ = "ivallesp"
import os
import json
def _norm_path(path):
"""
Decorator function intended for using it to normalize a the output of a path retrieval function. Useful for
fixing the slash/backslash windows cases.
"""
def normalize_path(*args):
return os.path.normpath(path(*args))
return normalize_path
def _assure_path_exists(path):
"""
Decorator function intended for checking the existence of a the output of a path retrieval function. Useful for
fixing the slash/backslash windows cases.
"""
def assure_exists(*args):
assert os.path.exists(path(*args))
return path(*args)
return assure_exists
def _is_output_path(path):
"""
Decorator function intended for grouping the functions which are applied over the output of an output path retrieval
function
"""
@_norm_path
@_assure_path_exists
def check_existence_or_create_it(*args):
if not os.path.exists(path(*args)):
"Path didn't exist... creating it: {}".format(path(*args))
os.makedirs(path(*args))
return path(*args)
return check_existence_or_create_it
def _is_input_path(path):
"""
Decorator function intended for grouping the functions which are applied over the output of an input path retrieval
function
"""
@_norm_path
@_assure_path_exists
def check_existence(*args):
return path(*args)
return check_existence
@_is_input_path
def get_project_path():
"""
Function used for retrieving the path where the project is located
:return: the checked path (str|unicode)
"""
with open("./settings.json") as f:
settings = json.load(f)
return settings["project_path"]
@_is_input_path
def get_data_path():
with open("./settings.json") as f:
settings = json.load(f)
return settings["data_path"]
@_is_input_path
def get_raw_data_path():
return os.path.join(get_data_path(), "raw")
@_is_input_path
def get_numerai_secrets_path():
return os.path.join(get_project_path(), "NumerAPI", "secrets.json")
@_is_output_path
def get_submissions_path():
return os.path.join(get_data_path(), "submissions")
@_is_output_path
def get_logs_path():
return os.path.join(get_project_path(), "logs")
@_is_output_path
def get_reports_path():
return os.path.join(get_data_path(), "reports")
@_is_output_path
def get_raw_data_version_path(version):
"""
Retrieves the path where the raw data is saved.
:param version: version of the data which is intended to be loaded. If not specified, the last version name is
retrieved from the settings.json file and used to build the path (str|unicode|None).
:return: the path of the data requested (str|unicode).
"""
from utilities import get_last_data_version
if not version:
version = get_last_data_version()
return os.path.join(get_raw_data_path(), version,"numerai_datasets")
@_is_output_path
def get_reports_version_path(version):
"""
Retrieves the path where the reports are going to be saved
:param version: version of the data which is intended to be saved. If not specified, the last version name is
retrieved from the settings.json file and used to build the path (str|unicode|None).
:return: the path of the data requested (str|unicode).
"""
from utilities import get_last_data_version
if not version:
version = get_last_data_version()
return os.path.join(get_reports_path(), version)
@_is_output_path
def get_submissions_version_path(version=None):
"""
Retrieves the path where the submissions are going to be saved
:param version: version of the data which is intended to be saved. If not specified, the last version name is
retrieved from the settings.json file and used to build the path (str|unicode|None).
:return: the path of the data requested (str|unicode).
"""
from utilities import get_last_data_version
if not version:
version = get_last_data_version()
return os.path.join(get_submissions_path(), version)
def get_submission_filepath(version, alias):
"""
Retrieves the path of the submissios given by a version and an alias.
:param version: version of the data which is intended to be accessed (str|unicode).
:param alias: version of the submission which is intended to be accessed (str|unicode).
:return: the path of the submission file (str|unicode)
"""
path = get_submissions_version_path(version)
path = os.path.join(path, "submission_{0}.csv".format(alias))
return path | src/common_paths.py | __author__ = "ivallesp"
import os
import json
def _norm_path(path):
"""
Decorator function intended for using it to normalize a the output of a path retrieval function. Useful for
fixing the slash/backslash windows cases.
"""
def normalize_path(*args):
return os.path.normpath(path(*args))
return normalize_path
def _assure_path_exists(path):
"""
Decorator function intended for checking the existence of a the output of a path retrieval function. Useful for
fixing the slash/backslash windows cases.
"""
def assure_exists(*args):
assert os.path.exists(path(*args))
return path(*args)
return assure_exists
def _is_output_path(path):
"""
Decorator function intended for grouping the functions which are applied over the output of an output path retrieval
function
"""
@_norm_path
@_assure_path_exists
def check_existence_or_create_it(*args):
if not os.path.exists(path(*args)):
"Path didn't exist... creating it: {}".format(path(*args))
os.makedirs(path(*args))
return path(*args)
return check_existence_or_create_it
def _is_input_path(path):
"""
Decorator function intended for grouping the functions which are applied over the output of an input path retrieval
function
"""
@_norm_path
@_assure_path_exists
def check_existence(*args):
return path(*args)
return check_existence
@_is_input_path
def get_project_path():
"""
Function used for retrieving the path where the project is located
:return: the checked path (str|unicode)
"""
with open("./settings.json") as f:
settings = json.load(f)
return settings["project_path"]
@_is_input_path
def get_data_path():
with open("./settings.json") as f:
settings = json.load(f)
return settings["data_path"]
@_is_input_path
def get_raw_data_path():
return os.path.join(get_data_path(), "raw")
@_is_input_path
def get_numerai_secrets_path():
return os.path.join(get_project_path(), "NumerAPI", "secrets.json")
@_is_output_path
def get_submissions_path():
return os.path.join(get_data_path(), "submissions")
@_is_output_path
def get_logs_path():
return os.path.join(get_project_path(), "logs")
@_is_output_path
def get_reports_path():
return os.path.join(get_data_path(), "reports")
@_is_output_path
def get_raw_data_version_path(version):
"""
Retrieves the path where the raw data is saved.
:param version: version of the data which is intended to be loaded. If not specified, the last version name is
retrieved from the settings.json file and used to build the path (str|unicode|None).
:return: the path of the data requested (str|unicode).
"""
from utilities import get_last_data_version
if not version:
version = get_last_data_version()
return os.path.join(get_raw_data_path(), version,"numerai_datasets")
@_is_output_path
def get_reports_version_path(version):
"""
Retrieves the path where the reports are going to be saved
:param version: version of the data which is intended to be saved. If not specified, the last version name is
retrieved from the settings.json file and used to build the path (str|unicode|None).
:return: the path of the data requested (str|unicode).
"""
from utilities import get_last_data_version
if not version:
version = get_last_data_version()
return os.path.join(get_reports_path(), version)
@_is_output_path
def get_submissions_version_path(version=None):
"""
Retrieves the path where the submissions are going to be saved
:param version: version of the data which is intended to be saved. If not specified, the last version name is
retrieved from the settings.json file and used to build the path (str|unicode|None).
:return: the path of the data requested (str|unicode).
"""
from utilities import get_last_data_version
if not version:
version = get_last_data_version()
return os.path.join(get_submissions_path(), version)
def get_submission_filepath(version, alias):
"""
Retrieves the path of the submissios given by a version and an alias.
:param version: version of the data which is intended to be accessed (str|unicode).
:param alias: version of the submission which is intended to be accessed (str|unicode).
:return: the path of the submission file (str|unicode)
"""
path = get_submissions_version_path(version)
path = os.path.join(path, "submission_{0}.csv".format(alias))
return path | 0.624408 | 0.44734 |
from const import RACE_ID, HUM
from game import INF
import numpy as np
def evaluate_inf(board, race, race_ennemi):
'''heuristic function'''
sum_ = np.sum(board.grid, axis=(0, 1))
if sum_[RACE_ID[race]] == 0:
return -INF
elif sum_[RACE_ID[race_ennemi]] == 0:
return INF
else:
# evite la dispersion
dispersion = int(np.sum(board.grid[:, :, RACE_ID[race]] > 0))
return 50 * (int(sum_[RACE_ID[race]]) - int(sum_[RACE_ID[race_ennemi]])) - dispersion
def evaluate_disp(board, race, race_ennemi):
'''heuristic function'''
sum_ = np.sum(board.grid, axis=(0, 1))
# evite la dispersion
dispersion = int(np.sum(board.grid[:, :, RACE_ID[race]] > 0))
return 50 * (int(sum_[RACE_ID[race]]) - int(sum_[RACE_ID[race_ennemi]])) - dispersion
def evaluate_fred(board, race, race_ennemi):
ally_squares = []
enemy_squares = []
human_squares = []
sum_ = np.sum(board.grid, axis=(0, 1))
H = int(sum_[RACE_ID[race]]) - int(sum_[RACE_ID[race_ennemi]])
for square in board.enumerate_squares():
if board.grid[square][RACE_ID[race]] > 0:
ally_squares += [square]
elif board.grid[square][RACE_ID[race_ennemi]] > 0:
enemy_squares += [square]
elif board.grid[square][RACE_ID[HUM]] > 0:
human_squares += [square]
for square_ally in ally_squares:
for square_hum in human_squares:
dist = L_inf_dist(square_ally, square_hum)
H += (0.1**dist) * expected_outcome_attack_humans(
board.grid[square_ally][RACE_ID[race]],
board.grid[square_hum][RACE_ID[HUM]])
for square_ally in ally_squares:
for square_enemy in enemy_squares:
dist = L_inf_dist(square_ally, square_enemy)
H += (0.1**dist) * expected_outcome_attack_player(
board.grid[square_ally][RACE_ID[race]],
board.grid[square_enemy][RACE_ID[race_ennemi]])
H -= (0.1**dist) * expected_outcome_attack_player(
board.grid[square_enemy][RACE_ID[race_ennemi]],
board.grid[square_ally][RACE_ID[race]])
for square_enemy in enemy_squares:
for square_hum in human_squares:
dist = L_inf_dist(square_enemy, square_hum)
H += (0.1**dist) * expected_outcome_attack_humans(
board.grid[square_enemy][RACE_ID[race_ennemi]],
board.grid[square_hum][RACE_ID[HUM]])
return H
def expected_outcome_attack_humans(attacker, defender):
'''Returns the average increase or decrease in warriors after combat'''
attackingNumberOfPlayer = int(attacker)
defendingNumberOfHumans = int(defender)
if(attackingNumberOfPlayer >= 1.5 * defendingNumberOfHumans):
return defendingNumberOfHumans
elif(attackingNumberOfPlayer >= defendingNumberOfHumans):
P = attackingNumberOfPlayer / defendingNumberOfHumans - 0.5
else:
P = attackingNumberOfPlayer / (2 * defendingNumberOfHumans)
# We lose units with probability 1-P and gain defending units with prob. P
winCase = (P - 1) * attackingNumberOfPlayer + P * defendingNumberOfHumans
# We lose our units
losingCase = -attackingNumberOfPlayer
return max(0,P * winCase + (1 - P) * losingCase)
def expected_outcome_attack_player(attacker, defender):
attackingNumberOfPlayer = int(attacker)
defendingNumberOfPlayer = int(defender)
if(attackingNumberOfPlayer >= 1.5 * defendingNumberOfPlayer):
P = 1
elif(attackingNumberOfPlayer >= defendingNumberOfPlayer):
P = attackingNumberOfPlayer / defendingNumberOfPlayer - 0.5
else:
P = attackingNumberOfPlayer / (2 * defendingNumberOfPlayer)
# We don't gain the defending units but the enemy loses it : same thing !
winCase = (P - 1) * attackingNumberOfPlayer + defendingNumberOfPlayer
# We lose all our units and the opponents loses them with probability P
losingCase = -attackingNumberOfPlayer + P * defendingNumberOfPlayer
return P * winCase + (1 - P) * losingCase
def L_inf_dist(square1, square2):
return max(square1[0] - square2[0], square1[1] - square2[1],
square2[0] - square1[0], square2[1] - square1[1]) | src/evaluation.py | from const import RACE_ID, HUM
from game import INF
import numpy as np
def evaluate_inf(board, race, race_ennemi):
'''heuristic function'''
sum_ = np.sum(board.grid, axis=(0, 1))
if sum_[RACE_ID[race]] == 0:
return -INF
elif sum_[RACE_ID[race_ennemi]] == 0:
return INF
else:
# evite la dispersion
dispersion = int(np.sum(board.grid[:, :, RACE_ID[race]] > 0))
return 50 * (int(sum_[RACE_ID[race]]) - int(sum_[RACE_ID[race_ennemi]])) - dispersion
def evaluate_disp(board, race, race_ennemi):
'''heuristic function'''
sum_ = np.sum(board.grid, axis=(0, 1))
# evite la dispersion
dispersion = int(np.sum(board.grid[:, :, RACE_ID[race]] > 0))
return 50 * (int(sum_[RACE_ID[race]]) - int(sum_[RACE_ID[race_ennemi]])) - dispersion
def evaluate_fred(board, race, race_ennemi):
ally_squares = []
enemy_squares = []
human_squares = []
sum_ = np.sum(board.grid, axis=(0, 1))
H = int(sum_[RACE_ID[race]]) - int(sum_[RACE_ID[race_ennemi]])
for square in board.enumerate_squares():
if board.grid[square][RACE_ID[race]] > 0:
ally_squares += [square]
elif board.grid[square][RACE_ID[race_ennemi]] > 0:
enemy_squares += [square]
elif board.grid[square][RACE_ID[HUM]] > 0:
human_squares += [square]
for square_ally in ally_squares:
for square_hum in human_squares:
dist = L_inf_dist(square_ally, square_hum)
H += (0.1**dist) * expected_outcome_attack_humans(
board.grid[square_ally][RACE_ID[race]],
board.grid[square_hum][RACE_ID[HUM]])
for square_ally in ally_squares:
for square_enemy in enemy_squares:
dist = L_inf_dist(square_ally, square_enemy)
H += (0.1**dist) * expected_outcome_attack_player(
board.grid[square_ally][RACE_ID[race]],
board.grid[square_enemy][RACE_ID[race_ennemi]])
H -= (0.1**dist) * expected_outcome_attack_player(
board.grid[square_enemy][RACE_ID[race_ennemi]],
board.grid[square_ally][RACE_ID[race]])
for square_enemy in enemy_squares:
for square_hum in human_squares:
dist = L_inf_dist(square_enemy, square_hum)
H += (0.1**dist) * expected_outcome_attack_humans(
board.grid[square_enemy][RACE_ID[race_ennemi]],
board.grid[square_hum][RACE_ID[HUM]])
return H
def expected_outcome_attack_humans(attacker, defender):
'''Returns the average increase or decrease in warriors after combat'''
attackingNumberOfPlayer = int(attacker)
defendingNumberOfHumans = int(defender)
if(attackingNumberOfPlayer >= 1.5 * defendingNumberOfHumans):
return defendingNumberOfHumans
elif(attackingNumberOfPlayer >= defendingNumberOfHumans):
P = attackingNumberOfPlayer / defendingNumberOfHumans - 0.5
else:
P = attackingNumberOfPlayer / (2 * defendingNumberOfHumans)
# We lose units with probability 1-P and gain defending units with prob. P
winCase = (P - 1) * attackingNumberOfPlayer + P * defendingNumberOfHumans
# We lose our units
losingCase = -attackingNumberOfPlayer
return max(0,P * winCase + (1 - P) * losingCase)
def expected_outcome_attack_player(attacker, defender):
attackingNumberOfPlayer = int(attacker)
defendingNumberOfPlayer = int(defender)
if(attackingNumberOfPlayer >= 1.5 * defendingNumberOfPlayer):
P = 1
elif(attackingNumberOfPlayer >= defendingNumberOfPlayer):
P = attackingNumberOfPlayer / defendingNumberOfPlayer - 0.5
else:
P = attackingNumberOfPlayer / (2 * defendingNumberOfPlayer)
# We don't gain the defending units but the enemy loses it : same thing !
winCase = (P - 1) * attackingNumberOfPlayer + defendingNumberOfPlayer
# We lose all our units and the opponents loses them with probability P
losingCase = -attackingNumberOfPlayer + P * defendingNumberOfPlayer
return P * winCase + (1 - P) * losingCase
def L_inf_dist(square1, square2):
return max(square1[0] - square2[0], square1[1] - square2[1],
square2[0] - square1[0], square2[1] - square1[1]) | 0.640186 | 0.605158 |
import uuid
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for the dateflix API app.

    Creates the custom ``User`` model (UUID primary key, e-mail login
    metadata, Instagram fields) together with ``Movie``, ``Picture``,
    ``ProfileLike`` and ``Like``.  Auto-generated by ``makemigrations``;
    keep the field definitions in sync with the model classes.
    """

    initial = True
    dependencies = [
        ("auth", "0012_alter_user_first_name_max_length"),
    ]
    operations = [
        # Custom user model; note the UUID primary key instead of the
        # default auto-increment integer id.
        migrations.CreateModel(
            name="User",
            fields=[
                ("password", models.CharField(max_length=128, verbose_name="password")),
                (
                    "last_login",
                    models.DateTimeField(
                        blank=True, null=True, verbose_name="last login"
                    ),
                ),
                (
                    "is_superuser",
                    models.BooleanField(
                        default=False,
                        help_text="Designates that this user has all permissions without explicitly assigning them.",
                        verbose_name="superuser status",
                    ),
                ),
                (
                    "id",
                    models.UUIDField(
                        default=uuid.uuid4,
                        editable=False,
                        primary_key=True,
                        serialize=False,
                        unique=True,
                    ),
                ),
                ("created_at", models.DateTimeField(auto_now_add=True)),
                ("updated_at", models.DateTimeField(auto_now=True)),
                (
                    "first_name",
                    models.CharField(max_length=50, verbose_name="first name"),
                ),
                (
                    "last_name",
                    models.CharField(max_length=50, verbose_name="last name"),
                ),
                ("bio", models.TextField(verbose_name="bio")),
                ("instagram", models.URLField(verbose_name="instagram")),
                (
                    "email",
                    models.EmailField(
                        blank=True,
                        max_length=255,
                        null=True,
                        unique=True,
                        verbose_name="email address",
                    ),
                ),
                (
                    "birthday_date",
                    models.DateField(
                        blank=True, null=True, verbose_name="birthday date"
                    ),
                ),
                (
                    "is_staff",
                    models.BooleanField(
                        default=False,
                        help_text="Designates whether the user can log into this admin site.",
                        verbose_name="staff status",
                    ),
                ),
                (
                    "is_active",
                    models.BooleanField(
                        default=True,
                        help_text="Designates whether this user should be treated as active. Unselect this instead of deleting accounts.",
                        verbose_name="active",
                    ),
                ),
                (
                    "instagram_user_id",
                    models.CharField(
                        blank=True,
                        max_length=20,
                        null=True,
                        verbose_name="instagram user id",
                    ),
                ),
                (
                    "groups",
                    models.ManyToManyField(
                        blank=True,
                        help_text="The groups this user belongs to. A user will get all permissions granted to each of their groups.",
                        related_name="user_set",
                        related_query_name="user",
                        to="auth.Group",
                        verbose_name="groups",
                    ),
                ),
                (
                    "user_permissions",
                    models.ManyToManyField(
                        blank=True,
                        help_text="Specific permissions for this user.",
                        related_name="user_set",
                        related_query_name="user",
                        to="auth.Permission",
                        verbose_name="user permissions",
                    ),
                ),
            ],
            options={
                "verbose_name": "user",
                "verbose_name_plural": "users",
            },
        ),
        # Catalogue entry; external ids reference JustWatch, TMDB and Netflix.
        migrations.CreateModel(
            name="Movie",
            fields=[
                (
                    "id",
                    models.UUIDField(
                        default=uuid.uuid4,
                        editable=False,
                        primary_key=True,
                        serialize=False,
                        unique=True,
                    ),
                ),
                ("created_at", models.DateTimeField(auto_now_add=True)),
                ("updated_at", models.DateTimeField(auto_now=True)),
                ("title", models.CharField(max_length=250, verbose_name="title")),
                ("netflix_url", models.URLField(verbose_name="url")),
                ("image", models.URLField(verbose_name="image")),
                ("justwatch_id", models.IntegerField(verbose_name="JustWatch ID")),
                ("tmdb_id", models.IntegerField(verbose_name="TMDB ID")),
                ("netflix_id", models.IntegerField(verbose_name="Netflix ID")),
                (
                    "imdb_score",
                    models.DecimalField(
                        blank=True,
                        decimal_places=1,
                        max_digits=3,
                        null=True,
                        verbose_name="IMDB Score",
                    ),
                ),
                (
                    "tmdb_score",
                    models.DecimalField(
                        blank=True,
                        decimal_places=1,
                        max_digits=3,
                        null=True,
                        verbose_name="TMDB Score",
                    ),
                ),
                (
                    "description",
                    models.TextField(blank=True, null=True, verbose_name="description"),
                ),
                ("active", models.BooleanField(default=True)),
            ],
            options={
                "abstract": False,
            },
        ),
        # Profile picture; CASCADE so pictures vanish with their owner.
        migrations.CreateModel(
            name="Picture",
            fields=[
                (
                    "id",
                    models.UUIDField(
                        default=uuid.uuid4,
                        editable=False,
                        primary_key=True,
                        serialize=False,
                        unique=True,
                    ),
                ),
                ("created_at", models.DateTimeField(auto_now_add=True)),
                ("updated_at", models.DateTimeField(auto_now=True)),
                ("url", models.URLField(max_length=300)),
                (
                    "user",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="pictures",
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
            ],
            options={
                "abstract": False,
            },
        ),
        # One user's vote on another user's profile for a given movie.
        # PROTECT deletes keep vote history intact; unique_together forbids
        # duplicate votes for the same (movie, voter, votee) triple.
        migrations.CreateModel(
            name="ProfileLike",
            fields=[
                (
                    "id",
                    models.UUIDField(
                        default=uuid.uuid4,
                        editable=False,
                        primary_key=True,
                        serialize=False,
                        unique=True,
                    ),
                ),
                ("created_at", models.DateTimeField(auto_now_add=True)),
                ("updated_at", models.DateTimeField(auto_now=True)),
                ("like", models.BooleanField(verbose_name="like")),
                (
                    "from_user",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.PROTECT,
                        related_name="chosen_likes",
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
                (
                    "movie",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.PROTECT,
                        related_name="profile_likes",
                        to="dateflix_api.movie",
                    ),
                ),
                (
                    "to_user",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.PROTECT,
                        related_name="received_likes",
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
            ],
            options={
                "unique_together": {("movie", "from_user", "to_user")},
            },
        ),
        # A user's like/dislike of a movie itself (one vote per pair).
        migrations.CreateModel(
            name="Like",
            fields=[
                (
                    "id",
                    models.UUIDField(
                        default=uuid.uuid4,
                        editable=False,
                        primary_key=True,
                        serialize=False,
                        unique=True,
                    ),
                ),
                ("created_at", models.DateTimeField(auto_now_add=True)),
                ("updated_at", models.DateTimeField(auto_now=True)),
                ("like", models.BooleanField(verbose_name="like")),
                (
                    "movie",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.PROTECT,
                        related_name="likes",
                        to="dateflix_api.movie",
                    ),
                ),
                (
                    "user",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.PROTECT,
                        related_name="likes",
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
            ],
            options={
                "unique_together": {("movie", "user")},
            },
        ),
    ]
import uuid
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
("auth", "0012_alter_user_first_name_max_length"),
]
operations = [
migrations.CreateModel(
name="User",
fields=[
("password", models.CharField(max_length=128, verbose_name="password")),
(
"last_login",
models.DateTimeField(
blank=True, null=True, verbose_name="last login"
),
),
(
"is_superuser",
models.BooleanField(
default=False,
help_text="Designates that this user has all permissions without explicitly assigning them.",
verbose_name="superuser status",
),
),
(
"id",
models.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
serialize=False,
unique=True,
),
),
("created_at", models.DateTimeField(auto_now_add=True)),
("updated_at", models.DateTimeField(auto_now=True)),
(
"first_name",
models.CharField(max_length=50, verbose_name="first name"),
),
(
"last_name",
models.CharField(max_length=50, verbose_name="last name"),
),
("bio", models.TextField(verbose_name="bio")),
("instagram", models.URLField(verbose_name="instagram")),
(
"email",
models.EmailField(
blank=True,
max_length=255,
null=True,
unique=True,
verbose_name="email address",
),
),
(
"birthday_date",
models.DateField(
blank=True, null=True, verbose_name="birthday date"
),
),
(
"is_staff",
models.BooleanField(
default=False,
help_text="Designates whether the user can log into this admin site.",
verbose_name="staff status",
),
),
(
"is_active",
models.BooleanField(
default=True,
help_text="Designates whether this user should be treated as active. Unselect this instead of deleting accounts.",
verbose_name="active",
),
),
(
"instagram_user_id",
models.CharField(
blank=True,
max_length=20,
null=True,
verbose_name="instagram user id",
),
),
(
"groups",
models.ManyToManyField(
blank=True,
help_text="The groups this user belongs to. A user will get all permissions granted to each of their groups.",
related_name="user_set",
related_query_name="user",
to="auth.Group",
verbose_name="groups",
),
),
(
"user_permissions",
models.ManyToManyField(
blank=True,
help_text="Specific permissions for this user.",
related_name="user_set",
related_query_name="user",
to="auth.Permission",
verbose_name="user permissions",
),
),
],
options={
"verbose_name": "user",
"verbose_name_plural": "users",
},
),
migrations.CreateModel(
name="Movie",
fields=[
(
"id",
models.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
serialize=False,
unique=True,
),
),
("created_at", models.DateTimeField(auto_now_add=True)),
("updated_at", models.DateTimeField(auto_now=True)),
("title", models.CharField(max_length=250, verbose_name="title")),
("netflix_url", models.URLField(verbose_name="url")),
("image", models.URLField(verbose_name="image")),
("justwatch_id", models.IntegerField(verbose_name="JustWatch ID")),
("tmdb_id", models.IntegerField(verbose_name="TMDB ID")),
("netflix_id", models.IntegerField(verbose_name="Netflix ID")),
(
"imdb_score",
models.DecimalField(
blank=True,
decimal_places=1,
max_digits=3,
null=True,
verbose_name="IMDB Score",
),
),
(
"tmdb_score",
models.DecimalField(
blank=True,
decimal_places=1,
max_digits=3,
null=True,
verbose_name="TMDB Score",
),
),
(
"description",
models.TextField(blank=True, null=True, verbose_name="description"),
),
("active", models.BooleanField(default=True)),
],
options={
"abstract": False,
},
),
migrations.CreateModel(
name="Picture",
fields=[
(
"id",
models.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
serialize=False,
unique=True,
),
),
("created_at", models.DateTimeField(auto_now_add=True)),
("updated_at", models.DateTimeField(auto_now=True)),
("url", models.URLField(max_length=300)),
(
"user",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="pictures",
to=settings.AUTH_USER_MODEL,
),
),
],
options={
"abstract": False,
},
),
migrations.CreateModel(
name="ProfileLike",
fields=[
(
"id",
models.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
serialize=False,
unique=True,
),
),
("created_at", models.DateTimeField(auto_now_add=True)),
("updated_at", models.DateTimeField(auto_now=True)),
("like", models.BooleanField(verbose_name="like")),
(
"from_user",
models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
related_name="chosen_likes",
to=settings.AUTH_USER_MODEL,
),
),
(
"movie",
models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
related_name="profile_likes",
to="dateflix_api.movie",
),
),
(
"to_user",
models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
related_name="received_likes",
to=settings.AUTH_USER_MODEL,
),
),
],
options={
"unique_together": {("movie", "from_user", "to_user")},
},
),
migrations.CreateModel(
name="Like",
fields=[
(
"id",
models.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
serialize=False,
unique=True,
),
),
("created_at", models.DateTimeField(auto_now_add=True)),
("updated_at", models.DateTimeField(auto_now=True)),
("like", models.BooleanField(verbose_name="like")),
(
"movie",
models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
related_name="likes",
to="dateflix_api.movie",
),
),
(
"user",
models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
related_name="likes",
to=settings.AUTH_USER_MODEL,
),
),
],
options={
"unique_together": {("movie", "user")},
},
),
] | 0.49292 | 0.157428 |
from collections import deque
def bfs(graph, start=None):
    """Breadth-first traversal of ``graph`` (adjacency dict), printing each
    reachable node exactly once in BFS order.

    If ``start`` is None the first key of ``graph`` is used.

    Fixes: the original marked nodes explored only when dequeued, so a node
    reachable from two parents was enqueued twice, printed twice, and then
    aborted the traversal with a spurious "revisited" error.  Marking nodes
    as discovered at enqueue time guarantees each node is queued once.
    """
    if not graph:
        return
    if start is None:  # `is None`, so a falsy node id such as 0 is a valid start
        start = next(iter(graph))
    discovered = {start}
    dq = deque([start])
    while dq:
        current_node = dq.popleft()
        print(current_node)
        for next_node in graph[current_node]:
            if next_node not in discovered:
                discovered.add(next_node)
                dq.append(next_node)
# Note: call stack method for cycles might give TLE error, when cycles are sub-cycles of
# large cycles and we revisit each cyle backwards; in those cases use rank method, or dont
# traverse the cycle through stack, just make proper use of recursion.
def dfs_recursive(graph, current_node=None, explored_nodes=None, call_stack=None):
    """Recursive depth-first traversal; prints nodes in pre-order and reports
    back edges (cycles) with the current call stack.

    Returns the ``explored_nodes`` dict mapping node -> state, where 0 means
    "on the current recursion path" and 1 means "fully explored"; every entry
    is 1 once the top-level call returns.  Returns None for an empty graph
    (matching the original behaviour).

    Fixes: the original used mutable default arguments (``{}`` / ``[]``),
    which are created once and shared across calls, so a second traversal
    started with state left over from the first; ``not current_node`` also
    mistook the valid node id 0 for "no start given".
    """
    if not graph:
        return None
    if explored_nodes is None:
        explored_nodes = {}
    if call_stack is None:
        call_stack = []
    if current_node is None:
        current_node = next(iter(graph))
    print(current_node)
    explored_nodes[current_node] = 0  # grey: on the current DFS path
    call_stack.append(current_node)
    for next_node in graph[current_node]:
        if next_node not in explored_nodes:
            explored_nodes = dfs_recursive(graph, next_node, explored_nodes, call_stack)
        elif explored_nodes[next_node] == 0:
            # Back edge to an ancestor still on the path => cycle.
            print("Cycle detected", call_stack)
    call_stack.pop()
    explored_nodes[current_node] = 1  # black: finished
    return explored_nodes
def dfs_stack(graph, start_node = None):
    """Iterative depth-first traversal of ``graph`` (adjacency dict) that
    prints each node in DFS pre-order and reports back edges as cycles.

    Node states in ``explored_nodes``:
        0 -- discovered and pushed, not yet processed
        1 -- processed (printed), still on the stack ("grey")
        2 -- finished, popped for good ("black")

    A neighbour already in state 1 is an ancestor still being processed,
    i.e. a back edge, so a cycle is reported.
    """
    if(graph):
        if(not start_node):
            # NOTE(review): `not start_node` also triggers for a falsy node id
            # such as 0; the demo graphs start at key 0 anyway, but
            # `start_node is None` would be the safer test -- confirm intent.
            start_node = next(iter(graph))
        stack = [start_node]
        explored_nodes = {start_node:0}
        while(stack):
            print(stack, explored_nodes)  # debug trace of the full traversal state
            # Peek, don't pop: the node stays on the stack until all of its
            # descendants have been finished.
            current_node = stack[-1]
            if(explored_nodes[current_node] != 0):
                # Second encounter: children are done, retire the node.
                explored_nodes[current_node] = 2
                stack.pop()
                continue
            else:
                # First encounter: mark "in progress" and emit in pre-order.
                explored_nodes[current_node] = 1
                print(current_node)
                for next_node in graph[current_node]:
                    if(next_node not in explored_nodes):
                        stack.append(next_node)
                        explored_nodes[next_node] = 0
                    elif explored_nodes[next_node] == 1:
                        # Back edge to a node on the current DFS path.
                        print("Cycle detected")
if __name__ == '__main__':
    #graph = {0: [1, 2], 1: [3], 2: [4, 5], 3: [], 4: [], 5: [0] }
    # Demo graph: chain 0->1->2 followed by the cycle 2->3->4->5->2,
    # which exercises the cycle reporting of the traversals.
    graph = {0: [1], 1:[2], 2:[3], 3:[4], 4:[5], 5:[2]}
    #bfs(graph, 0)
    dfs_stack(graph, 0)
#dfs_recursive(graph) | Graph/graphSearch.py | from collections import deque
def bfs(graph, start=None):
    """Breadth-first traversal of ``graph`` (adjacency dict), printing each
    reachable node exactly once in BFS order.

    If ``start`` is None the first key of ``graph`` is used.

    Fixes: the original marked nodes explored only when dequeued, so a node
    reachable from two parents was enqueued twice, printed twice, and then
    aborted the traversal with a spurious "revisited" error.  Marking nodes
    as discovered at enqueue time guarantees each node is queued once.
    """
    if not graph:
        return
    if start is None:  # `is None`, so a falsy node id such as 0 is a valid start
        start = next(iter(graph))
    discovered = {start}
    dq = deque([start])
    while dq:
        current_node = dq.popleft()
        print(current_node)
        for next_node in graph[current_node]:
            if next_node not in discovered:
                discovered.add(next_node)
                dq.append(next_node)
# Note: call stack method for cycles might give TLE error, when cycles are sub-cycles of
# large cycles and we revisit each cyle backwards; in those cases use rank method, or dont
# traverse the cycle through stack, just make proper use of recursion.
def dfs_recursive(graph, current_node=None, explored_nodes=None, call_stack=None):
    """Recursive depth-first traversal; prints nodes in pre-order and reports
    back edges (cycles) with the current call stack.

    Returns the ``explored_nodes`` dict mapping node -> state, where 0 means
    "on the current recursion path" and 1 means "fully explored"; every entry
    is 1 once the top-level call returns.  Returns None for an empty graph
    (matching the original behaviour).

    Fixes: the original used mutable default arguments (``{}`` / ``[]``),
    which are created once and shared across calls, so a second traversal
    started with state left over from the first; ``not current_node`` also
    mistook the valid node id 0 for "no start given".
    """
    if not graph:
        return None
    if explored_nodes is None:
        explored_nodes = {}
    if call_stack is None:
        call_stack = []
    if current_node is None:
        current_node = next(iter(graph))
    print(current_node)
    explored_nodes[current_node] = 0  # grey: on the current DFS path
    call_stack.append(current_node)
    for next_node in graph[current_node]:
        if next_node not in explored_nodes:
            explored_nodes = dfs_recursive(graph, next_node, explored_nodes, call_stack)
        elif explored_nodes[next_node] == 0:
            # Back edge to an ancestor still on the path => cycle.
            print("Cycle detected", call_stack)
    call_stack.pop()
    explored_nodes[current_node] = 1  # black: finished
    return explored_nodes
def dfs_stack(graph, start_node = None):
    """Iterative depth-first traversal of ``graph`` (adjacency dict) that
    prints each node in DFS pre-order and reports back edges as cycles.

    Node states in ``explored_nodes``:
        0 -- discovered and pushed, not yet processed
        1 -- processed (printed), still on the stack ("grey")
        2 -- finished, popped for good ("black")

    A neighbour already in state 1 is an ancestor still being processed,
    i.e. a back edge, so a cycle is reported.
    """
    if(graph):
        if(not start_node):
            # NOTE(review): `not start_node` also triggers for a falsy node id
            # such as 0; the demo graphs start at key 0 anyway, but
            # `start_node is None` would be the safer test -- confirm intent.
            start_node = next(iter(graph))
        stack = [start_node]
        explored_nodes = {start_node:0}
        while(stack):
            print(stack, explored_nodes)  # debug trace of the full traversal state
            # Peek, don't pop: the node stays on the stack until all of its
            # descendants have been finished.
            current_node = stack[-1]
            if(explored_nodes[current_node] != 0):
                # Second encounter: children are done, retire the node.
                explored_nodes[current_node] = 2
                stack.pop()
                continue
            else:
                # First encounter: mark "in progress" and emit in pre-order.
                explored_nodes[current_node] = 1
                print(current_node)
                for next_node in graph[current_node]:
                    if(next_node not in explored_nodes):
                        stack.append(next_node)
                        explored_nodes[next_node] = 0
                    elif explored_nodes[next_node] == 1:
                        # Back edge to a node on the current DFS path.
                        print("Cycle detected")
if __name__ == '__main__':
    #graph = {0: [1, 2], 1: [3], 2: [4, 5], 3: [], 4: [], 5: [0] }
    # Demo graph: chain 0->1->2 followed by the cycle 2->3->4->5->2,
    # which exercises the cycle reporting of the traversals.
    graph = {0: [1], 1:[2], 2:[3], 3:[4], 4:[5], 5:[2]}
    #bfs(graph, 0)
    dfs_stack(graph, 0)
#dfs_recursive(graph) | 0.324985 | 0.240931 |
# Exploratory script: compare the snfit (SALT2 reference implementation)
# spectrum and model covariance against sncosmo on the same inputs.
import numpy as np
import sncosmo
import matplotlib.pyplot as plt
# read in spectrum from snfit, created with phase=0
wave, flux1 = np.loadtxt("src/snfit-2.4.2/src/testspec.dat", unpack=True)
z = 0.05
# adjust flux by a^2 for some reason. Why? What is the definition of
# "rest frame flux" in snfit?
# NOTE(review): the 1/(1+z)^2 factor suggests snfit reports a rest-frame
# normalisation -- confirm against the snfit documentation before relying
# on this comparison.
flux1 *= 1. / (1. + z)**2
# evalute sncosmo model with same spec
model = sncosmo.Model('salt2', effects=[sncosmo.CCM89Dust()],
                      effect_names=['mw'], effect_frames=['obs'])
model.set(z=z)
flux2 = model.flux(0., wave)  # sncosmo observer-frame flux at phase 0
#plt.plot(wave, flux1)
#plt.plot(wave, flux2)
#plt.show()
#plt.clf()
#plt.plot(wave, flux2/flux1)
# overplot grid
# Native SALT2 wavelength grid shifted to the observer frame, for checking
# interpolation behaviour between grid points.
wgrid = model.source._wave * (1. + z)
#plt.plot(wgrid, np.ones_like(wgrid), ls='none', marker ='o')
#plt.show()
# compare covariance with parameters from fitting SDSS19230 with snfit:
# last evaluation
model.set(t0=54390.4478055,
          z=0.221,
          c=0.0774572207493,
          x0=5.54909505246e-05,
          x1=-1.69392055944,
          mwebv=0.0493)
gcov = """
7.330114144e-06 1.518440648e-10 2.120843104e-10 2.243632586e-10 2.044266221e-10 8.224784911e-11 5.651721412e-11 2.714715353e-11 1.911928001e-11 1.478561758e-11 1.401421858e-11 1.276311422e-11 1.188595609e-11 1.062405818e-11 1.033562406e-11
1.518440648e-10 8.879247039e-08 1.974546477e-08 2.088865891e-08 1.903251899e-08 7.657435879e-09 5.261863353e-09 2.527453176e-09 1.780042424e-09 1.376569962e-09 1.304751204e-09 1.188270937e-09 1.106605797e-09 9.891206292e-10 9.622668481e-10
2.120843104e-10 1.974546477e-08 8.118133605e-08 2.917569959e-08 2.658318368e-08 1.069532754e-08 7.349372936e-09 3.530155521e-09 2.486228687e-09 1.922688855e-09 1.822377843e-09 1.659687012e-09 1.545623319e-09 1.381528918e-09 1.344021586e-09
2.243632586e-10 2.088865891e-08 2.917569959e-08 9.34913321e-08 2.81222581e-08 1.131455002e-08 7.774876218e-09 3.73453932e-09 2.630172732e-09 2.034005892e-09 1.927887219e-09 1.755777151e-09 1.635109565e-09 1.461514665e-09 1.421835789e-09
2.044266221e-10 1.903251899e-08 2.658318368e-08 2.81222581e-08 1.039774164e-07 1.030915336e-08 7.084010511e-09 3.40269286e-09 2.396458896e-09 1.85326669e-09 1.756577589e-09 1.599761005e-09 1.489815788e-09 1.331646313e-09 1.295493252e-09
8.224784911e-11 7.657435879e-09 1.069532754e-08 1.131455002e-08 1.030915336e-08 1.053816796e-07 2.85014066e-09 1.36902017e-09 9.641776971e-10 7.456328221e-10 7.067314766e-10 6.436387806e-10 5.994040449e-10 5.357670342e-10 5.212214164e-10
5.651721412e-11 5.261863353e-09 7.349372936e-09 7.774876218e-09 7.084010511e-09 2.85014066e-09 5.759544871e-08 9.407322736e-10 6.625417922e-10 5.123670748e-10 4.856357293e-10 4.422811195e-10 4.118848957e-10 3.68156256e-10 3.581611278e-10
2.714715353e-11 2.527453176e-09 3.530155521e-09 3.73453932e-09 3.40269286e-09 1.36902017e-09 9.407322736e-10 5.762709334e-08 3.182415134e-10 2.461074535e-10 2.332674727e-10 2.124427688e-10 1.978424215e-10 1.768380583e-10 1.720370559e-10
1.911928001e-11 1.780042424e-09 2.486228687e-09 2.630172732e-09 2.396458896e-09 9.641776971e-10 6.625417922e-10 3.182415134e-10 3.894180305e-08 1.733293072e-10 1.642863265e-10 1.496198406e-10 1.393370635e-10 1.245440465e-10 1.211627819e-10
1.478561758e-11 1.376569962e-09 1.922688855e-09 2.034005892e-09 1.85326669e-09 7.456328221e-10 5.123670748e-10 2.461074535e-10 1.733293072e-10 6.788685517e-08 1.270484451e-10 1.157063312e-10 1.077542949e-10 9.631432999e-11 9.369947812e-11
1.401421858e-11 1.304751204e-09 1.822377843e-09 1.927887219e-09 1.756577589e-09 7.067314766e-10 4.856357293e-10 2.332674727e-10 1.642863265e-10 1.270484451e-10 1.748910407e-07 1.09669671e-10 1.021325104e-10 9.128939425e-11 8.881096509e-11
1.276311422e-11 1.188270937e-09 1.659687012e-09 1.755777151e-09 1.599761005e-09 6.436387806e-10 4.422811195e-10 2.124427688e-10 1.496198406e-10 1.157063312e-10 1.09669671e-10 3.212299467e-07 9.301473983e-11 8.31396313e-11 8.088246126e-11
1.188595609e-11 1.106605797e-09 1.545623319e-09 1.635109565e-09 1.489815788e-09 5.994040449e-10 4.118848957e-10 1.978424215e-10 1.393370635e-10 1.077542949e-10 1.021325104e-10 9.301473983e-11 4.373284014e-07 7.742577482e-11 7.532373111e-11
1.062405818e-11 9.891206292e-10 1.381528918e-09 1.461514665e-09 1.331646313e-09 5.357670342e-10 3.68156256e-10 1.768380583e-10 1.245440465e-10 9.631432999e-11 9.128939425e-11 8.31396313e-11 7.742577482e-11 5.814610946e-07 6.732682631e-11
1.033562406e-11 9.622668481e-10 1.344021586e-09 1.421835789e-09 1.295493252e-09 5.212214164e-10 3.581611278e-10 1.720370559e-10 1.211627819e-10 9.369947812e-11 8.881096509e-11 8.088246126e-11 7.532373111e-11 6.732682631e-11 5.986208889e-08
"""
rcov = """
1.607334919e-07 8.969114806e-11 1.305736366e-10 1.433570483e-10 1.484259483e-10 1.046903485e-10 8.881586833e-11 5.818582734e-11 4.403860391e-11 3.32764275e-11 3.070967718e-11 2.627847415e-11 1.939496342e-11 1.847476418e-11
8.969114806e-11 8.630951983e-08 2.165231408e-09 2.377211752e-09 2.461266557e-09 1.736022956e-09 1.472785109e-09 9.648638434e-10 7.302681523e-10 5.518048501e-10 5.092418293e-10 4.35761606e-10 3.216161015e-10 3.063569392e-10
1.305736366e-10 2.165231408e-09 6.612165215e-08 3.46077835e-09 3.58314652e-09 2.52732667e-09 2.144101305e-09 1.404662373e-09 1.063134662e-09 8.033252726e-10 7.413614275e-10 6.343878834e-10 4.682132503e-10 4.459987469e-10
1.433570483e-10 2.377211752e-09 3.46077835e-09 8.807302929e-08 3.933943497e-09 2.774756841e-09 2.354012972e-09 1.542181537e-09 1.167217602e-09 8.819723711e-10 8.139421457e-10 6.964956847e-10 5.140522334e-10 4.896628872e-10
1.484259483e-10 2.461266557e-09 3.58314652e-09 3.933943497e-09 7.605941509e-08 2.872868272e-09 2.437247501e-09 1.596710869e-09 1.208488746e-09 9.13157652e-10 8.427219751e-10 7.211227753e-10 5.322283848e-10 5.069766662e-10
1.046903485e-10 1.736022956e-09 2.52732667e-09 2.774756841e-09 2.872868272e-09 9.044946539e-08 1.719081421e-09 1.126219635e-09 8.523921145e-10 6.440840963e-10 5.944031905e-10 5.086347468e-10 3.75400499e-10 3.575895215e-10
8.881586833e-11 1.472785109e-09 2.144101305e-09 2.354012972e-09 2.437247501e-09 1.719081421e-09 7.140554821e-08 9.554479113e-10 7.231415973e-10 5.464198862e-10 5.042722302e-10 4.315090871e-10 3.18477508e-10 3.033672571e-10
5.818582734e-11 9.648638434e-10 1.404662373e-09 1.542181537e-09 1.596710869e-09 1.126219635e-09 9.554479113e-10 8.078522566e-08 4.737508388e-10 3.57975368e-10 3.303632276e-10 2.826940018e-10 2.086437665e-10 1.98744607e-10
4.403860391e-11 7.302681523e-10 1.063134662e-09 1.167217602e-09 1.208488746e-09 8.523921145e-10 7.231415973e-10 4.737508388e-10 5.965892201e-08 2.709377208e-10 2.500391588e-10 2.139601642e-10 1.57914403e-10 1.504221118e-10
3.32764275e-11 5.518048501e-10 8.033252726e-10 8.819723711e-10 9.13157652e-10 6.440840963e-10 5.464198862e-10 3.57975368e-10 2.709377208e-10 7.769247409e-08 1.889344621e-10 1.616724705e-10 1.19323201e-10 1.136618797e-10
3.070967718e-11 5.092418293e-10 7.413614275e-10 8.139421457e-10 8.427219751e-10 5.944031905e-10 5.042722302e-10 3.303632276e-10 2.500391588e-10 1.889344621e-10 1.281776665e-07 1.492019953e-10 1.101193024e-10 1.048946625e-10
2.627847415e-11 4.35761606e-10 6.343878834e-10 6.964956847e-10 7.211227753e-10 5.086347468e-10 4.315090871e-10 2.826940018e-10 2.139601642e-10 1.616724705e-10 1.492019953e-10 1.706724195e-07 9.422981635e-11 8.975905741e-11
1.939496342e-11 3.216161015e-10 4.682132503e-10 5.140522334e-10 5.322283848e-10 3.75400499e-10 3.18477508e-10 2.086437665e-10 1.57914403e-10 1.19323201e-10 1.101193024e-10 9.422981635e-11 2.66806697e-07 6.624713541e-11
1.847476418e-11 3.063569392e-10 4.459987469e-10 4.896628872e-10 5.069766662e-10 3.575895215e-10 3.033672571e-10 1.98744607e-10 1.504221118e-10 1.136618797e-10 1.048946625e-10 8.975905741e-11 6.624713541e-11 1.081358582e-07
"""
icov = """
1.925517986e-07 8.641930131e-11 1.172465457e-10 1.258299804e-10 1.322289373e-10 8.310499466e-11 7.492342052e-11 6.511431925e-11 5.942521721e-11 5.036753513e-11 4.738887799e-11 4.137676206e-11 2.357618816e-11 2.134573105e-11
8.641930131e-11 1.407436288e-07 1.711749934e-09 1.837064447e-09 1.930486509e-09 1.213297742e-09 1.093850223e-09 9.506414966e-10 8.675830151e-10 7.353446911e-10 6.918575577e-10 6.040832103e-10 3.442023667e-10 3.116386371e-10
1.172465457e-10 1.711749934e-09 1.031585337e-07 2.492376789e-09 2.6191241e-09 1.64610182e-09 1.484045326e-09 1.289751595e-09 1.177064731e-09 9.976547327e-10 9.386549943e-10 8.195700343e-10 4.669852442e-10 4.228054748e-10
1.258299804e-10 1.837064447e-09 2.492376789e-09 1.378718758e-07 2.810866044e-09 1.766610338e-09 1.592689943e-09 1.384172275e-09 1.263235783e-09 1.070691462e-09 1.007372446e-09 8.795694639e-10 5.011724974e-10 4.537583967e-10
1.322289373e-10 1.930486509e-09 2.6191241e-09 2.810866044e-09 9.954532511e-08 1.856449527e-09 1.673684586e-09 1.454562962e-09 1.327476367e-09 1.125140398e-09 1.058601357e-09 9.24299083e-10 5.266591199e-10 4.768338229e-10
8.310499466e-11 1.213297742e-09 1.64610182e-09 1.766610338e-09 1.856449527e-09 1.213974599e-07 1.051899466e-09 9.141830042e-10 8.34309937e-10 7.071431462e-10 6.65323808e-10 5.809157353e-10 3.310017023e-10 2.996868394e-10
7.492342052e-11 1.093850223e-09 1.484045326e-09 1.592689943e-09 1.673684586e-09 1.051899466e-09 1.319813423e-07 8.241829259e-10 7.521732539e-10 6.375258615e-10 5.998235805e-10 5.237253681e-10 2.984150331e-10 2.701830761e-10
6.511431925e-11 9.506414966e-10 1.289751595e-09 1.384172275e-09 1.454562962e-09 9.141830042e-10 8.241829259e-10 1.261848385e-07 6.53697456e-10 5.540598946e-10 5.212936601e-10 4.551583548e-10 2.593460309e-10 2.348102496e-10
5.942521721e-11 8.675830151e-10 1.177064731e-09 1.263235783e-09 1.327476367e-09 8.34309937e-10 7.521732539e-10 6.53697456e-10 1.056038015e-07 5.056511373e-10 4.757477209e-10 4.153907222e-10 2.366867134e-10 2.142946474e-10
5.036753513e-11 7.353446911e-10 9.976547327e-10 1.070691462e-09 1.125140398e-09 7.071431462e-10 6.375258615e-10 5.540598946e-10 5.056511373e-10 1.191942316e-07 4.03233529e-10 3.520762359e-10 2.006105642e-10 1.81631531e-10
4.738887799e-11 6.918575577e-10 9.386549943e-10 1.007372446e-09 1.058601357e-09 6.65323808e-10 5.998235805e-10 5.212936601e-10 4.757477209e-10 4.03233529e-10 1.481719398e-07 3.312549987e-10 1.887467697e-10 1.708901267e-10
4.137676206e-11 6.040832103e-10 8.195700343e-10 8.795694639e-10 9.24299083e-10 5.809157353e-10 5.237253681e-10 4.551583548e-10 4.153907222e-10 3.520762359e-10 3.312549987e-10 1.965560442e-07 1.648009092e-10 1.492096967e-10
2.357618816e-11 3.442023667e-10 4.669852442e-10 5.011724974e-10 5.266591199e-10 3.310017023e-10 2.984150331e-10 2.593460309e-10 2.366867134e-10 2.006105642e-10 1.887467697e-10 1.648009092e-10 2.936453269e-07 8.501863628e-11
2.134573105e-11 3.116386371e-10 4.228054748e-10 4.537583967e-10 4.768338229e-10 2.996868394e-10 2.701830761e-10 2.348102496e-10 2.142946474e-10 1.81631531e-10 1.708901267e-10 1.492096967e-10 8.501863628e-11 1.529401363e-07
"""
# sdssg band shifted to rest frame
print(model.source._colordisp(4717.599777 / 1.221))
# total chisq:
model.set(z=0.221,
          t0=54390.4105525,
          x0=5.52396533233e-05,
          x1=-1.62106970624,
          c=0.079535505334)
# Observation epochs (MJD) of the SDSS19230 sdssg light curve.
times = np.array([54346.219, 54356.262, 54358.207, 54359.172, 54365.238,
                  54373.313, 54382.246, 54386.25, 54388.254, 54393.238,
                  54403.168, 54406.16, 54412.16, 54416.156, 54420.184,
                  54421.164, 54423.156, 54425.156, 54431.164, 54433.1])
# Relative model covariance of the band fluxes at those epochs (private
# sncosmo API).  NOTE(review): subtracting the single off-diagonal element
# [0, 1] from the diagonal looks ad hoc -- presumably mimicking how snfit
# prints relative model errors; confirm against the snfit output format.
model_rcov = model._bandflux_rcov('sdssg', times)
relerr2 = np.diag(model_rcov) - model_rcov[0, 1]
for i, t in enumerate(times):
    print(i, t, relerr2[i])
data = sncosmo.read_lc('jla_light_curves/lc-SDSS19230.list',
                       format='salt2', read_covmat=True)
# print(data)
data = sncosmo.photdata.photometric_data(data)
# Same phase / rest-frame wavelength cuts that the fit below uses, applied
# via a private sncosmo helper to inspect the selected points.
mask = sncosmo.fitting._data_mask(data, model.get('t0'), model.get('z'),
                                  (-15., 45.), (3000., 7000.))
# print(len(data[mask]))
# print(sncosmo.chisq(data[mask], model, modelcov=True))
# print(data[mask])
# Refit with snfit's converged parameters as the starting point, including
# the model covariance in the chi^2 (modelcov=True), to compare results.
result, fitted_model = sncosmo.fit_lc(data, model, ['t0', 'x0', 'x1', 'c'],
                                      guess_amplitude=False, guess_t0=False,
                                      phase_range=(-15., 45.), wave_range=(3000., 7000.), modelcov=True)
print(result)
print(result) | compare_model_spec.py |
import numpy as np
import sncosmo
import matplotlib.pyplot as plt
# read in spectrum from snfit, created with phase=0
wave, flux1 = np.loadtxt("src/snfit-2.4.2/src/testspec.dat", unpack=True)
z = 0.05
# adjust flux by a^2 for some reason. Why? What is the definition of
# "rest frame flux" in snfit?
flux1 *= 1. / (1. + z)**2
# evalute sncosmo model with same spec
model = sncosmo.Model('salt2', effects=[sncosmo.CCM89Dust()],
effect_names=['mw'], effect_frames=['obs'])
model.set(z=z)
flux2 = model.flux(0., wave)
#plt.plot(wave, flux1)
#plt.plot(wave, flux2)
#plt.show()
#plt.clf()
#plt.plot(wave, flux2/flux1)
# overplot grid
wgrid = model.source._wave * (1. + z)
#plt.plot(wgrid, np.ones_like(wgrid), ls='none', marker ='o')
#plt.show()
# compare covariance with parameters from fitting SDSS19230 with snfit:
# last evaluation
model.set(t0=54390.4478055,
z=0.221,
c=0.0774572207493,
x0=5.54909505246e-05,
x1=-1.69392055944,
mwebv=0.0493)
# model covariance from salt2
gcov = """
7.330114144e-06 1.518440648e-10 2.120843104e-10 2.243632586e-10 2.044266221e-10 8.224784911e-11 5.651721412e-11 2.714715353e-11 1.911928001e-11 1.478561758e-11 1.401421858e-11 1.276311422e-11 1.188595609e-11 1.062405818e-11 1.033562406e-11
1.518440648e-10 8.879247039e-08 1.974546477e-08 2.088865891e-08 1.903251899e-08 7.657435879e-09 5.261863353e-09 2.527453176e-09 1.780042424e-09 1.376569962e-09 1.304751204e-09 1.188270937e-09 1.106605797e-09 9.891206292e-10 9.622668481e-10
2.120843104e-10 1.974546477e-08 8.118133605e-08 2.917569959e-08 2.658318368e-08 1.069532754e-08 7.349372936e-09 3.530155521e-09 2.486228687e-09 1.922688855e-09 1.822377843e-09 1.659687012e-09 1.545623319e-09 1.381528918e-09 1.344021586e-09
2.243632586e-10 2.088865891e-08 2.917569959e-08 9.34913321e-08 2.81222581e-08 1.131455002e-08 7.774876218e-09 3.73453932e-09 2.630172732e-09 2.034005892e-09 1.927887219e-09 1.755777151e-09 1.635109565e-09 1.461514665e-09 1.421835789e-09
2.044266221e-10 1.903251899e-08 2.658318368e-08 2.81222581e-08 1.039774164e-07 1.030915336e-08 7.084010511e-09 3.40269286e-09 2.396458896e-09 1.85326669e-09 1.756577589e-09 1.599761005e-09 1.489815788e-09 1.331646313e-09 1.295493252e-09
8.224784911e-11 7.657435879e-09 1.069532754e-08 1.131455002e-08 1.030915336e-08 1.053816796e-07 2.85014066e-09 1.36902017e-09 9.641776971e-10 7.456328221e-10 7.067314766e-10 6.436387806e-10 5.994040449e-10 5.357670342e-10 5.212214164e-10
5.651721412e-11 5.261863353e-09 7.349372936e-09 7.774876218e-09 7.084010511e-09 2.85014066e-09 5.759544871e-08 9.407322736e-10 6.625417922e-10 5.123670748e-10 4.856357293e-10 4.422811195e-10 4.118848957e-10 3.68156256e-10 3.581611278e-10
2.714715353e-11 2.527453176e-09 3.530155521e-09 3.73453932e-09 3.40269286e-09 1.36902017e-09 9.407322736e-10 5.762709334e-08 3.182415134e-10 2.461074535e-10 2.332674727e-10 2.124427688e-10 1.978424215e-10 1.768380583e-10 1.720370559e-10
1.911928001e-11 1.780042424e-09 2.486228687e-09 2.630172732e-09 2.396458896e-09 9.641776971e-10 6.625417922e-10 3.182415134e-10 3.894180305e-08 1.733293072e-10 1.642863265e-10 1.496198406e-10 1.393370635e-10 1.245440465e-10 1.211627819e-10
1.478561758e-11 1.376569962e-09 1.922688855e-09 2.034005892e-09 1.85326669e-09 7.456328221e-10 5.123670748e-10 2.461074535e-10 1.733293072e-10 6.788685517e-08 1.270484451e-10 1.157063312e-10 1.077542949e-10 9.631432999e-11 9.369947812e-11
1.401421858e-11 1.304751204e-09 1.822377843e-09 1.927887219e-09 1.756577589e-09 7.067314766e-10 4.856357293e-10 2.332674727e-10 1.642863265e-10 1.270484451e-10 1.748910407e-07 1.09669671e-10 1.021325104e-10 9.128939425e-11 8.881096509e-11
1.276311422e-11 1.188270937e-09 1.659687012e-09 1.755777151e-09 1.599761005e-09 6.436387806e-10 4.422811195e-10 2.124427688e-10 1.496198406e-10 1.157063312e-10 1.09669671e-10 3.212299467e-07 9.301473983e-11 8.31396313e-11 8.088246126e-11
1.188595609e-11 1.106605797e-09 1.545623319e-09 1.635109565e-09 1.489815788e-09 5.994040449e-10 4.118848957e-10 1.978424215e-10 1.393370635e-10 1.077542949e-10 1.021325104e-10 9.301473983e-11 4.373284014e-07 7.742577482e-11 7.532373111e-11
1.062405818e-11 9.891206292e-10 1.381528918e-09 1.461514665e-09 1.331646313e-09 5.357670342e-10 3.68156256e-10 1.768380583e-10 1.245440465e-10 9.631432999e-11 9.128939425e-11 8.31396313e-11 7.742577482e-11 5.814610946e-07 6.732682631e-11
1.033562406e-11 9.622668481e-10 1.344021586e-09 1.421835789e-09 1.295493252e-09 5.212214164e-10 3.581611278e-10 1.720370559e-10 1.211627819e-10 9.369947812e-11 8.881096509e-11 8.088246126e-11 7.532373111e-11 6.732682631e-11 5.986208889e-08
"""
rcov = """
1.607334919e-07 8.969114806e-11 1.305736366e-10 1.433570483e-10 1.484259483e-10 1.046903485e-10 8.881586833e-11 5.818582734e-11 4.403860391e-11 3.32764275e-11 3.070967718e-11 2.627847415e-11 1.939496342e-11 1.847476418e-11
8.969114806e-11 8.630951983e-08 2.165231408e-09 2.377211752e-09 2.461266557e-09 1.736022956e-09 1.472785109e-09 9.648638434e-10 7.302681523e-10 5.518048501e-10 5.092418293e-10 4.35761606e-10 3.216161015e-10 3.063569392e-10
1.305736366e-10 2.165231408e-09 6.612165215e-08 3.46077835e-09 3.58314652e-09 2.52732667e-09 2.144101305e-09 1.404662373e-09 1.063134662e-09 8.033252726e-10 7.413614275e-10 6.343878834e-10 4.682132503e-10 4.459987469e-10
1.433570483e-10 2.377211752e-09 3.46077835e-09 8.807302929e-08 3.933943497e-09 2.774756841e-09 2.354012972e-09 1.542181537e-09 1.167217602e-09 8.819723711e-10 8.139421457e-10 6.964956847e-10 5.140522334e-10 4.896628872e-10
1.484259483e-10 2.461266557e-09 3.58314652e-09 3.933943497e-09 7.605941509e-08 2.872868272e-09 2.437247501e-09 1.596710869e-09 1.208488746e-09 9.13157652e-10 8.427219751e-10 7.211227753e-10 5.322283848e-10 5.069766662e-10
1.046903485e-10 1.736022956e-09 2.52732667e-09 2.774756841e-09 2.872868272e-09 9.044946539e-08 1.719081421e-09 1.126219635e-09 8.523921145e-10 6.440840963e-10 5.944031905e-10 5.086347468e-10 3.75400499e-10 3.575895215e-10
8.881586833e-11 1.472785109e-09 2.144101305e-09 2.354012972e-09 2.437247501e-09 1.719081421e-09 7.140554821e-08 9.554479113e-10 7.231415973e-10 5.464198862e-10 5.042722302e-10 4.315090871e-10 3.18477508e-10 3.033672571e-10
5.818582734e-11 9.648638434e-10 1.404662373e-09 1.542181537e-09 1.596710869e-09 1.126219635e-09 9.554479113e-10 8.078522566e-08 4.737508388e-10 3.57975368e-10 3.303632276e-10 2.826940018e-10 2.086437665e-10 1.98744607e-10
4.403860391e-11 7.302681523e-10 1.063134662e-09 1.167217602e-09 1.208488746e-09 8.523921145e-10 7.231415973e-10 4.737508388e-10 5.965892201e-08 2.709377208e-10 2.500391588e-10 2.139601642e-10 1.57914403e-10 1.504221118e-10
3.32764275e-11 5.518048501e-10 8.033252726e-10 8.819723711e-10 9.13157652e-10 6.440840963e-10 5.464198862e-10 3.57975368e-10 2.709377208e-10 7.769247409e-08 1.889344621e-10 1.616724705e-10 1.19323201e-10 1.136618797e-10
3.070967718e-11 5.092418293e-10 7.413614275e-10 8.139421457e-10 8.427219751e-10 5.944031905e-10 5.042722302e-10 3.303632276e-10 2.500391588e-10 1.889344621e-10 1.281776665e-07 1.492019953e-10 1.101193024e-10 1.048946625e-10
2.627847415e-11 4.35761606e-10 6.343878834e-10 6.964956847e-10 7.211227753e-10 5.086347468e-10 4.315090871e-10 2.826940018e-10 2.139601642e-10 1.616724705e-10 1.492019953e-10 1.706724195e-07 9.422981635e-11 8.975905741e-11
1.939496342e-11 3.216161015e-10 4.682132503e-10 5.140522334e-10 5.322283848e-10 3.75400499e-10 3.18477508e-10 2.086437665e-10 1.57914403e-10 1.19323201e-10 1.101193024e-10 9.422981635e-11 2.66806697e-07 6.624713541e-11
1.847476418e-11 3.063569392e-10 4.459987469e-10 4.896628872e-10 5.069766662e-10 3.575895215e-10 3.033672571e-10 1.98744607e-10 1.504221118e-10 1.136618797e-10 1.048946625e-10 8.975905741e-11 6.624713541e-11 1.081358582e-07
"""
icov = """
1.925517986e-07 8.641930131e-11 1.172465457e-10 1.258299804e-10 1.322289373e-10 8.310499466e-11 7.492342052e-11 6.511431925e-11 5.942521721e-11 5.036753513e-11 4.738887799e-11 4.137676206e-11 2.357618816e-11 2.134573105e-11
8.641930131e-11 1.407436288e-07 1.711749934e-09 1.837064447e-09 1.930486509e-09 1.213297742e-09 1.093850223e-09 9.506414966e-10 8.675830151e-10 7.353446911e-10 6.918575577e-10 6.040832103e-10 3.442023667e-10 3.116386371e-10
1.172465457e-10 1.711749934e-09 1.031585337e-07 2.492376789e-09 2.6191241e-09 1.64610182e-09 1.484045326e-09 1.289751595e-09 1.177064731e-09 9.976547327e-10 9.386549943e-10 8.195700343e-10 4.669852442e-10 4.228054748e-10
1.258299804e-10 1.837064447e-09 2.492376789e-09 1.378718758e-07 2.810866044e-09 1.766610338e-09 1.592689943e-09 1.384172275e-09 1.263235783e-09 1.070691462e-09 1.007372446e-09 8.795694639e-10 5.011724974e-10 4.537583967e-10
1.322289373e-10 1.930486509e-09 2.6191241e-09 2.810866044e-09 9.954532511e-08 1.856449527e-09 1.673684586e-09 1.454562962e-09 1.327476367e-09 1.125140398e-09 1.058601357e-09 9.24299083e-10 5.266591199e-10 4.768338229e-10
8.310499466e-11 1.213297742e-09 1.64610182e-09 1.766610338e-09 1.856449527e-09 1.213974599e-07 1.051899466e-09 9.141830042e-10 8.34309937e-10 7.071431462e-10 6.65323808e-10 5.809157353e-10 3.310017023e-10 2.996868394e-10
7.492342052e-11 1.093850223e-09 1.484045326e-09 1.592689943e-09 1.673684586e-09 1.051899466e-09 1.319813423e-07 8.241829259e-10 7.521732539e-10 6.375258615e-10 5.998235805e-10 5.237253681e-10 2.984150331e-10 2.701830761e-10
6.511431925e-11 9.506414966e-10 1.289751595e-09 1.384172275e-09 1.454562962e-09 9.141830042e-10 8.241829259e-10 1.261848385e-07 6.53697456e-10 5.540598946e-10 5.212936601e-10 4.551583548e-10 2.593460309e-10 2.348102496e-10
5.942521721e-11 8.675830151e-10 1.177064731e-09 1.263235783e-09 1.327476367e-09 8.34309937e-10 7.521732539e-10 6.53697456e-10 1.056038015e-07 5.056511373e-10 4.757477209e-10 4.153907222e-10 2.366867134e-10 2.142946474e-10
5.036753513e-11 7.353446911e-10 9.976547327e-10 1.070691462e-09 1.125140398e-09 7.071431462e-10 6.375258615e-10 5.540598946e-10 5.056511373e-10 1.191942316e-07 4.03233529e-10 3.520762359e-10 2.006105642e-10 1.81631531e-10
4.738887799e-11 6.918575577e-10 9.386549943e-10 1.007372446e-09 1.058601357e-09 6.65323808e-10 5.998235805e-10 5.212936601e-10 4.757477209e-10 4.03233529e-10 1.481719398e-07 3.312549987e-10 1.887467697e-10 1.708901267e-10
4.137676206e-11 6.040832103e-10 8.195700343e-10 8.795694639e-10 9.24299083e-10 5.809157353e-10 5.237253681e-10 4.551583548e-10 4.153907222e-10 3.520762359e-10 3.312549987e-10 1.965560442e-07 1.648009092e-10 1.492096967e-10
2.357618816e-11 3.442023667e-10 4.669852442e-10 5.011724974e-10 5.266591199e-10 3.310017023e-10 2.984150331e-10 2.593460309e-10 2.366867134e-10 2.006105642e-10 1.887467697e-10 1.648009092e-10 2.936453269e-07 8.501863628e-11
2.134573105e-11 3.116386371e-10 4.228054748e-10 4.537583967e-10 4.768338229e-10 2.996868394e-10 2.701830761e-10 2.348102496e-10 2.142946474e-10 1.81631531e-10 1.708901267e-10 1.492096967e-10 8.501863628e-11 1.529401363e-07
"""
# sdssg band shifted to rest frame
print(model.source._colordisp(4717.599777 / 1.221))
# total chisq:
model.set(z=0.221,
t0=54390.4105525,
x0=5.52396533233e-05,
x1=-1.62106970624,
c=0.079535505334)
times = np.array([54346.219, 54356.262, 54358.207, 54359.172, 54365.238,
54373.313, 54382.246, 54386.25, 54388.254, 54393.238,
54403.168, 54406.16, 54412.16, 54416.156, 54420.184,
54421.164, 54423.156, 54425.156, 54431.164, 54433.1])
model_rcov = model._bandflux_rcov('sdssg', times)
relerr2 = np.diag(model_rcov) - model_rcov[0, 1]
for i, t in enumerate(times):
print(i, t, relerr2[i])
data = sncosmo.read_lc('jla_light_curves/lc-SDSS19230.list',
format='salt2', read_covmat=True)
# print(data)
data = sncosmo.photdata.photometric_data(data)
mask = sncosmo.fitting._data_mask(data, model.get('t0'), model.get('z'),
(-15., 45.), (3000., 7000.))
# print(len(data[mask]))
# print(sncosmo.chisq(data[mask], model, modelcov=True))
# print(data[mask])
result, fitted_model = sncosmo.fit_lc(data, model, ['t0', 'x0', 'x1', 'c'],
guess_amplitude=False, guess_t0=False,
phase_range=(-15., 45.), wave_range=(3000., 7000.), modelcov=True)
print(result) | 0.547464 | 0.485539 |
import django.test
from snapshotServer.models import Snapshot, Application, Version, TestStep, \
TestSession, TestEnvironment, TestCase, TestCaseInSession, StepResult
import datetime
import pytz
from django.db.utils import IntegrityError
from snapshotServer.tests import SnapshotTestCase
class TestSnapshots(SnapshotTestCase):
def setUp(self):
super().setUp()
self.app = Application(name="test")
self.app.save()
self.v1 = Version(application=self.app, name='1.0')
self.v1.save()
self.v2 = Version(application=self.app, name='2.0')
self.v2.save()
env = TestEnvironment(name='DEV')
env.save()
self.session1 = TestSession(sessionId="1234", date=datetime.datetime(2017, 5, 7, tzinfo=pytz.UTC), browser="firefox", environment=env, version=self.v1, ttl=datetime.timedelta(0))
self.session1.save()
self.session_same_env = TestSession(sessionId="1235", date=datetime.datetime(2017, 5, 7, tzinfo=pytz.UTC), browser="firefox", environment=env, version=self.v1, ttl=datetime.timedelta(0))
self.session_same_env.save()
self.session3 = TestSession(sessionId="1236", date=datetime.datetime(2017, 5, 7, tzinfo=pytz.UTC), browser="firefox", environment=env, version=self.v2, ttl=datetime.timedelta(0))
self.session3.save()
self.session4 = TestSession(sessionId="1237", date=datetime.datetime(2017, 5, 7, tzinfo=pytz.UTC), browser="firefox", environment=env, version=self.v2, ttl=datetime.timedelta(0))
self.session4.save()
self.step = TestStep(name="step1")
self.step.save()
self.tc1 = TestCase(name="case1", application=self.app)
self.tc1.save()
self.tcs1 = TestCaseInSession(testCase=self.tc1, session=self.session1)
self.tcs1.save()
self.tcs_same_env = TestCaseInSession(testCase=self.tc1, session=self.session_same_env)
self.tcs_same_env.save()
self.tcs3 = TestCaseInSession(testCase=self.tc1, session=self.session3)
self.tcs3.save()
self.tcs4 = TestCaseInSession(testCase=self.tc1, session=self.session4)
self.tcs4.save()
self.tcs1.testSteps.set([self.step])
self.tcs1.save()
self.tcs_same_env.testSteps.set([self.step])
self.tcs_same_env.save()
self.tsr1 = StepResult(step=self.step, testCase=self.tcs1, result=True)
self.tsr1.save()
self.tsr2 = StepResult(step=self.step, testCase=self.tcs_same_env, result=True)
self.tsr2.save()
self.tsr3 = StepResult(step=self.step, testCase=self.tcs3, result=True)
self.tsr3.save()
self.tsr4 = StepResult(step=self.step, testCase=self.tcs4, result=True)
self.tsr4.save()
def test_no_next_snapshots(self):
"""
check that we do not look at ourself when searching next snapshots
"""
s1 = Snapshot(stepResult=self.tsr1, image=None, refSnapshot=None, pixelsDiff=None)
s1.save()
s2 = Snapshot(stepResult=self.tsr2, image=None, refSnapshot=s1, pixelsDiff=None)
s2.save()
self.assertEqual(s2.snapshotsUntilNextRef(s2.refSnapshot), [], "No next snapshot should be found")
def test_too_low_diff_tolerance(self):
"""
tolerance < 0 should be refused
"""
s1 = Snapshot(stepResult=self.tsr1, image=None, refSnapshot=None, pixelsDiff=None, diffTolerance=-0.1)
self.assertRaises(IntegrityError, s1.save)
def test_too_high_diff_tolerance(self):
"""
tolerance > 100 should be refused
"""
s1 = Snapshot(stepResult=self.tsr1, image=None, refSnapshot=None, pixelsDiff=None, diffTolerance=100.1)
self.assertRaises(IntegrityError, s1.save)
def test_next_snapshots_with_no_ref(self):
"""
Search for next snapshot that reference ourself
"""
s1 = Snapshot(stepResult=self.tsr1, image=None, refSnapshot=None, pixelsDiff=None)
s1.save()
s2 = Snapshot(stepResult=self.tsr2, image=None, refSnapshot=s1, pixelsDiff=None)
s2.save()
# s3 should not be found has it does not reference s1
s3 = Snapshot(stepResult=self.tsr2, image=None, refSnapshot=s2, pixelsDiff=None)
s3.save()
self.assertEqual(s1.snapshotsUntilNextRef(s1), [s2], "One snapshot should be found")
def test_next_snapshots_with_ref(self):
"""
Check that the next reference snapshot (s4) is not rendered but pictures from the next version are
"""
# snapshots on app v1
s1 = Snapshot(stepResult=self.tsr1, image=None, refSnapshot=None, pixelsDiff=None)
s1.save()
s2 = Snapshot(stepResult=self.tsr2, image=None, refSnapshot=s1, pixelsDiff=None)
s2.save()
# snapshots on app v2
s3 = Snapshot(stepResult=self.tsr3, image=None, refSnapshot=s1, pixelsDiff=None)
s3.save()
s4 = Snapshot(stepResult=self.tsr4, image=None, refSnapshot=None, pixelsDiff=None)
s4.save()
self.assertEqual(s1.snapshotsUntilNextRef(s1), [s2, s3], "2 snapshots should be found")
def test_next_snapshots_with_lower_version(self):
"""
We should not give snapshots from a lower version even if snapshot id is lower
We assume that 2 versions are being tested at the same time. Order of declared snapshots is important
"""
# snapshots on app v2
s0 = Snapshot(stepResult=self.tsr3, image=None, refSnapshot=None, pixelsDiff=None)
s0.save()
# snapshots on app v1
s1 = Snapshot(stepResult=self.tsr1, image=None, refSnapshot=None, pixelsDiff=None)
s1.save()
s2 = Snapshot(stepResult=self.tsr2, image=None, refSnapshot=s1, pixelsDiff=None)
s2.save()
# snapshots on app v2
s3 = Snapshot(stepResult=self.tsr4, image=None, refSnapshot=s0, pixelsDiff=None)
s3.save()
self.assertEqual(s1.snapshotsUntilNextRef(s1), [s2], "One snapshot should be found")
def test_snapshot_deletion_and_recomputing_on_previous_reference(self):
"""
Test that when snapshot is deleted and is a reference for other snapshot, this reference is removed and replaced by a previous reference when it exists
Here,
S1 is a reference
S2 is a reference
S3 has a reference on S2
After deletion of S2, S3 should have reference on S1
"""
s1 = Snapshot(stepResult=self.tsr1, image=None, refSnapshot=None, pixelsDiff=None)
s1.save()
s2 = Snapshot(stepResult=self.tsr2, image=None, refSnapshot=None, pixelsDiff=None)
s2.save()
s3 = Snapshot(stepResult=self.tsr2, image=None, refSnapshot=s2, pixelsDiff=None)
s3.save()
s2.delete()
self.assertEqual(Snapshot.objects.get(pk=s3.id).refSnapshot, s1)
def test_snapshot_deletion_of_non_reference_snapshot(self):
"""
Test that when snapshot is deleted and is not a reference for other snapshot, nothing happens
Here,
S1 is a reference
S2 has a reference on S1
S3 has a reference on S1
After deletion of S2, S3 should still have reference on S1
"""
s1 = Snapshot(stepResult=self.tsr1, image=None, refSnapshot=None, pixelsDiff=None)
s1.save()
s2 = Snapshot(stepResult=self.tsr2, image=None, refSnapshot=s1, pixelsDiff=None)
s2.save()
s3 = Snapshot(stepResult=self.tsr2, image=None, refSnapshot=s1, pixelsDiff=None)
s3.save()
s2.delete()
self.assertEqual(Snapshot.objects.get(pk=s3.id).refSnapshot, s1)
def test_snapshot_deletion_of_first_reference(self):
"""
Test that when first reference is deleted, other pictures have their references changed
Here,
S1 is a reference
S2 has a reference on S1
S3 has a reference on S1
After deletion of S1, S2 becomes a reference S3 should have reference on S2
"""
s1 = Snapshot(stepResult=self.tsr1, image=None, refSnapshot=None, pixelsDiff=None)
s1.save()
s2 = Snapshot(stepResult=self.tsr2, image=None, refSnapshot=s1, pixelsDiff=None)
s2.save()
s3 = Snapshot(stepResult=self.tsr2, image=None, refSnapshot=s1, pixelsDiff=None)
s3.save()
s1.delete()
self.assertEqual(Snapshot.objects.get(pk=s2.id).refSnapshot, None)
self.assertEqual(Snapshot.objects.get(pk=s3.id).refSnapshot, s2)
def test_snapshot_deletion_of_last_snapshot(self):
"""
Test that nothing happens when the deleted snapshot is the last one
Here,
S1 is a reference
S2 has a reference on S1
S3 has a reference on S1
After deletion of S1, S2 becomes a reference S3 should have reference on S2
"""
s1 = Snapshot(stepResult=self.tsr1, image=None, refSnapshot=None, pixelsDiff=None)
s1.save()
s2 = Snapshot(stepResult=self.tsr2, image=None, refSnapshot=s1, pixelsDiff=None)
s2.save()
s2.delete()
self.assertEqual(Snapshot.objects.get(pk=s1.id).refSnapshot, None) | snapshotServer/tests/model/test_Snapshots.py | import django.test
from snapshotServer.models import Snapshot, Application, Version, TestStep, \
TestSession, TestEnvironment, TestCase, TestCaseInSession, StepResult
import datetime
import pytz
from django.db.utils import IntegrityError
from snapshotServer.tests import SnapshotTestCase
class TestSnapshots(SnapshotTestCase):
def setUp(self):
super().setUp()
self.app = Application(name="test")
self.app.save()
self.v1 = Version(application=self.app, name='1.0')
self.v1.save()
self.v2 = Version(application=self.app, name='2.0')
self.v2.save()
env = TestEnvironment(name='DEV')
env.save()
self.session1 = TestSession(sessionId="1234", date=datetime.datetime(2017, 5, 7, tzinfo=pytz.UTC), browser="firefox", environment=env, version=self.v1, ttl=datetime.timedelta(0))
self.session1.save()
self.session_same_env = TestSession(sessionId="1235", date=datetime.datetime(2017, 5, 7, tzinfo=pytz.UTC), browser="firefox", environment=env, version=self.v1, ttl=datetime.timedelta(0))
self.session_same_env.save()
self.session3 = TestSession(sessionId="1236", date=datetime.datetime(2017, 5, 7, tzinfo=pytz.UTC), browser="firefox", environment=env, version=self.v2, ttl=datetime.timedelta(0))
self.session3.save()
self.session4 = TestSession(sessionId="1237", date=datetime.datetime(2017, 5, 7, tzinfo=pytz.UTC), browser="firefox", environment=env, version=self.v2, ttl=datetime.timedelta(0))
self.session4.save()
self.step = TestStep(name="step1")
self.step.save()
self.tc1 = TestCase(name="case1", application=self.app)
self.tc1.save()
self.tcs1 = TestCaseInSession(testCase=self.tc1, session=self.session1)
self.tcs1.save()
self.tcs_same_env = TestCaseInSession(testCase=self.tc1, session=self.session_same_env)
self.tcs_same_env.save()
self.tcs3 = TestCaseInSession(testCase=self.tc1, session=self.session3)
self.tcs3.save()
self.tcs4 = TestCaseInSession(testCase=self.tc1, session=self.session4)
self.tcs4.save()
self.tcs1.testSteps.set([self.step])
self.tcs1.save()
self.tcs_same_env.testSteps.set([self.step])
self.tcs_same_env.save()
self.tsr1 = StepResult(step=self.step, testCase=self.tcs1, result=True)
self.tsr1.save()
self.tsr2 = StepResult(step=self.step, testCase=self.tcs_same_env, result=True)
self.tsr2.save()
self.tsr3 = StepResult(step=self.step, testCase=self.tcs3, result=True)
self.tsr3.save()
self.tsr4 = StepResult(step=self.step, testCase=self.tcs4, result=True)
self.tsr4.save()
def test_no_next_snapshots(self):
"""
check that we do not look at ourself when searching next snapshots
"""
s1 = Snapshot(stepResult=self.tsr1, image=None, refSnapshot=None, pixelsDiff=None)
s1.save()
s2 = Snapshot(stepResult=self.tsr2, image=None, refSnapshot=s1, pixelsDiff=None)
s2.save()
self.assertEqual(s2.snapshotsUntilNextRef(s2.refSnapshot), [], "No next snapshot should be found")
def test_too_low_diff_tolerance(self):
"""
tolerance < 0 should be refused
"""
s1 = Snapshot(stepResult=self.tsr1, image=None, refSnapshot=None, pixelsDiff=None, diffTolerance=-0.1)
self.assertRaises(IntegrityError, s1.save)
def test_too_high_diff_tolerance(self):
"""
tolerance > 100 should be refused
"""
s1 = Snapshot(stepResult=self.tsr1, image=None, refSnapshot=None, pixelsDiff=None, diffTolerance=100.1)
self.assertRaises(IntegrityError, s1.save)
def test_next_snapshots_with_no_ref(self):
"""
Search for next snapshot that reference ourself
"""
s1 = Snapshot(stepResult=self.tsr1, image=None, refSnapshot=None, pixelsDiff=None)
s1.save()
s2 = Snapshot(stepResult=self.tsr2, image=None, refSnapshot=s1, pixelsDiff=None)
s2.save()
# s3 should not be found has it does not reference s1
s3 = Snapshot(stepResult=self.tsr2, image=None, refSnapshot=s2, pixelsDiff=None)
s3.save()
self.assertEqual(s1.snapshotsUntilNextRef(s1), [s2], "One snapshot should be found")
def test_next_snapshots_with_ref(self):
"""
Check that the next reference snapshot (s4) is not rendered but pictures from the next version are
"""
# snapshots on app v1
s1 = Snapshot(stepResult=self.tsr1, image=None, refSnapshot=None, pixelsDiff=None)
s1.save()
s2 = Snapshot(stepResult=self.tsr2, image=None, refSnapshot=s1, pixelsDiff=None)
s2.save()
# snapshots on app v2
s3 = Snapshot(stepResult=self.tsr3, image=None, refSnapshot=s1, pixelsDiff=None)
s3.save()
s4 = Snapshot(stepResult=self.tsr4, image=None, refSnapshot=None, pixelsDiff=None)
s4.save()
self.assertEqual(s1.snapshotsUntilNextRef(s1), [s2, s3], "2 snapshots should be found")
def test_next_snapshots_with_lower_version(self):
"""
We should not give snapshots from a lower version even if snapshot id is lower
We assume that 2 versions are being tested at the same time. Order of declared snapshots is important
"""
# snapshots on app v2
s0 = Snapshot(stepResult=self.tsr3, image=None, refSnapshot=None, pixelsDiff=None)
s0.save()
# snapshots on app v1
s1 = Snapshot(stepResult=self.tsr1, image=None, refSnapshot=None, pixelsDiff=None)
s1.save()
s2 = Snapshot(stepResult=self.tsr2, image=None, refSnapshot=s1, pixelsDiff=None)
s2.save()
# snapshots on app v2
s3 = Snapshot(stepResult=self.tsr4, image=None, refSnapshot=s0, pixelsDiff=None)
s3.save()
self.assertEqual(s1.snapshotsUntilNextRef(s1), [s2], "One snapshot should be found")
def test_snapshot_deletion_and_recomputing_on_previous_reference(self):
"""
Test that when snapshot is deleted and is a reference for other snapshot, this reference is removed and replaced by a previous reference when it exists
Here,
S1 is a reference
S2 is a reference
S3 has a reference on S2
After deletion of S2, S3 should have reference on S1
"""
s1 = Snapshot(stepResult=self.tsr1, image=None, refSnapshot=None, pixelsDiff=None)
s1.save()
s2 = Snapshot(stepResult=self.tsr2, image=None, refSnapshot=None, pixelsDiff=None)
s2.save()
s3 = Snapshot(stepResult=self.tsr2, image=None, refSnapshot=s2, pixelsDiff=None)
s3.save()
s2.delete()
self.assertEqual(Snapshot.objects.get(pk=s3.id).refSnapshot, s1)
def test_snapshot_deletion_of_non_reference_snapshot(self):
"""
Test that when snapshot is deleted and is not a reference for other snapshot, nothing happens
Here,
S1 is a reference
S2 has a reference on S1
S3 has a reference on S1
After deletion of S2, S3 should still have reference on S1
"""
s1 = Snapshot(stepResult=self.tsr1, image=None, refSnapshot=None, pixelsDiff=None)
s1.save()
s2 = Snapshot(stepResult=self.tsr2, image=None, refSnapshot=s1, pixelsDiff=None)
s2.save()
s3 = Snapshot(stepResult=self.tsr2, image=None, refSnapshot=s1, pixelsDiff=None)
s3.save()
s2.delete()
self.assertEqual(Snapshot.objects.get(pk=s3.id).refSnapshot, s1)
def test_snapshot_deletion_of_first_reference(self):
"""
Test that when first reference is deleted, other pictures have their references changed
Here,
S1 is a reference
S2 has a reference on S1
S3 has a reference on S1
After deletion of S1, S2 becomes a reference S3 should have reference on S2
"""
s1 = Snapshot(stepResult=self.tsr1, image=None, refSnapshot=None, pixelsDiff=None)
s1.save()
s2 = Snapshot(stepResult=self.tsr2, image=None, refSnapshot=s1, pixelsDiff=None)
s2.save()
s3 = Snapshot(stepResult=self.tsr2, image=None, refSnapshot=s1, pixelsDiff=None)
s3.save()
s1.delete()
self.assertEqual(Snapshot.objects.get(pk=s2.id).refSnapshot, None)
self.assertEqual(Snapshot.objects.get(pk=s3.id).refSnapshot, s2)
def test_snapshot_deletion_of_last_snapshot(self):
"""
Test that nothing happens when the deleted snapshot is the last one
Here,
S1 is a reference
S2 has a reference on S1
S3 has a reference on S1
After deletion of S1, S2 becomes a reference S3 should have reference on S2
"""
s1 = Snapshot(stepResult=self.tsr1, image=None, refSnapshot=None, pixelsDiff=None)
s1.save()
s2 = Snapshot(stepResult=self.tsr2, image=None, refSnapshot=s1, pixelsDiff=None)
s2.save()
s2.delete()
self.assertEqual(Snapshot.objects.get(pk=s1.id).refSnapshot, None) | 0.453988 | 0.224969 |
import scipy.io as sio
import numpy.linalg as LA
from utils import mse
import numpy as np
from utils_rbf import *
import warnings
warnings.filterwarnings("ignore")
result = np.array([]).reshape(0,1)
phi = "Gauss"
method = "RJSA"
for year in [2010, 2011, 2012, 2013, 2014]:
print(">>>>>")
data_name = "data{}".format(year)
data_path = "../data2017/{}.mat".format(data_name)
print("Loading data from \"{}\" ...".format(data_path))
data = sio.loadmat(data_path)[data_name]
x = data["Score"][0][0]
y = data["TargetScore1"][0][0]
print("shape of x:", x.shape)
print("shape of y:", y.shape)
id1 = np.where(y==-1)[0]
id2 = np.where(y!=-1)[0]
N = x.shape[0]
d = x.shape[1]
c = y.shape[1]
n_test = id1.shape[0]
n_val = int((N-n_test)*0.1)
n_train = N-n_val-n_test
print("DataNum for train:", n_train)
print("DataNum for test:", n_test)
print("DataNum for validation:", n_val)
x_test = x[id1]
y_test = y[id1]
x = x[id2]
y = y[id2]
min_loss = 999999
min_id = -1
for ii in range(0,N):
start = ii*n_val
if N-n_test-start < n_val:
break
x_train = np.concatenate((x[:start,:], x[start+n_val:,:]))
y_train = np.concatenate((y[:start,:], y[start+n_val:,:]))
x_val = x[start:start+n_val, :]
y_val = y[start:start+n_val, :]
k = 0
iter = 2000
Mu = np.array([])
valLoss = np.array([])
trainLoss = np.array([])
s = 1
k_max = 100
b_ = lambda k, k_max: 0 if k == k_max else 1 if k == 0 else 0.2
d_ = lambda k: 0 if k == 0 else 0.2
s_ = lambda k, k_max: 0 if k == 0 or k == k_max else 0.2
m_ = lambda k: 0 if k == 0 or k ==1 else 0.2
for i in range(iter):
k = Mu.shape[0]
Mu_old = Mu
alpha = Alpha(x_train, Mu, y_train, phi)
tao = Tao(x_train, Mu, y_train, phi)
loss_val = Loss(x_val, Mu, y_val, alpha, tao, phi)
valLoss = np.append(valLoss, loss_val)
loss_train = Loss(x_train, Mu, y_train, alpha, tao, phi)
trainLoss = np.append(trainLoss, loss_train)
if k >= k_max:
iter = i + 1
break
if valLoss.shape[0] >= 100 and abs(valLoss[-1] - valLoss[-5]) < 1e-5 and abs(trainLoss[-1] - trainLoss[-5]) < 1e-5:
iter = i + 1
break
[bi, di, si, mi] = [b_(k, k_max), d_(k), s_(k, k_max), m_(k)]
u = np.random.rand()
if u <= bi:
u_ = np.random.rand()
mu = Generate2(x_train)
if u_ <= A(Birth(x_train, Mu, y_train, mu, phi)):
if k == 0:
Mu = Mu.reshape(0)
mu = mu.reshape(d)
Mu = np.concatenate((Mu, mu)).reshape(1, d)
else:
Mu = np.concatenate((Mu, mu))
k = k + 1
else:
pass
elif u <= bi + di:
u_ = np.random.rand()
j = np.random.randint(0, k)
if u_ <= A(Death(x_train, Mu, y_train, j, phi)):
k = k - 1
Mu = np.concatenate((Mu[:j, :], Mu[j + 1:, :]))
else:
pass
elif u <= bi + di + si:
u_ = np.random.rand()
j = np.random.randint(0, k)
mu = Mu[j, :]
u_1 = np.random.rand(1, d)
mu1 = mu - u_1 * s
mu2 = mu + u_1 * s
if u_ <= A(Split(x_train, Mu, y_train, s, j, mu1, mu2, phi)):
k = k + 1
Mu = np.concatenate((Mu[:j, :], Mu[j + 1:, :], mu1, mu2))
else:
pass
elif u <= bi + di + si + mi:
u_ = np.random.rand()
j1 = np.random.randint(0, k)
mu1 = Mu[j1, :]
j2 = FindClosest(Mu, j1)
mu2 = Mu[j2, :]
[j1, j2] = [j2, j1] if j1 > j2 else [j1, j2]
if la.norm(j1 - j2) > 2 * s:
i -= 1
continue
mu = ((mu1 + mu2) * 0.5).reshape(1, d)
if u_ <= A(Merge(x_train, Mu, y_train, s, j1, j2, mu, phi)):
k = k - 1
Mu = np.concatenate((Mu[:j1, :], Mu[j1 + 1: j2, :], Mu[j2 + 1:, :], mu))
else:
pass
else:
if k == 1:
pass
else:
Mu = Update(x_train, Mu, y_train, phi)
if method == "RJSA":
# perform a MH step with the annealed acceptance ratio
Mu = SA3(x_train, y_train, i, Mu, Mu_old, phi)
elif method == "AIC":
alpha = Alpha(x_train, Mu, y_train, phi)
tao = Tao(x_train, Mu, y_train, phi)
alpha_old = Alpha(x_train, Mu_old, y_train, phi)
tao_old = Tao(x_train, Mu_old, y_train, phi)
Mu = AIC(x_train, y_train, Mu, Mu_old, alpha, alpha_old, tao, tao_old, phi)
elif method == "BIC":
alpha = Alpha(x_train, Mu, y_train, phi)
tao = Tao(x_train, Mu, y_train, phi)
alpha_old = Alpha(x_train, Mu_old, y_train, phi)
tao_old = Tao(x_train, Mu_old, y_train, phi)
Mu = BIC(x_train, y_train, Mu, Mu_old, alpha, alpha_old, tao, tao_old, phi)
mean_loss = loss_train*0.25+loss_val*0.75
if min_loss > mean_loss:
min_id = ii+1
min_loss = (loss_train+loss_val)/2
print("#%d \t valLoss=%.3f \t trainLoss=%.3f \t meanLoss=%.3f" % (ii+1, loss_val, loss_train, mean_loss))
# print("[use #{}] total loss={}".format(min_id, loss))
#
# if result.shape[0] > 0:
# result = np.concatenate((result, y_pred), axis=0)
# else:
# result = y_pred
#
#sio.savemat("../results/a.mat", {'a':result}) | HW2/scripts/RBF.py |
# RBF-network regression whose number of basis centres is chosen by a
# reversible-jump MCMC (birth/death/split/merge moves), evaluated per year
# with a rolling train/validation split over the labelled rows.
import scipy.io as sio
import numpy.linalg as LA
from utils import mse
import numpy as np
from utils_rbf import *
import warnings
warnings.filterwarnings("ignore")
result = np.array([]).reshape(0,1)  # prediction accumulator (only used by the commented-out code below)
phi = "Gauss"    # basis-function family passed to the utils_rbf helpers
method = "RJSA"  # model-selection strategy: "RJSA", "AIC" or "BIC"
for year in [2010, 2011, 2012, 2013, 2014]:
    print(">>>>>")
    data_name = "data{}".format(year)
    data_path = "../data2017/{}.mat".format(data_name)
    print("Loading data from \"{}\" ...".format(data_path))
    data = sio.loadmat(data_path)[data_name]
    x = data["Score"][0][0]
    y = data["TargetScore1"][0][0]
    print("shape of x:", x.shape)
    print("shape of y:", y.shape)
    # Rows whose target is -1 are unlabelled and become the test set.
    id1 = np.where(y==-1)[0]
    id2 = np.where(y!=-1)[0]
    N = x.shape[0]
    d = x.shape[1]   # feature dimension
    c = y.shape[1]   # target dimension (unused below)
    n_test = id1.shape[0]
    n_val = int((N-n_test)*0.1)  # 10% of the labelled rows for validation
    n_train = N-n_val-n_test
    print("DataNum for train:", n_train)
    print("DataNum for test:", n_test)
    print("DataNum for validation:", n_val)
    x_test = x[id1]
    y_test = y[id1]
    x = x[id2]
    y = y[id2]
    min_loss = 999999
    min_id = -1
    # Rolling cross-validation: fold ii validates on rows [ii*n_val, (ii+1)*n_val).
    for ii in range(0,N):
        start = ii*n_val
        if N-n_test-start < n_val:
            break  # not enough labelled rows left for a full validation fold
        x_train = np.concatenate((x[:start,:], x[start+n_val:,:]))
        y_train = np.concatenate((y[:start,:], y[start+n_val:,:]))
        x_val = x[start:start+n_val, :]
        y_val = y[start:start+n_val, :]
        k = 0                 # current number of RBF centres
        iter = 2000           # NOTE(review): shadows the builtin `iter`
        Mu = np.array([])     # k x d matrix of centres
        valLoss = np.array([])
        trainLoss = np.array([])
        s = 1                 # split/merge length scale
        k_max = 100           # hard cap on the number of centres
        # Move probabilities as functions of k: birth, death, split, merge.
        b_ = lambda k, k_max: 0 if k == k_max else 1 if k == 0 else 0.2
        d_ = lambda k: 0 if k == 0 else 0.2
        s_ = lambda k, k_max: 0 if k == 0 or k == k_max else 0.2
        m_ = lambda k: 0 if k == 0 or k ==1 else 0.2
        for i in range(iter):
            k = Mu.shape[0]
            Mu_old = Mu
            # Fit the linear weights for the current centres, then log losses.
            alpha = Alpha(x_train, Mu, y_train, phi)
            tao = Tao(x_train, Mu, y_train, phi)
            loss_val = Loss(x_val, Mu, y_val, alpha, tao, phi)
            valLoss = np.append(valLoss, loss_val)
            loss_train = Loss(x_train, Mu, y_train, alpha, tao, phi)
            trainLoss = np.append(trainLoss, loss_train)
            if k >= k_max:
                iter = i + 1
                break
            # Early stop once both losses are flat over the last 5 samples
            # (checked only after at least 100 iterations).
            if valLoss.shape[0] >= 100 and abs(valLoss[-1] - valLoss[-5]) < 1e-5 and abs(trainLoss[-1] - trainLoss[-5]) < 1e-5:
                iter = i + 1
                break
            [bi, di, si, mi] = [b_(k, k_max), d_(k), s_(k, k_max), m_(k)]
            u = np.random.rand()
            if u <= bi:
                # Birth move: propose one new centre, accept with prob A(...).
                u_ = np.random.rand()
                mu = Generate2(x_train)
                if u_ <= A(Birth(x_train, Mu, y_train, mu, phi)):
                    if k == 0:
                        # First centre: coerce the empty array into a 1 x d matrix.
                        Mu = Mu.reshape(0)
                        mu = mu.reshape(d)
                        Mu = np.concatenate((Mu, mu)).reshape(1, d)
                    else:
                        Mu = np.concatenate((Mu, mu))
                    k = k + 1
                else:
                    pass
            elif u <= bi + di:
                # Death move: drop a uniformly chosen centre.
                u_ = np.random.rand()
                j = np.random.randint(0, k)
                if u_ <= A(Death(x_train, Mu, y_train, j, phi)):
                    k = k - 1
                    Mu = np.concatenate((Mu[:j, :], Mu[j + 1:, :]))
                else:
                    pass
            elif u <= bi + di + si:
                # Split move: replace centre j by mu - u_1*s and mu + u_1*s.
                u_ = np.random.rand()
                j = np.random.randint(0, k)
                mu = Mu[j, :]
                u_1 = np.random.rand(1, d)
                mu1 = mu - u_1 * s
                mu2 = mu + u_1 * s
                if u_ <= A(Split(x_train, Mu, y_train, s, j, mu1, mu2, phi)):
                    k = k + 1
                    Mu = np.concatenate((Mu[:j, :], Mu[j + 1:, :], mu1, mu2))
                else:
                    pass
            elif u <= bi + di + si + mi:
                # Merge move: fuse centre j1 with its nearest neighbour j2.
                u_ = np.random.rand()
                j1 = np.random.randint(0, k)
                mu1 = Mu[j1, :]
                j2 = FindClosest(Mu, j1)
                mu2 = Mu[j2, :]
                [j1, j2] = [j2, j1] if j1 > j2 else [j1, j2]
                # NOTE(review): this takes the norm of the *index* difference
                # (j1 - j2), not of mu1 - mu2; also `la` is lowercase while the
                # import above is `numpy.linalg as LA` — presumably `la` comes
                # from utils_rbf's star import. Verify both.
                if la.norm(j1 - j2) > 2 * s:
                    # NOTE(review): `i -= 1` has no effect inside a for-loop;
                    # the rejected proposal simply consumes an iteration.
                    i -= 1
                    continue
                mu = ((mu1 + mu2) * 0.5).reshape(1, d)
                if u_ <= A(Merge(x_train, Mu, y_train, s, j1, j2, mu, phi)):
                    k = k - 1
                    Mu = np.concatenate((Mu[:j1, :], Mu[j1 + 1: j2, :], Mu[j2 + 1:, :], mu))
                else:
                    pass
            else:
                # No dimension change: jitter the existing centres.
                if k == 1:
                    pass
                else:
                    Mu = Update(x_train, Mu, y_train, phi)
            if method == "RJSA":
                # perform a MH step with the annealed acceptance ratio
                Mu = SA3(x_train, y_train, i, Mu, Mu_old, phi)
            elif method == "AIC":
                alpha = Alpha(x_train, Mu, y_train, phi)
                tao = Tao(x_train, Mu, y_train, phi)
                alpha_old = Alpha(x_train, Mu_old, y_train, phi)
                tao_old = Tao(x_train, Mu_old, y_train, phi)
                Mu = AIC(x_train, y_train, Mu, Mu_old, alpha, alpha_old, tao, tao_old, phi)
            elif method == "BIC":
                alpha = Alpha(x_train, Mu, y_train, phi)
                tao = Tao(x_train, Mu, y_train, phi)
                alpha_old = Alpha(x_train, Mu_old, y_train, phi)
                tao_old = Tao(x_train, Mu_old, y_train, phi)
                Mu = BIC(x_train, y_train, Mu, Mu_old, alpha, alpha_old, tao, tao_old, phi)
        # Track the best fold by a weighted mean of the fold's final losses.
        mean_loss = loss_train*0.25+loss_val*0.75
        if min_loss > mean_loss:
            min_id = ii+1
            # NOTE(review): stored as the unweighted mean while the comparison
            # above uses the 0.25/0.75 weighting — confirm which is intended.
            min_loss = (loss_train+loss_val)/2
        print("#%d \t valLoss=%.3f \t trainLoss=%.3f \t meanLoss=%.3f" % (ii+1, loss_val, loss_train, mean_loss))
    # print("[use #{}] total loss={}".format(min_id, loss))
    #
    # if result.shape[0] > 0:
    # result = np.concatenate((result, y_pred), axis=0)
    # else:
    # result = y_pred
    #
#sio.savemat("../results/a.mat", {'a':result}) | 0.275422 | 0.358465 |
# Morph per-subject MEG source estimates onto the fsaverage template brain
# and write the morphed data back to HDF5 (one file per subject/run).
from saflow_params import FS_SUBJDIR, FOLDERPATH, SUBJ_LIST, BLOCS_LIST
import numpy as np
import os
import os.path as op
import mne
import scipy.io as sio
import h5py
from ephypype.import_data import write_hdf5
# Path templates for the forward solution, the raw inverse solution and the
# morphed output of each subject/run.
# NOTE(review): unlike fwd_template, the two templates below concatenate onto
# FOLDERPATH without a '/' — assumes FOLDERPATH ends with a separator; verify.
fwd_template = FOLDERPATH + '/sub-{subj}/ses-recording/meg/sub-{subj}_ses-recording_task-gradCPT_run-0{bloc}_meg_-epo-oct-6-fwd.fif'
sources_fp = FOLDERPATH  # NOTE(review): never used below
sources_template = FOLDERPATH + 'source_reconstruction_MNE_aparca2009s/inv_sol_pipeline/_run_id_run-0{bloc}_session_id_ses-recording_subject_id_sub-{subj}/inv_solution/sub-{subj}_ses-recording_task-gradCPT_run-0{bloc}_meg_-epo_stc.hdf5'
morphed_template = FOLDERPATH + 'source_reconstruction_MNE_aparca2009s/inv_sol_pipeline/_run_id_run-0{bloc}_session_id_ses-recording_subject_id_sub-{subj}/inv_solution/sub-{subj}_ses-recording_task-gradCPT_run-0{bloc}_meg_-epo_stcmorphed.hdf5'
# Morph target: the fsaverage oct-6 source space.
fsaverage_fpath = op.join(FS_SUBJDIR, 'fsaverage/bem/fsaverage-oct-6-src.fif')
fsaverage_src = mne.read_source_spaces(fsaverage_fpath)
vertices_to = [s['vertno'] for s in fsaverage_src]
for subj in SUBJ_LIST:
    for bloc in BLOCS_LIST:
        fwd_fpath = fwd_template.format(subj=subj, bloc=bloc)
        sources_fpath = sources_template.format(subj=subj, bloc=bloc)
        morphed_fpath = morphed_template.format(subj=subj, bloc=bloc)
        fwd = mne.read_forward_solution(fwd_fpath)
        src = fwd['src']
        # Keep only the two cortical (surface) source spaces for the morph.
        surf_src = mne.source_space.SourceSpaces(fwd['src'][:2])
        n_cortex = (src[0]['nuse'] + src[1]['nuse'])  # NOTE(review): unused
        # Subject naming differs across the dataset: try 'sub-XX', then 'SAXX'.
        # NOTE(review): the last fallback morphs fsaverage onto itself (an
        # identity-like morph) and applies it to this subject's data — confirm
        # that is intended rather than silently mislabelled data.
        try:
            morph_surf = mne.compute_source_morph(
                src=surf_src, subject_from='sub-{}'.format(subj), subject_to='fsaverage',
                spacing=vertices_to, subjects_dir=FS_SUBJDIR)
        except ValueError:
            try:
                morph_surf = mne.compute_source_morph(
                    src=surf_src, subject_from='SA{}'.format(subj), subject_to='fsaverage',
                    spacing=vertices_to, subjects_dir=FS_SUBJDIR)
            except ValueError:
                morph_surf = mne.compute_source_morph(
                    src=surf_src, subject_from='fsaverage', subject_to='fsaverage',
                    spacing=vertices_to, subjects_dir=FS_SUBJDIR)
        # Load the per-epoch source data from the first dataset in the file.
        with h5py.File(sources_fpath, 'r') as f:
            a_group_key = list(f.keys())[0]
            data = list(f[a_group_key])
        morphed_data = []
        morphed_dat = np.array([])  # placeholder, immediately overwritten below
        for i, dat in enumerate(data):
            # append-then-overwrite: slot i ends up holding morph_mat * dat,
            # i.e. the sparse morph matrix applied to this epoch.
            morphed_data.append(morphed_dat)
            morphed_data[i] = morph_surf.morph_mat*dat
        write_hdf5(morphed_fpath, morphed_data, dataset_name='stc_data', dtype='f')
del data, morphed_data | scripts/saflow_morph.py | from saflow_params import FS_SUBJDIR, FOLDERPATH, SUBJ_LIST, BLOCS_LIST
import numpy as np
import os
import os.path as op
import mne
import scipy.io as sio
import h5py
from ephypype.import_data import write_hdf5
fwd_template = FOLDERPATH + '/sub-{subj}/ses-recording/meg/sub-{subj}_ses-recording_task-gradCPT_run-0{bloc}_meg_-epo-oct-6-fwd.fif'
sources_fp = FOLDERPATH
sources_template = FOLDERPATH + 'source_reconstruction_MNE_aparca2009s/inv_sol_pipeline/_run_id_run-0{bloc}_session_id_ses-recording_subject_id_sub-{subj}/inv_solution/sub-{subj}_ses-recording_task-gradCPT_run-0{bloc}_meg_-epo_stc.hdf5'
morphed_template = FOLDERPATH + 'source_reconstruction_MNE_aparca2009s/inv_sol_pipeline/_run_id_run-0{bloc}_session_id_ses-recording_subject_id_sub-{subj}/inv_solution/sub-{subj}_ses-recording_task-gradCPT_run-0{bloc}_meg_-epo_stcmorphed.hdf5'
fsaverage_fpath = op.join(FS_SUBJDIR, 'fsaverage/bem/fsaverage-oct-6-src.fif')
fsaverage_src = mne.read_source_spaces(fsaverage_fpath)
vertices_to = [s['vertno'] for s in fsaverage_src]
for subj in SUBJ_LIST:
for bloc in BLOCS_LIST:
fwd_fpath = fwd_template.format(subj=subj, bloc=bloc)
sources_fpath = sources_template.format(subj=subj, bloc=bloc)
morphed_fpath = morphed_template.format(subj=subj, bloc=bloc)
fwd = mne.read_forward_solution(fwd_fpath)
src = fwd['src']
surf_src = mne.source_space.SourceSpaces(fwd['src'][:2])
n_cortex = (src[0]['nuse'] + src[1]['nuse'])
try:
morph_surf = mne.compute_source_morph(
src=surf_src, subject_from='sub-{}'.format(subj), subject_to='fsaverage',
spacing=vertices_to, subjects_dir=FS_SUBJDIR)
except ValueError:
try:
morph_surf = mne.compute_source_morph(
src=surf_src, subject_from='SA{}'.format(subj), subject_to='fsaverage',
spacing=vertices_to, subjects_dir=FS_SUBJDIR)
except ValueError:
morph_surf = mne.compute_source_morph(
src=surf_src, subject_from='fsaverage', subject_to='fsaverage',
spacing=vertices_to, subjects_dir=FS_SUBJDIR)
with h5py.File(sources_fpath, 'r') as f:
a_group_key = list(f.keys())[0]
data = list(f[a_group_key])
morphed_data = []
morphed_dat = np.array([])
for i, dat in enumerate(data):
morphed_data.append(morphed_dat)
morphed_data[i] = morph_surf.morph_mat*dat
write_hdf5(morphed_fpath, morphed_data, dataset_name='stc_data', dtype='f')
del data, morphed_data | 0.21036 | 0.099514 |
import csv
import os
from collections import Counter
from DemoV1 import get_txt_from_file_name
from age_gender_detect import age_gender_detect
Dic_Name = ["shot_1", "shot_2", "shot_3", "shot_4", "shot_5", "shot_6", "shot_7", "shot_8"]
All_shot_infor = []
uid_set = set()
for dic in Dic_Name:
shot_infor = []
dic_path = os.path.join(r"video_cut", dic)
files = os.listdir(dic_path)
uid_list = []
txt_list = []
new_files = []
for file in files:
uid = file.split(".myreel.")[0]
uid_list.append(uid)
uid_set.add(uid)
try:
txt = get_txt_from_file_name(os.path.join(dic_path, file))
txt_list.append(txt)
except:
txt_list.append('Video file is damaged or format is not supported.')
afile = os.path.join(dic_path, file)
new_files.append(afile)
shot_infor.append(uid_list)
shot_infor.append(txt_list)
shot_infor.append(new_files)
All_shot_infor.append(shot_infor)
with open('All_Txt.csv', 'w', encoding='UTF-8', newline="") as f:
csv.writer(f).writerow(
["uid", "@shot1", "@shot2", "@shot3", "@shot4", "@shot5", "@shot6", "@shot7", "@shot8", "@age", "@gender"])
for uid in uid_set:
line = uid + ":::@"
age_counter = Counter()
gender_counter = Counter()
for shot_infor in All_shot_infor:
if uid in shot_infor[0]:
idx = shot_infor[0].index(uid)
try:
age, gender, dic = age_gender_detect(shot_infor[2][idx])
age_counter = age_counter + dic['age']
gender_counter = gender_counter + dic['gender']
except:
continue
line = line + shot_infor[1][idx] + ':::@'
else:
line = line + "Shot missed:::@"
max_key = max(age_counter, key=age_counter.get)
max_key_gender = max(gender_counter, key=gender_counter.get)
line = line + str(max_key) + ":::@" + str(max_key_gender)
line_list = line.split(':::')
csv.writer(f).writerow(line_list)
f.close() | DemoV3_part1.py | import csv
import os
from collections import Counter
from DemoV1 import get_txt_from_file_name
from age_gender_detect import age_gender_detect
Dic_Name = ["shot_1", "shot_2", "shot_3", "shot_4", "shot_5", "shot_6", "shot_7", "shot_8"]
All_shot_infor = []
uid_set = set()
for dic in Dic_Name:
shot_infor = []
dic_path = os.path.join(r"video_cut", dic)
files = os.listdir(dic_path)
uid_list = []
txt_list = []
new_files = []
for file in files:
uid = file.split(".myreel.")[0]
uid_list.append(uid)
uid_set.add(uid)
try:
txt = get_txt_from_file_name(os.path.join(dic_path, file))
txt_list.append(txt)
except:
txt_list.append('Video file is damaged or format is not supported.')
afile = os.path.join(dic_path, file)
new_files.append(afile)
shot_infor.append(uid_list)
shot_infor.append(txt_list)
shot_infor.append(new_files)
All_shot_infor.append(shot_infor)
with open('All_Txt.csv', 'w', encoding='UTF-8', newline="") as f:
csv.writer(f).writerow(
["uid", "@shot1", "@shot2", "@shot3", "@shot4", "@shot5", "@shot6", "@shot7", "@shot8", "@age", "@gender"])
for uid in uid_set:
line = uid + ":::@"
age_counter = Counter()
gender_counter = Counter()
for shot_infor in All_shot_infor:
if uid in shot_infor[0]:
idx = shot_infor[0].index(uid)
try:
age, gender, dic = age_gender_detect(shot_infor[2][idx])
age_counter = age_counter + dic['age']
gender_counter = gender_counter + dic['gender']
except:
continue
line = line + shot_infor[1][idx] + ':::@'
else:
line = line + "Shot missed:::@"
max_key = max(age_counter, key=age_counter.get)
max_key_gender = max(gender_counter, key=gender_counter.get)
line = line + str(max_key) + ":::@" + str(max_key_gender)
line_list = line.split(':::')
csv.writer(f).writerow(line_list)
f.close() | 0.107087 | 0.106226 |
from Stream import *
from Operators import *
from examples_element_wrapper import print_stream
import numpy as np
from collections import namedtuple
from missing_data_multiple import *
from types import *
class Message():
def __init__(self, id, timestamp, content, category):
self.id = id
self.timestamp = timestamp
self.content = content
self.category = category
def divide_with_op(output_dic):
def divide_trans(input_streams, state=None):
start_req = input_streams[0].start
start_resp = input_streams[1].start
stop_req = input_streams[0].stop
stop_resp = input_streams[1].stop
request_stream = input_streams[0].list[start_req:stop_req]
response_stream = input_streams[1].list[start_resp:stop_resp]
list_categries = {}
keys = output_dic.keys()
for p in keys:
list_categries[p] = [[], []]
assert (len(list_categries) == len(keys))
for i in request_stream:
cat = i.category
list_categries[cat][0].append(i)
for j in response_stream:
cat1 = j.category
list_categries[cat1][1].append(j)
for p in list_categries.keys():
output_dic[p][0].extend(list_categries[p][0])
output_dic[p][1].extend(list_categries[p][1])
start_req += len(request_stream)
start_resp += len(response_stream)
return [], None, [start_req, start_resp]
return divide_trans
class MissingData_multistream():
def __init__(self, main_req_stream, main_resp_stream, categories, delay):
self.num_categories = len(categories)
assert (self.num_categories > 0)
self.main_req_stream = main_req_stream
self.main_resp_stream = main_resp_stream
dic = {}
for i in categories:
req_stream = Stream("Request_Category_" + str(i))
resp_stream = Stream("Response_Category_" + str(i))
timely_stream = Stream("Timely_Category_" + str(i))
untimely_stream = Stream("Untimely_Category_" + str(i))
dic.update({i: [req_stream, resp_stream, timely_stream,
untimely_stream,
MissingData(req_stream, resp_stream, timely_stream,
untimely_stream, delay[i])]})
self.struct_dic = dic
# assert (self.struct_dic.keys()==categories)
# assert (len(dic[categories[0]])==5)
# assert (len(self.struct_dic)==self.num_categories)
self.func1 = divide_with_op(self.struct_dic)
self.divideAgent = Agent([self.main_req_stream, self.main_resp_stream],
[],
self.func1, None) | Missing_Data_Section/missing_data_multiplestreams.py | from Stream import *
from Operators import *
from examples_element_wrapper import print_stream
import numpy as np
from collections import namedtuple
from missing_data_multiple import *
from types import *
class Message():
    """A single request/response message flowing through the streams.

    Attributes:
        id: unique message identifier.
        timestamp: time at which the message was created/observed.
        content: message payload.
        category: routing key used to bucket the message per category.
    """
    def __init__(self, id, timestamp, content, category):
        self.id = id
        self.timestamp = timestamp
        self.content = content
        self.category = category

    def __repr__(self):
        # Added for debuggability; purely additive, existing callers unaffected.
        return 'Message(id={!r}, timestamp={!r}, category={!r})'.format(
            self.id, self.timestamp, self.category)
def divide_with_op(output_dic):
    """Build a stream-transition function that routes messages by category.

    *output_dic* maps each category to a pair of lists
    ``[request_buffer, response_buffer]``; the returned transition reads the
    unprocessed slice of the request and response streams, appends each
    message to its category's buffer, and reports the advanced stream starts.
    """
    def divide_trans(input_streams, state=None):
        req_stream = input_streams[0]
        resp_stream = input_streams[1]
        req_start = req_stream.start
        resp_start = resp_stream.start
        # Only the [start:stop) window is new since the last invocation.
        new_requests = req_stream.list[req_start:req_stream.stop]
        new_responses = resp_stream.list[resp_start:resp_stream.stop]
        # Stage per-category (requests, responses) before touching output_dic.
        buckets = {category: ([], []) for category in output_dic.keys()}
        for msg in new_requests:
            buckets[msg.category][0].append(msg)
        for msg in new_responses:
            buckets[msg.category][1].append(msg)
        for category, (staged_reqs, staged_resps) in buckets.items():
            output_dic[category][0].extend(staged_reqs)
            output_dic[category][1].extend(staged_resps)
        # No output messages, no state; starts advance past the consumed slices.
        return [], None, [req_start + len(new_requests),
                          resp_start + len(new_responses)]
    return divide_trans
class MissingData_multistream():
    """Fan a mixed request/response stream pair out into per-category
    MissingData monitors.

    For every category this builds four streams (requests, responses, timely,
    untimely) plus a MissingData instance watching them with that category's
    delay, then installs an Agent that routes incoming messages into the
    per-category request/response streams via divide_with_op.
    """
    def __init__(self, main_req_stream, main_resp_stream, categories, delay):
        self.num_categories = len(categories)
        assert self.num_categories > 0
        self.main_req_stream = main_req_stream
        self.main_resp_stream = main_resp_stream
        struct_dic = {}
        for cat in categories:
            requests = Stream("Request_Category_" + str(cat))
            responses = Stream("Response_Category_" + str(cat))
            timely = Stream("Timely_Category_" + str(cat))
            untimely = Stream("Untimely_Category_" + str(cat))
            monitor = MissingData(requests, responses, timely,
                                  untimely, delay[cat])
            struct_dic[cat] = [requests, responses, timely, untimely, monitor]
        self.struct_dic = struct_dic
        self.func1 = divide_with_op(self.struct_dic)
        self.divideAgent = Agent([self.main_req_stream, self.main_resp_stream],
                                 [],
                                 self.func1, None)
import math
import torch
import torch.nn.functional as F
import numpy as np
import cv2
from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
import matplotlib.pyplot as plt
from torch_scatter import scatter_mean
imgs = torch.load('attns/images0.pt', map_location='cpu')
attns = torch.load('attns/attn0.pt', map_location='cpu')
qks = torch.load('attns/qks0.pt', map_location='cpu')
masks = torch.load('attns/masks0.pt', map_location='cpu')
distances = torch.load('attns/distances0.pt', map_location='cpu')
b,_,h,w = imgs.shape
def unnormalize(img):
mean = torch.Tensor(IMAGENET_DEFAULT_MEAN)
std = torch.Tensor(IMAGENET_DEFAULT_STD)
mean=(-mean/std)
std=1./std
img = (img-mean)/std
return img
def display_img(img):
cv2.imshow('img',img)
cv2.waitKey(0)
cv2.destroyAllWindows()
def save_img(fname,img):
cv2.imwrite(fname,img)
def gather_data():
cor_aq_list = list()
cor_ad_list = list()
layers = list()
blocks = list()
annos = list()
block_count = 0
for i in range(len(attns)):
attn = attns[i]
qk_ = qks[i]
mask_ = masks[i]
distance = distances[i]
for j in range(len(attn)):
at = attn[j] # (bxn_w) x num_heads x num_query x num_key
qk = qk_[j] # (bxn_w) x num_heads x num_query x num_key
mask = mask_[j] # nw x n x n
dis = distance[j] # n x n
at = at.reshape(b,-1,at.shape[1],at.shape[2],at.shape[3]) # b x num_window x num_head x num_query x num_key
_,num_window, num_head, num_query, num_key = at.shape
qk = qk.reshape(b,-1,num_head, num_query, num_key)
if mask is not None:
mask = mask.unsqueeze(0).unsqueeze(2).expand(b,-1,num_head,-1,-1).contiguous() # b x num_window x num_head x num_query x num_key
dis = dis.reshape(1,1,1,num_query, num_key).expand(b,num_window,num_head,-1,-1).contiguous()
for h in range(num_head):
a = at[:,:,h] # b x num_window x num_query x num_key
q = qk[:,:,h]
if mask is not None:
m = mask[:,:,h]
d = dis[:,:,h]
if mask is not None:
# remove point where mask is -100
valid_idx = (m==0).long().nonzero(as_tuple=True)
a = a[valid_idx] # N
q = q[valid_idx]
d = d[valid_idx]
else:
a = a.reshape(-1)
q = q.reshape(-1)
d = d.reshape(-1)
cor_aq = torch.corrcoef(torch.stack([a,q],dim=0))[0,1].item()
cor_ad = torch.corrcoef(torch.stack([a,d],dim=0))[0,1].item()
cor_aq_list.append(cor_aq)
cor_ad_list.append(cor_ad)
layers.append(i)
blocks.append(block_count)
'''
# draw the graph
q_min = q.min().item()
q_max = q.max().item()
d_min = d.min().item()
d_max = d.max().item()
# discretize q and d
q_grain = 100
q_disc = (q-q_min) / (q_max-q_min)
q_disc = (q_disc * q_grain).long()
d_grain = 50
d_disc = (d-d_min) / (d_max-d_min)
d_disc = (d_disc * d_grain).long()
a_map = torch.zeros((q_grain+1) * (d_grain+1))
scatter_idx = q_disc*(d_grain+1) + d_disc
scatter_mean(src = a, index = scatter_idx, dim = 0, out = a_map)
a_map = a_map.reshape(q_grain+1, d_grain+1)
x, y = np.meshgrid(np.linspace(d_min, d_max, d_grain+1), np.linspace(q_min, q_max, q_grain+1))
z = a_map.numpy()
z_min, z_max = np.abs(z).min(), np.abs(z).max()
fig, ax = plt.subplots()
c = ax.pcolormesh(x, y, z, cmap='Blues', vmin=z_min, vmax=z_max)
ax.set_title('Average Attention\nlayer '+str(i)+' block '+str(j)+' head '+str(h))
ax.axis([x.min(), x.max(), y.min(), y.max()])
ax.set_ylabel('Normalized QK')
ax.set_xlabel('Relative Distance')
fig.colorbar(c, ax=ax)
#plt.show()
plt.savefig('attn_dist/'+'layer'+str(i)+'block'+str(j)+'head'+str(h)+'.png')
'''
block_count += 1
annos.append(str(i)+', '+str(j))
# draw the scatter plot
cor_aq_list = np.asarray(cor_aq_list)
cor_ad_list = np.asarray(cor_ad_list)
fig, ax = plt.subplots()
#scatter = ax.scatter(cor_ad_list, cor_aq_list, c=layers)
scatter = ax.scatter(cor_ad_list, cor_aq_list, c=blocks)
legend = ax.legend(scatter.legend_elements()[0], annos,
title="layer, block")
ax.add_artist(legend)
ax.set_title('Correlation coefficient between attention and QK and distance')
ax.set_ylabel('correlation coefficient between QK and attention')
ax.set_xlabel('correlation coefficient between distance and attention')
plt.show()
if __name__ == '__main__':
gather_data() | attn_distribution.py | import math
import torch
import torch.nn.functional as F
import numpy as np
import cv2
from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
import matplotlib.pyplot as plt
from torch_scatter import scatter_mean
# Pre-dumped capture tensors, all loaded on CPU: input images, per-layer
# attention maps, raw QK logits, attention masks and relative-distance tables
# (presumably from a windowed-attention model — confirm against the dumper).
imgs = torch.load('attns/images0.pt', map_location='cpu')
attns = torch.load('attns/attn0.pt', map_location='cpu')
qks = torch.load('attns/qks0.pt', map_location='cpu')
masks = torch.load('attns/masks0.pt', map_location='cpu')
distances = torch.load('attns/distances0.pt', map_location='cpu')
b,_,h,w = imgs.shape  # batch size, (channels), image height, image width
def unnormalize(img):
    """Invert the timm ImageNet normalization, i.e. return img * std + mean.

    Implemented by reusing the forward form (img - m) / s with m = -mean/std
    and s = 1/std, which algebraically equals img * std + mean.
    NOTE(review): mean/std are 1-D tensors of length 3, so broadcasting
    matches the *last* axis — this assumes channel-last input; confirm the
    caller permutes CHW tensors before calling.
    """
    mean = torch.Tensor(IMAGENET_DEFAULT_MEAN)
    std = torch.Tensor(IMAGENET_DEFAULT_STD)
    mean=(-mean/std)
    std=1./std
    img = (img-mean)/std
    return img
def display_img(img):
    """Show *img* in an OpenCV window and block until any key is pressed."""
    cv2.imshow('img',img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def save_img(fname,img):
    """Write *img* to *fname* via cv2.imwrite (format chosen by extension)."""
    cv2.imwrite(fname,img)
def gather_data():
    """For every (layer, block, head), correlate the attention weights with
    (a) the raw QK logits and (b) the relative distances, then scatter-plot
    the two correlation coefficients per head, coloured by block index.

    Reads the module-level tensors ``attns``/``qks``/``masks``/``distances``
    and the batch size ``b`` loaded at import time.
    """
    cor_aq_list = list()   # corr(attention, QK) per head
    cor_ad_list = list()   # corr(attention, distance) per head
    layers = list()
    blocks = list()
    annos = list()         # "layer, block" labels for the legend
    block_count = 0        # running block index across all layers
    for i in range(len(attns)):
        attn = attns[i]
        qk_ = qks[i]
        mask_ = masks[i]
        distance = distances[i]
        for j in range(len(attn)):
            at = attn[j] # (bxn_w) x num_heads x num_query x num_key
            qk = qk_[j] # (bxn_w) x num_heads x num_query x num_key
            mask = mask_[j] # nw x n x n
            dis = distance[j] # n x n
            # Unfold the fused batch*window axis back into (batch, window).
            at = at.reshape(b,-1,at.shape[1],at.shape[2],at.shape[3]) # b x num_window x num_head x num_query x num_key
            _,num_window, num_head, num_query, num_key = at.shape
            qk = qk.reshape(b,-1,num_head, num_query, num_key)
            if mask is not None:
                mask = mask.unsqueeze(0).unsqueeze(2).expand(b,-1,num_head,-1,-1).contiguous() # b x num_window x num_head x num_query x num_key
            # Broadcast the shared distance table to the full attention shape.
            dis = dis.reshape(1,1,1,num_query, num_key).expand(b,num_window,num_head,-1,-1).contiguous()
            # NOTE(review): the loop variable `h` shadows the module-level
            # image height `h` from `imgs.shape`.
            for h in range(num_head):
                a = at[:,:,h] # b x num_window x num_query x num_key
                q = qk[:,:,h]
                if mask is not None:
                    m = mask[:,:,h]
                d = dis[:,:,h]
                if mask is not None:
                    # remove point where mask is -100
                    valid_idx = (m==0).long().nonzero(as_tuple=True)
                    a = a[valid_idx] # N
                    q = q[valid_idx]
                    d = d[valid_idx]
                else:
                    a = a.reshape(-1)
                    q = q.reshape(-1)
                    d = d.reshape(-1)
                # Pearson correlation of attention vs QK and vs distance
                # (off-diagonal of the 2x2 correlation matrix).
                cor_aq = torch.corrcoef(torch.stack([a,q],dim=0))[0,1].item()
                cor_ad = torch.corrcoef(torch.stack([a,d],dim=0))[0,1].item()
                cor_aq_list.append(cor_aq)
                cor_ad_list.append(cor_ad)
                layers.append(i)
                blocks.append(block_count)
                '''
                # draw the graph
                q_min = q.min().item()
                q_max = q.max().item()
                d_min = d.min().item()
                d_max = d.max().item()
                # discretize q and d
                q_grain = 100
                q_disc = (q-q_min) / (q_max-q_min)
                q_disc = (q_disc * q_grain).long()
                d_grain = 50
                d_disc = (d-d_min) / (d_max-d_min)
                d_disc = (d_disc * d_grain).long()
                a_map = torch.zeros((q_grain+1) * (d_grain+1))
                scatter_idx = q_disc*(d_grain+1) + d_disc
                scatter_mean(src = a, index = scatter_idx, dim = 0, out = a_map)
                a_map = a_map.reshape(q_grain+1, d_grain+1)
                x, y = np.meshgrid(np.linspace(d_min, d_max, d_grain+1), np.linspace(q_min, q_max, q_grain+1))
                z = a_map.numpy()
                z_min, z_max = np.abs(z).min(), np.abs(z).max()
                fig, ax = plt.subplots()
                c = ax.pcolormesh(x, y, z, cmap='Blues', vmin=z_min, vmax=z_max)
                ax.set_title('Average Attention\nlayer '+str(i)+' block '+str(j)+' head '+str(h))
                ax.axis([x.min(), x.max(), y.min(), y.max()])
                ax.set_ylabel('Normalized QK')
                ax.set_xlabel('Relative Distance')
                fig.colorbar(c, ax=ax)
                #plt.show()
                plt.savefig('attn_dist/'+'layer'+str(i)+'block'+str(j)+'head'+str(h)+'.png')
                '''
            block_count += 1
            annos.append(str(i)+', '+str(j))
    # draw the scatter plot
    cor_aq_list = np.asarray(cor_aq_list)
    cor_ad_list = np.asarray(cor_ad_list)
    fig, ax = plt.subplots()
    #scatter = ax.scatter(cor_ad_list, cor_aq_list, c=layers)
    scatter = ax.scatter(cor_ad_list, cor_aq_list, c=blocks)
    legend = ax.legend(scatter.legend_elements()[0], annos,
                       title="layer, block")
    ax.add_artist(legend)
    ax.set_title('Correlation coefficient between attention and QK and distance')
    ax.set_ylabel('correlation coefficient between QK and attention')
    ax.set_xlabel('correlation coefficient between distance and attention')
    plt.show()
# Script entry point: build and show the correlation scatter plot.
if __name__ == '__main__':
    gather_data()
from manga_py.provider import Provider
from .helpers.std import Std
class MangaPandaCom(Provider, Std):
_cdn = 'https://img.mghubcdn.com/file/imghub'
_api_url = 'https://api.mghubcdn.com/graphql'
def get_chapter_index(self) -> str:
return self.chapter_for_json()
def get_content(self):
return self._get_content('{}/manga/{}')
def get_manga_name(self) -> str:
return self._get_name(r'\.\w{2,7}/manga/([^/]+)')
def get_chapters(self):
# curl 'https://api.mghubcdn.com/graphql' -H 'Content-Type: application/json' --data-raw '{"query": "{latestPopular(x:mr02){id}manga(x:mr02,slug:\"mahou-tsukai-no-yome\"){id,rank,title,slug,status,image,latestChapter,author,artist,genres,description,alternativeTitle,mainSlug,isYaoi,isPorn,isSoftPorn,unauthFile,noCoverAd,isLicensed,createdDate,updatedDate,chapters{id,number,title,slug,date}}}"}'
keys = [
'id', 'rank', 'title', 'slug', 'status', 'chapters'
]
data = self._api(
r'{manga(x:mr02,slug:"%(name)s"){%(keys)s{id,number,title,slug,date}}}'
% {'name': self.manga_name, 'keys': ','.join(keys)}
)
chapters = data.get('data', {}).get('manga', {}).get('chapters', []) # type: list
return chapters[::-1]
def get_files(self):
data = self._api(
r'{chapter(x:mr02,slug:"%(name)s",number:%(number)f){id,title,mangaID,number,slug,date,pages}}'
% {'name': self.manga_name, 'number': self.chapter['number']}
)
images = data.get('data', {}).get('chapter', {}).get('pages', [])
if type(images) == str:
images = self.json.loads(images) # type: list
return list(map(lambda x: '{}/{}'.format(self._cdn, images[x]), images))
def get_cover(self):
return self._cover_from_content('.manga-thumb')
def chapter_for_json(self) -> str:
number = str(self.chapter['number']).replace('.', '-')
slug = self.chapter['slug']
return '{}_{}'.format(number, slug)
def _api(self, query: str) -> dict:
response = self.http().requests(self._api_url, method='post', data={
"query": query,
}, headers={
'Accept': 'application/json',
})
return response.json()
main = MangaPandaCom | manga_py/providers/mangapanda_onl.py | from manga_py.provider import Provider
from .helpers.std import Std
class MangaPandaCom(Provider, Std):
    """Provider for mangapanda.onl; content is served by the mghubcdn
    (mangahub) CDN and GraphQL API."""
    _cdn = 'https://img.mghubcdn.com/file/imghub'
    _api_url = 'https://api.mghubcdn.com/graphql'

    def get_chapter_index(self) -> str:
        """Index used for naming the chapter archive, e.g. "10-5_<slug>"."""
        return self.chapter_for_json()

    def get_content(self):
        """Fetch the manga landing page."""
        return self._get_content('{}/manga/{}')

    def get_manga_name(self) -> str:
        """Extract the manga slug from the current URL."""
        return self._get_name(r'\.\w{2,7}/manga/([^/]+)')

    def get_chapters(self):
        """Return the chapter list, oldest chapter first."""
        # curl 'https://api.mghubcdn.com/graphql' -H 'Content-Type: application/json' --data-raw '{"query": "{latestPopular(x:mr02){id}manga(x:mr02,slug:\"mahou-tsukai-no-yome\"){id,rank,title,slug,status,image,latestChapter,author,artist,genres,description,alternativeTitle,mainSlug,isYaoi,isPorn,isSoftPorn,unauthFile,noCoverAd,isLicensed,createdDate,updatedDate,chapters{id,number,title,slug,date}}}"}'
        keys = [
            'id', 'rank', 'title', 'slug', 'status', 'chapters'
        ]
        data = self._api(
            r'{manga(x:mr02,slug:"%(name)s"){%(keys)s{id,number,title,slug,date}}}'
            % {'name': self.manga_name, 'keys': ','.join(keys)}
        )
        chapters = data.get('data', {}).get('manga', {}).get('chapters', [])  # type: list
        # The API returns newest first; reverse for download order.
        return chapters[::-1]

    def get_files(self):
        """Return the full CDN URLs of the current chapter's page images."""
        # NOTE(review): the chapter query is keyed by number plus the *manga*
        # slug (self.manga_name) — confirm the API accepts that combination.
        data = self._api(
            r'{chapter(x:mr02,slug:"%(name)s",number:%(number)f){id,title,mangaID,number,slug,date,pages}}'
            % {'name': self.manga_name, 'number': self.chapter['number']}
        )
        images = data.get('data', {}).get('chapter', {}).get('pages', [])
        # `pages` sometimes arrives as a JSON-encoded string; decode it first.
        if isinstance(images, str):  # was: type(images) == str
            images = self.json.loads(images)  # type: list
        # `images` maps page keys to CDN-relative paths.
        return list(map(lambda x: '{}/{}'.format(self._cdn, images[x]), images))

    def get_cover(self):
        """Cover image URL scraped from the landing page."""
        return self._cover_from_content('.manga-thumb')

    def chapter_for_json(self) -> str:
        """Build "<number>_<slug>" with dots in the number replaced by '-'."""
        number = str(self.chapter['number']).replace('.', '-')
        slug = self.chapter['slug']
        return '{}_{}'.format(number, slug)

    def _api(self, query: str) -> dict:
        """POST a GraphQL *query* to the mghubcdn API, return decoded JSON."""
        response = self.http().requests(self._api_url, method='post', data={
            "query": query,
        }, headers={
            'Accept': 'application/json',
        })
        return response.json()


main = MangaPandaCom
import re
from discord import Member
from typing import Union, List, Dict, Pattern # Type hints
"""
Constants split into classes. Admin roles have extra information for rank comparison. Channels are combined into
lists in the Section class.
"""
class RePattern:
    """Precompiled regular expressions plus matching helpers."""
    # Roleplay-style name: e.g. "John_Doe", "John_McDonald",
    # optionally followed by a second surname segment.
    RP_NAME = re.compile("^[A-Z][a-z]+_[A-Z]{1,2}([a-z][A-Z])?[a-z]+(_[A-Z]{1,2}([a-z][A-Z])?[a-z]+)?$")

    @staticmethod
    def contains_pattern(pattern: Pattern, phrase: str):
        """Return True when *pattern* matches anywhere inside *phrase*."""
        return re.search(pattern, phrase) is not None
class Role:
    """Discord role constants plus helpers for admin-rank checks.

    Admin roles carry a ``level`` used for rank comparison (higher wins);
    non-admin roles use level -1. IDs are stored as strings and compared
    directly against ``role.id`` of a member's roles.
    """
    EXECUTIVE = {'name': 'Executive', 'id': '465896094333927424', 'level': 99999}
    HEAD = {'name': 'Head', 'id': '465894668094144512', 'level': 1337}
    SENIOR = {'name': 'Senior', 'id': '465896014130184192', 'level': 4}
    GENERAL = {'name': 'General', 'id': '465887716354031624', 'level': 3}
    JUNIOR = {'name': 'Junior', 'id': '465896256972128266', 'level': 2}
    PROBIE = {'name': 'Probie', 'id': '475211931905556490', 'level': 1}
    ADMINISTRATOR = {'name': 'Administrator', 'id': '465874213324980244', 'level': 0}
    ADMIN_ROLES = [EXECUTIVE, HEAD, SENIOR, GENERAL, JUNIOR, PROBIE, ADMINISTRATOR]
    HELPER = {'name': 'Helper', 'id': '465874370904981514', 'level': -1}
    DEVELOPER = {'name': 'Developer', 'id': '465874671733309440', 'level': -1}
    TESTER = {'name': 'Tester', 'id': '465874643337740290', 'level': -1}

    @staticmethod
    def has_roles(author: Member, role_list: List[Dict[str, Union[str, int]]]) -> bool:
        """Return True iff *author* holds every role in *role_list*."""
        # Collect the author's role IDs once instead of rescanning per role.
        author_ids = {role.id for role in author.roles}
        return all(role['id'] in author_ids for role in role_list)

    @staticmethod
    def is_admin(author: Member) -> bool:
        """Return True iff *author* holds at least one admin role."""
        return Role.get_level(author) >= 0

    @staticmethod
    def get_level(author: Member) -> int:
        """
        Returns the admin level of a Discord member. Returns -1 if not an admin
        """
        levels = (admin['level']
                  for role in author.roles
                  for admin in Role.ADMIN_ROLES
                  if role.id == admin['id'])
        return max(levels, default=-1)

    @staticmethod
    def get_rank(author: Member) -> Union[str, None]:
        """
        Returns the admin rank of a Discord member or None if they are not an admin
        """
        best = max((admin
                    for role in author.roles
                    for admin in Role.ADMIN_ROLES
                    if role.id == admin['id']),
                   key=lambda admin: admin['level'], default=None)
        return None if best is None else best['name']
class Channel:
    """String IDs for every channel in the server, grouped by section."""
    # IDs for every channel in the server
    # HELP/GENERAL
    GENERAL = '465873343518736397'
    SUGGESTIONS = '478404721112252436'
    # ADMINISTRATORS
    DISCUSSION_ADMIN = '466404751857287178'
    CHAT = '465873855672745985'
    COMMANDS = '465875438321795074'
    QUICK_NOTES = '484733912581406720'
    # HELPERS
    DISCUSSION_HELPER = '466420981813215232'
    NEWBIE = '465874164960460800'
    # DEVELOPMENT
    GITHUB_BOT_UPDATES = '480330228569210891'
    BOT_TODO = '466949031898382356'
    TODO = '467909781126905857'
    # NOTE(review): BUGS is 17 digits while every other ID here is 18 —
    # possibly a dropped leading digit; verify against the live server.
    BUGS = '65879591656095754'
    CONFIRMED_BUGS = '471553508118626315'
    DEVELOPERS = '465873842318082049'
    TESTERS = '465874413267582986'
class Section:
# Lists of channel IDs in each section
HELP_GENERAL = [Channel.GENERAL, Channel.SUGGESTIONS]
ADMINISTRATORS = [Channel.DISCUSSION_ADMIN, Channel.CHAT, Channel.COMMANDS, Channel.QUICK_NOTES]
HELPERS = [Channel.DISCUSSION_HELPER, Channel.NEWBIE]
DEVELOPMENT = [Channel.GITHUB_BOT_UPDATES, Channel.BOT_TODO, Channel.TODO, Channel.BUGS, Channel.CONFIRMED_BUGS,
Channel.DEVELOPERS, Channel.TESTERS]
@staticmethod
def in_sections(channel_id: str, section_list: List[List[str]]) -> bool:
"""
Verifies a Discord message was posted in the correct section(s) by taking in a list of lists containing
channel IDs
"""
for section in section_list:
for section_channel_id in section:
if channel_id == section_channel_id:
return True
return False | server/cmds/constants.py | import re
from discord import Member
from typing import Union, List, Dict, Pattern # Type hints
"""
Constants split into classes. Admin roles have extra information for rank comparison. Channels are combined into
lists in the Section class.
"""
class RePattern:
RP_NAME = re.compile("^[A-Z][a-z]+_[A-Z]{1,2}([a-z][A-Z])?[a-z]+(_[A-Z]{1,2}([a-z][A-Z])?[a-z]+)?$")
@staticmethod
def contains_pattern(pattern: Pattern, phrase: str):
contains = bool(re.search(pattern, phrase))
return contains
class Role:
EXECUTIVE = {'name': 'Executive', 'id': '465896094333927424', 'level': 99999}
HEAD = {'name': 'Head', 'id': '465894668094144512', 'level': 1337}
SENIOR = {'name': 'Senior', 'id': '465896014130184192', 'level': 4}
GENERAL = {'name': 'General', 'id': '465887716354031624', 'level': 3}
JUNIOR = {'name': 'Junior', 'id': '465896256972128266', 'level': 2}
PROBIE = {'name': 'Probie', 'id': '475211931905556490', 'level': 1}
ADMINISTRATOR = {'name': 'Administrator', 'id': '465874213324980244', 'level': 0}
ADMIN_ROLES = [EXECUTIVE, HEAD, SENIOR, GENERAL, JUNIOR, PROBIE, ADMINISTRATOR]
HELPER = {'name': 'Helper', 'id': '465874370904981514', 'level': -1}
DEVELOPER = {'name': 'Developer', 'id': '465874671733309440', 'level': -1}
TESTER = {'name': 'Tester', 'id': '465874643337740290', 'level': -1}
@staticmethod
def has_roles(author: Member, role_list: List[Dict[str, Union[str, int]]]) -> bool:
for role in role_list:
has_current_role = False
for author_role in author.roles:
if author_role.id == role['id']:
has_current_role = True
if not has_current_role:
return False
return True
@staticmethod
def is_admin(author: Member) -> bool:
if Role.get_level(author) >= 0:
return True
else:
return False
@staticmethod
def get_level(author: Member) -> int:
"""
Returns the admin level of a Discord member. Returns -1 if not an admin
"""
level = -1 # Not an admin
for role in author.roles:
for admin_role in Role.ADMIN_ROLES:
if role.id == admin_role['id']:
if admin_role['level'] > level:
level = admin_role['level']
return level
@staticmethod
def get_rank(author: Member) -> Union[str, None]:
"""
Returns the admin rank of a Discord member or None if they are not an admin
"""
level = -1
rank = None
for role in author.roles:
for admin_role in Role.ADMIN_ROLES:
if role.id == admin_role['id']:
if admin_role['level'] > level:
rank = admin_role['name']
level = admin_role['level']
if level == -1:
return None
return rank
class Channel:
# IDs for every channel in the server
# HELP/GENERAL
GENERAL = '465873343518736397'
SUGGESTIONS = '478404721112252436'
# ADMINISTRATORS
DISCUSSION_ADMIN = '466404751857287178'
CHAT = '465873855672745985'
COMMANDS = '465875438321795074'
QUICK_NOTES = '484733912581406720'
# HELPERS
DISCUSSION_HELPER = '466420981813215232'
NEWBIE = '465874164960460800'
# DEVELOPMENT
GITHUB_BOT_UPDATES = '480330228569210891'
BOT_TODO = '466949031898382356'
TODO = '467909781126905857'
BUGS = '65879591656095754'
CONFIRMED_BUGS = '471553508118626315'
DEVELOPERS = '465873842318082049'
TESTERS = '465874413267582986'
class Section:
# Lists of channel IDs in each section
HELP_GENERAL = [Channel.GENERAL, Channel.SUGGESTIONS]
ADMINISTRATORS = [Channel.DISCUSSION_ADMIN, Channel.CHAT, Channel.COMMANDS, Channel.QUICK_NOTES]
HELPERS = [Channel.DISCUSSION_HELPER, Channel.NEWBIE]
DEVELOPMENT = [Channel.GITHUB_BOT_UPDATES, Channel.BOT_TODO, Channel.TODO, Channel.BUGS, Channel.CONFIRMED_BUGS,
Channel.DEVELOPERS, Channel.TESTERS]
@staticmethod
def in_sections(channel_id: str, section_list: List[List[str]]) -> bool:
"""
Verifies a Discord message was posted in the correct section(s) by taking in a list of lists containing
channel IDs
"""
for section in section_list:
for section_channel_id in section:
if channel_id == section_channel_id:
return True
return False | 0.636805 | 0.215702 |
from Utils.log_handler import log_to_console
from Application.Frame.port import Port
"""
Module handles the manipulation of transfer ports for the APPL block
"""
# Dictionaries to hold the transfer ports for each waves
portsDict = []
NR_WAVES = 0
ACTIVE_WAVE = 0
def create_ports_dict(nr_waves: int) -> None:
"""
Creates the transfer ports list for a wave
:return: None
"""
global portsDict, NR_WAVES
NR_WAVES = nr_waves
for wave in range(nr_waves + 1):
portsDict.append(dict())
def add_port(name: str, size: int, port_type: str, is_image: bool, wave: int = 0) -> None:
"""
Add a port in the port dictionary.
The port is automatically added to all slots for waves
:param name: name of the port
:param size: size of the port
:param port_type: type of the port
:param is_image: if arr is an image
:param wave: wave of the port
:return: None
"""
portsDict[wave][name] = Port(name=name, size=size, port_type=port_type, is_image=is_image)
def reshape_ports(size_array: list) -> None:
"""
Add a port in the port dictionary.
The port is automatically added to all slots for waves
:param size_array: list of resolutions of levels.
:return: None
"""
for el in portsDict[ACTIVE_WAVE].keys():
if 'LC' not in el:
port_to_change = portsDict[ACTIVE_WAVE][el]
if port_to_change.get_is_image() is True:
channels = len(port_to_change.arr.shape)
level_to_change = int(port_to_change.name[-1])
if channels == 2:
port_to_change.reshape_arr(size_new_array=(size_array[level_to_change][0], size_array[level_to_change][1]),
type_new_array=port_to_change.arr.dtype)
if channels == 3:
port_to_change.reshape_arr(size_new_array=(size_array[level_to_change][0], size_array[level_to_change][1], channels),
type_new_array=port_to_change.arr.dtype)
def exist_port(name: str) -> bool:
"""
Check if ports exists.
:param name: port name
:return: if ports exists
"""
return name in portsDict[ACTIVE_WAVE].keys()
def get_port_from_wave(name: str, wave_offset: int = 0) -> Port:
"""
Get port from a specific wave
:param name: name of the port
:param wave_offset: offset to current wave
:return: Corresponding port of current wave
"""
global NR_WAVES
return portsDict[(ACTIVE_WAVE - wave_offset) % NR_WAVES].get(name)
def prepare_ports_new_wave(frame: int) -> None:
"""
Make internal mechanism for changing slots to work on new wave
:return: None
"""
global ACTIVE_WAVE, NR_WAVES
ACTIVE_WAVE = frame % NR_WAVES
for el in portsDict[ACTIVE_WAVE].keys():
port_to_change = portsDict[ACTIVE_WAVE][el]
port_to_change.self_reset()
def set_invalid_ports_of_job(ports: list) -> None:
"""
Set's invalid all the ports in the list
:param ports: list of ports
:return: None
"""
for port in ports:
portsDict[ACTIVE_WAVE][port[0]].reset_valid_flag()
def debug_ports_job(port_type: str, ports: list) -> None:
"""
Debug information for ports.
:param port_type: Type of port
:param ports: port
"""
log_to_console(port_type)
# print(ports)
if port_type == 'input':
for port in ports:
log_to_console("PORT: {port:150s} is in STATE: {state}".format(port=portsDict[ACTIVE_WAVE][port].name,
state=portsDict[ACTIVE_WAVE][port].isValid))
else:
for port in ports:
log_to_console("PORT: {port:150s} is in STATE: {state}".format(port=portsDict[ACTIVE_WAVE][port[0]].name,
state=portsDict[ACTIVE_WAVE][port[0]].isValid))
def log_to_console_exchange_ports() -> None:
"""
Logs to console all the ports created
:return:None
"""
global NR_WAVES
log_to_console("Exchange ports created:")
for wave in range(NR_WAVES):
log_to_console('WAVE: {wave}'.format(wave=str(wave)))
for port, memory in portsDict[wave].items():
log_to_console('PORT: {port:150} at ADDRESS: {size:100}'.format(port=str(port), size=str(memory)))
if __name__ == "__main__":
pass | Application/Frame/transferJobPorts.py | from Utils.log_handler import log_to_console
from Application.Frame.port import Port
"""
Module handles the manipulation of transfer ports for the APPL block
"""
# Dictionaries to hold the transfer ports for each waves
portsDict = []
NR_WAVES = 0
ACTIVE_WAVE = 0
def create_ports_dict(nr_waves: int) -> None:
"""
Creates the transfer ports list for a wave
:return: None
"""
global portsDict, NR_WAVES
NR_WAVES = nr_waves
for wave in range(nr_waves + 1):
portsDict.append(dict())
def add_port(name: str, size: int, port_type: str, is_image: bool, wave: int = 0) -> None:
"""
Add a port in the port dictionary.
The port is automatically added to all slots for waves
:param name: name of the port
:param size: size of the port
:param port_type: type of the port
:param is_image: if arr is an image
:param wave: wave of the port
:return: None
"""
portsDict[wave][name] = Port(name=name, size=size, port_type=port_type, is_image=is_image)
def reshape_ports(size_array: list) -> None:
"""
Add a port in the port dictionary.
The port is automatically added to all slots for waves
:param size_array: list of resolutions of levels.
:return: None
"""
for el in portsDict[ACTIVE_WAVE].keys():
if 'LC' not in el:
port_to_change = portsDict[ACTIVE_WAVE][el]
if port_to_change.get_is_image() is True:
channels = len(port_to_change.arr.shape)
level_to_change = int(port_to_change.name[-1])
if channels == 2:
port_to_change.reshape_arr(size_new_array=(size_array[level_to_change][0], size_array[level_to_change][1]),
type_new_array=port_to_change.arr.dtype)
if channels == 3:
port_to_change.reshape_arr(size_new_array=(size_array[level_to_change][0], size_array[level_to_change][1], channels),
type_new_array=port_to_change.arr.dtype)
def exist_port(name: str) -> bool:
"""
Check if ports exists.
:param name: port name
:return: if ports exists
"""
return name in portsDict[ACTIVE_WAVE].keys()
def get_port_from_wave(name: str, wave_offset: int = 0) -> Port:
"""
Get port from a specific wave
:param name: name of the port
:param wave_offset: offset to current wave
:return: Corresponding port of current wave
"""
global NR_WAVES
return portsDict[(ACTIVE_WAVE - wave_offset) % NR_WAVES].get(name)
def prepare_ports_new_wave(frame: int) -> None:
"""
Make internal mechanism for changing slots to work on new wave
:return: None
"""
global ACTIVE_WAVE, NR_WAVES
ACTIVE_WAVE = frame % NR_WAVES
for el in portsDict[ACTIVE_WAVE].keys():
port_to_change = portsDict[ACTIVE_WAVE][el]
port_to_change.self_reset()
def set_invalid_ports_of_job(ports: list) -> None:
"""
Set's invalid all the ports in the list
:param ports: list of ports
:return: None
"""
for port in ports:
portsDict[ACTIVE_WAVE][port[0]].reset_valid_flag()
def debug_ports_job(port_type: str, ports: list) -> None:
"""
Debug information for ports.
:param port_type: Type of port
:param ports: port
"""
log_to_console(port_type)
# print(ports)
if port_type == 'input':
for port in ports:
log_to_console("PORT: {port:150s} is in STATE: {state}".format(port=portsDict[ACTIVE_WAVE][port].name,
state=portsDict[ACTIVE_WAVE][port].isValid))
else:
for port in ports:
log_to_console("PORT: {port:150s} is in STATE: {state}".format(port=portsDict[ACTIVE_WAVE][port[0]].name,
state=portsDict[ACTIVE_WAVE][port[0]].isValid))
def log_to_console_exchange_ports() -> None:
"""
Logs to console all the ports created
:return:None
"""
global NR_WAVES
log_to_console("Exchange ports created:")
for wave in range(NR_WAVES):
log_to_console('WAVE: {wave}'.format(wave=str(wave)))
for port, memory in portsDict[wave].items():
log_to_console('PORT: {port:150} at ADDRESS: {size:100}'.format(port=str(port), size=str(memory)))
if __name__ == "__main__":
pass | 0.473901 | 0.41324 |
import asyncio
import json
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
from prompt_toolkit.completion import WordCompleter
from prompt_toolkit.eventloop.defaults import use_asyncio_event_loop
from prompt_toolkit.patch_stdout import patch_stdout
from prompt_toolkit.shortcuts.prompt import PromptSession
from prompt_toolkit.styles import Style
from app.client_base import ClientBase
from app.ib_config import IbConfig, loadConfig
from app.server.protocols import (
ResponseStatus,
kWorkerTypeConsole,
ConsoleCommandRequest,
ConsoleCommandResponse,
)
from app.utils.log import Log
completer = WordCompleter([
'find_symbols', 'subscribe_market', 'unsubscribe_market',
'subscribe_market_depth', 'unsubscribe_market_depth', 'order',
'cancel_order', 'contract', 'orders', 'portfolio', 'cash',
'list_strategies', 'list_running_strategies', 'start_strategy',
'stop_strategy', 'run_mock_strategy'], ignore_case=True)
style = Style.from_dict({
'completion-menu.completion': 'bg:#008888 #ffffff',
'completion-menu.completion.current': 'bg:#00aaaa #000000',
'scrollbar.background': 'bg:#88aaaa',
'scrollbar.button': 'bg:#222222',
})
class ConsoleClient(ClientBase):
log_file = 'console'
def __init__(self, config: IbConfig) -> None:
self._log = Log.create(Log.path(self.log_file))
self._config: IbConfig = config
ClientBase.__init__(
self, self._log.get_logger('consoleclient'), kWorkerTypeConsole,
cmd_redis_ip=self._config.cmd_redis_ip,
cmd_redis_port=self._config.cmd_redis_port)
self._cmd_task: asyncio.Task = None
self._prompt_session: PromptSession = PromptSession(
completer=completer, style=style)
self.add_dispatcher(ConsoleCommandResponse,
self.on_console_cmd_response)
async def _handle_console_cmd(self):
while True:
with patch_stdout():
cmd = await self._prompt_session.prompt(
'>>> ', async_=True, auto_suggest=AutoSuggestFromHistory())
if cmd:
request = ConsoleCommandRequest()
request.sid = self.sid()
request.cmd = cmd
await self.send_packet(request)
async def on_console_cmd_response(self, response: ConsoleCommandResponse):
if response.status == ResponseStatus.kSuccess:
self._logger.info('success response:')
obj = json.loads(response.msg)
if type(obj) is list:
for i, value in enumerate(obj):
if i % 5 == 0:
await asyncio.sleep(0.001)
self._logger.info('%s', str(value))
else:
self._logger.info('%s', str(obj))
else:
self._logger.info('response failed with status: %d, msg: %s',
response.status, response.msg)
async def on_client_ready(self) -> None:
if self._cmd_task is None:
self._cmd_task = asyncio.create_task(self._handle_console_cmd())
async def test():
config = await loadConfig()
client = ConsoleClient(config)
await client.initialize()
if __name__ == '__main__':
use_asyncio_event_loop()
asyncio.get_event_loop().run_until_complete(test())
asyncio.get_event_loop().run_forever() | app/console/console_client.py | import asyncio
import json
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
from prompt_toolkit.completion import WordCompleter
from prompt_toolkit.eventloop.defaults import use_asyncio_event_loop
from prompt_toolkit.patch_stdout import patch_stdout
from prompt_toolkit.shortcuts.prompt import PromptSession
from prompt_toolkit.styles import Style
from app.client_base import ClientBase
from app.ib_config import IbConfig, loadConfig
from app.server.protocols import (
ResponseStatus,
kWorkerTypeConsole,
ConsoleCommandRequest,
ConsoleCommandResponse,
)
from app.utils.log import Log
completer = WordCompleter([
'find_symbols', 'subscribe_market', 'unsubscribe_market',
'subscribe_market_depth', 'unsubscribe_market_depth', 'order',
'cancel_order', 'contract', 'orders', 'portfolio', 'cash',
'list_strategies', 'list_running_strategies', 'start_strategy',
'stop_strategy', 'run_mock_strategy'], ignore_case=True)
style = Style.from_dict({
'completion-menu.completion': 'bg:#008888 #ffffff',
'completion-menu.completion.current': 'bg:#00aaaa #000000',
'scrollbar.background': 'bg:#88aaaa',
'scrollbar.button': 'bg:#222222',
})
class ConsoleClient(ClientBase):
log_file = 'console'
def __init__(self, config: IbConfig) -> None:
self._log = Log.create(Log.path(self.log_file))
self._config: IbConfig = config
ClientBase.__init__(
self, self._log.get_logger('consoleclient'), kWorkerTypeConsole,
cmd_redis_ip=self._config.cmd_redis_ip,
cmd_redis_port=self._config.cmd_redis_port)
self._cmd_task: asyncio.Task = None
self._prompt_session: PromptSession = PromptSession(
completer=completer, style=style)
self.add_dispatcher(ConsoleCommandResponse,
self.on_console_cmd_response)
async def _handle_console_cmd(self):
while True:
with patch_stdout():
cmd = await self._prompt_session.prompt(
'>>> ', async_=True, auto_suggest=AutoSuggestFromHistory())
if cmd:
request = ConsoleCommandRequest()
request.sid = self.sid()
request.cmd = cmd
await self.send_packet(request)
async def on_console_cmd_response(self, response: ConsoleCommandResponse):
if response.status == ResponseStatus.kSuccess:
self._logger.info('success response:')
obj = json.loads(response.msg)
if type(obj) is list:
for i, value in enumerate(obj):
if i % 5 == 0:
await asyncio.sleep(0.001)
self._logger.info('%s', str(value))
else:
self._logger.info('%s', str(obj))
else:
self._logger.info('response failed with status: %d, msg: %s',
response.status, response.msg)
async def on_client_ready(self) -> None:
if self._cmd_task is None:
self._cmd_task = asyncio.create_task(self._handle_console_cmd())
async def test():
config = await loadConfig()
client = ConsoleClient(config)
await client.initialize()
if __name__ == '__main__':
use_asyncio_event_loop()
asyncio.get_event_loop().run_until_complete(test())
asyncio.get_event_loop().run_forever() | 0.172416 | 0.04879 |
"""Tests for `schedule.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
import numpy as np
from optax._src import schedule
class ConstantTest(chex.TestCase):
@chex.all_variants()
def test_constant(self):
"""Check constant schedule."""
# Get schedule function.
const_value = 10
num_steps = 15
schedule_fn = self.variant(schedule.constant_schedule(const_value))
# Test that generated values equal the expected schedule values.
generated_vals = []
for count in range(num_steps):
# Compute next value.
generated_vals.append(schedule_fn(count))
# Test output.
expected_vals = np.array([const_value] * num_steps, dtype=np.float32)
np.testing.assert_allclose(
expected_vals, np.array(generated_vals), atol=1e-3)
class PolynomialTest(chex.TestCase):
@chex.all_variants()
def test_linear(self):
"""Check linear schedule."""
# Get schedule function.
schedule_fn = self.variant(
schedule.polynomial_schedule(
init_value=10., end_value=20., power=1, transition_steps=10))
# Test that generated values equal the expected schedule values.
generated_vals = []
for count in range(15):
# Compute next value.
generated_vals.append(schedule_fn(count))
# Test output.
expected_vals = np.array(list(range(10, 20)) + [20] * 5, dtype=np.float32)
np.testing.assert_allclose(
expected_vals, np.array(generated_vals), atol=1e-3)
@chex.all_variants()
def test_zero_steps_schedule(self):
# Get schedule function.
initial_value = 10.
end_value = 20.
for num_steps in [-1, 0]:
schedule_fn = self.variant(
schedule.polynomial_schedule(
init_value=initial_value, end_value=end_value,
power=1, transition_steps=num_steps))
for count in range(15):
np.testing.assert_allclose(schedule_fn(count), initial_value)
@chex.all_variants()
def test_nonlinear(self):
"""Check non-linear (quadratic) schedule."""
# Get schedule function.
schedule_fn = self.variant(
schedule.polynomial_schedule(
init_value=25., end_value=10., power=2, transition_steps=10))
# Test that generated values equal the expected schedule values.
generated_vals = []
for count in range(15):
# Compute next value.
generated_vals.append(schedule_fn(count))
# Test output.
expected_vals = np.array(
[10. + 15. * (1. - n / 10)**2 for n in range(10)] + [10] * 5,
dtype=np.float32)
np.testing.assert_allclose(
expected_vals, np.array(generated_vals), atol=1e-3)
@chex.all_variants()
def test_with_decay_begin(self):
"""Check quadratic schedule with non-zero schedule begin."""
# Get schedule function.
schedule_fn = self.variant(
schedule.polynomial_schedule(
init_value=30., end_value=10., power=2,
transition_steps=10, transition_begin=4))
# Test that generated values equal the expected schedule values.
generated_vals = []
for count in range(20):
# Compute next value.
generated_vals.append(schedule_fn(count))
# Test output.
expected_vals = np.array(
[30.] * 4 + [10. + 20. * (1. - n / 10)**2 for n in range(10)] +
[10] * 6,
dtype=np.float32)
np.testing.assert_allclose(
expected_vals, np.array(generated_vals), atol=1e-3)
class PiecewiseConstantTest(chex.TestCase):
@chex.all_variants()
def test_positive(self):
"""Check piecewise constant schedule of positive values."""
# Get schedule function.
schedule_fn = self.variant(
schedule.piecewise_constant_schedule(0.1, {3: 2., 6: 0.5}))
# Test that generated values equal the expected schedule values.
generated_vals = []
for count in range(10):
# Compute next value.
generated_vals.append(schedule_fn(count))
# Test output.
expected_vals = np.array([0.1, 0.1, 0.1, 0.2, 0.2, 0.2, 0.1, 0.1, 0.1, 0.1])
np.testing.assert_allclose(
expected_vals, np.array(generated_vals), atol=1e-3)
@chex.all_variants()
def test_negative(self):
"""Check piecewise constant schedule of negative values."""
# Get schedule function.
schedule_fn = self.variant(
schedule.piecewise_constant_schedule(-0.1, {3: 2., 6: 0.5}))
# Test that generated values equal the expected schedule values.
generated_vals = []
for count in range(10):
# Compute next value.
generated_vals.append(schedule_fn(count))
# Test output.
expected_vals = -1 * np.array(
[0.1, 0.1, 0.1, 0.2, 0.2, 0.2, 0.1, 0.1, 0.1, 0.1])
np.testing.assert_allclose(
expected_vals, np.array(generated_vals), atol=1e-3)
class ExponentialTest(chex.TestCase):
@chex.all_variants()
@parameterized.parameters(False, True)
def test_constant_schedule(self, staircase):
"""Checks constant schedule for exponential decay schedule."""
num_steps = 15
# Get schedule function.
init_value = 1.
schedule_fn = self.variant(
schedule.exponential_decay(
init_value=init_value, transition_steps=num_steps,
decay_rate=1., staircase=staircase))
# Test that generated values equal the expected schedule values.
generated_vals = []
for count in range(num_steps):
generated_vals.append(schedule_fn(count))
expected_vals = np.array([init_value] * num_steps, dtype=np.float32)
np.testing.assert_allclose(
expected_vals, np.array(generated_vals), atol=1e-3)
@chex.all_variants()
@parameterized.parameters(False, True)
def test_nonvalid_transition_steps(self, staircase):
"""Checks nonvalid decay steps results in a constant schedule."""
init_value = 1.
for transition_steps in [-1, 0]:
schedule_fn = self.variant(
schedule.exponential_decay(
init_value=init_value, transition_steps=transition_steps,
decay_rate=1., staircase=staircase))
for count in range(15):
np.testing.assert_allclose(schedule_fn(count), init_value)
@chex.all_variants()
@parameterized.parameters(False, True)
def test_nonvalid_decay_rate(self, staircase):
"""Checks nonvalid decay steps results in a constant schedule."""
init_value = 1.
schedule_fn = self.variant(
schedule.exponential_decay(
init_value=init_value, transition_steps=2,
decay_rate=0., staircase=staircase))
for count in range(15):
np.testing.assert_allclose(schedule_fn(count), init_value)
@chex.all_variants()
@parameterized.parameters((False, 0), (True, 0), (False, 5), (True, 5))
def test_exponential(self, staircase, transition_begin):
"""Checks non-linear (quadratic) schedule."""
# Get schedule function.
init_value = 1.
num_steps = 15
transition_steps = 2
decay_rate = 2.
schedule_fn = self.variant(
schedule.exponential_decay(
init_value=init_value, transition_steps=transition_steps,
decay_rate=decay_rate, transition_begin=transition_begin,
staircase=staircase))
# Test that generated values equal the expected schedule values.
def _staircased(count):
p = count / transition_steps
if staircase:
p = np.floor(p)
return p
generated_vals = []
for count in range(num_steps + transition_begin):
generated_vals.append(schedule_fn(count))
expected_vals = np.array(
[init_value] * transition_begin + [
init_value * np.power(decay_rate, _staircased(count))
for count in range(num_steps)
],
dtype=np.float32)
np.testing.assert_allclose(
expected_vals, np.array(generated_vals), atol=1e-3)
class CosineDecayTest(chex.TestCase):
@chex.all_variants()
def test_decay_count_smaller_count(self):
"""Check cosine schedule decay for the entire training schedule."""
initial_value = 0.1
schedule_fn = self.variant(
schedule.cosine_decay_schedule(initial_value, 10, 0.0))
# Test that generated values equal the expected schedule values.
generated_vals = []
for count in range(10):
# Compute next value.
generated_vals.append(schedule_fn(count))
# Test output.
expected_multipliers = np.array(
0.5 + 0.5 * np.cos(
np.pi * np.array(
[0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])))
np.testing.assert_allclose(
initial_value * expected_multipliers,
np.array(generated_vals), atol=1e-3)
@chex.all_variants()
def test_decay_count_greater_count(self):
"""Check cosine schedule decay for a part of the training schedule."""
initial_value = 0.1
schedule_fn = self.variant(
schedule.cosine_decay_schedule(initial_value, 5, 0.0))
# Test that generated values equal the expected schedule values.
generated_vals = []
for count in range(12):
# Compute next value.
generated_vals.append(schedule_fn(count))
# Test output.
expected_multipliers = np.array(
0.5 + 0.5 * np.cos(
np.pi * np.array(
[0.0, 0.2, 0.4, 0.6, 0.8, 1., 1., 1., 1., 1., 1., 1.])))
np.testing.assert_allclose(
initial_value * expected_multipliers,
np.array(generated_vals), atol=1e-3)
@chex.all_variants()
def test_decay_count_greater_count_with_alpha(self):
"""Check cosine schedule decay for a part of the training schedule."""
# Get schedule function.
initial_value = 0.1
schedule_fn = self.variant(
schedule.cosine_decay_schedule(initial_value, 5, 0.1))
# Test that generated values equal the expected schedule values.
generated_vals = []
for count in range(12):
# Compute next value.
generated_vals.append(schedule_fn(count))
# Test output.
expected_multipliers = np.array(
0.5 + 0.5 * np.cos(
np.pi * np.array(
[0.0, 0.2, 0.4, 0.6, 0.8, 1., 1., 1., 1., 1., 1., 1.])))
expected_multipliers = 0.9 * expected_multipliers + 0.1
np.testing.assert_allclose(
initial_value * expected_multipliers,
np.array(generated_vals), atol=1e-3)
if __name__ == '__main__':
absltest.main() | optax/_src/schedule_test.py | """Tests for `schedule.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
import numpy as np
from optax._src import schedule
class ConstantTest(chex.TestCase):
@chex.all_variants()
def test_constant(self):
"""Check constant schedule."""
# Get schedule function.
const_value = 10
num_steps = 15
schedule_fn = self.variant(schedule.constant_schedule(const_value))
# Test that generated values equal the expected schedule values.
generated_vals = []
for count in range(num_steps):
# Compute next value.
generated_vals.append(schedule_fn(count))
# Test output.
expected_vals = np.array([const_value] * num_steps, dtype=np.float32)
np.testing.assert_allclose(
expected_vals, np.array(generated_vals), atol=1e-3)
class PolynomialTest(chex.TestCase):
@chex.all_variants()
def test_linear(self):
"""Check linear schedule."""
# Get schedule function.
schedule_fn = self.variant(
schedule.polynomial_schedule(
init_value=10., end_value=20., power=1, transition_steps=10))
# Test that generated values equal the expected schedule values.
generated_vals = []
for count in range(15):
# Compute next value.
generated_vals.append(schedule_fn(count))
# Test output.
expected_vals = np.array(list(range(10, 20)) + [20] * 5, dtype=np.float32)
np.testing.assert_allclose(
expected_vals, np.array(generated_vals), atol=1e-3)
@chex.all_variants()
def test_zero_steps_schedule(self):
# Get schedule function.
initial_value = 10.
end_value = 20.
for num_steps in [-1, 0]:
schedule_fn = self.variant(
schedule.polynomial_schedule(
init_value=initial_value, end_value=end_value,
power=1, transition_steps=num_steps))
for count in range(15):
np.testing.assert_allclose(schedule_fn(count), initial_value)
@chex.all_variants()
def test_nonlinear(self):
"""Check non-linear (quadratic) schedule."""
# Get schedule function.
schedule_fn = self.variant(
schedule.polynomial_schedule(
init_value=25., end_value=10., power=2, transition_steps=10))
# Test that generated values equal the expected schedule values.
generated_vals = []
for count in range(15):
# Compute next value.
generated_vals.append(schedule_fn(count))
# Test output.
expected_vals = np.array(
[10. + 15. * (1. - n / 10)**2 for n in range(10)] + [10] * 5,
dtype=np.float32)
np.testing.assert_allclose(
expected_vals, np.array(generated_vals), atol=1e-3)
@chex.all_variants()
def test_with_decay_begin(self):
"""Check quadratic schedule with non-zero schedule begin."""
# Get schedule function.
schedule_fn = self.variant(
schedule.polynomial_schedule(
init_value=30., end_value=10., power=2,
transition_steps=10, transition_begin=4))
# Test that generated values equal the expected schedule values.
generated_vals = []
for count in range(20):
# Compute next value.
generated_vals.append(schedule_fn(count))
# Test output.
expected_vals = np.array(
[30.] * 4 + [10. + 20. * (1. - n / 10)**2 for n in range(10)] +
[10] * 6,
dtype=np.float32)
np.testing.assert_allclose(
expected_vals, np.array(generated_vals), atol=1e-3)
class PiecewiseConstantTest(chex.TestCase):
@chex.all_variants()
def test_positive(self):
"""Check piecewise constant schedule of positive values."""
# Get schedule function.
schedule_fn = self.variant(
schedule.piecewise_constant_schedule(0.1, {3: 2., 6: 0.5}))
# Test that generated values equal the expected schedule values.
generated_vals = []
for count in range(10):
# Compute next value.
generated_vals.append(schedule_fn(count))
# Test output.
expected_vals = np.array([0.1, 0.1, 0.1, 0.2, 0.2, 0.2, 0.1, 0.1, 0.1, 0.1])
np.testing.assert_allclose(
expected_vals, np.array(generated_vals), atol=1e-3)
@chex.all_variants()
def test_negative(self):
"""Check piecewise constant schedule of negative values."""
# Get schedule function.
schedule_fn = self.variant(
schedule.piecewise_constant_schedule(-0.1, {3: 2., 6: 0.5}))
# Test that generated values equal the expected schedule values.
generated_vals = []
for count in range(10):
# Compute next value.
generated_vals.append(schedule_fn(count))
# Test output.
expected_vals = -1 * np.array(
[0.1, 0.1, 0.1, 0.2, 0.2, 0.2, 0.1, 0.1, 0.1, 0.1])
np.testing.assert_allclose(
expected_vals, np.array(generated_vals), atol=1e-3)
class ExponentialTest(chex.TestCase):
  """Tests for `schedule.exponential_decay`."""

  @chex.all_variants()
  @parameterized.parameters(False, True)
  def test_constant_schedule(self, staircase):
    """Checks constant schedule for exponential decay schedule."""
    num_steps = 15
    # Get schedule function.
    init_value = 1.
    # A decay rate of exactly 1 must leave the value unchanged forever.
    schedule_fn = self.variant(
        schedule.exponential_decay(
            init_value=init_value, transition_steps=num_steps,
            decay_rate=1., staircase=staircase))
    # Test that generated values equal the expected schedule values.
    generated_vals = []
    for count in range(num_steps):
      generated_vals.append(schedule_fn(count))
    expected_vals = np.array([init_value] * num_steps, dtype=np.float32)
    np.testing.assert_allclose(
        expected_vals, np.array(generated_vals), atol=1e-3)

  @chex.all_variants()
  @parameterized.parameters(False, True)
  def test_nonvalid_transition_steps(self, staircase):
    """Checks nonvalid decay steps results in a constant schedule."""
    init_value = 1.
    # Zero or negative transition steps should disable decay entirely.
    for transition_steps in [-1, 0]:
      schedule_fn = self.variant(
          schedule.exponential_decay(
              init_value=init_value, transition_steps=transition_steps,
              decay_rate=1., staircase=staircase))
      for count in range(15):
        np.testing.assert_allclose(schedule_fn(count), init_value)

  @chex.all_variants()
  @parameterized.parameters(False, True)
  def test_nonvalid_decay_rate(self, staircase):
    """Checks nonvalid decay steps results in a constant schedule."""
    init_value = 1.
    # A decay rate of 0 is treated as invalid and disables decay.
    schedule_fn = self.variant(
        schedule.exponential_decay(
            init_value=init_value, transition_steps=2,
            decay_rate=0., staircase=staircase))
    for count in range(15):
      np.testing.assert_allclose(schedule_fn(count), init_value)

  @chex.all_variants()
  @parameterized.parameters((False, 0), (True, 0), (False, 5), (True, 5))
  def test_exponential(self, staircase, transition_begin):
    """Checks non-linear (quadratic) schedule."""
    # Get schedule function.
    init_value = 1.
    num_steps = 15
    transition_steps = 2
    decay_rate = 2.
    schedule_fn = self.variant(
        schedule.exponential_decay(
            init_value=init_value, transition_steps=transition_steps,
            decay_rate=decay_rate, transition_begin=transition_begin,
            staircase=staircase))
    # Test that generated values equal the expected schedule values.
    def _staircased(count):
      # Progress in units of transition_steps, floored when staircasing.
      p = count / transition_steps
      if staircase:
        p = np.floor(p)
      return p
    generated_vals = []
    for count in range(num_steps + transition_begin):
      generated_vals.append(schedule_fn(count))
    # Before transition_begin the schedule holds at init_value.
    expected_vals = np.array(
        [init_value] * transition_begin + [
            init_value * np.power(decay_rate, _staircased(count))
            for count in range(num_steps)
        ],
        dtype=np.float32)
    np.testing.assert_allclose(
        expected_vals, np.array(generated_vals), atol=1e-3)
class CosineDecayTest(chex.TestCase):
  """Tests for `schedule.cosine_decay_schedule`."""

  @chex.all_variants()
  def test_decay_count_smaller_count(self):
    """Check cosine schedule decay for the entire training schedule."""
    init = 0.1
    schedule_fn = self.variant(schedule.cosine_decay_schedule(init, 10, 0.0))
    actual = np.array([schedule_fn(step) for step in range(10)])
    # Over the full horizon the decay fraction runs from 0.0 up to 0.9.
    fractions = np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
    expected = 0.5 + 0.5 * np.cos(np.pi * fractions)
    np.testing.assert_allclose(init * expected, actual, atol=1e-3)

  @chex.all_variants()
  def test_decay_count_greater_count(self):
    """Check cosine schedule decay for a part of the training schedule."""
    init = 0.1
    schedule_fn = self.variant(schedule.cosine_decay_schedule(init, 5, 0.0))
    actual = np.array([schedule_fn(step) for step in range(12)])
    # Past the 5-step decay horizon the fraction saturates at 1.
    fractions = np.array(
        [0.0, 0.2, 0.4, 0.6, 0.8, 1., 1., 1., 1., 1., 1., 1.])
    expected = 0.5 + 0.5 * np.cos(np.pi * fractions)
    np.testing.assert_allclose(init * expected, actual, atol=1e-3)

  @chex.all_variants()
  def test_decay_count_greater_count_with_alpha(self):
    """Check cosine decay with a non-zero terminal value (alpha)."""
    # Get schedule function.
    init = 0.1
    schedule_fn = self.variant(schedule.cosine_decay_schedule(init, 5, 0.1))
    actual = np.array([schedule_fn(step) for step in range(12)])
    fractions = np.array(
        [0.0, 0.2, 0.4, 0.6, 0.8, 1., 1., 1., 1., 1., 1., 1.])
    # With alpha=0.1 the multiplier is rescaled to decay to 0.1, not 0.
    expected = 0.9 * (0.5 + 0.5 * np.cos(np.pi * fractions)) + 0.1
    np.testing.assert_allclose(init * expected, actual, atol=1e-3)
if __name__ == '__main__':
  # Run the absltest test runner when executed as a script.
  # (Removed dataset-dump metadata that was fused onto this line.)
  absltest.main()
import numpy as np
import tensorflow as tf
def NLL(y_true, y_pred):
    '''
    Negative Log-Likelihood, see Section IV.B in the TCAN paper.
    Parameters:
    __________________________________
    y_true: tf.Tensor.
        Actual values of target time series, shape (n_samples, n_targets).
    y_pred: tf.Tensor.
        Predicted means and standard deviations, shape (n_samples, n_targets, 2).
    Returns:
    __________________________________
    tf.Tensor.
        Loss value, a scalar tensor.
    '''
    targets = tf.cast(y_true, dtype=tf.float32)
    means = tf.cast(y_pred[:, :, 0], dtype=tf.float32)
    stds = tf.cast(y_pred[:, :, 1], dtype=tf.float32)
    # Gaussian negative log-likelihood of each target under N(mean, std^2);
    # assumes std > 0 (tf.math.log would produce NaN/inf otherwise).
    log_lik = (0.5 * tf.math.log(2 * np.pi)
               + tf.math.log(stds)
               + tf.math.divide(tf.math.pow(targets - means, 2),
                                2 * tf.math.pow(stds, 2)))
    # Sum over targets, then average over the batch, skipping NaN entries.
    return tf.experimental.numpy.nanmean(
        tf.experimental.numpy.nansum(log_lik, axis=-1))
def MAE(y_true, y_pred):
    '''
    Mean Absolute Error, see Section IV.B in the TCAN paper.
    Parameters:
    __________________________________
    y_true: tf.Tensor.
        Actual values of target time series, a tensor with shape (n_samples, n_targets) where n_samples is the batch
        size and n_targets is the number of target time series.
    y_pred: tf.Tensor.
        Predicted means and standard deviations of target time series, a tensor with shape (n_samples, n_targets, 2)
        where n_samples is the batch size and n_targets is the number of target time series. Note that only the
        predicted means are used in this case.
    Returns:
    __________________________________
    tf.Tensor.
        Loss value, a scalar tensor.
    '''
    y_true = tf.cast(y_true, dtype=tf.float32)
    # Only the predicted means (channel 0) enter the MAE.
    y_pred = tf.cast(y_pred[:, :, 0], dtype=tf.float32)
    L = tf.abs(y_true - y_pred)
    # Sum over targets, then average over the batch, skipping NaN entries.
    # (Removed dataset-dump metadata that was fused onto this return line.)
    return tf.experimental.numpy.nanmean(tf.experimental.numpy.nansum(L, axis=-1))
import tensorflow as tf
def NLL(y_true, y_pred):
    '''
    Negative Log-Likelihood, see Section IV.B in the TCAN paper.
    Parameters:
    __________________________________
    y_true: tf.Tensor.
        Actual values of target time series, a tensor with shape (n_samples, n_targets) where n_samples is the batch
        size and n_targets is the number of target time series.
    y_pred: tf.Tensor.
        Predicted means and standard deviations of target time series, a tensor with shape (n_samples, n_targets, 2)
        where n_samples is the batch size and n_targets is the number of target time series.
    Returns:
    __________________________________
    tf.Tensor.
        Loss value, a scalar tensor.
    '''
    y_true = tf.cast(y_true, dtype=tf.float32)
    # Slice out the predicted means (channel 0) and std deviations (channel 1).
    mu = tf.cast(y_pred[:, :, 0], dtype=tf.float32)
    sigma = tf.cast(y_pred[:, :, 1], dtype=tf.float32)
    # Gaussian NLL; assumes sigma > 0 (log would produce NaN/inf otherwise).
    L = 0.5 * tf.math.log(2 * np.pi) + tf.math.log(sigma) + tf.math.divide(tf.math.pow(y_true - mu, 2), 2 * tf.math.pow(sigma, 2))
    # Sum over targets, then average over the batch, skipping NaN entries.
    return tf.experimental.numpy.nanmean(tf.experimental.numpy.nansum(L, axis=-1))
def MAE(y_true, y_pred):
    '''
    Mean Absolute Error, see Section IV.B in the TCAN paper.
    Parameters:
    __________________________________
    y_true: tf.Tensor.
        Actual values of target time series, a tensor with shape (n_samples, n_targets) where n_samples is the batch
        size and n_targets is the number of target time series.
    y_pred: tf.Tensor.
        Predicted means and standard deviations of target time series, a tensor with shape (n_samples, n_targets, 2)
        where n_samples is the batch size and n_targets is the number of target time series. Note that only the
        predicted means are used in this case.
    Returns:
    __________________________________
    tf.Tensor.
        Loss value, a scalar tensor.
    '''
    y_true = tf.cast(y_true, dtype=tf.float32)
    # Only the predicted means (channel 0) enter the MAE.
    y_pred = tf.cast(y_pred[:, :, 0], dtype=tf.float32)
    L = tf.abs(y_true - y_pred)
    # Sum over targets, then average over the batch, skipping NaN entries.
    # (Removed dataset-dump metadata that was fused onto this return line.)
    return tf.experimental.numpy.nanmean(tf.experimental.numpy.nansum(L, axis=-1))
import warnings as wn
def gen_regdf(df):
    """Create the dummy variables and interaction terms for the probit models.

    Args:
        df: DataFrame containing the categorical variables.

    Returns:
    -------
    The input DataFrame (modified in place) extended with the dummy
    variables and interaction terms needed for the regression. Column
    names and insertion order match the original hand-written version.
    """
    wn.filterwarnings("ignore")
    # General: indicator for pre-1965 contraceptive-stock values.
    df['cstck_b465'] = 0
    df.loc[df['cstck'] < 1965, 'cstck_b465'] = 1
    # NOTE(review): df_reg aliases df, so the caller's frame is mutated in
    # place — this matches the original behaviour.
    df_reg = df

    def _make_dummies(source_col, prefix, values):
        # One 0/1 indicator column per category value of `source_col`.
        for value in values:
            col = '%s%d' % (prefix, value)
            df_reg[col] = 0
            df_reg.loc[df_reg[source_col] == value, col] = 1

    def _make_interactions(pairs):
        # Interact each (output-prefix, source-column) pair with the year
        # dummies; all 1970 terms first, then all 1965 terms, matching the
        # original column-insertion order.
        for year in (1970, 1965):
            for out_prefix, source_col in pairs:
                df_reg['%sX%d' % (out_prefix, year)] = (
                    df_reg[source_col] * df_reg['d%d' % year])

    # Region dummies (region 1 is the omitted base category).
    _make_dummies('_region', 'dreg', [2, 3, 4])
    # Survey-year dummies (1960 is the omitted base year).
    _make_dummies('year', 'd', [1970, 1965])
    # Interactions of sales, physicians and region with the year dummies.
    _make_interactions([('dsales', 'sales'), ('d_Phys', '_Phys'),
                        ('dreg2', 'dreg2'), ('dreg3', 'dreg3'),
                        ('dreg4', 'dreg4')])
    # Interaction of the "any" indicator with the year dummies.
    _make_interactions([('any', 'any')])
    # Age-category dummies and their year interactions.
    _make_dummies('_agecat', 'd_agecat', [20, 25, 30, 35])
    _make_interactions([('d_agecat%d' % v, 'd_agecat%d' % v)
                        for v in (20, 25, 30, 35)])
    # Interaction of the Catholic indicator with the year dummies.
    _make_interactions([('_Catholic', '_Catholic')])
    # Education-category dummies and their year interactions.
    _make_dummies('_ed_cat', 'd_ed_cat', [9, 12, 13, 16])
    _make_interactions([('d_ed_cat%d' % v, 'd_ed_cat%d' % v)
                        for v in (9, 12, 13, 16)])
    # Household-income-category dummies and their year interactions.
    _make_dummies('_hinccat', 'd_hinccat', [1, 2, 3, 4])
    _make_interactions([('d_hinccat%d' % v, 'd_hinccat%d' % v)
                        for v in (1, 2, 3, 4)])
    # Ideal-family-size dummies and their year interactions.
    _make_dummies('_idealcat', 'd_idealcat', [2, 3, 4, 5])
    _make_interactions([('d_idealcat%d' % v, 'd_idealcat%d' % v)
                        for v in (2, 3, 4, 5)])
    return df_reg
def gen_regdf(df):
    """Create the dummy variables and interaction terms for the probit models.

    Args:
        df: DataFrame containing the categorical variables.

    Returns:
    -------
    The input DataFrame (modified in place) extended with the dummy
    variables and interaction terms needed for the regression. Column
    names and insertion order match the original hand-written version.
    """
    wn.filterwarnings("ignore")
    # General: indicator for pre-1965 contraceptive-stock values.
    df['cstck_b465'] = 0
    df.loc[df['cstck'] < 1965, 'cstck_b465'] = 1
    # NOTE(review): df_reg aliases df, so the caller's frame is mutated in
    # place — this matches the original behaviour.
    df_reg = df

    def _make_dummies(source_col, prefix, values):
        # One 0/1 indicator column per category value of `source_col`.
        for value in values:
            col = '%s%d' % (prefix, value)
            df_reg[col] = 0
            df_reg.loc[df_reg[source_col] == value, col] = 1

    def _make_interactions(pairs):
        # Interact each (output-prefix, source-column) pair with the year
        # dummies; all 1970 terms first, then all 1965 terms, matching the
        # original column-insertion order.
        for year in (1970, 1965):
            for out_prefix, source_col in pairs:
                df_reg['%sX%d' % (out_prefix, year)] = (
                    df_reg[source_col] * df_reg['d%d' % year])

    # Region dummies (region 1 is the omitted base category).
    _make_dummies('_region', 'dreg', [2, 3, 4])
    # Survey-year dummies (1960 is the omitted base year).
    _make_dummies('year', 'd', [1970, 1965])
    # Interactions of sales, physicians and region with the year dummies.
    _make_interactions([('dsales', 'sales'), ('d_Phys', '_Phys'),
                        ('dreg2', 'dreg2'), ('dreg3', 'dreg3'),
                        ('dreg4', 'dreg4')])
    # Interaction of the "any" indicator with the year dummies.
    _make_interactions([('any', 'any')])
    # Age-category dummies and their year interactions.
    _make_dummies('_agecat', 'd_agecat', [20, 25, 30, 35])
    _make_interactions([('d_agecat%d' % v, 'd_agecat%d' % v)
                        for v in (20, 25, 30, 35)])
    # Interaction of the Catholic indicator with the year dummies.
    _make_interactions([('_Catholic', '_Catholic')])
    # Education-category dummies and their year interactions.
    _make_dummies('_ed_cat', 'd_ed_cat', [9, 12, 13, 16])
    _make_interactions([('d_ed_cat%d' % v, 'd_ed_cat%d' % v)
                        for v in (9, 12, 13, 16)])
    # Household-income-category dummies and their year interactions.
    _make_dummies('_hinccat', 'd_hinccat', [1, 2, 3, 4])
    _make_interactions([('d_hinccat%d' % v, 'd_hinccat%d' % v)
                        for v in (1, 2, 3, 4)])
    # Ideal-family-size dummies and their year interactions.
    _make_dummies('_idealcat', 'd_idealcat', [2, 3, 4, 5])
    _make_interactions([('d_idealcat%d' % v, 'd_idealcat%d' % v)
                        for v in (2, 3, 4, 5)])
    return df_reg
# ugly path patching
import sys
import os
sys.path.append(os.path.abspath(os.path.join(__file__, '..', 'lib')))
import base64
from proto.ved_pb2 import Ved
'''
The type of link encoded in the ved message. If you find out, what other values mean,
please either send me a pull request or comment in the article
(http://gqs-decoder.blogspot.com/2013/08/google-referrer-query-strings-debunked-part-1.html)
'''
LINK_TYPES = {
22 : 'web',
245 : 'image thumbnail',
429 : 'image',
311 : 'video',
312 : 'video thumbnail',
341 : 'related search',
1617 : 'advertisement',
2459 : 'knowledge sidebar link',
3836 : 'knowledge sidebar image',
3838 : 'knowledge sidebar image small',
3849 : 'knowledge sidebar "more images"'
}
def try_decode(s):
    ''' try to base64 decode s. return None, if decoding fails '''
    # Over-pad with '=' so inputs with stripped padding still decode;
    # '_-' selects the URL-safe alphabet used in Google query strings.
    padded = str(s) + '====='
    try:
        return base64.b64decode(padded, '_-')
    except TypeError:
        return None
def decode_ved_plain(s):
    ''' decode the plain text variant of the ved parameter. no error checking.

    s must be a comma-separated list of key:value pairs with keys from the
    mapping below and integer values.
    '''
    key_mapping = {'i':'index_boost', 't':'type', 'r':'result_position', 's':'start'}
    result = {}
    # The original used a Python-2-only tuple-parameter lambda
    # (``lambda (k, v): ...``), which is a SyntaxError on Python 3.
    for pair in s.split(','):
        key, value = pair.split(':')
        result[key_mapping[key]] = int(value)
    return result
def decode_ved_protobuf(s):
    ''' decode the protobuf variant of the ved parameter.

    Returns a dict of the set protobuf fields, or None if s cannot be
    base64-decoded or parsed as a Ved message.
    '''
    decoded = try_decode(s)
    if not decoded:
        return None
    ved = Ved()
    try:
        ved.ParseFromString(decoded)
    except Exception:
        # The original caught ``DecodeError``, a name that was never
        # imported, so malformed input raised NameError instead of
        # returning None. Treat any parse failure as "not a ved".
        return None
    ret = {}
    for field, value in ved.ListFields():
        ret[field.name] = value
    return ret
def decode_ved(s):
    ''' decode a ved '''
    if not s:
        return None
    prefix, payload = s[0], s[1:]
    if prefix == '1':  #TODO: decode plain text variant
        # Plain-text encoded ved.
        return decode_ved_plain(payload)
    if prefix == '0':
        # Base64/protobuf encoded ved.
        return decode_ved_protobuf(payload)
def format_type(type):
    '''Render a link type as "<name> (<numeric code>)".'''
    # Fall back to 'unknown' for codes missing from LINK_TYPES.
    return '%s (%s)' % (LINK_TYPES.get(type, 'unknown'), type)
def format_ved(ved):
    '''Replace a numeric 'type' entry with its readable form, in place.'''
    if 'type' not in ved:
        return ved
    ved['type'] = format_type(ved['type'])
    return ved
def main():
    # Read veds from stdin, one per line, and print the decoded form.
    import sys
    for line in sys.stdin:
        line = line.strip()
        if not line:
            continue
        # NOTE: Python 2 print statement; this file is Python-2-only.
        print format_ved(decode_ved(line))

if __name__ == '__main__':
    main()
# ugly path patching
import sys
import os
sys.path.append(os.path.abspath(os.path.join(__file__, '..', 'lib')))
import base64
from proto.ved_pb2 import Ved
'''
The type of link encoded in the ved message. If you find out, what other values mean,
please either send me a pull request or comment in the article
(http://gqs-decoder.blogspot.com/2013/08/google-referrer-query-strings-debunked-part-1.html)
'''
LINK_TYPES = {
22 : 'web',
245 : 'image thumbnail',
429 : 'image',
311 : 'video',
312 : 'video thumbnail',
341 : 'related search',
1617 : 'advertisement',
2459 : 'knowledge sidebar link',
3836 : 'knowledge sidebar image',
3838 : 'knowledge sidebar image small',
3849 : 'knowledge sidebar "more images"'
}
def try_decode(s):
    ''' try to base64 decode s. return None, if decoding fails '''
    try:
        # Extra '=' padding tolerates inputs with stripped padding;
        # '_-' selects the URL-safe alphabet.
        return base64.b64decode(str(s)+'=====', '_-')
    except TypeError:
        return None

def decode_ved_plain(s):
    ''' decode the plain text varian of the ved parameter. no error checking. '''
    key_mapping = {'i':'index_boost', 't':'type', 'r':'result_position', 's':'start'}
    kv_pairs = s.split(',')
    kv_pairs = map(lambda x: x.split(':'), kv_pairs)
    # NOTE(review): tuple-parameter lambda is Python-2-only syntax.
    kv_pairs = map(lambda (k,v): (key_mapping[k], int(v)), kv_pairs)
    return dict(kv_pairs)

def decode_ved_protobuf(s):
    ''' decode the protobuf variant of the ved parameter. '''
    decoded = try_decode(s)
    if not decoded:
        return None
    ved = Ved()
    try:
        ved.ParseFromString(decoded)
        ret = {}
        for k,v in ved.ListFields():
            ret[k.name] = v
        return ret
    except DecodeError:
        # NOTE(review): DecodeError is never imported in this module, so a
        # parse failure raises NameError here instead of returning None.
        return None

def decode_ved(s):
    ''' decode a ved '''
    if not s:
        return None
    if s[0] == '1': #TODO: decode plain text variant
        return decode_ved_plain(s[1:])
    elif s[0] == '0':
        return decode_ved_protobuf(s[1:])

def format_type(type):
    # Human-readable "name (code)" rendering of a link type.
    type_name = LINK_TYPES.get(type, 'unknown')
    return '%s (%s)' % (type_name, type)

def format_ved(ved):
    # Replace the numeric 'type' entry with its readable form, in place.
    if 'type' in ved:
        ved['type'] = format_type(ved['type'])
    return ved

def main():
    import sys
    # Decode one ved per stdin line (Python 2 print statement).
    for line in sys.stdin:
        line = line.strip()
        if not line:
            continue
        print format_ved(decode_ved(line))

if __name__ == '__main__':
    main()
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import BTrees
from BTrees.Interfaces import IBTreeFamily
from BTrees.Interfaces import IBTreeModule
from zope import interface
__all__ = (
'MAX_LEAF_SIZE',
'MAX_INTERNAL_SIZE',
'family64LargeBuckets',
)
#: The value used for ``max_leaf_size`` in all
#: BTree classes available in :obj:`family64LargeBuckets`.
MAX_LEAF_SIZE = 500
#: The value used for ``max_internal_size`` in all
#: BTree classes available in :obj:`family64LargeBuckets`
MAX_INTERNAL_SIZE = MAX_LEAF_SIZE
# We use the same value to keep it simple and easier to predict.
def _make_large_module(existing_module, generic_prefix, real_prefix):
    """Clone a BTrees family module with large-bucket tree subclasses.

    Creates a new module named ``nti.zodb.btrees.<generic_prefix>BTree``
    providing the same interfaces as *existing_module*, where every tree
    class is subclassed to use MAX_INTERNAL_SIZE/MAX_LEAF_SIZE, and
    registers it in ``sys.modules`` so the new classes can be pickled.
    """
    new_module = type(existing_module)('nti.zodb.btrees.' + generic_prefix + 'BTree')
    provides = interface.providedBy(existing_module)
    interface.directlyProvides(new_module, provides)
    # Iterating the IBTreeModule interface yields its attribute names
    # (the tree class names).
    for tree_name in IBTreeModule:
        tree = getattr(existing_module, tree_name)
        new_tree = type(
            tree.__name__,
            (tree,),
            {
                '__slots__': (),
                'max_internal_size': MAX_INTERNAL_SIZE,
                'max_leaf_size': MAX_LEAF_SIZE,
                '__module__': new_module.__name__
            }
        )
        setattr(new_module, tree_name, new_tree)
        # Also expose the fully prefixed alias (e.g. ``LLBTree``).
        full_name = real_prefix + tree_name
        setattr(new_module, full_name, new_tree)
    # Copy over any remaining interface attributes from the original module
    # that we did not already define.
    for iface in provides:
        for name in iface:
            if not hasattr(new_module, name):
                setattr(new_module, name, getattr(existing_module, name))
    # Register so pickling of the new classes can resolve their module.
    sys.modules[new_module.__name__] = new_module
    return new_module

@interface.implementer(IBTreeFamily)
class _Family64LargeBuckets(object):
    # Like ``BTrees.family64``, but every module uses large buckets.

    def __init__(self):
        # Integer limits mirror the stock 64-bit family.
        self.maxint = BTrees.family64.maxint
        self.minint = BTrees.family64.minint
        self.maxuint = BTrees.family64.maxuint
        # For the remaining IBTreeFamily attributes (the module names),
        # build a large-bucket clone of the corresponding family64 module.
        for name in IBTreeFamily:
            if hasattr(self, name):
                continue  # maxint/minint/maxuint were set above
            # Generic I/U prefixes map to the 64-bit L/Q prefixes.
            prefix = name.replace('I', 'L').replace('U', 'Q')
            mod = _make_large_module(
                getattr(BTrees.family64, name),
                name,
                prefix)
            mod.family = self
            setattr(self, name, mod)

    def __reduce__(self):
        # Pickle via the module-level factory so unpickling returns the
        # shared singleton below.
        return _family64LargeBuckets, ()

def _family64LargeBuckets():
    # Pickle helper: resolve to the module-level singleton.
    return family64LargeBuckets
_family64LargeBuckets.__safe_for_unpickling__ = True
#: A BTree family (:class:`BTrees.Interfaces.IBTreeFamily`)
#: where all modules have BTree and TreeSet objects that use
#: larger buckets than the default.
#: (Removed dataset-dump metadata and a stray mid-file ``__future__``
#: import that were fused onto this line; a ``from __future__`` import
#: anywhere but the top of a module is a SyntaxError.)
family64LargeBuckets = _Family64LargeBuckets()
from __future__ import division
from __future__ import print_function
import sys
import BTrees
from BTrees.Interfaces import IBTreeFamily
from BTrees.Interfaces import IBTreeModule
from zope import interface
__all__ = (
'MAX_LEAF_SIZE',
'MAX_INTERNAL_SIZE',
'family64LargeBuckets',
)
#: The value used for ``max_leaf_size`` in all
#: BTree classes available in :obj:`family64LargeBuckets`.
MAX_LEAF_SIZE = 500
#: The value used for ``max_internal_size`` in all
#: BTree classes available in :obj:`family64LargeBuckets`
MAX_INTERNAL_SIZE = MAX_LEAF_SIZE
# We use the same value to keep it simple and easier to predict.
def _make_large_module(existing_module, generic_prefix, real_prefix):
    # Clone a family64 BTree module, subclassing each tree class to use
    # the larger MAX_INTERNAL_SIZE/MAX_LEAF_SIZE bucket limits, and
    # register the clone in sys.modules so its classes can be pickled.
    new_module = type(existing_module)('nti.zodb.btrees.' + generic_prefix + 'BTree')
    provides = interface.providedBy(existing_module)
    interface.directlyProvides(new_module, provides)
    for tree_name in IBTreeModule:
        tree = getattr(existing_module, tree_name)
        new_tree = type(
            tree.__name__,
            (tree,),
            {
                '__slots__': (),
                'max_internal_size': MAX_INTERNAL_SIZE,
                'max_leaf_size': MAX_LEAF_SIZE,
                '__module__': new_module.__name__
            }
        )
        setattr(new_module, tree_name, new_tree)
        # Also expose the fully prefixed alias (e.g. ``LLBTree``).
        full_name = real_prefix + tree_name
        setattr(new_module, full_name, new_tree)
    # Copy over any remaining interface attributes from the original.
    for iface in provides:
        for name in iface:
            if not hasattr(new_module, name):
                setattr(new_module, name, getattr(existing_module, name))
    sys.modules[new_module.__name__] = new_module
    return new_module

@interface.implementer(IBTreeFamily)
class _Family64LargeBuckets(object):
    # Like ``BTrees.family64``, but every module uses large buckets.

    def __init__(self):
        self.maxint = BTrees.family64.maxint
        self.minint = BTrees.family64.minint
        self.maxuint = BTrees.family64.maxuint
        for name in IBTreeFamily:
            if hasattr(self, name):
                continue  # maxint/minint/maxuint were set above
            # Generic I/U prefixes map to the 64-bit L/Q prefixes.
            prefix = name.replace('I', 'L').replace('U', 'Q')
            mod = _make_large_module(
                getattr(BTrees.family64, name),
                name,
                prefix)
            mod.family = self
            setattr(self, name, mod)

    def __reduce__(self):
        # Pickle via the module-level factory, preserving the singleton.
        return _family64LargeBuckets, ()

def _family64LargeBuckets():
    # Pickle helper: resolve to the module-level singleton.
    return family64LargeBuckets
_family64LargeBuckets.__safe_for_unpickling__ = True
#: A BTree family (:class:`BTrees.Interfaces.IBTreeFamily`)
#: where all modules have BTree and TreeSet objects that use
#: larger buckets than the default.
#: (Removed dataset-dump metadata fused onto this line.)
family64LargeBuckets = _Family64LargeBuckets()
from models.cbba import CBBA
from models.ccbba import CCBBA
import logging
import time
from numpy.random import randint, choice, seed
from numpy import exp
from math import sqrt
import numpy as np
# Problem size: bases, agents, and delivery tasks to allocate.
num_bases = 5
num_agents = 3
num_deliveries = 20
# seed(10)
# Random 2-D positions on a 1..9 grid.
bases_pos = randint(1, 10, [num_bases, 2]).tolist()
agents_pos = randint(1, 10, [num_agents, 2]).tolist()
delivery_pos = []
delivery_target = []
delivery_reward= []
for d in range(num_deliveries):
    # Pick two distinct bases: pickup location and drop-off target.
    pos_target = choice(num_bases, 2, replace=False)
    delivery_pos.append(pos_target[0])
    delivery_target.append(pos_target[1])
    delivery_reward.append(100)
# Task expansion: split each delivery into a direct task plus feasible
# multi-leg (transshipment) alternatives through up to max_transit bases.
max_transit = 1
tasks_pos = []
tasks_target = []
tasks_reward = []
activities = []
dependencies = []
temporals = []
num_task = 0
for d in range(len(delivery_pos)):
    activities.append([])
    dependencies.append([])
    temporals.append([])
    # Direct delivery task (pickup -> target).
    tasks_pos.append(delivery_pos[d])
    tasks_target.append(delivery_target[d])
    tasks_reward.append(delivery_reward[d])
    activities[d].append(num_task)
    delv_pos = bases_pos[delivery_pos[d]]
    delv_target = bases_pos[delivery_target[d]]
    # Straight-line pickup-to-target distance, used to reject long legs.
    delv_dist = sqrt( (delv_target[0] - delv_pos[0])**2 + (delv_target[1] - delv_pos[1])**2 )
    num_task = num_task + 1
    transit_index = [0]
    while True:
        # Candidate path seeded as [target, pickup]; transit bases appended.
        task_path = [delivery_target[d], delivery_pos[d]]
        is_feasible = True
        is_done = False
        # Odometer-style enumeration over transit-base combinations.
        for n in range(len(transit_index)):
            if transit_index[n] in task_path:
                is_feasible = False
            if transit_index[n] >= len(bases_pos):
                # Digit overflow: reset and carry (or widen / finish).
                transit_index[n] = 0
                if n == len(transit_index) - 1:
                    if len(transit_index) == max_transit:
                        is_done = True
                        break
                    transit_index.append(0)
                else:
                    transit_index[n+1] = transit_index[n+1] + 1
            if is_feasible:
                task_path.append(transit_index[n])
        if is_done:
            break
        if is_feasible:
            # Turn [target, pickup, transit...] into [pickup, transit..., target].
            del task_path[0]
            task_path.append(delivery_target[d])
            distance = []
            total_distance = 0
            for j in range(1, len(task_path)):
                path_pos = bases_pos[task_path[j-1]]
                path_target = bases_pos[task_path[j]]
                path_dist = sqrt( (path_target[0] - path_pos[0])**2 + (path_target[1] - path_pos[1])**2 )
                # Reject paths with any leg longer than the direct route.
                if path_dist > delv_dist:
                    is_feasible = False
                    break
                total_distance = total_distance + path_dist
                distance.append(path_dist)
        if is_feasible:
            print(task_path)
            # One task per leg; reward split in proportion to leg distance.
            for j in range(1, len(task_path)):
                activities[d].append(num_task)
                tasks_pos.append(task_path[j - 1])
                tasks_target.append(task_path[j])
                tasks_reward.append(delivery_reward[d] * distance[j-1] / total_distance)
                num_task = num_task + 1
            # Grow the activity's dependency/temporal matrices: -1 / 1e10
            # defaults, 0 on the diagonal, mutual dependency (1) among the
            # new legs, zero temporal slack between consecutive legs.
            for q in range(len(activities[d])):
                if len(dependencies[d]) <= q:
                    dependencies[d].append([])
                    temporals[d].append([])
                for u in range(len(activities[d])):
                    if len(dependencies[d][q]) <= u:
                        dependencies[d][q].append(-1)
                        temporals[d][q].append(1e+10)
                    if q == u:
                        dependencies[d][q][u] = 0
                        temporals[d][q][u] = 0
                        continue
                    if q > len(activities[d]) - len(task_path) and u > len(activities[d]) - len(task_path):
                        dependencies[d][q][u] = 1
                    if u > len(activities[d]) - len(task_path) + 1 and q == u - 1:
                        temporals[d][q][u] = 0
        transit_index[0] = transit_index[0] + 1
time_start = time.time()
logging.getLogger().setLevel(logging.DEBUG)
logging.debug('Starting Test Program')
# Baseline CBBA on the raw (direct-only) delivery tasks.
cbba = CBBA(bases_pos=bases_pos, agents_pos=agents_pos, tasks_pos=delivery_pos, tasks_target=delivery_target, tasks_reward=delivery_reward)
cbba.run()
# Coupled-constraint CBBA on the expanded activity/task set.
ccbba = CCBBA(activities=activities, dependencies=dependencies, temporals=temporals, bases_pos=bases_pos,
              agents_pos=agents_pos, tasks_pos=tasks_pos, tasks_target=tasks_target, tasks_reward=tasks_reward)
ccbba.run()
time_end = time.time()
logging.info('Running time: %.2f', time_end - time_start)
#%%
# Report which agent was assigned each activity's tasks.
# (Renamed the accumulator from `str`, which shadowed the builtin `str`
# at module level for the rest of the script.)
logging.info('=== CBBA ============================================')
for d in range(len(activities)):
    line = 'Activity %d: ' % d
    for agent in cbba.agents:
        for task_id in agent.path:
            if task_id == d:
                line = '%s%d (%d)' % (line, task_id, agent.id)
    logging.info(line)
logging.info('=== CCBBA ============================================')
for d in range(len(activities)):
    line = 'Activity %d: ' % d
    for agent in ccbba.agents:
        for task_id in agent.path:
            if task_id in activities[d]:
                line = '%s%d (%d)' % (line, task_id, agent.id)
    logging.info(line)
#%%
from matplotlib import pyplot as plt
logging.getLogger().setLevel(logging.INFO)
fig = plt.figure()
ax = fig.gca()
ax.set_xticks(np.arange(0, 10))
ax.set_yticks(np.arange(0, 10))
plt.grid()
# Bases as large grey squares.
for base_pos in bases_pos:
    plt.scatter(base_pos[0], base_pos[1], marker='s', s=1000, c='#dddddd')
# Delivery pickup locations as black dots.
for d in range(len(delivery_pos)):
    plt.scatter(bases_pos[delivery_pos[d]][0], bases_pos[delivery_pos[d]][1], marker='o', c='#000000', s=100)
# Agents as triangles.
for agent in ccbba.agents:
    plt.scatter(agent.pos[0], agent.pos[1], marker='^', s=200)
# Draw each agent's route: start position, then pickup -> target per task.
for agent in ccbba.agents:
    path = agent.path
    x = [agent.pos[0]]
    y = [agent.pos[1]]
    for task_id in path:
        x.append(agent.tasks[task_id].pos[0])
        y.append(agent.tasks[task_id].pos[1])
        x.append(agent.tasks[task_id].target[0])
        y.append(agent.tasks[task_id].target[1])
    plt.plot(x, y)
# (Removed dataset-dump metadata and a fused repo-path/import that were
# appended after this call.)
plt.show()
from models.ccbba import CCBBA
import logging
import time
from numpy.random import randint, choice, seed
from numpy import exp
from math import sqrt
import numpy as np
# --- Scenario generation --------------------------------------------------
# NOTE(review): the source had flattened indentation; nesting reconstructed —
# verify against the original repository before relying on it.
num_bases = 5
num_agents = 3
num_deliveries = 20
# seed(10)
bases_pos = randint(1, 10, [num_bases, 2]).tolist()
agents_pos = randint(1, 10, [num_agents, 2]).tolist()
delivery_pos = []
delivery_target = []
delivery_reward = []
for d in range(num_deliveries):
    # Two distinct bases: pickup and dropoff of this delivery.
    pos_target = choice(num_bases, 2, replace=False)
    delivery_pos.append(pos_target[0])
    delivery_target.append(pos_target[1])
    delivery_reward.append(100)
# Build the CCBBA task set: for every delivery, the direct task plus any
# feasible multi-leg alternative routed through up to `max_transit`
# intermediate bases.
max_transit = 1
tasks_pos = []
tasks_target = []
tasks_reward = []
activities = []    # activities[d] -> task ids that can fulfil delivery d
dependencies = []  # dependencies[d][q][u] takes values -1 / 0 / 1
temporals = []     # temporals[d][q][u]: temporal constraint (1e+10 = none)
num_task = 0
for d in range(len(delivery_pos)):
    activities.append([])
    dependencies.append([])
    temporals.append([])
    # The direct pickup -> dropoff task always exists.
    tasks_pos.append(delivery_pos[d])
    tasks_target.append(delivery_target[d])
    tasks_reward.append(delivery_reward[d])
    activities[d].append(num_task)
    delv_pos = bases_pos[delivery_pos[d]]
    delv_target = bases_pos[delivery_target[d]]
    delv_dist = sqrt((delv_target[0] - delv_pos[0])**2 + (delv_target[1] - delv_pos[1])**2)
    num_task = num_task + 1
    # Enumerate transit-base combinations with an odometer-style counter.
    transit_index = [0]
    while True:
        task_path = [delivery_target[d], delivery_pos[d]]
        is_feasible = True
        is_done = False
        for n in range(len(transit_index)):
            if transit_index[n] in task_path:
                # Transit base coincides with one already on the path.
                is_feasible = False
            if transit_index[n] >= len(bases_pos):
                # Carry: reset this digit, bump the next one or widen the
                # counter; stop once max_transit digits have overflowed.
                transit_index[n] = 0
                if n == len(transit_index) - 1:
                    if len(transit_index) == max_transit:
                        is_done = True
                        break
                    transit_index.append(0)
                else:
                    transit_index[n+1] = transit_index[n+1] + 1
            if is_feasible:
                task_path.append(transit_index[n])
        if is_done:
            break
        if is_feasible:
            # Re-order into pickup -> transits -> dropoff, measure each leg.
            del task_path[0]
            task_path.append(delivery_target[d])
            distance = []
            total_distance = 0
            for j in range(1, len(task_path)):
                path_pos = bases_pos[task_path[j-1]]
                path_target = bases_pos[task_path[j]]
                path_dist = sqrt((path_target[0] - path_pos[0])**2 + (path_target[1] - path_pos[1])**2)
                if path_dist > delv_dist:
                    # Reject routes with a leg longer than the direct trip.
                    is_feasible = False
                    break
                total_distance = total_distance + path_dist
                distance.append(path_dist)
        if is_feasible:
            print(task_path)
            # One task per leg; reward split proportionally to leg length.
            for j in range(1, len(task_path)):
                activities[d].append(num_task)
                tasks_pos.append(task_path[j - 1])
                tasks_target.append(task_path[j])
                tasks_reward.append(delivery_reward[d] * distance[j-1] / total_distance)
                num_task = num_task + 1
            # Grow the dependency/temporal matrices for this activity group.
            for q in range(len(activities[d])):
                if len(dependencies[d]) <= q:
                    dependencies[d].append([])
                    temporals[d].append([])
                for u in range(len(activities[d])):
                    if len(dependencies[d][q]) <= u:
                        dependencies[d][q].append(-1)
                        temporals[d][q].append(1e+10)
                    if q == u:
                        dependencies[d][q][u] = 0
                        temporals[d][q][u] = 0
                        continue
                    # Legs of the route just added depend on each other;
                    # consecutive legs get zero temporal slack.
                    if q > len(activities[d]) - len(task_path) and u > len(activities[d]) - len(task_path):
                        dependencies[d][q][u] = 1
                        if u > len(activities[d]) - len(task_path) + 1 and q == u - 1:
                            temporals[d][q][u] = 0
        transit_index[0] = transit_index[0] + 1
# --- Run both allocators on the shared scenario, report and plot ----------
# NOTE(review): the source had flattened indentation; nesting reconstructed.
time_start = time.time()
logging.getLogger().setLevel(logging.DEBUG)
logging.debug('Starting Test Program')
cbba = CBBA(bases_pos=bases_pos, agents_pos=agents_pos, tasks_pos=delivery_pos, tasks_target=delivery_target, tasks_reward=delivery_reward)
cbba.run()
ccbba = CCBBA(activities=activities, dependencies=dependencies, temporals=temporals, bases_pos=bases_pos,
              agents_pos=agents_pos, tasks_pos=tasks_pos, tasks_target=tasks_target, tasks_reward=tasks_reward)
ccbba.run()
time_end = time.time()
logging.info('Running time: %.2f', time_end - time_start)
#%%
# Report which agent's path contains each activity under plain CBBA.
logging.info('=== CBBA ============================================')
for d in range(len(activities)):
    # NOTE(review): `str` shadows the builtin; kept byte-identical here.
    str = 'Activity %d: ' % d
    for agent in cbba.agents:
        for task_id in agent.path:
            if task_id == d:
                str = '%s%d (%d)' % (str, task_id, agent.id)
    logging.info(str)
# Same report for CCBBA, where an activity maps to a list of task ids.
logging.info('=== CCBBA ============================================')
for d in range(len(activities)):
    str = 'Activity %d: ' % d
    for agent in ccbba.agents:
        for task_id in agent.path:
            if task_id in activities[d]:
                str = '%s%d (%d)' % (str, task_id, agent.id)
    logging.info(str)
#%%
# Plot bases (grey squares), delivery pickups (black dots), agents
# (triangles), and each CCBBA agent's pos -> task pickup/target route.
from matplotlib import pyplot as plt
logging.getLogger().setLevel(logging.INFO)
fig = plt.figure()
ax = fig.gca()
ax.set_xticks(np.arange(0, 10))
ax.set_yticks(np.arange(0, 10))
plt.grid()
for base_pos in bases_pos:
    plt.scatter(base_pos[0], base_pos[1], marker='s', s=1000, c='#dddddd')
for d in range(len(delivery_pos)):
    plt.scatter(bases_pos[delivery_pos[d]][0], bases_pos[delivery_pos[d]][1], marker='o', c='#000000', s=100)
for agent in ccbba.agents:
    plt.scatter(agent.pos[0], agent.pos[1], marker='^', s=200)
for agent in ccbba.agents:
    path = agent.path
    x = [agent.pos[0]]
    y = [agent.pos[1]]
    for task_id in path:
        x.append(agent.tasks[task_id].pos[0])
        y.append(agent.tasks[task_id].pos[1])
        x.append(agent.tasks[task_id].target[0])
        y.append(agent.tasks[task_id].target[1])
    plt.plot(x, y)
plt.show() | 0.107093 | 0.309011 |
# Console exercises on conditionals. Prompts and messages are user-facing
# runtime strings and are intentionally kept in Portuguese.
import random

# === Exercise 01: guess the number drawn in 1..5 ===
num = int(input("Seu palpite:"))
sor = random.randint(1, 5)
if (num == sor):
    print("O número sorteado foi {}.\nVocê é o bichão mesmo hein".format(sor))
else:
    print("O número sorteado foi {}.\nTente novamente".format(sor))

# === Exercise 02: speeding fine — R$7 per km/h above the 80 km/h limit ===
vel = float(input("velocidade do automóvel: "))
if (vel > 80):
    mult = float((vel - 80) * 7)
    print("O veículo estava acima do limite de velocidade.\nO valor da multa é de: R$ {}. ".format(mult))
else:
    print("O veículo estava dentro do limite de velocidade.")

# === Exercise 03: even or odd ===
num = int(input("Insira o número: "))
if (num % 2 == 0):
    print("O número digitado é par.")
else:
    print("O número digitado é impar.")

# === Exercise 04: ticket price — R$0.50/km up to 200 km, R$0.45/km beyond ===
dist = float(input("Insira a distância da viagem em km:"))
if (dist <= 200):
    print("O preço da passagem é de R${}.".format(0.5 * dist))
else:
    print("O preço da passagem é de R${}.".format(0.45 * dist))

# === Exercise 05: leap year ===
# NOTE(review): divisibility by 4 alone is not the full Gregorian rule
# (century years must also be divisible by 400) — kept as written.
ano = int(input("Insira o ano em questão:"))
if (ano % 4 == 0):
    print("O ano de {} é um ano bissexto.".format(ano))
else:
    print("O ano de {} não é um ano bissexto.".format(ano))

# === Exercise 06: largest and smallest of three numbers ===
n1 = int(input("Insira o primeiro número: "))
n2 = int(input("Insira o segundo número: "))
n3 = int(input("Insira o terceiro número: "))
if (n1 > n2 and n1 > n3):
    print("{} é o maior número.\n".format(n1))
elif (n2 > n1 and n2 > n3):
    print("{} é o maior número.\n".format(n2))
else:
    print("{} é o maior número.\n".format(n3))
if (n1 < n2 and n1 < n3):
    print("{} é o menor número.\n".format(n1))
elif (n2 < n1 and n2 < n3):
    print("{} é o menor número.\n".format(n2))
else:
    print("{} é o menor número.\n".format(n3))

# === Exercise 07: raise of 15% up to R$1250.00, otherwise 10% ===
sal = float(input("Insira o valor do salário: "))
if (sal <= 1250.0):
    ns = ((15 * sal) / 100)
    print("O seu aumento será de R${}.\nTotalizando R${}.".format(ns, (sal + ns)))
else:
    ns = ((10 * sal) / 100)
    print("O seu aumento será de R${}.\nTotalizando R${}.".format(ns, (sal + ns)))

# === Exercise 08: triangle inequality for three segment lengths ===
a = float(input("Insira a medida da primeira reta: "))
b = float(input("Insira a medida da segunda reta: "))
c = float(input("Insira a medida da terceira reta: "))
if ((b - c) < a < (b + c) and (a - c) < b < (a + c) and (a - b) < c < (a + b)):
    print("Essas 3 retas são capazes de formar um triângulo.")
else:
print("Essas 3 retas não formam um triângulo.") | pacote download/aula10.py | import random
# Console exercises on conditionals. Prompts and messages are user-facing
# runtime strings and are intentionally kept in Portuguese.
# === Exercise 01: guess the number drawn in 1..5 ===
num = int(input("Seu palpite:"))
sor = random.randint(1, 5)
if (num == sor):
    print("O número sorteado foi {}.\nVocê é o bichão mesmo hein".format(sor))
else:
    print("O número sorteado foi {}.\nTente novamente".format(sor))

# === Exercise 02: speeding fine — R$7 per km/h above the 80 km/h limit ===
vel = float(input("velocidade do automóvel: "))
if (vel > 80):
    mult = float((vel - 80) * 7)
    print("O veículo estava acima do limite de velocidade.\nO valor da multa é de: R$ {}. ".format(mult))
else:
    print("O veículo estava dentro do limite de velocidade.")

# === Exercise 03: even or odd ===
num = int(input("Insira o número: "))
if (num % 2 == 0):
    print("O número digitado é par.")
else:
    print("O número digitado é impar.")

# === Exercise 04: ticket price — R$0.50/km up to 200 km, R$0.45/km beyond ===
dist = float(input("Insira a distância da viagem em km:"))
if (dist <= 200):
    print("O preço da passagem é de R${}.".format(0.5 * dist))
else:
    print("O preço da passagem é de R${}.".format(0.45 * dist))

# === Exercise 05: leap year ===
# NOTE(review): divisibility by 4 alone is not the full Gregorian rule
# (century years must also be divisible by 400) — kept as written.
ano = int(input("Insira o ano em questão:"))
if (ano % 4 == 0):
    print("O ano de {} é um ano bissexto.".format(ano))
else:
    print("O ano de {} não é um ano bissexto.".format(ano))

# === Exercise 06: largest and smallest of three numbers ===
n1 = int(input("Insira o primeiro número: "))
n2 = int(input("Insira o segundo número: "))
n3 = int(input("Insira o terceiro número: "))
if (n1 > n2 and n1 > n3):
    print("{} é o maior número.\n".format(n1))
elif (n2 > n1 and n2 > n3):
    print("{} é o maior número.\n".format(n2))
else:
    print("{} é o maior número.\n".format(n3))
if (n1 < n2 and n1 < n3):
    print("{} é o menor número.\n".format(n1))
elif (n2 < n1 and n2 < n3):
    print("{} é o menor número.\n".format(n2))
else:
    print("{} é o menor número.\n".format(n3))

# === Exercise 07: raise of 15% up to R$1250.00, otherwise 10% ===
sal = float(input("Insira o valor do salário: "))
if (sal <= 1250.0):
    ns = ((15 * sal) / 100)
    print("O seu aumento será de R${}.\nTotalizando R${}.".format(ns, (sal + ns)))
else:
    ns = ((10 * sal) / 100)
    print("O seu aumento será de R${}.\nTotalizando R${}.".format(ns, (sal + ns)))

# === Exercise 08: triangle inequality for three segment lengths ===
a = float(input("Insira a medida da primeira reta: "))
b = float(input("Insira a medida da segunda reta: "))
c = float(input("Insira a medida da terceira reta: "))
if ((b - c) < a < (b + c) and (a - c) < b < (a + c) and (a - b) < c < (a + b)):
    print("Essas 3 retas são capazes de formar um triângulo.")
else:
print("Essas 3 retas não formam um triângulo.") | 0.131507 | 0.357287 |
import origen, _origen, pytest # pylint: disable=import-error
from tests.shared import clean_falcon, clean_eagle # pylint: disable=import-error
from tests.pins import pins, is_pin_group # pylint: disable=import-error
class TestPinsInDUTHierarchy:
    """Pins added on a sub-block stay local and do not leak to the DUT."""

    def test_pins_in_subblocks(self, clean_falcon, pins):
        # We should have pins at the DUT level, but not in any subblocks.
        assert len(origen.dut.pins) == 4
        assert len(origen.dut.sub_blocks["core1"].pins) == 0
        # Add a pin at the subblock.
        assert origen.dut.sub_blocks["core1"].add_pin("p1")
        assert len(origen.dut.sub_blocks["core1"].pins) == 1
        p = origen.dut.sub_blocks["core1"].pin("p1")
        is_pin_group(p)
        # Add another pin
        assert origen.dut.sub_blocks["core1"].add_pin("_p1")
        assert len(origen.dut.sub_blocks["core1"].pins) == 2
        _p = origen.dut.sub_blocks["core1"].pin("_p1")
        is_pin_group(_p)
        # Verify the pins at origen.dut are unchanged.
        assert len(origen.dut.pins) == 4
        assert origen.dut.pin("_p1") is None
class TestPinMetadata:
    """Exercises add/get/set of arbitrary metadata on physical pins.

    NOTE: these tests build on state created by earlier tests in the class
    (e.g. the keys asserted in test_getting_all_metadata_keys are added by
    test_adding_metadata_to_physical_pin), so they rely on pytest running
    them in definition order against the same 'eagle' DUT.
    """

    class MyRandomClass:
        # Arbitrary user type used to prove any object can be metadata.
        pass

    def test_physical_pin_has_empty_metadata(self, clean_eagle):
        assert origen.dut.physical_pin("porta0").added_metadata == []

    def test_adding_metadata_to_physical_pin(self):
        # Essentially just check that nothing here throws an exception
        origen.dut.physical_pin("porta0").add_metadata("meta1", 1)
        origen.dut.physical_pin("porta0").add_metadata("meta2", "meta2!")
        origen.dut.physical_pin("porta0").add_metadata("meta3", {})
        origen.dut.physical_pin("porta0").add_metadata(
            "meta4", TestPinMetadata.MyRandomClass())

    def test_getting_all_metadata_keys(self):
        assert origen.dut.physical_pin("porta0").added_metadata == [
            "meta1", "meta2", "meta3", "meta4"
        ]

    def test_getting_metadata_from_physical_pin(self):
        assert origen.dut.physical_pin("porta0").get_metadata("meta1") == 1
        assert origen.dut.physical_pin("porta0").get_metadata(
            "meta2") == "meta2!"
        assert isinstance(
            origen.dut.physical_pin("porta0").get_metadata("meta3"), dict)
        assert isinstance(
            origen.dut.physical_pin("porta0").get_metadata("meta4"),
            TestPinMetadata.MyRandomClass)

    def test_setting_existing_metadata_on_physical_pin(self):
        assert origen.dut.physical_pin("porta0").set_metadata("meta1", "hi!")
        assert origen.dut.physical_pin("porta0").set_metadata(
            "meta2", "meta2 updated!")
        assert origen.dut.physical_pin("porta0").get_metadata("meta1") == "hi!"
        assert origen.dut.physical_pin("porta0").get_metadata(
            "meta2") == "meta2 updated!"

    def test_setting_nonexistant_metadata_adds_it(self):
        # set_metadata returns False when the key did not exist yet,
        # but still stores the value.
        assert origen.dut.physical_pin('porta0').get_metadata("meta5") is None
        assert origen.dut.physical_pin("porta0").set_metadata("meta5",
                                                              5.0) == False
        assert origen.dut.physical_pin("porta0").get_metadata("meta5") == 5.0

    def test_interacting_with_reference_metadata(self):
        # Mutations of a retrieved dict are visible on later retrievals,
        # i.e. get_metadata returns a reference, not a copy.
        d = origen.dut.physical_pin("porta0").get_metadata("meta3")
        assert isinstance(d, dict)
        assert "test" not in d
        d["test"] = True
        assert "test" in d
        d2 = origen.dut.physical_pin("porta0").get_metadata("meta3")
        assert "test" in d2

    def test_nonetype_on_retrieving_nonexistant_metadata(self):
        assert origen.dut.physical_pin("porta0").get_metadata("blah") is None

    def test_exception_on_adding_duplicate_metadata(self):
        with pytest.raises(OSError):
            origen.dut.physical_pin("porta0").add_metadata("meta1", False)

    def test_additional_metadata(self):
        # Metadata added on porta1 is not visible on porta0.
        origen.dut.physical_pin('porta1').add_metadata("m1", 1.0)
        origen.dut.physical_pin('porta1').add_metadata("m2", -2)
        assert origen.dut.physical_pin('porta1').get_metadata("m1") == 1.0
        assert origen.dut.physical_pin('porta1').get_metadata("m2") == -2
        assert origen.dut.physical_pin('porta0').get_metadata("m1") is None
        assert origen.dut.physical_pin('porta0').get_metadata("m2") is None

    def test_metadata_with_same_name_on_different_objects(self):
        origen.dut.physical_pin('porta0').add_metadata("index", 0)
        origen.dut.physical_pin('porta1').add_metadata("index", 1)
        assert origen.dut.physical_pin('porta0').get_metadata("index") == 0
        assert origen.dut.physical_pin('porta1').get_metadata("index") == 1
class TestPinLoaderAPI:
    """Checks the pin and pin-header definitions loaded for the 'eagle' DUT."""

    def test_pin_loader_api(self, clean_eagle):
        # Full, ordered pin list as declared by the target's pin loader.
        assert origen.dut.pins.keys() == [
            "porta0", "porta1", "porta", "portb0", "portb1", "portb2",
            "portb3", "portb", "portc0", "portc1", "portc", "clk", "swd_clk",
            "swdclk", "tclk", "swdio", "reset"
        ]
        # Reset action string defined for the clock pin.
        assert origen.dut.pin("clk").reset_actions == "0"
        assert origen.dut.pin_headers.keys() == [
            "ports", "clk", "all", "pins-for-toggle", "pins-for-toggle-rev",
            "swd", 'cap_test'
        ]
        assert origen.dut.pin_headers["ports"].pin_names == [
            "porta", "portb", "portc"
        ]
assert origen.dut.pin_headers["clk"].pin_names == ["clk"] | test_apps/python_app/tests/pins/test_pin_misc.py | import origen, _origen, pytest # pylint: disable=import-error
from tests.shared import clean_falcon, clean_eagle # pylint: disable=import-error
from tests.pins import pins, is_pin_group # pylint: disable=import-error
class TestPinsInDUTHierarchy:
    """Pins added on a sub-block stay local and do not leak to the DUT."""

    def test_pins_in_subblocks(self, clean_falcon, pins):
        # We should have pins at the DUT level, but not in any subblocks.
        assert len(origen.dut.pins) == 4
        assert len(origen.dut.sub_blocks["core1"].pins) == 0
        # Add a pin at the subblock.
        assert origen.dut.sub_blocks["core1"].add_pin("p1")
        assert len(origen.dut.sub_blocks["core1"].pins) == 1
        p = origen.dut.sub_blocks["core1"].pin("p1")
        is_pin_group(p)
        # Add another pin
        assert origen.dut.sub_blocks["core1"].add_pin("_p1")
        assert len(origen.dut.sub_blocks["core1"].pins) == 2
        _p = origen.dut.sub_blocks["core1"].pin("_p1")
        is_pin_group(_p)
        # Verify the pins at origen.dut are unchanged.
        assert len(origen.dut.pins) == 4
        assert origen.dut.pin("_p1") is None
class TestPinMetadata:
    """Exercises add/get/set of arbitrary metadata on physical pins.

    NOTE: these tests build on state created by earlier tests in the class
    (e.g. the keys asserted in test_getting_all_metadata_keys are added by
    test_adding_metadata_to_physical_pin), so they rely on pytest running
    them in definition order against the same 'eagle' DUT.
    """

    class MyRandomClass:
        # Arbitrary user type used to prove any object can be metadata.
        pass

    def test_physical_pin_has_empty_metadata(self, clean_eagle):
        assert origen.dut.physical_pin("porta0").added_metadata == []

    def test_adding_metadata_to_physical_pin(self):
        # Essentially just check that nothing here throws an exception
        origen.dut.physical_pin("porta0").add_metadata("meta1", 1)
        origen.dut.physical_pin("porta0").add_metadata("meta2", "meta2!")
        origen.dut.physical_pin("porta0").add_metadata("meta3", {})
        origen.dut.physical_pin("porta0").add_metadata(
            "meta4", TestPinMetadata.MyRandomClass())

    def test_getting_all_metadata_keys(self):
        assert origen.dut.physical_pin("porta0").added_metadata == [
            "meta1", "meta2", "meta3", "meta4"
        ]

    def test_getting_metadata_from_physical_pin(self):
        assert origen.dut.physical_pin("porta0").get_metadata("meta1") == 1
        assert origen.dut.physical_pin("porta0").get_metadata(
            "meta2") == "meta2!"
        assert isinstance(
            origen.dut.physical_pin("porta0").get_metadata("meta3"), dict)
        assert isinstance(
            origen.dut.physical_pin("porta0").get_metadata("meta4"),
            TestPinMetadata.MyRandomClass)

    def test_setting_existing_metadata_on_physical_pin(self):
        assert origen.dut.physical_pin("porta0").set_metadata("meta1", "hi!")
        assert origen.dut.physical_pin("porta0").set_metadata(
            "meta2", "meta2 updated!")
        assert origen.dut.physical_pin("porta0").get_metadata("meta1") == "hi!"
        assert origen.dut.physical_pin("porta0").get_metadata(
            "meta2") == "meta2 updated!"

    def test_setting_nonexistant_metadata_adds_it(self):
        # set_metadata returns False when the key did not exist yet,
        # but still stores the value.
        assert origen.dut.physical_pin('porta0').get_metadata("meta5") is None
        assert origen.dut.physical_pin("porta0").set_metadata("meta5",
                                                              5.0) == False
        assert origen.dut.physical_pin("porta0").get_metadata("meta5") == 5.0

    def test_interacting_with_reference_metadata(self):
        # Mutations of a retrieved dict are visible on later retrievals,
        # i.e. get_metadata returns a reference, not a copy.
        d = origen.dut.physical_pin("porta0").get_metadata("meta3")
        assert isinstance(d, dict)
        assert "test" not in d
        d["test"] = True
        assert "test" in d
        d2 = origen.dut.physical_pin("porta0").get_metadata("meta3")
        assert "test" in d2

    def test_nonetype_on_retrieving_nonexistant_metadata(self):
        assert origen.dut.physical_pin("porta0").get_metadata("blah") is None

    def test_exception_on_adding_duplicate_metadata(self):
        with pytest.raises(OSError):
            origen.dut.physical_pin("porta0").add_metadata("meta1", False)

    def test_additional_metadata(self):
        # Metadata added on porta1 is not visible on porta0.
        origen.dut.physical_pin('porta1').add_metadata("m1", 1.0)
        origen.dut.physical_pin('porta1').add_metadata("m2", -2)
        assert origen.dut.physical_pin('porta1').get_metadata("m1") == 1.0
        assert origen.dut.physical_pin('porta1').get_metadata("m2") == -2
        assert origen.dut.physical_pin('porta0').get_metadata("m1") is None
        assert origen.dut.physical_pin('porta0').get_metadata("m2") is None

    def test_metadata_with_same_name_on_different_objects(self):
        origen.dut.physical_pin('porta0').add_metadata("index", 0)
        origen.dut.physical_pin('porta1').add_metadata("index", 1)
        assert origen.dut.physical_pin('porta0').get_metadata("index") == 0
        assert origen.dut.physical_pin('porta1').get_metadata("index") == 1
class TestPinLoaderAPI:
    """Checks the pin and pin-header definitions loaded for the 'eagle' DUT."""

    def test_pin_loader_api(self, clean_eagle):
        # Full, ordered pin list as declared by the target's pin loader.
        assert origen.dut.pins.keys() == [
            "porta0", "porta1", "porta", "portb0", "portb1", "portb2",
            "portb3", "portb", "portc0", "portc1", "portc", "clk", "swd_clk",
            "swdclk", "tclk", "swdio", "reset"
        ]
        # Reset action string defined for the clock pin.
        assert origen.dut.pin("clk").reset_actions == "0"
        assert origen.dut.pin_headers.keys() == [
            "ports", "clk", "all", "pins-for-toggle", "pins-for-toggle-rev",
            "swd", 'cap_test'
        ]
        assert origen.dut.pin_headers["ports"].pin_names == [
            "porta", "portb", "portc"
        ]
assert origen.dut.pin_headers["clk"].pin_names == ["clk"] | 0.587943 | 0.443118 |
import plotly.graph_objs as go
from numpy import (
arctanh,
corrcoef,
isnan,
NaN,
percentile,
zeros,
)
from ..fingers.max_activity import FINGERS
from .utils import TICKFONT
def plot_finger_chan(v, chans, parameters=None):
    """Heatmap of per-finger values across channels, symmetric around zero.

    Parameters
    ----------
    v : array-like
        Values with channels on the first axis (transposed for display).
    chans : sequence
        Channel labels for the x-axis.
    parameters : dict, optional
        Analysis parameters; when given, parameters['viz']['colorscale']
        selects the colorscale. Defaults to plotly's built-in scale.

    Returns
    -------
    go.Figure
    """
    # BUG FIX: the original read the colorscale from an undefined global
    # `P`, so every call raised NameError. The colorscale is now taken
    # from an optional `parameters` dict (same shape as in plot_coefs_cc).
    colorscale = None
    if parameters is not None:
        colorscale = parameters['viz']['colorscale']
    fig = go.Figure(
        go.Heatmap(
            x=chans,
            y=FINGERS,
            z=v.T,
            zmax=v.max(),
            zmin=v.max() * -1,  # symmetric color range around zero
            colorscale=colorscale,
        ),
        layout=go.Layout(
            xaxis=dict(
                title='Channels',
            ),
            yaxis=dict(
                title='Fingers',
                autorange='reversed',
            ))
    )
    return fig
def plot_coefs_cc(parameters, df, region, movements):
    """Annotated 5x5 heatmap of finger-pair correlations within one region.

    `movements` selects which cells are filled (the rest stay NaN):
      'flexion'   -> upper triangle, flexion vs flexion
      'extension' -> lower triangle, extension vs extension
      'compare'   -> diagonal only, flexion vs extension of the same finger
    """
    region_type = parameters['ols']['results']['atlas']
    # Boolean mask selecting the channels belonging to `region`.
    i_region = df['channel'][region_type] == region
    cc = zeros((5, 5))
    cc.fill(NaN)
    annots = []
    for i0, f0 in enumerate(FINGERS):
        for i1, f1 in enumerate(FINGERS):
            mov0 = mov1 = movements
            if movements == 'flexion' and i0 >= i1:
                continue
            if movements == 'extension' and i0 <= i1:
                continue
            if movements == 'compare':
                if i0 != i1:
                    continue
                else:
                    mov0 = 'flexion'
                    mov1 = 'extension'
            val = fisher_corrcoef(df[mov0][f0][i_region], df[mov1][f1][i_region])
            cc[i0, i1] = val
            # Print the coefficient inside its heatmap cell.
            annots.append({
                'x': i1,
                'y': i0,
                'showarrow': False,
                'text': f'{val:0.2f}',
                'xref': 'x',
                'yref': 'y',
                'font': TICKFONT,
            })
    traces = [
        go.Heatmap(
            z=cc,
            zmin=-1,
            zmax=1,
            colorscale=parameters['viz']['diverging'],
        ),
    ]
    fig = go.Figure(
        data=traces,
        layout=go.Layout(annotations=annots),
    )
    return fig
def fisher_corrcoef(x0, x1):
    """Pearson correlation of two vectors after Fisher transformation.

    Positions where either transformed vector is NaN (values clipped by
    fisher_trans) are dropped before computing the coefficient.
    """
    t0 = fisher_trans(x0)
    t1 = fisher_trans(x1)
    keep = ~(isnan(t0) | isnan(t1))
    return corrcoef(t0[keep], t1[keep])[0, 1]
def fisher_trans(x):
    """Robustly rescale `x` to (-1, 1) and apply the Fisher transform.

    The 1st/99th percentiles are mapped to -1/+1; entries at or beyond
    those bounds become NaN, and arctanh is applied to the rest.
    """
    hi = percentile(x, 99)
    lo = percentile(x, 1)
    scaled = (x - lo) / (hi - lo) * 2 - 1
    scaled[scaled <= -1] = NaN
    scaled[scaled >= 1] = NaN
    return arctanh(scaled)
from numpy import (
arctanh,
corrcoef,
isnan,
NaN,
percentile,
zeros,
)
from ..fingers.max_activity import FINGERS
from .utils import TICKFONT
def plot_finger_chan(v, chans, parameters=None):
    """Heatmap of per-finger values across channels, symmetric around zero.

    Parameters
    ----------
    v : array-like
        Values with channels on the first axis (transposed for display).
    chans : sequence
        Channel labels for the x-axis.
    parameters : dict, optional
        Analysis parameters; when given, parameters['viz']['colorscale']
        selects the colorscale. Defaults to plotly's built-in scale.

    Returns
    -------
    go.Figure
    """
    # BUG FIX: the original read the colorscale from an undefined global
    # `P`, so every call raised NameError. The colorscale is now taken
    # from an optional `parameters` dict (same shape as in plot_coefs_cc).
    colorscale = None
    if parameters is not None:
        colorscale = parameters['viz']['colorscale']
    fig = go.Figure(
        go.Heatmap(
            x=chans,
            y=FINGERS,
            z=v.T,
            zmax=v.max(),
            zmin=v.max() * -1,  # symmetric color range around zero
            colorscale=colorscale,
        ),
        layout=go.Layout(
            xaxis=dict(
                title='Channels',
            ),
            yaxis=dict(
                title='Fingers',
                autorange='reversed',
            ))
    )
    return fig
def plot_coefs_cc(parameters, df, region, movements):
    """Annotated 5x5 heatmap of finger-pair correlations within one region.

    `movements` selects which cells are filled (the rest stay NaN):
      'flexion'   -> upper triangle, flexion vs flexion
      'extension' -> lower triangle, extension vs extension
      'compare'   -> diagonal only, flexion vs extension of the same finger
    """
    region_type = parameters['ols']['results']['atlas']
    # Boolean mask selecting the channels belonging to `region`.
    i_region = df['channel'][region_type] == region
    cc = zeros((5, 5))
    cc.fill(NaN)
    annots = []
    for i0, f0 in enumerate(FINGERS):
        for i1, f1 in enumerate(FINGERS):
            mov0 = mov1 = movements
            if movements == 'flexion' and i0 >= i1:
                continue
            if movements == 'extension' and i0 <= i1:
                continue
            if movements == 'compare':
                if i0 != i1:
                    continue
                else:
                    mov0 = 'flexion'
                    mov1 = 'extension'
            val = fisher_corrcoef(df[mov0][f0][i_region], df[mov1][f1][i_region])
            cc[i0, i1] = val
            # Print the coefficient inside its heatmap cell.
            annots.append({
                'x': i1,
                'y': i0,
                'showarrow': False,
                'text': f'{val:0.2f}',
                'xref': 'x',
                'yref': 'y',
                'font': TICKFONT,
            })
    traces = [
        go.Heatmap(
            z=cc,
            zmin=-1,
            zmax=1,
            colorscale=parameters['viz']['diverging'],
        ),
    ]
    fig = go.Figure(
        data=traces,
        layout=go.Layout(annotations=annots),
    )
    return fig
def fisher_corrcoef(x0, x1):
    """Pearson correlation of two vectors after Fisher transformation.

    Positions where either transformed vector is NaN (values clipped by
    fisher_trans) are dropped before computing the coefficient.
    """
    t0 = fisher_trans(x0)
    t1 = fisher_trans(x1)
    keep = ~(isnan(t0) | isnan(t1))
    return corrcoef(t0[keep], t1[keep])[0, 1]
def fisher_trans(x):
    """Robustly rescale `x` to (-1, 1) and apply the Fisher transform.

    The 1st/99th percentiles are mapped to -1/+1; entries at or beyond
    those bounds become NaN, and arctanh is applied to the rest.
    """
    hi = percentile(x, 99)
    lo = percentile(x, 1)
    scaled = (x - lo) / (hi - lo) * 2 - 1
    scaled[scaled <= -1] = NaN
    scaled[scaled >= 1] = NaN
    return arctanh(scaled)
import csv
CSV_FILE = '/home/lemon/Documents/bcompiler/source/master_transposed.csv'
def get_approval_dates_for_project(project: str, csv_file: str) -> tuple:
    """Collect the MM1..MM11 approval milestones for one project.

    Args:
        project: value matched against the 'Project/Programme Name' column.
        csv_file: path to the master (transposed) CSV file.

    Returns:
        (project, milestones) where milestones maps 'mm1'..'mm11' to dicts
        with 'type' (the 'Approval MMn' cell), 'date' (the
        'Approval MMn Forecast / Actual' cell) and 'position' (always 1).
        The dict is empty when the project is not present.
    """
    # BUG FIX: the original first built a DictReader over the *path string*
    # (a dead, misleading statement); only the file-backed reader is kept.
    d = {}
    with open(csv_file, 'r') as f:
        reader = csv.DictReader(f)
        for line in reader:
            if line['Project/Programme Name'] == project:
                # One loop instead of 11 copy-pasted dict literals.
                # As in the original, a later duplicate row overwrites an
                # earlier one (last match wins).
                for i in range(1, 12):
                    d['mm%d' % i] = dict(
                        type=line['Approval MM%d' % i],
                        date=line['Approval MM%d Forecast / Actual' % i],
                        position=1)
    return project, d
def get_project_names(csv_file: str) -> list:
    """Return every 'Project/Programme Name' cell from the CSV, in file order."""
    names = []
    with open(csv_file, 'r') as source:
        for row in csv.DictReader(source):
            names.append(row['Project/Programme Name'])
    return names
def write_to_csv(project_data: tuple, out_dir: str = '/home/lemon/Desktop') -> None:
    """Write one project's milestones to '<out_dir>/<project name>.csv'.

    Args:
        project_data: (project_name, milestones) as produced by
            get_approval_dates_for_project(); each milestone dict must
            carry 'type' and 'date' keys.
        out_dir: target directory. The default preserves the original
            hard-coded location for backward compatibility; pass an
            explicit directory to write anywhere else.
    """
    project_name = project_data[0]
    with open('{}/{}.csv'.format(out_dir, project_name), 'w') as csv_file:
        fieldnames = ['Project Milestone', 'Date', 'Position']
        writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
        writer.writeheader()
        for item in project_data[1].items():
            writer.writerow({
                'Project Milestone': item[1]['type'],
                'Date': item[1]['date'],
                'Position': 1
            })
for p in get_project_names(CSV_FILE):
data = get_approval_dates_for_project(p, CSV_FILE)
write_to_csv(data) | xldigest/analysis/milestones.py | import csv
CSV_FILE = '/home/lemon/Documents/bcompiler/source/master_transposed.csv'
def get_approval_dates_for_project(project: str, csv_file: str) -> tuple:
    """Collect the MM1..MM11 approval milestones for one project.

    Args:
        project: value matched against the 'Project/Programme Name' column.
        csv_file: path to the master (transposed) CSV file.

    Returns:
        (project, milestones) where milestones maps 'mm1'..'mm11' to dicts
        with 'type' (the 'Approval MMn' cell), 'date' (the
        'Approval MMn Forecast / Actual' cell) and 'position' (always 1).
        The dict is empty when the project is not present.
    """
    # BUG FIX: the original first built a DictReader over the *path string*
    # (a dead, misleading statement); only the file-backed reader is kept.
    d = {}
    with open(csv_file, 'r') as f:
        reader = csv.DictReader(f)
        for line in reader:
            if line['Project/Programme Name'] == project:
                # One loop instead of 11 copy-pasted dict literals.
                # As in the original, a later duplicate row overwrites an
                # earlier one (last match wins).
                for i in range(1, 12):
                    d['mm%d' % i] = dict(
                        type=line['Approval MM%d' % i],
                        date=line['Approval MM%d Forecast / Actual' % i],
                        position=1)
    return project, d
def get_project_names(csv_file: str) -> list:
    """Return every 'Project/Programme Name' cell from the CSV, in file order."""
    names = []
    with open(csv_file, 'r') as source:
        for row in csv.DictReader(source):
            names.append(row['Project/Programme Name'])
    return names
def write_to_csv(project_data: tuple, out_dir: str = '/home/lemon/Desktop') -> None:
    """Write one project's milestones to '<out_dir>/<project name>.csv'.

    Args:
        project_data: (project_name, milestones) as produced by
            get_approval_dates_for_project(); each milestone dict must
            carry 'type' and 'date' keys.
        out_dir: target directory. The default preserves the original
            hard-coded location for backward compatibility; pass an
            explicit directory to write anywhere else.
    """
    project_name = project_data[0]
    with open('{}/{}.csv'.format(out_dir, project_name), 'w') as csv_file:
        fieldnames = ['Project Milestone', 'Date', 'Position']
        writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
        writer.writeheader()
        for item in project_data[1].items():
            writer.writerow({
                'Project Milestone': item[1]['type'],
                'Date': item[1]['date'],
                'Position': 1
            })
for p in get_project_names(CSV_FILE):
data = get_approval_dates_for_project(p, CSV_FILE)
write_to_csv(data) | 0.188249 | 0.102484 |
from __future__ import annotations
from typing import Any
import numpy as np
import pytest
from bqskit.ir.circuit import Circuit
from bqskit.ir.gate import Gate
from bqskit.ir.gates import CNOTGate
from bqskit.ir.gates import ConstantUnitaryGate
from bqskit.ir.gates import CSUMGate
from bqskit.ir.gates import HGate
from bqskit.ir.gates import TdgGate
from bqskit.ir.gates import TGate
from bqskit.ir.gates import U3Gate
from bqskit.ir.gates import XGate
from bqskit.ir.gates import ZGate
from bqskit.qis.unitary.differentiable import DifferentiableUnitary
from bqskit.utils.typing import is_integer
from bqskit.utils.typing import is_numeric
from bqskit.utils.typing import is_valid_coupling_graph
from bqskit.utils.typing import is_valid_radixes
class TestSimpleCircuit:
    """This set of tests will ensure that all circuit properties are correct for
    a simple circuit."""

    # `simple_circuit` is a pytest fixture defined outside this file. The
    # expectations below characterise it: 2 qubits, 4 constant operations
    # (XGate / CNOTGate) over 4 cycles, with no free parameters.

    def test_num_params(self, simple_circuit: Circuit) -> None:
        assert simple_circuit.num_params == 0

    def test_radixes(self, simple_circuit: Circuit) -> None:
        assert len(simple_circuit.radixes) == simple_circuit.num_qudits
        assert isinstance(simple_circuit.radixes, tuple)
        assert all(r == 2 for r in simple_circuit.radixes)

    def test_num_qudits(self, simple_circuit: Circuit) -> None:
        assert simple_circuit.num_qudits == 2

    def test_dim(self, simple_circuit: Circuit) -> None:
        assert simple_circuit.dim == 4

    def test_is_qubit_only(self, simple_circuit: Circuit) -> None:
        assert simple_circuit.is_qubit_only()

    def test_is_qutrit_only(self, simple_circuit: Circuit) -> None:
        assert not simple_circuit.is_qutrit_only()

    def test_is_parameterized(self, simple_circuit: Circuit) -> None:
        assert not simple_circuit.is_parameterized()

    def test_is_constant(self, simple_circuit: Circuit) -> None:
        assert simple_circuit.is_constant()

    def test_num_operations(self, simple_circuit: Circuit) -> None:
        assert simple_circuit.num_operations == 4

    def test_num_cycles(self, simple_circuit: Circuit) -> None:
        assert simple_circuit.num_cycles == 4

    def test_params(self, simple_circuit: Circuit) -> None:
        assert len(simple_circuit.params) == 0
        assert isinstance(simple_circuit.params, np.ndarray)

    def test_depth(self, simple_circuit: Circuit) -> None:
        assert simple_circuit.depth == 4

    def test_parallelism(self, simple_circuit: Circuit) -> None:
        assert simple_circuit.parallelism == 1.5

    def test_coupling_graph(self, simple_circuit: Circuit) -> None:
        # A single two-qudit interaction between qubits 0 and 1.
        cgraph = simple_circuit.coupling_graph
        assert isinstance(cgraph, set)
        assert is_valid_coupling_graph(cgraph, 2)
        assert len(cgraph) == 1
        assert (0, 1) in cgraph

    def test_gate_set(self, simple_circuit: Circuit) -> None:
        gate_set = simple_circuit.gate_set
        assert isinstance(gate_set, set)
        assert len(gate_set) == 2
        assert XGate() in gate_set
        assert CNOTGate() in gate_set

    def test_active_qudits(self, simple_circuit: Circuit) -> None:
        qudits = simple_circuit.active_qudits
        assert len(qudits) == simple_circuit.num_qudits
        assert isinstance(qudits, list)
        assert all(x in qudits for x in range(simple_circuit.num_qudits))
class TestSwapCircuit:
    """This set of tests will ensure that all circuit properties are correct for
    a swap circuit."""

    # `swap_circuit` is a pytest fixture defined outside this file. The
    # expectations below characterise it: 2 qubits, 3 CNOTGate operations
    # over 3 cycles (the classic 3-CNOT swap), no free parameters.

    def test_num_params(self, swap_circuit: Circuit) -> None:
        assert swap_circuit.num_params == 0

    def test_radixes(self, swap_circuit: Circuit) -> None:
        assert len(swap_circuit.radixes) == swap_circuit.num_qudits
        assert isinstance(swap_circuit.radixes, tuple)
        assert all(r == 2 for r in swap_circuit.radixes)

    def test_num_qudits(self, swap_circuit: Circuit) -> None:
        assert swap_circuit.num_qudits == 2

    def test_dim(self, swap_circuit: Circuit) -> None:
        assert swap_circuit.dim == 4

    def test_is_qubit_only(self, swap_circuit: Circuit) -> None:
        assert swap_circuit.is_qubit_only()

    def test_is_qutrit_only(self, swap_circuit: Circuit) -> None:
        assert not swap_circuit.is_qutrit_only()

    def test_is_parameterized(self, swap_circuit: Circuit) -> None:
        assert not swap_circuit.is_parameterized()

    def test_is_constant(self, swap_circuit: Circuit) -> None:
        assert swap_circuit.is_constant()

    def test_num_operations(self, swap_circuit: Circuit) -> None:
        assert swap_circuit.num_operations == 3

    def test_num_cycles(self, swap_circuit: Circuit) -> None:
        assert swap_circuit.num_cycles == 3

    def test_params(self, swap_circuit: Circuit) -> None:
        assert len(swap_circuit.params) == 0
        assert isinstance(swap_circuit.params, np.ndarray)

    def test_depth(self, swap_circuit: Circuit) -> None:
        assert swap_circuit.depth == 3

    def test_parallelism(self, swap_circuit: Circuit) -> None:
        assert swap_circuit.parallelism == 2

    def test_coupling_graph(self, swap_circuit: Circuit) -> None:
        # A single two-qudit interaction between qubits 0 and 1.
        cgraph = swap_circuit.coupling_graph
        assert isinstance(cgraph, set)
        assert is_valid_coupling_graph(cgraph, 2)
        assert len(cgraph) == 1
        assert (0, 1) in cgraph

    def test_gate_set(self, swap_circuit: Circuit) -> None:
        gate_set = swap_circuit.gate_set
        assert isinstance(gate_set, set)
        assert len(gate_set) == 1
        assert CNOTGate() in gate_set

    def test_active_qudits(self, swap_circuit: Circuit) -> None:
        qudits = swap_circuit.active_qudits
        assert len(qudits) == swap_circuit.num_qudits
        assert isinstance(qudits, list)
        assert all(x in qudits for x in range(swap_circuit.num_qudits))
class TestToffoliCircuit:
    """This set of tests will ensure that all circuit properties are correct for
    a toffoli circuit."""

    # `toffoli_circuit` is a pytest fixture defined outside this file. The
    # expectations below characterise it: 3 qubits, 15 operations drawn
    # from {CNOT, H, T, Tdg} over 11 cycles (a standard Toffoli
    # decomposition), no free parameters.

    def test_num_params(self, toffoli_circuit: Circuit) -> None:
        assert toffoli_circuit.num_params == 0

    def test_radixes(self, toffoli_circuit: Circuit) -> None:
        assert len(toffoli_circuit.radixes) == toffoli_circuit.num_qudits
        assert isinstance(toffoli_circuit.radixes, tuple)
        assert all(r == 2 for r in toffoli_circuit.radixes)

    def test_num_qudits(self, toffoli_circuit: Circuit) -> None:
        assert toffoli_circuit.num_qudits == 3

    def test_dim(self, toffoli_circuit: Circuit) -> None:
        assert toffoli_circuit.dim == 8

    def test_is_qubit_only(self, toffoli_circuit: Circuit) -> None:
        assert toffoli_circuit.is_qubit_only()

    def test_is_qutrit_only(self, toffoli_circuit: Circuit) -> None:
        assert not toffoli_circuit.is_qutrit_only()

    def test_is_parameterized(self, toffoli_circuit: Circuit) -> None:
        assert not toffoli_circuit.is_parameterized()

    def test_is_constant(self, toffoli_circuit: Circuit) -> None:
        assert toffoli_circuit.is_constant()

    def test_num_operations(self, toffoli_circuit: Circuit) -> None:
        assert toffoli_circuit.num_operations == 15

    def test_num_cycles(self, toffoli_circuit: Circuit) -> None:
        assert toffoli_circuit.num_cycles == 11

    def test_params(self, toffoli_circuit: Circuit) -> None:
        assert len(toffoli_circuit.params) == 0
        assert isinstance(toffoli_circuit.params, np.ndarray)

    def test_depth(self, toffoli_circuit: Circuit) -> None:
        assert toffoli_circuit.depth == 11

    def test_parallelism(self, toffoli_circuit: Circuit) -> None:
        assert toffoli_circuit.parallelism == 21 / 11

    def test_coupling_graph(self, toffoli_circuit: Circuit) -> None:
        # Two-qudit interactions exist between every pair of the 3 qubits.
        cgraph = toffoli_circuit.coupling_graph
        assert isinstance(cgraph, set)
        assert is_valid_coupling_graph(cgraph, 3)
        assert len(cgraph) == 3
        assert (0, 1) in cgraph
        assert (1, 2) in cgraph
        assert (0, 2) in cgraph

    def test_gate_set(self, toffoli_circuit: Circuit) -> None:
        gate_set = toffoli_circuit.gate_set
        assert isinstance(gate_set, set)
        assert len(gate_set) == 4
        assert CNOTGate() in gate_set
        assert HGate() in gate_set
        assert TdgGate() in gate_set
        assert TGate() in gate_set

    def test_active_qudits(self, toffoli_circuit: Circuit) -> None:
        qudits = toffoli_circuit.active_qudits
        assert len(qudits) == toffoli_circuit.num_qudits
        assert isinstance(qudits, list)
        assert all(x in qudits for x in range(toffoli_circuit.num_qudits))
class TestGetNumParams:
    """This tests `circuit.num_params`."""

    def test_type(self, r6_qudit_circuit: Circuit) -> None:
        assert isinstance(r6_qudit_circuit.num_params, int)

    def test_value(self, r6_qudit_circuit: Circuit) -> None:
        assert r6_qudit_circuit.num_params >= 0

    def test_empty(self) -> None:
        """Empty circuits have no parameters regardless of size or radixes."""
        circuit = Circuit(1)
        assert circuit.num_params == 0
        circuit = Circuit(4)
        assert circuit.num_params == 0
        circuit = Circuit(4, [2, 3, 4, 5])
        assert circuit.num_params == 0

    def test_adding_gate(self) -> None:
        """Each appended U3 contributes its three parameters."""
        circuit = Circuit(1)
        assert circuit.num_params == 0
        circuit.append_gate(U3Gate(), [0])
        assert circuit.num_params == 3
        circuit.append_gate(U3Gate(), [0])
        assert circuit.num_params == 6
        circuit.append_gate(U3Gate(), [0])
        assert circuit.num_params == 9

    def test_inserting_gate(self) -> None:
        """Insertion counts parameters the same way appending does."""
        circuit = Circuit(1)
        assert circuit.num_params == 0
        circuit.insert_gate(0, U3Gate(), [0])
        assert circuit.num_params == 3
        circuit.insert_gate(0, U3Gate(), [0])
        assert circuit.num_params == 6
        circuit.insert_gate(0, U3Gate(), [0])
        assert circuit.num_params == 9

    def test_removing_gate(self) -> None:
        """Removing a U3 releases its three parameters."""
        circuit = Circuit(1)
        circuit.append_gate(U3Gate(), [0])
        circuit.append_gate(U3Gate(), [0])
        circuit.append_gate(U3Gate(), [0])
        assert circuit.num_params == 9
        circuit.remove(U3Gate())
        assert circuit.num_params == 6
        circuit.remove(U3Gate())
        assert circuit.num_params == 3
        circuit.remove(U3Gate())
        assert circuit.num_params == 0

    def test_freezing_param(self) -> None:
        """Freezing a parameter removes it from the trainable count."""
        circuit = Circuit(1)
        circuit.append_gate(U3Gate(), [0])
        circuit.append_gate(U3Gate(), [0])
        circuit.append_gate(U3Gate(), [0])
        assert circuit.num_params == 9
        circuit.freeze_param(0)
        assert circuit.num_params == 8
        circuit.freeze_param(0)
        assert circuit.num_params == 7
        circuit.freeze_param(0)
        assert circuit.num_params == 6
        circuit.freeze_param(0)
        # BUG FIX: the fourth freeze previously had no assertion, so its
        # effect was never checked; it must drop the count to 5.
        assert circuit.num_params == 5

    def test_r1(self, r3_qubit_circuit: Circuit) -> None:
        start = r3_qubit_circuit.num_params
        r3_qubit_circuit.append_gate(U3Gate(), [0])
        assert r3_qubit_circuit.num_params == start + 3
        r3_qubit_circuit.insert_gate(0, U3Gate(), [1])
        assert r3_qubit_circuit.num_params == start + 6
        # CNOT is constant: the count must not change.
        r3_qubit_circuit.insert_gate(0, CNOTGate(), [0, 2])
        assert r3_qubit_circuit.num_params == start + 6
        r3_qubit_circuit.remove(U3Gate())
        assert r3_qubit_circuit.num_params == start + 3
class TestGetRadixes:
    """This tests `circuit.radixes`."""

    def test_type(self, r6_qudit_circuit: Circuit) -> None:
        radixes = r6_qudit_circuit.radixes
        assert isinstance(radixes, tuple)
        assert all(is_integer(radix) for radix in radixes)

    def test_value(self, r6_qudit_circuit: Circuit) -> None:
        assert is_valid_radixes(r6_qudit_circuit.radixes, 6)

    def test_empty(self) -> None:
        """Fresh circuits default to qubit radixes unless overridden."""
        cases = [
            (Circuit(1), (2,)),
            (Circuit(4), (2, 2, 2, 2)),
            (Circuit(4, [2, 2, 3, 3]), (2, 2, 3, 3)),
        ]
        for circuit, expected in cases:
            assert len(circuit.radixes) == len(expected)
            for radix, want in zip(circuit.radixes, expected):
                assert radix == want
class TestGetSize:
    """This tests `circuit.num_qudits`."""

    def test_type(self, r6_qudit_circuit: Circuit) -> None:
        assert isinstance(r6_qudit_circuit.num_qudits, int)

    def test_value(self, r6_qudit_circuit: Circuit) -> None:
        assert r6_qudit_circuit.num_qudits == 6

    def test_empty(self) -> None:
        """The qudit count is fixed at construction, with or without gates."""
        assert Circuit(1).num_qudits == 1
        assert Circuit(4).num_qudits == 4
        assert Circuit(4, [2, 2, 3, 3]).num_qudits == 4
class TestGetDim:
    """This tests `circuit.dim`."""

    def test_type(self, r6_qudit_circuit: Circuit) -> None:
        assert isinstance(r6_qudit_circuit.dim, int)

    def test_value(self, r6_qudit_circuit: Circuit) -> None:
        # Six qudits of radix >= 2 give a dimension of at least 2 ** 6.
        assert r6_qudit_circuit.dim >= 64

    def test_empty(self) -> None:
        """Dimension is the product of the radixes."""
        assert Circuit(1).dim == 2
        assert Circuit(4).dim == 16
        assert Circuit(4, [2, 2, 3, 3]).dim == 36
class TestIsQubitOnly:
    """This tests `circuit.is_qubit_only`."""

    def test_type(self, r6_qudit_circuit: Circuit) -> None:
        assert isinstance(r6_qudit_circuit.is_qubit_only(), bool)

    def test_value(self, r6_qudit_circuit: Circuit) -> None:
        # True exactly when every radix is 2.
        all_qubits = r6_qudit_circuit.radixes.count(2) == 6
        assert r6_qudit_circuit.is_qubit_only() == all_qubits

    def test_empty(self) -> None:
        assert Circuit(1).is_qubit_only()
        assert Circuit(4).is_qubit_only()
        assert not Circuit(4, [2, 2, 3, 3]).is_qubit_only()
class TestIsQutritOnly:
    """This tests `circuit.is_qutrit_only`."""

    def test_type(self, r6_qudit_circuit: Circuit) -> None:
        assert isinstance(r6_qudit_circuit.is_qutrit_only(), bool)

    def test_value(self, r6_qudit_circuit: Circuit) -> None:
        # True exactly when every radix is 3.
        all_qutrits = r6_qudit_circuit.radixes.count(3) == 6
        assert r6_qudit_circuit.is_qutrit_only() == all_qutrits

    def test_empty(self) -> None:
        assert not Circuit(1).is_qutrit_only()
        assert not Circuit(4).is_qutrit_only()
        assert Circuit(4, [3, 3, 3, 3]).is_qutrit_only()
        assert not Circuit(4, [2, 2, 3, 3]).is_qutrit_only()
class TestIsParameterized:
    """This tests `circuit.is_parameterized`."""

    def test_type(self, r6_qudit_circuit: Circuit) -> None:
        assert isinstance(r6_qudit_circuit.is_parameterized(), bool)

    def test_value(self, r6_qudit_circuit: Circuit) -> None:
        # Parameterized and constant are mutually exclusive.
        assert (
            r6_qudit_circuit.is_parameterized()
            != r6_qudit_circuit.is_constant()
        )
        # The circuit is parameterized iff any of its gates is.
        expected = any(g.is_parameterized() for g in r6_qudit_circuit.gate_set)
        assert r6_qudit_circuit.is_parameterized() == expected

    def test_empty(self) -> None:
        for circuit in [Circuit(1), Circuit(4), Circuit(4, [2, 2, 3, 3])]:
            assert not circuit.is_parameterized()
class TestIsConstant:
    """This tests `circuit.is_constant`."""

    def test_type(self, r6_qudit_circuit: Circuit) -> None:
        assert isinstance(r6_qudit_circuit.is_constant(), bool)

    def test_value(self, r6_qudit_circuit: Circuit) -> None:
        # Constant and parameterized are mutually exclusive.
        assert (
            r6_qudit_circuit.is_parameterized()
            != r6_qudit_circuit.is_constant()
        )
        # The circuit is constant iff every one of its gates is.
        expected = all(g.is_constant() for g in r6_qudit_circuit.gate_set)
        assert r6_qudit_circuit.is_constant() == expected

    def test_empty(self) -> None:
        for circuit in [Circuit(1), Circuit(4), Circuit(4, [2, 2, 3, 3])]:
            assert circuit.is_constant()
class TestGetNumOperations:
    """This tests `circuit.num_operations`."""

    def test_type(self, r6_qudit_circuit: Circuit) -> None:
        assert isinstance(r6_qudit_circuit.num_operations, int)

    def test_value(self, r6_qudit_circuit: Circuit) -> None:
        assert r6_qudit_circuit.num_operations >= 0

    def test_empty(self) -> None:
        for circuit in [Circuit(1), Circuit(4), Circuit(4, [2, 3, 4, 5])]:
            assert circuit.num_operations == 0

    def test_adding_gate(self) -> None:
        """Appending increments the count by one per gate."""
        circuit = Circuit(1)
        assert circuit.num_operations == 0
        for count in (1, 2, 3):
            circuit.append_gate(U3Gate(), [0])
            assert circuit.num_operations == count

    def test_inserting_gate(self) -> None:
        """Inserting increments the count exactly like appending."""
        circuit = Circuit(1)
        assert circuit.num_operations == 0
        for count in (1, 2, 3):
            circuit.insert_gate(0, U3Gate(), [0])
            assert circuit.num_operations == count

    def test_removing_gate(self) -> None:
        """Removing decrements the count by one per gate."""
        circuit = Circuit(1)
        for _ in range(3):
            circuit.append_gate(U3Gate(), [0])
        assert circuit.num_operations == 3
        for count in (2, 1, 0):
            circuit.remove(U3Gate())
            assert circuit.num_operations == count

    def test_r1(self, r3_qubit_circuit: Circuit) -> None:
        assert r3_qubit_circuit.num_operations == 10
        r3_qubit_circuit.append_gate(U3Gate(), [0])
        assert r3_qubit_circuit.num_operations == 11
        r3_qubit_circuit.insert_gate(0, U3Gate(), [1])
        assert r3_qubit_circuit.num_operations == 12
        r3_qubit_circuit.insert_gate(0, CNOTGate(), [0, 2])
        assert r3_qubit_circuit.num_operations == 13
        r3_qubit_circuit.remove(U3Gate())
        assert r3_qubit_circuit.num_operations == 12
        r3_qubit_circuit.remove(CNOTGate())
        assert r3_qubit_circuit.num_operations == 11
class TestGetNumCycles:
    """This tests `circuit.num_cycles`."""

    def test_type(self, r6_qudit_circuit: Circuit) -> None:
        assert isinstance(r6_qudit_circuit.num_cycles, int)

    def test_value(self, r6_qudit_circuit: Circuit) -> None:
        assert r6_qudit_circuit.num_cycles >= 0

    def test_empty(self) -> None:
        for circuit in [Circuit(1), Circuit(4), Circuit(4, [2, 3, 4, 5])]:
            assert circuit.num_cycles == 0

    def test_adding_gate(self) -> None:
        """Serial gates on one qudit each open a new cycle."""
        circuit = Circuit(1)
        assert circuit.num_cycles == 0
        for count in (1, 2, 3):
            circuit.append_gate(U3Gate(), [0])
            assert circuit.num_cycles == count

    def test_inserting_gate(self) -> None:
        circuit = Circuit(1)
        assert circuit.num_cycles == 0
        for count in (1, 2, 3):
            circuit.insert_gate(0, U3Gate(), [0])
            assert circuit.num_cycles == count

    def test_removing_gate1(self) -> None:
        circuit = Circuit(1)
        for _ in range(3):
            circuit.append_gate(U3Gate(), [0])
        assert circuit.num_cycles == 3
        for count in (2, 1, 0):
            circuit.remove(U3Gate())
            assert circuit.num_cycles == count

    def test_removing_gate2(self) -> None:
        """Removing a gate only drops a cycle once that cycle empties."""
        circuit = Circuit(2)
        circuit.append_gate(U3Gate(), [0])
        circuit.append_gate(CNOTGate(), [0, 1])
        circuit.append_gate(U3Gate(), [0])
        circuit.append_gate(U3Gate(), [1])
        assert circuit.num_cycles == 3
        circuit.remove(U3Gate())
        assert circuit.num_cycles == 2
        circuit.remove(CNOTGate())
        assert circuit.num_cycles == 1
        circuit.remove(U3Gate())
        # The last cycle still holds the U3 on qudit 1.
        assert circuit.num_cycles == 1
        circuit.remove(U3Gate())
        assert circuit.num_cycles == 0
class TestGetParams:
    """This tests `circuit.params`."""

    def test_type(self, r6_qudit_circuit: Circuit) -> None:
        params = r6_qudit_circuit.params
        assert isinstance(params, np.ndarray)
        assert all(is_numeric(param) for param in params)

    def test_count(self, r6_qudit_circuit: Circuit) -> None:
        assert len(r6_qudit_circuit.params) == r6_qudit_circuit.num_params

    def test_no_modify(self, r6_qudit_circuit: Circuit) -> None:
        """Mutating the returned array must not affect the circuit."""
        params = r6_qudit_circuit.params
        if len(params) == 0:
            return
        params[0] = -params[0] + 1
        assert params[0] != r6_qudit_circuit.params[0]

    def test_empty(self) -> None:
        for circuit in [Circuit(1), Circuit(4), Circuit(4, [2, 3, 4, 5])]:
            assert len(circuit.params) == 0
class TestGetDepth:
    """This tests `circuit.depth`."""

    def test_type(self, r6_qudit_circuit: Circuit) -> None:
        assert isinstance(r6_qudit_circuit.depth, int)

    def test_value(self, r6_qudit_circuit: Circuit) -> None:
        assert r6_qudit_circuit.depth >= 0

    def test_empty(self) -> None:
        for circuit in [Circuit(1), Circuit(4), Circuit(4, [2, 3, 4, 5])]:
            assert circuit.depth == 0

    def test_adding_gate(self) -> None:
        """Serial gates on one qudit grow the depth by one each."""
        circuit = Circuit(1)
        assert circuit.depth == 0
        for count in (1, 2, 3):
            circuit.append_gate(U3Gate(), [0])
            assert circuit.depth == count

    def test_inserting_gate(self) -> None:
        circuit = Circuit(1)
        assert circuit.depth == 0
        for count in (1, 2, 3):
            circuit.insert_gate(0, U3Gate(), [0])
            assert circuit.depth == count

    def test_removing_gate1(self) -> None:
        circuit = Circuit(1)
        for _ in range(3):
            circuit.append_gate(U3Gate(), [0])
        assert circuit.depth == 3
        for count in (2, 1, 0):
            circuit.remove(U3Gate())
            assert circuit.depth == count

    def test_removing_gate2(self) -> None:
        """Depth follows the longest qudit path, not the gate count."""
        circuit = Circuit(2)
        circuit.append_gate(U3Gate(), [0])
        circuit.append_gate(CNOTGate(), [0, 1])
        circuit.append_gate(U3Gate(), [0])
        circuit.append_gate(U3Gate(), [1])
        assert circuit.depth == 3
        circuit.remove(U3Gate())
        assert circuit.depth == 2
        circuit.remove(CNOTGate())
        assert circuit.depth == 1
        circuit.remove(U3Gate())
        # The U3 on qudit 1 keeps the depth at one.
        assert circuit.depth == 1
        circuit.remove(U3Gate())
        assert circuit.depth == 0

    def test_vs_cycles(self, r6_qudit_circuit: Circuit) -> None:
        # Depth can never exceed the raw cycle count.
        assert (
            r6_qudit_circuit.depth
            <= r6_qudit_circuit.num_cycles
        )
class TestGetParallelism:
    """This tests `circuit.parallelism`."""

    def test_type(self, r6_qudit_circuit: Circuit) -> None:
        assert isinstance(r6_qudit_circuit.parallelism, float)

    def test_value(self, r6_qudit_circuit: Circuit) -> None:
        assert r6_qudit_circuit.parallelism > 0

    def test_empty(self) -> None:
        circuit = Circuit(1)
        assert circuit.parallelism == 0
        circuit = Circuit(4)
        assert circuit.parallelism == 0
        circuit = Circuit(4, [2, 3, 4, 5])
        assert circuit.parallelism == 0

    def test_adding_gate(self) -> None:
        """Serial single-qudit gates keep parallelism at one."""
        circuit = Circuit(1)
        assert circuit.parallelism == 0
        circuit.append_gate(U3Gate(), [0])
        assert circuit.parallelism == 1
        circuit.append_gate(U3Gate(), [0])
        assert circuit.parallelism == 1
        circuit.append_gate(U3Gate(), [0])
        assert circuit.parallelism == 1

    def test_adding_gate_2(self) -> None:
        """Parallelism averages per-cycle gate weight across the circuit."""
        circuit = Circuit(2)
        assert circuit.parallelism == 0
        circuit.append_gate(U3Gate(), [0])
        assert circuit.parallelism == 1
        circuit.append_gate(U3Gate(), [1])
        assert circuit.parallelism == 2
        circuit.append_gate(U3Gate(), [0])
        assert circuit.parallelism == 1.5
        circuit.append_gate(U3Gate(), [1])
        assert circuit.parallelism == 2
        circuit.append_gate(U3Gate(), [0])
        # BUG FIX: the original comparison lacked abs(), so any value
        # below 5/3 (including 0) would have passed trivially.
        assert abs(circuit.parallelism - 5 / 3) < 1e-12
        circuit.append_gate(U3Gate(), [1])
        assert circuit.parallelism == 2

    def test_adding_gate_3(self) -> None:
        """A two-qudit gate carries weight two in its cycle."""
        circuit = Circuit(2)
        assert circuit.parallelism == 0
        circuit.append_gate(U3Gate(), [0])
        assert circuit.parallelism == 1
        circuit.append_gate(U3Gate(), [1])
        assert circuit.parallelism == 2
        circuit.append_gate(CNOTGate(), [0, 1])
        assert circuit.parallelism == 2
class TestGetCouplingGraph:
    """This tests `circuit.coupling_graph`."""

    def test_type(self, r6_qudit_circuit: Circuit) -> None:
        assert is_valid_coupling_graph(
            r6_qudit_circuit.coupling_graph, 6,
        )

    def test_empty(self) -> None:
        circuit = Circuit(4)
        assert isinstance(circuit.coupling_graph, set)
        assert len(circuit.coupling_graph) == 0

    def test_single_qubit_1(self) -> None:
        """Single-qudit gates never contribute edges."""
        circuit = Circuit(1)
        assert len(circuit.coupling_graph) == 0
        for _ in range(3):
            circuit.append_gate(U3Gate(), [0])
            assert len(circuit.coupling_graph) == 0

    def test_single_qubit_2(self) -> None:
        circuit = Circuit(4)
        assert len(circuit.coupling_graph) == 0
        for i in range(4):
            circuit.append_gate(U3Gate(), [i])
        assert len(circuit.coupling_graph) == 0
        for _ in range(4):
            for i in range(4):
                circuit.append_gate(U3Gate(), [i])
        assert len(circuit.coupling_graph) == 0

    def test_two_qubit_1(self) -> None:
        circuit = Circuit(2)
        assert len(circuit.coupling_graph) == 0
        circuit.append_gate(CNOTGate(), [0, 1])
        assert circuit.coupling_graph == {(0, 1)}
        # Edges are undirected: the reversed CNOT adds nothing new.
        circuit.append_gate(CNOTGate(), [1, 0])
        assert circuit.coupling_graph == {(0, 1)}
        circuit.remove(CNOTGate())
        circuit.remove(CNOTGate())
        assert len(circuit.coupling_graph) == 0

    def test_two_qubit_2(self) -> None:
        circuit = Circuit(4)
        assert len(circuit.coupling_graph) == 0
        line = {(0, 1), (1, 2), (2, 3)}
        circuit.append_gate(CNOTGate(), [0, 1])
        circuit.append_gate(CNOTGate(), [1, 2])
        circuit.append_gate(CNOTGate(), [2, 3])
        assert circuit.coupling_graph == line
        # Repeating the same interactions leaves the graph unchanged.
        circuit.append_gate(CNOTGate(), [2, 3])
        circuit.append_gate(CNOTGate(), [1, 2])
        circuit.append_gate(CNOTGate(), [0, 1])
        assert circuit.coupling_graph == line
        circuit.append_gate(CNOTGate(), [0, 2])
        circuit.append_gate(CNOTGate(), [3, 0])
        assert circuit.coupling_graph == line | {(0, 2), (0, 3)}

    def test_multi_qubit_1(self, gen_random_utry_np: Any) -> None:
        """A k-qudit gate contributes all pairs among its operands."""
        circuit = Circuit(6)
        assert len(circuit.coupling_graph) == 0
        three_qubit_gate = ConstantUnitaryGate(gen_random_utry_np(8))
        triangle = {(0, 1), (1, 2), (0, 2)}
        circuit.append_gate(three_qubit_gate, [0, 1, 2])
        assert circuit.coupling_graph == triangle
        circuit.append_gate(three_qubit_gate, [0, 1, 2])
        assert circuit.coupling_graph == triangle
        circuit.append_gate(three_qubit_gate, [1, 2, 3])
        assert circuit.coupling_graph == triangle | {(1, 3), (2, 3)}
        circuit.append_gate(three_qubit_gate, [3, 4, 5])
        assert circuit.coupling_graph == (
            triangle | {(1, 3), (2, 3), (3, 4), (3, 5), (4, 5)}
        )

    def test_multi_qudit_2(self, gen_random_utry_np: Any) -> None:
        circuit = Circuit(6, [2, 2, 2, 3, 3, 3])
        assert len(circuit.coupling_graph) == 0
        mixed_radix_gate = ConstantUnitaryGate(
            gen_random_utry_np(12), [2, 2, 3],
        )
        circuit.append_gate(mixed_radix_gate, [0, 1, 3])
        expected = {(0, 1), (1, 3), (0, 3)}
        assert circuit.coupling_graph == expected
        circuit.append_gate(CNOTGate(), [1, 2])
        expected.add((1, 2))
        assert circuit.coupling_graph == expected
        circuit.append_gate(CSUMGate(), [4, 5])
        expected.add((4, 5))
        assert circuit.coupling_graph == expected
class TestGetGateSet:
    """This tests `circuit.gate_set`."""

    def test_type(self, r6_qudit_circuit: Circuit) -> None:
        gate_set = r6_qudit_circuit.gate_set
        assert isinstance(gate_set, set)
        assert all(isinstance(gate, Gate) for gate in gate_set)

    def test_empty(self) -> None:
        circuit = Circuit(4)
        assert isinstance(circuit.gate_set, set)
        assert len(circuit.gate_set) == 0

    def test_adding_gate(self) -> None:
        """Each new gate type joins the set exactly once."""
        circuit = Circuit(1)
        assert len(circuit.gate_set) == 0
        expected: set[Gate] = set()
        for gate in (U3Gate(), XGate(), ZGate(), TGate()):
            circuit.append_gate(gate, [0])
            expected.add(gate)
            assert circuit.gate_set == expected

    def test_removing_gate(self) -> None:
        """Removing the last instance of a gate type drops it from the set."""
        circuit = Circuit(1)
        assert len(circuit.gate_set) == 0
        for gate in (U3Gate(), XGate(), ZGate(), TGate()):
            circuit.append_gate(gate, [0])
        expected: set[Gate] = {U3Gate(), XGate(), ZGate(), TGate()}
        assert circuit.gate_set == expected
        for gate in (TGate(), XGate(), ZGate(), U3Gate()):
            circuit.remove(gate)
            expected.discard(gate)
            assert circuit.gate_set == expected

    def test_qudit(self) -> None:
        """Mixed-radix circuits track qubit and qutrit gates together."""
        circuit = Circuit(3, [2, 3, 3])
        assert len(circuit.gate_set) == 0
        expected: set[Gate] = set()
        for gate in (U3Gate(), XGate(), ZGate(), TGate()):
            circuit.append_gate(gate, [0])
            expected.add(gate)
            assert circuit.gate_set == expected
        circuit.append_gate(CSUMGate(), [1, 2])
        expected.add(CSUMGate())
        assert circuit.gate_set == expected
class TestIsDifferentiable:
"""This tests `circuit.is_differentiable`."""
def test_type(self, r6_qudit_circuit: Circuit) -> None:
    """The differentiability predicate returns a plain bool."""
    assert isinstance(r6_qudit_circuit.is_differentiable(), bool)
def test_value(self, gate: Gate) -> None:
    """A circuit is differentiable iff every appended gate is."""
    circuit = Circuit(gate.num_qudits, gate.radixes)
    # An empty circuit is trivially differentiable.
    assert circuit.is_differentiable()
    circuit.append_gate(gate, list(range(gate.num_qudits)))
    if isinstance(gate, DifferentiableUnitary):
        assert circuit.is_differentiable()
    else:
        assert not circuit.is_differentiable()
@pytest.mark.parametrize(
'circuit', [
Circuit(1),
Circuit(4),
Circuit(4, [2, 3, 4, 5]),
],
)
def test_empty(self, circuit: Circuit) -> None:
assert circuit.is_differentiable() | tests/ir/circuit/test_properties.py | from __future__ import annotations
from typing import Any
import numpy as np
import pytest
from bqskit.ir.circuit import Circuit
from bqskit.ir.gate import Gate
from bqskit.ir.gates import CNOTGate
from bqskit.ir.gates import ConstantUnitaryGate
from bqskit.ir.gates import CSUMGate
from bqskit.ir.gates import HGate
from bqskit.ir.gates import TdgGate
from bqskit.ir.gates import TGate
from bqskit.ir.gates import U3Gate
from bqskit.ir.gates import XGate
from bqskit.ir.gates import ZGate
from bqskit.qis.unitary.differentiable import DifferentiableUnitary
from bqskit.utils.typing import is_integer
from bqskit.utils.typing import is_numeric
from bqskit.utils.typing import is_valid_coupling_graph
from bqskit.utils.typing import is_valid_radixes
class TestSimpleCircuit:
    """This set of tests will ensure that all circuit properties are correct for
    a simple circuit."""

    def test_num_params(self, simple_circuit: Circuit) -> None:
        # The fixture uses only constant gates (see test_gate_set).
        assert simple_circuit.num_params == 0

    def test_radixes(self, simple_circuit: Circuit) -> None:
        assert len(simple_circuit.radixes) == simple_circuit.num_qudits
        assert isinstance(simple_circuit.radixes, tuple)
        assert all(r == 2 for r in simple_circuit.radixes)

    def test_num_qudits(self, simple_circuit: Circuit) -> None:
        assert simple_circuit.num_qudits == 2

    def test_dim(self, simple_circuit: Circuit) -> None:
        # 2 ** 2 for two qubits.
        assert simple_circuit.dim == 4

    def test_is_qubit_only(self, simple_circuit: Circuit) -> None:
        assert simple_circuit.is_qubit_only()

    def test_is_qutrit_only(self, simple_circuit: Circuit) -> None:
        assert not simple_circuit.is_qutrit_only()

    def test_is_parameterized(self, simple_circuit: Circuit) -> None:
        assert not simple_circuit.is_parameterized()

    def test_is_constant(self, simple_circuit: Circuit) -> None:
        assert simple_circuit.is_constant()

    def test_num_operations(self, simple_circuit: Circuit) -> None:
        assert simple_circuit.num_operations == 4

    def test_num_cycles(self, simple_circuit: Circuit) -> None:
        assert simple_circuit.num_cycles == 4

    def test_params(self, simple_circuit: Circuit) -> None:
        # Constant circuit: the parameter vector is empty.
        assert len(simple_circuit.params) == 0
        assert isinstance(simple_circuit.params, np.ndarray)

    def test_depth(self, simple_circuit: Circuit) -> None:
        assert simple_circuit.depth == 4

    def test_parallelism(self, simple_circuit: Circuit) -> None:
        assert simple_circuit.parallelism == 1.5

    def test_coupling_graph(self, simple_circuit: Circuit) -> None:
        # Only qudits 0 and 1 interact.
        cgraph = simple_circuit.coupling_graph
        assert isinstance(cgraph, set)
        assert is_valid_coupling_graph(cgraph, 2)
        assert len(cgraph) == 1
        assert (0, 1) in cgraph

    def test_gate_set(self, simple_circuit: Circuit) -> None:
        gate_set = simple_circuit.gate_set
        assert isinstance(gate_set, set)
        assert len(gate_set) == 2
        assert XGate() in gate_set
        assert CNOTGate() in gate_set

    def test_active_qudits(self, simple_circuit: Circuit) -> None:
        qudits = simple_circuit.active_qudits
        assert len(qudits) == simple_circuit.num_qudits
        assert isinstance(qudits, list)
        assert all(x in qudits for x in range(simple_circuit.num_qudits))
class TestSwapCircuit:
    """This set of tests will ensure that all circuit properties are correct for
    a swap circuit."""

    def test_num_params(self, swap_circuit: Circuit) -> None:
        # CNOT-only circuit: no free parameters.
        assert swap_circuit.num_params == 0

    def test_radixes(self, swap_circuit: Circuit) -> None:
        assert len(swap_circuit.radixes) == swap_circuit.num_qudits
        assert isinstance(swap_circuit.radixes, tuple)
        assert all(r == 2 for r in swap_circuit.radixes)

    def test_num_qudits(self, swap_circuit: Circuit) -> None:
        assert swap_circuit.num_qudits == 2

    def test_dim(self, swap_circuit: Circuit) -> None:
        # 2 ** 2 for two qubits.
        assert swap_circuit.dim == 4

    def test_is_qubit_only(self, swap_circuit: Circuit) -> None:
        assert swap_circuit.is_qubit_only()

    def test_is_qutrit_only(self, swap_circuit: Circuit) -> None:
        assert not swap_circuit.is_qutrit_only()

    def test_is_parameterized(self, swap_circuit: Circuit) -> None:
        assert not swap_circuit.is_parameterized()

    def test_is_constant(self, swap_circuit: Circuit) -> None:
        assert swap_circuit.is_constant()

    def test_num_operations(self, swap_circuit: Circuit) -> None:
        # A swap decomposes into three CNOTs.
        assert swap_circuit.num_operations == 3

    def test_num_cycles(self, swap_circuit: Circuit) -> None:
        assert swap_circuit.num_cycles == 3

    def test_params(self, swap_circuit: Circuit) -> None:
        assert len(swap_circuit.params) == 0
        assert isinstance(swap_circuit.params, np.ndarray)

    def test_depth(self, swap_circuit: Circuit) -> None:
        assert swap_circuit.depth == 3

    def test_parallelism(self, swap_circuit: Circuit) -> None:
        # Each cycle holds one two-qudit gate, weight 2 per cycle.
        assert swap_circuit.parallelism == 2

    def test_coupling_graph(self, swap_circuit: Circuit) -> None:
        cgraph = swap_circuit.coupling_graph
        assert isinstance(cgraph, set)
        assert is_valid_coupling_graph(cgraph, 2)
        assert len(cgraph) == 1
        assert (0, 1) in cgraph

    def test_gate_set(self, swap_circuit: Circuit) -> None:
        gate_set = swap_circuit.gate_set
        assert isinstance(gate_set, set)
        assert len(gate_set) == 1
        assert CNOTGate() in gate_set

    def test_active_qudits(self, swap_circuit: Circuit) -> None:
        qudits = swap_circuit.active_qudits
        assert len(qudits) == swap_circuit.num_qudits
        assert isinstance(qudits, list)
        assert all(x in qudits for x in range(swap_circuit.num_qudits))
class TestToffoliCircuit:
    """This set of tests will ensure that all circuit properties are correct for
    a toffoli circuit."""

    def test_num_params(self, toffoli_circuit: Circuit) -> None:
        # The fixture decomposes into constant gates only (see test_gate_set).
        assert toffoli_circuit.num_params == 0

    def test_radixes(self, toffoli_circuit: Circuit) -> None:
        assert len(toffoli_circuit.radixes) == toffoli_circuit.num_qudits
        assert isinstance(toffoli_circuit.radixes, tuple)
        assert all(r == 2 for r in toffoli_circuit.radixes)

    def test_num_qudits(self, toffoli_circuit: Circuit) -> None:
        assert toffoli_circuit.num_qudits == 3

    def test_dim(self, toffoli_circuit: Circuit) -> None:
        # 2 ** 3 for three qubits.
        assert toffoli_circuit.dim == 8

    def test_is_qubit_only(self, toffoli_circuit: Circuit) -> None:
        assert toffoli_circuit.is_qubit_only()

    def test_is_qutrit_only(self, toffoli_circuit: Circuit) -> None:
        assert not toffoli_circuit.is_qutrit_only()

    def test_is_parameterized(self, toffoli_circuit: Circuit) -> None:
        assert not toffoli_circuit.is_parameterized()

    def test_is_constant(self, toffoli_circuit: Circuit) -> None:
        assert toffoli_circuit.is_constant()

    def test_num_operations(self, toffoli_circuit: Circuit) -> None:
        assert toffoli_circuit.num_operations == 15

    def test_num_cycles(self, toffoli_circuit: Circuit) -> None:
        assert toffoli_circuit.num_cycles == 11

    def test_params(self, toffoli_circuit: Circuit) -> None:
        # Constant circuit: the parameter vector is empty.
        assert len(toffoli_circuit.params) == 0
        assert isinstance(toffoli_circuit.params, np.ndarray)

    def test_depth(self, toffoli_circuit: Circuit) -> None:
        assert toffoli_circuit.depth == 11

    def test_parallelism(self, toffoli_circuit: Circuit) -> None:
        # Expected ratio for this fixture's 15 ops over 11 cycles.
        assert toffoli_circuit.parallelism == 21 / 11

    def test_coupling_graph(self, toffoli_circuit: Circuit) -> None:
        # All three qubit pairs interact, forming a triangle.
        cgraph = toffoli_circuit.coupling_graph
        assert isinstance(cgraph, set)
        assert is_valid_coupling_graph(cgraph, 3)
        assert len(cgraph) == 3
        assert (0, 1) in cgraph
        assert (1, 2) in cgraph
        assert (0, 2) in cgraph

    def test_gate_set(self, toffoli_circuit: Circuit) -> None:
        gate_set = toffoli_circuit.gate_set
        assert isinstance(gate_set, set)
        assert len(gate_set) == 4
        assert CNOTGate() in gate_set
        assert HGate() in gate_set
        assert TdgGate() in gate_set
        assert TGate() in gate_set

    def test_active_qudits(self, toffoli_circuit: Circuit) -> None:
        qudits = toffoli_circuit.active_qudits
        assert len(qudits) == toffoli_circuit.num_qudits
        assert isinstance(qudits, list)
        assert all(x in qudits for x in range(toffoli_circuit.num_qudits))
class TestGetNumParams:
    """This tests `circuit.num_params`."""

    def test_type(self, r6_qudit_circuit: Circuit) -> None:
        assert isinstance(r6_qudit_circuit.num_params, int)

    def test_value(self, r6_qudit_circuit: Circuit) -> None:
        assert r6_qudit_circuit.num_params >= 0

    def test_empty(self) -> None:
        """Empty circuits have no parameters regardless of size or radixes."""
        circuit = Circuit(1)
        assert circuit.num_params == 0
        circuit = Circuit(4)
        assert circuit.num_params == 0
        circuit = Circuit(4, [2, 3, 4, 5])
        assert circuit.num_params == 0

    def test_adding_gate(self) -> None:
        """Each appended U3 contributes its three parameters."""
        circuit = Circuit(1)
        assert circuit.num_params == 0
        circuit.append_gate(U3Gate(), [0])
        assert circuit.num_params == 3
        circuit.append_gate(U3Gate(), [0])
        assert circuit.num_params == 6
        circuit.append_gate(U3Gate(), [0])
        assert circuit.num_params == 9

    def test_inserting_gate(self) -> None:
        """Insertion counts parameters the same way appending does."""
        circuit = Circuit(1)
        assert circuit.num_params == 0
        circuit.insert_gate(0, U3Gate(), [0])
        assert circuit.num_params == 3
        circuit.insert_gate(0, U3Gate(), [0])
        assert circuit.num_params == 6
        circuit.insert_gate(0, U3Gate(), [0])
        assert circuit.num_params == 9

    def test_removing_gate(self) -> None:
        """Removing a U3 releases its three parameters."""
        circuit = Circuit(1)
        circuit.append_gate(U3Gate(), [0])
        circuit.append_gate(U3Gate(), [0])
        circuit.append_gate(U3Gate(), [0])
        assert circuit.num_params == 9
        circuit.remove(U3Gate())
        assert circuit.num_params == 6
        circuit.remove(U3Gate())
        assert circuit.num_params == 3
        circuit.remove(U3Gate())
        assert circuit.num_params == 0

    def test_freezing_param(self) -> None:
        """Freezing a parameter removes it from the trainable count."""
        circuit = Circuit(1)
        circuit.append_gate(U3Gate(), [0])
        circuit.append_gate(U3Gate(), [0])
        circuit.append_gate(U3Gate(), [0])
        assert circuit.num_params == 9
        circuit.freeze_param(0)
        assert circuit.num_params == 8
        circuit.freeze_param(0)
        assert circuit.num_params == 7
        circuit.freeze_param(0)
        assert circuit.num_params == 6
        circuit.freeze_param(0)
        # BUG FIX: the fourth freeze previously had no assertion, so its
        # effect was never checked; it must drop the count to 5.
        assert circuit.num_params == 5

    def test_r1(self, r3_qubit_circuit: Circuit) -> None:
        start = r3_qubit_circuit.num_params
        r3_qubit_circuit.append_gate(U3Gate(), [0])
        assert r3_qubit_circuit.num_params == start + 3
        r3_qubit_circuit.insert_gate(0, U3Gate(), [1])
        assert r3_qubit_circuit.num_params == start + 6
        # CNOT is constant: the count must not change.
        r3_qubit_circuit.insert_gate(0, CNOTGate(), [0, 2])
        assert r3_qubit_circuit.num_params == start + 6
        r3_qubit_circuit.remove(U3Gate())
        assert r3_qubit_circuit.num_params == start + 3
class TestGetRadixes:
"""This tests `circuit.radixes`."""
def test_type(self, r6_qudit_circuit: Circuit) -> None:
assert isinstance(r6_qudit_circuit.radixes, tuple)
assert all(is_integer(r) for r in r6_qudit_circuit.radixes)
def test_value(self, r6_qudit_circuit: Circuit) -> None:
assert is_valid_radixes(r6_qudit_circuit.radixes, 6)
def test_empty(self) -> None:
circuit = Circuit(1)
assert len(circuit.radixes) == 1
assert circuit.radixes[0] == 2
circuit = Circuit(4)
assert len(circuit.radixes) == 4
assert circuit.radixes[0] == 2
assert circuit.radixes[1] == 2
assert circuit.radixes[2] == 2
assert circuit.radixes[3] == 2
circuit = Circuit(4, [2, 2, 3, 3])
assert len(circuit.radixes) == 4
assert circuit.radixes[0] == 2
assert circuit.radixes[1] == 2
assert circuit.radixes[2] == 3
assert circuit.radixes[3] == 3
class TestGetSize:
"""This tests `circuit.num_qudits`."""
def test_type(self, r6_qudit_circuit: Circuit) -> None:
assert isinstance(r6_qudit_circuit.num_qudits, int)
def test_value(self, r6_qudit_circuit: Circuit) -> None:
assert r6_qudit_circuit.num_qudits == 6
def test_empty(self) -> None:
circuit = Circuit(1)
assert circuit.num_qudits == 1
circuit = Circuit(4)
assert circuit.num_qudits == 4
circuit = Circuit(4, [2, 2, 3, 3])
assert circuit.num_qudits == 4
class TestGetDim:
"""This tests `circuit.dim`."""
def test_type(self, r6_qudit_circuit: Circuit) -> None:
assert isinstance(r6_qudit_circuit.dim, int)
def test_value(self, r6_qudit_circuit: Circuit) -> None:
assert r6_qudit_circuit.dim >= 64
def test_empty(self) -> None:
circuit = Circuit(1)
assert circuit.dim == 2
circuit = Circuit(4)
assert circuit.dim == 16
circuit = Circuit(4, [2, 2, 3, 3])
assert circuit.dim == 36
class TestIsQubitOnly:
"""This tests `circuit.is_qubit_only`."""
def test_type(self, r6_qudit_circuit: Circuit) -> None:
assert isinstance(r6_qudit_circuit.is_qubit_only(), bool)
def test_value(self, r6_qudit_circuit: Circuit) -> None:
if r6_qudit_circuit.radixes.count(2) == 6:
assert r6_qudit_circuit.is_qubit_only()
else:
assert not r6_qudit_circuit.is_qubit_only()
def test_empty(self) -> None:
circuit = Circuit(1)
assert circuit.is_qubit_only()
circuit = Circuit(4)
assert circuit.is_qubit_only()
circuit = Circuit(4, [2, 2, 3, 3])
assert not circuit.is_qubit_only()
class TestIsQutritOnly:
"""This tests `circuit.is_qutrit_only`."""
def test_type(self, r6_qudit_circuit: Circuit) -> None:
assert isinstance(r6_qudit_circuit.is_qutrit_only(), bool)
def test_value(self, r6_qudit_circuit: Circuit) -> None:
if r6_qudit_circuit.radixes.count(3) == 6:
assert r6_qudit_circuit.is_qutrit_only()
else:
assert not r6_qudit_circuit.is_qutrit_only()
def test_empty(self) -> None:
circuit = Circuit(1)
assert not circuit.is_qutrit_only()
circuit = Circuit(4)
assert not circuit.is_qutrit_only()
circuit = Circuit(4, [3, 3, 3, 3])
assert circuit.is_qutrit_only()
circuit = Circuit(4, [2, 2, 3, 3])
assert not circuit.is_qutrit_only()
class TestIsParameterized:
"""This tests `circuit.is_parameterized`."""
def test_type(self, r6_qudit_circuit: Circuit) -> None:
assert isinstance(r6_qudit_circuit.is_parameterized(), bool)
def test_value(self, r6_qudit_circuit: Circuit) -> None:
assert (
r6_qudit_circuit.is_parameterized()
!= r6_qudit_circuit.is_constant()
)
if any(g.is_parameterized() for g in r6_qudit_circuit.gate_set):
assert r6_qudit_circuit.is_parameterized()
else:
assert not r6_qudit_circuit.is_parameterized()
def test_empty(self) -> None:
circuit = Circuit(1)
assert not circuit.is_parameterized()
circuit = Circuit(4)
assert not circuit.is_parameterized()
circuit = Circuit(4, [2, 2, 3, 3])
assert not circuit.is_parameterized()
class TestIsConstant:
"""This tests `circuit.is_constant`."""
def test_type(self, r6_qudit_circuit: Circuit) -> None:
assert isinstance(r6_qudit_circuit.is_constant(), bool)
def test_value(self, r6_qudit_circuit: Circuit) -> None:
assert (
r6_qudit_circuit.is_parameterized()
!= r6_qudit_circuit.is_constant()
)
if all(g.is_constant() for g in r6_qudit_circuit.gate_set):
assert r6_qudit_circuit.is_constant()
else:
assert not r6_qudit_circuit.is_constant()
def test_empty(self) -> None:
circuit = Circuit(1)
assert circuit.is_constant()
circuit = Circuit(4)
assert circuit.is_constant()
circuit = Circuit(4, [2, 2, 3, 3])
assert circuit.is_constant()
class TestGetNumOperations:
"""This tests `circuit.num_operations`."""
def test_type(self, r6_qudit_circuit: Circuit) -> None:
assert isinstance(r6_qudit_circuit.num_operations, int)
def test_value(self, r6_qudit_circuit: Circuit) -> None:
assert r6_qudit_circuit.num_operations >= 0
def test_empty(self) -> None:
circuit = Circuit(1)
assert circuit.num_operations == 0
circuit = Circuit(4)
assert circuit.num_operations == 0
circuit = Circuit(4, [2, 3, 4, 5])
assert circuit.num_operations == 0
def test_adding_gate(self) -> None:
circuit = Circuit(1)
assert circuit.num_operations == 0
circuit.append_gate(U3Gate(), [0])
assert circuit.num_operations == 1
circuit.append_gate(U3Gate(), [0])
assert circuit.num_operations == 2
circuit.append_gate(U3Gate(), [0])
assert circuit.num_operations == 3
def test_inserting_gate(self) -> None:
circuit = Circuit(1)
assert circuit.num_operations == 0
circuit.insert_gate(0, U3Gate(), [0])
assert circuit.num_operations == 1
circuit.insert_gate(0, U3Gate(), [0])
assert circuit.num_operations == 2
circuit.insert_gate(0, U3Gate(), [0])
assert circuit.num_operations == 3
def test_removing_gate(self) -> None:
circuit = Circuit(1)
circuit.append_gate(U3Gate(), [0])
circuit.append_gate(U3Gate(), [0])
circuit.append_gate(U3Gate(), [0])
assert circuit.num_operations == 3
circuit.remove(U3Gate())
assert circuit.num_operations == 2
circuit.remove(U3Gate())
assert circuit.num_operations == 1
circuit.remove(U3Gate())
assert circuit.num_operations == 0
def test_r1(self, r3_qubit_circuit: Circuit) -> None:
assert r3_qubit_circuit.num_operations == 10
r3_qubit_circuit.append_gate(U3Gate(), [0])
assert r3_qubit_circuit.num_operations == 11
r3_qubit_circuit.insert_gate(0, U3Gate(), [1])
assert r3_qubit_circuit.num_operations == 12
r3_qubit_circuit.insert_gate(0, CNOTGate(), [0, 2])
assert r3_qubit_circuit.num_operations == 13
r3_qubit_circuit.remove(U3Gate())
assert r3_qubit_circuit.num_operations == 12
r3_qubit_circuit.remove(CNOTGate())
assert r3_qubit_circuit.num_operations == 11
class TestGetNumCycles:
"""This tests `circuit.num_cycles`."""
def test_type(self, r6_qudit_circuit: Circuit) -> None:
assert isinstance(r6_qudit_circuit.num_cycles, int)
def test_value(self, r6_qudit_circuit: Circuit) -> None:
assert r6_qudit_circuit.num_cycles >= 0
def test_empty(self) -> None:
circuit = Circuit(1)
assert circuit.num_cycles == 0
circuit = Circuit(4)
assert circuit.num_cycles == 0
circuit = Circuit(4, [2, 3, 4, 5])
assert circuit.num_cycles == 0
def test_adding_gate(self) -> None:
circuit = Circuit(1)
assert circuit.num_cycles == 0
circuit.append_gate(U3Gate(), [0])
assert circuit.num_cycles == 1
circuit.append_gate(U3Gate(), [0])
assert circuit.num_cycles == 2
circuit.append_gate(U3Gate(), [0])
assert circuit.num_cycles == 3
def test_inserting_gate(self) -> None:
circuit = Circuit(1)
assert circuit.num_cycles == 0
circuit.insert_gate(0, U3Gate(), [0])
assert circuit.num_cycles == 1
circuit.insert_gate(0, U3Gate(), [0])
assert circuit.num_cycles == 2
circuit.insert_gate(0, U3Gate(), [0])
assert circuit.num_cycles == 3
def test_removing_gate1(self) -> None:
circuit = Circuit(1)
circuit.append_gate(U3Gate(), [0])
circuit.append_gate(U3Gate(), [0])
circuit.append_gate(U3Gate(), [0])
assert circuit.num_cycles == 3
circuit.remove(U3Gate())
assert circuit.num_cycles == 2
circuit.remove(U3Gate())
assert circuit.num_cycles == 1
circuit.remove(U3Gate())
assert circuit.num_cycles == 0
def test_removing_gate2(self) -> None:
circuit = Circuit(2)
circuit.append_gate(U3Gate(), [0])
circuit.append_gate(CNOTGate(), [0, 1])
circuit.append_gate(U3Gate(), [0])
circuit.append_gate(U3Gate(), [1])
assert circuit.num_cycles == 3
circuit.remove(U3Gate())
assert circuit.num_cycles == 2
circuit.remove(CNOTGate())
assert circuit.num_cycles == 1
circuit.remove(U3Gate())
assert circuit.num_cycles == 1
circuit.remove(U3Gate())
assert circuit.num_cycles == 0
class TestGetParams:
"""This tests `circuit.params`."""
def test_type(self, r6_qudit_circuit: Circuit) -> None:
params = r6_qudit_circuit.params
assert isinstance(params, np.ndarray)
assert all(is_numeric(param) for param in params)
def test_count(self, r6_qudit_circuit: Circuit) -> None:
num_params = r6_qudit_circuit.num_params
params = r6_qudit_circuit.params
assert len(params) == num_params
def test_no_modify(self, r6_qudit_circuit: Circuit) -> None:
params = r6_qudit_circuit.params
if len(params) == 0:
return
params[0] = -params[0] + 1
assert params[0] != r6_qudit_circuit.params[0]
def test_empty(self) -> None:
circuit = Circuit(1)
assert len(circuit.params) == 0
circuit = Circuit(4)
assert len(circuit.params) == 0
circuit = Circuit(4, [2, 3, 4, 5])
assert len(circuit.params) == 0
class TestGetDepth:
"""This tests `circuit.depth`."""
def test_type(self, r6_qudit_circuit: Circuit) -> None:
assert isinstance(r6_qudit_circuit.depth, int)
def test_value(self, r6_qudit_circuit: Circuit) -> None:
assert r6_qudit_circuit.depth >= 0
def test_empty(self) -> None:
circuit = Circuit(1)
assert circuit.depth == 0
circuit = Circuit(4)
assert circuit.depth == 0
circuit = Circuit(4, [2, 3, 4, 5])
assert circuit.depth == 0
def test_adding_gate(self) -> None:
circuit = Circuit(1)
assert circuit.depth == 0
circuit.append_gate(U3Gate(), [0])
assert circuit.depth == 1
circuit.append_gate(U3Gate(), [0])
assert circuit.depth == 2
circuit.append_gate(U3Gate(), [0])
assert circuit.depth == 3
def test_inserting_gate(self) -> None:
circuit = Circuit(1)
assert circuit.depth == 0
circuit.insert_gate(0, U3Gate(), [0])
assert circuit.depth == 1
circuit.insert_gate(0, U3Gate(), [0])
assert circuit.depth == 2
circuit.insert_gate(0, U3Gate(), [0])
assert circuit.depth == 3
def test_removing_gate1(self) -> None:
circuit = Circuit(1)
circuit.append_gate(U3Gate(), [0])
circuit.append_gate(U3Gate(), [0])
circuit.append_gate(U3Gate(), [0])
assert circuit.depth == 3
circuit.remove(U3Gate())
assert circuit.depth == 2
circuit.remove(U3Gate())
assert circuit.depth == 1
circuit.remove(U3Gate())
assert circuit.depth == 0
def test_removing_gate2(self) -> None:
circuit = Circuit(2)
circuit.append_gate(U3Gate(), [0])
circuit.append_gate(CNOTGate(), [0, 1])
circuit.append_gate(U3Gate(), [0])
circuit.append_gate(U3Gate(), [1])
assert circuit.depth == 3
circuit.remove(U3Gate())
assert circuit.depth == 2
circuit.remove(CNOTGate())
assert circuit.depth == 1
circuit.remove(U3Gate())
assert circuit.depth == 1
circuit.remove(U3Gate())
assert circuit.depth == 0
def test_vs_cycles(self, r6_qudit_circuit: Circuit) -> None:
assert (
r6_qudit_circuit.depth
<= r6_qudit_circuit.num_cycles
)
class TestGetParallelism:
"""This tests `circuit.parallelism`."""
def test_type(self, r6_qudit_circuit: Circuit) -> None:
assert isinstance(r6_qudit_circuit.parallelism, float)
def test_value(self, r6_qudit_circuit: Circuit) -> None:
assert r6_qudit_circuit.parallelism > 0
def test_empty(self) -> None:
circuit = Circuit(1)
assert circuit.parallelism == 0
circuit = Circuit(4)
assert circuit.parallelism == 0
circuit = Circuit(4, [2, 3, 4, 5])
assert circuit.parallelism == 0
def test_adding_gate(self) -> None:
circuit = Circuit(1)
assert circuit.parallelism == 0
circuit.append_gate(U3Gate(), [0])
assert circuit.parallelism == 1
circuit.append_gate(U3Gate(), [0])
assert circuit.parallelism == 1
circuit.append_gate(U3Gate(), [0])
assert circuit.parallelism == 1
def test_adding_gate_2(self) -> None:
circuit = Circuit(2)
assert circuit.parallelism == 0
circuit.append_gate(U3Gate(), [0])
assert circuit.parallelism == 1
circuit.append_gate(U3Gate(), [1])
assert circuit.parallelism == 2
circuit.append_gate(U3Gate(), [0])
assert circuit.parallelism == 1.5
circuit.append_gate(U3Gate(), [1])
assert circuit.parallelism == 2
circuit.append_gate(U3Gate(), [0])
assert circuit.parallelism - 5 / 3 < 1e-12
circuit.append_gate(U3Gate(), [1])
assert circuit.parallelism == 2
def test_adding_gate_3(self) -> None:
circuit = Circuit(2)
assert circuit.parallelism == 0
circuit.append_gate(U3Gate(), [0])
assert circuit.parallelism == 1
circuit.append_gate(U3Gate(), [1])
assert circuit.parallelism == 2
circuit.append_gate(CNOTGate(), [0, 1])
assert circuit.parallelism == 2
class TestGetCouplingGraph:
"""This tests `circuit.coupling_graph`."""
def test_type(self, r6_qudit_circuit: Circuit) -> None:
assert is_valid_coupling_graph(
r6_qudit_circuit.coupling_graph, 6,
)
def test_empty(self) -> None:
circuit = Circuit(4)
assert len(circuit.coupling_graph) == 0
assert isinstance(circuit.coupling_graph, set)
def test_single_qubit_1(self) -> None:
circuit = Circuit(1)
assert len(circuit.coupling_graph) == 0
circuit.append_gate(U3Gate(), [0])
assert len(circuit.coupling_graph) == 0
circuit.append_gate(U3Gate(), [0])
assert len(circuit.coupling_graph) == 0
circuit.append_gate(U3Gate(), [0])
assert len(circuit.coupling_graph) == 0
def test_single_qubit_2(self) -> None:
circuit = Circuit(4)
assert len(circuit.coupling_graph) == 0
for i in range(4):
circuit.append_gate(U3Gate(), [i])
assert len(circuit.coupling_graph) == 0
for j in range(4):
for i in range(4):
circuit.append_gate(U3Gate(), [i])
assert len(circuit.coupling_graph) == 0
def test_two_qubit_1(self) -> None:
circuit = Circuit(2)
assert len(circuit.coupling_graph) == 0
circuit.append_gate(CNOTGate(), [0, 1])
cgraph = circuit.coupling_graph
assert len(cgraph) == 1
assert (0, 1) in cgraph
circuit.append_gate(CNOTGate(), [1, 0])
cgraph = circuit.coupling_graph
assert len(cgraph) == 1
assert (0, 1) in cgraph
circuit.remove(CNOTGate())
circuit.remove(CNOTGate())
assert len(circuit.coupling_graph) == 0
def test_two_qubit_2(self) -> None:
circuit = Circuit(4)
assert len(circuit.coupling_graph) == 0
circuit.append_gate(CNOTGate(), [0, 1])
circuit.append_gate(CNOTGate(), [1, 2])
circuit.append_gate(CNOTGate(), [2, 3])
cgraph = circuit.coupling_graph
assert len(cgraph) == 3
assert (0, 1) in cgraph
assert (1, 2) in cgraph
assert (2, 3) in cgraph
circuit.append_gate(CNOTGate(), [2, 3])
circuit.append_gate(CNOTGate(), [1, 2])
circuit.append_gate(CNOTGate(), [0, 1])
cgraph = circuit.coupling_graph
assert len(cgraph) == 3
assert (0, 1) in cgraph
assert (1, 2) in cgraph
assert (2, 3) in cgraph
circuit.append_gate(CNOTGate(), [0, 2])
circuit.append_gate(CNOTGate(), [3, 0])
cgraph = circuit.coupling_graph
assert len(cgraph) == 5
assert (0, 1) in cgraph
assert (1, 2) in cgraph
assert (2, 3) in cgraph
assert (0, 2) in cgraph
assert (0, 3) in cgraph
def test_multi_qubit_1(self, gen_random_utry_np: Any) -> None:
circuit = Circuit(6)
assert len(circuit.coupling_graph) == 0
three_qubit_gate = ConstantUnitaryGate(gen_random_utry_np(8))
circuit.append_gate(three_qubit_gate, [0, 1, 2])
cgraph = circuit.coupling_graph
assert len(cgraph) == 3
assert (0, 1) in cgraph
assert (1, 2) in cgraph
assert (0, 2) in cgraph
circuit.append_gate(three_qubit_gate, [0, 1, 2])
cgraph = circuit.coupling_graph
assert len(cgraph) == 3
assert (0, 1) in cgraph
assert (1, 2) in cgraph
assert (0, 2) in cgraph
circuit.append_gate(three_qubit_gate, [1, 2, 3])
cgraph = circuit.coupling_graph
assert len(cgraph) == 5
assert (0, 1) in cgraph
assert (1, 2) in cgraph
assert (0, 2) in cgraph
assert (1, 3) in cgraph
assert (2, 3) in cgraph
circuit.append_gate(three_qubit_gate, [3, 4, 5])
cgraph = circuit.coupling_graph
assert len(cgraph) == 8
assert (0, 1) in cgraph
assert (1, 2) in cgraph
assert (0, 2) in cgraph
assert (1, 3) in cgraph
assert (2, 3) in cgraph
assert (3, 4) in cgraph
assert (3, 5) in cgraph
assert (4, 5) in cgraph
def test_multi_qudit_2(self, gen_random_utry_np: Any) -> None:
circuit = Circuit(6, [2, 2, 2, 3, 3, 3])
assert len(circuit.coupling_graph) == 0
three_qubit_gate = ConstantUnitaryGate(
gen_random_utry_np(12), [2, 2, 3],
)
circuit.append_gate(three_qubit_gate, [0, 1, 3])
cgraph = circuit.coupling_graph
assert len(cgraph) == 3
assert (0, 1) in cgraph
assert (1, 3) in cgraph
assert (0, 3) in cgraph
circuit.append_gate(CNOTGate(), [1, 2])
cgraph = circuit.coupling_graph
assert len(cgraph) == 4
assert (0, 1) in cgraph
assert (1, 3) in cgraph
assert (0, 3) in cgraph
assert (1, 2) in cgraph
circuit.append_gate(CSUMGate(), [4, 5])
cgraph = circuit.coupling_graph
assert len(cgraph) == 5
assert (0, 1) in cgraph
assert (1, 3) in cgraph
assert (0, 3) in cgraph
assert (1, 2) in cgraph
assert (4, 5) in cgraph
class TestGetGateSet:
"""This tests `circuit.gate_set`."""
def test_type(self, r6_qudit_circuit: Circuit) -> None:
assert isinstance(r6_qudit_circuit.gate_set, set)
assert all(
isinstance(gate, Gate)
for gate in r6_qudit_circuit.gate_set
)
def test_empty(self) -> None:
circuit = Circuit(4)
assert len(circuit.gate_set) == 0
assert isinstance(circuit.gate_set, set)
def test_adding_gate(self) -> None:
circuit = Circuit(1)
assert len(circuit.gate_set) == 0
circuit.append_gate(U3Gate(), [0])
assert len(circuit.gate_set) == 1
assert U3Gate() in circuit.gate_set
circuit.append_gate(XGate(), [0])
assert len(circuit.gate_set) == 2
assert U3Gate() in circuit.gate_set
assert XGate() in circuit.gate_set
circuit.append_gate(ZGate(), [0])
assert len(circuit.gate_set) == 3
assert U3Gate() in circuit.gate_set
assert XGate() in circuit.gate_set
assert ZGate() in circuit.gate_set
circuit.append_gate(TGate(), [0])
assert len(circuit.gate_set) == 4
assert U3Gate() in circuit.gate_set
assert XGate() in circuit.gate_set
assert ZGate() in circuit.gate_set
assert TGate() in circuit.gate_set
def test_removing_gate(self) -> None:
circuit = Circuit(1)
assert len(circuit.gate_set) == 0
circuit.append_gate(U3Gate(), [0])
circuit.append_gate(XGate(), [0])
circuit.append_gate(ZGate(), [0])
circuit.append_gate(TGate(), [0])
assert len(circuit.gate_set) == 4
assert U3Gate() in circuit.gate_set
assert XGate() in circuit.gate_set
assert ZGate() in circuit.gate_set
assert TGate() in circuit.gate_set
circuit.remove(TGate())
assert len(circuit.gate_set) == 3
assert U3Gate() in circuit.gate_set
assert XGate() in circuit.gate_set
assert ZGate() in circuit.gate_set
circuit.remove(XGate())
assert len(circuit.gate_set) == 2
assert U3Gate() in circuit.gate_set
assert ZGate() in circuit.gate_set
circuit.remove(ZGate())
assert len(circuit.gate_set) == 1
assert U3Gate() in circuit.gate_set
circuit.remove(U3Gate())
assert len(circuit.gate_set) == 0
def test_qudit(self) -> None:
circuit = Circuit(3, [2, 3, 3])
assert len(circuit.gate_set) == 0
circuit.append_gate(U3Gate(), [0])
assert len(circuit.gate_set) == 1
assert U3Gate() in circuit.gate_set
circuit.append_gate(XGate(), [0])
assert len(circuit.gate_set) == 2
assert U3Gate() in circuit.gate_set
assert XGate() in circuit.gate_set
circuit.append_gate(ZGate(), [0])
assert len(circuit.gate_set) == 3
assert U3Gate() in circuit.gate_set
assert XGate() in circuit.gate_set
assert ZGate() in circuit.gate_set
circuit.append_gate(TGate(), [0])
assert len(circuit.gate_set) == 4
assert U3Gate() in circuit.gate_set
assert XGate() in circuit.gate_set
assert ZGate() in circuit.gate_set
assert TGate() in circuit.gate_set
circuit.append_gate(CSUMGate(), [1, 2])
assert len(circuit.gate_set) == 5
assert U3Gate() in circuit.gate_set
assert XGate() in circuit.gate_set
assert ZGate() in circuit.gate_set
assert TGate() in circuit.gate_set
assert CSUMGate() in circuit.gate_set
class TestIsDifferentiable:
"""This tests `circuit.is_differentiable`."""
def test_type(self, r6_qudit_circuit: Circuit) -> None:
assert isinstance(r6_qudit_circuit.is_differentiable(), bool)
def test_value(self, gate: Gate) -> None:
circuit = Circuit(gate.num_qudits, gate.radixes)
assert circuit.is_differentiable()
circuit.append_gate(gate, list(range(gate.num_qudits)))
if isinstance(gate, DifferentiableUnitary):
assert circuit.is_differentiable()
else:
assert not circuit.is_differentiable()
@pytest.mark.parametrize(
'circuit', [
Circuit(1),
Circuit(4),
Circuit(4, [2, 3, 4, 5]),
],
)
def test_empty(self, circuit: Circuit) -> None:
assert circuit.is_differentiable() | 0.928991 | 0.719223 |
import os
import json
from pkg_resources import resource_filename
from docutils import nodes
from docutils.utils import new_document
from sphinx.transforms import SphinxTransform
from sphinx.util.docutils import LoggingReporter
from sphinx.util.fileutil import copy_asset
from . import __version__
emoji_styles = {
'twemoji': [
'https://twemoji.maxcdn.com/v/latest/twemoji.min.js',
'twemoji.js',
'twemoji.css',
],
}
def load_emoji_codes():
"""
Load emoji codes from the JSON file.
This function tweaks some emojis to avoid Sphinx warnings when generating
the documentation. See:
- Original issue: https://github.com/sphinx-doc/sphinx/issues/8276
- New issue: https://sourceforge.net/p/docutils/feature-requests/79/
"""
fname = resource_filename(__name__, 'codes.json')
with open(fname, encoding='utf-8') as fp:
codes = json.load(fp)
# Avoid unexpected warnings
warning_keys = []
for key, value in codes.items():
if value.startswith("*"):
warning_keys.append(key)
for key in warning_keys:
codes[key] = "\\" + codes[key]
return codes
class EmojiSubstitutions(SphinxTransform):
default_priority = 211
def __init__(self, document, startnode=None):
super().__init__(document, startnode)
self.parser = self.app.registry.create_source_parser(self.app, 'rst')
def apply(self):
config = self.document.settings.env.config
settings, source = self.document.settings, self.document['source']
codes = load_emoji_codes()
to_handle = (set(codes.keys()) -
set(self.document.substitution_defs))
for ref in self.document.traverse(nodes.substitution_reference):
refname = ref['refname']
if refname in to_handle:
text = codes[refname]
doc = new_document(source, settings)
doc.reporter = LoggingReporter.from_reporter(doc.reporter)
self.parser.parse(text, doc)
substitution = doc.next_node()
# Remove encapsulating paragraph
if isinstance(substitution, nodes.paragraph):
substitution = substitution.next_node()
ref.replace_self(substitution)
def copy_asset_files(app, exc):
asset_files = [
resource_filename(__name__, 'twemoji.js'),
resource_filename(__name__, 'twemoji.css'),
]
if exc is None: # build succeeded
for path in asset_files:
copy_asset(path, os.path.join(app.outdir, '_static'))
def setup(app):
app.connect('build-finished', copy_asset_files)
style = app.config._raw_config.get('sphinxemoji_style')
if style in emoji_styles:
for fname in emoji_styles[style]:
if fname.endswith('.js'):
app.add_js_file(fname)
elif fname.endswith('.css'):
app.add_css_file(fname)
app.add_transform(EmojiSubstitutions)
return {'version': __version__, 'parallel_read_safe': True} | sphinxemoji/sphinxemoji.py | import os
import json
from pkg_resources import resource_filename
from docutils import nodes
from docutils.utils import new_document
from sphinx.transforms import SphinxTransform
from sphinx.util.docutils import LoggingReporter
from sphinx.util.fileutil import copy_asset
from . import __version__
emoji_styles = {
'twemoji': [
'https://twemoji.maxcdn.com/v/latest/twemoji.min.js',
'twemoji.js',
'twemoji.css',
],
}
def load_emoji_codes():
"""
Load emoji codes from the JSON file.
This function tweaks some emojis to avoid Sphinx warnings when generating
the documentation. See:
- Original issue: https://github.com/sphinx-doc/sphinx/issues/8276
- New issue: https://sourceforge.net/p/docutils/feature-requests/79/
"""
fname = resource_filename(__name__, 'codes.json')
with open(fname, encoding='utf-8') as fp:
codes = json.load(fp)
# Avoid unexpected warnings
warning_keys = []
for key, value in codes.items():
if value.startswith("*"):
warning_keys.append(key)
for key in warning_keys:
codes[key] = "\\" + codes[key]
return codes
class EmojiSubstitutions(SphinxTransform):
default_priority = 211
def __init__(self, document, startnode=None):
super().__init__(document, startnode)
self.parser = self.app.registry.create_source_parser(self.app, 'rst')
def apply(self):
config = self.document.settings.env.config
settings, source = self.document.settings, self.document['source']
codes = load_emoji_codes()
to_handle = (set(codes.keys()) -
set(self.document.substitution_defs))
for ref in self.document.traverse(nodes.substitution_reference):
refname = ref['refname']
if refname in to_handle:
text = codes[refname]
doc = new_document(source, settings)
doc.reporter = LoggingReporter.from_reporter(doc.reporter)
self.parser.parse(text, doc)
substitution = doc.next_node()
# Remove encapsulating paragraph
if isinstance(substitution, nodes.paragraph):
substitution = substitution.next_node()
ref.replace_self(substitution)
def copy_asset_files(app, exc):
asset_files = [
resource_filename(__name__, 'twemoji.js'),
resource_filename(__name__, 'twemoji.css'),
]
if exc is None: # build succeeded
for path in asset_files:
copy_asset(path, os.path.join(app.outdir, '_static'))
def setup(app):
app.connect('build-finished', copy_asset_files)
style = app.config._raw_config.get('sphinxemoji_style')
if style in emoji_styles:
for fname in emoji_styles[style]:
if fname.endswith('.js'):
app.add_js_file(fname)
elif fname.endswith('.css'):
app.add_css_file(fname)
app.add_transform(EmojiSubstitutions)
return {'version': __version__, 'parallel_read_safe': True} | 0.439988 | 0.089773 |
import pyspark
import json
import nltk
from nltk import word_tokenize
def _init_line(line):
name = line.lower().split()[0]
return (name,line.lower().split())
def _init_list(sc):
results = {}
companyRDD = sc.textFile("gs://group688/companylist")
coms = companyRDD.map(_init_line).collect()
for com in coms:
for name in com[1]:
results[name] = com[0]
return results
def _data_filter(lines,company,source):
import nltk
nltk.download('punkt',download_dir='./nltk_data')
nltk.data.path.append("./nltk_data")
results = []
for datum in lines:
data = json.loads(datum)
authors = data["authors"]
date = data["date"]
text = data["text"]
title = data["title"]
tokens_text = word_tokenize(text.lower())
tokens_title = word_tokenize(title.lower())
tags = []
for word in text.lower().split():
if word[0]=="#":
tags.append(word.lower())
#Stat is a dictionary, key is the company name, and value is the attribute
#attributes: [in_title,title_count,total_count]
stat = {}
for token in tokens_title:
if token in company:
if company[token] in stat:
stat[company[token]][0] = True
stat[company[token]][1] += 1
else:
stat[company[token]] = [True,1,0]
for token in tokens_text:
if token in company:
if company[token] in stat:
stat[company[token]][2] += 1
else:
stat[company[token]] = [False,0,1]
for name in stat:
result = {}
if (source=="wsj"):
result["date"] = date[:5] + '0' + date[5:9]
else:
result["date"] = date[:10]
result["text"] = text
result["tokens"] = tokens_text
result["company"] = name
result["source"] = source
result["in_title"] = stat[name][0]
result["title_count"] = max(stat[name][1],title.lower().count(name))
result["total_count"] = max(stat[name][2],text.lower().count(name))
result["title"] = title
result["authors"] = authors
result["tags"] = tags
results.append((name,json.dumps(result)))
return results
def real_main():
sc = pyspark.SparkContext()
company = _init_list(sc)
dataRDD1 = sc.textFile("gs://group688/nytimes",5)
dataRDD1 = dataRDD1.mapPartitions(lambda x:_data_filter(x,company,"nytimes"))
dataRDD2 = sc.textFile("gs://group688/wsj",10)
dataRDD2 = dataRDD2.mapPartitions(lambda x:_data_filter(x,company,"wsj"))
dataRDD3 = sc.textFile("gs://group688/reuters.dat",10)
dataRDD3 = dataRDD3.mapPartitions(lambda x:_data_filter(x,company,"reuters"))
dataRDD = dataRDD3.union(dataRDD2).union(dataRDD1)
dataRDD.sortByKey().map(lambda x:x[1]).saveAsTextFile("gs://group688/688v1")
if __name__=="__main__":
real_main() | etl/rawfilter/etl.py | import pyspark
import json
import nltk
from nltk import word_tokenize
def _init_line(line):
name = line.lower().split()[0]
return (name,line.lower().split())
def _init_list(sc):
results = {}
companyRDD = sc.textFile("gs://group688/companylist")
coms = companyRDD.map(_init_line).collect()
for com in coms:
for name in com[1]:
results[name] = com[0]
return results
def _data_filter(lines,company,source):
import nltk
nltk.download('punkt',download_dir='./nltk_data')
nltk.data.path.append("./nltk_data")
results = []
for datum in lines:
data = json.loads(datum)
authors = data["authors"]
date = data["date"]
text = data["text"]
title = data["title"]
tokens_text = word_tokenize(text.lower())
tokens_title = word_tokenize(title.lower())
tags = []
for word in text.lower().split():
if word[0]=="#":
tags.append(word.lower())
#Stat is a dictionary, key is the company name, and value is the attribute
#attributes: [in_title,title_count,total_count]
stat = {}
for token in tokens_title:
if token in company:
if company[token] in stat:
stat[company[token]][0] = True
stat[company[token]][1] += 1
else:
stat[company[token]] = [True,1,0]
for token in tokens_text:
if token in company:
if company[token] in stat:
stat[company[token]][2] += 1
else:
stat[company[token]] = [False,0,1]
for name in stat:
result = {}
if (source=="wsj"):
result["date"] = date[:5] + '0' + date[5:9]
else:
result["date"] = date[:10]
result["text"] = text
result["tokens"] = tokens_text
result["company"] = name
result["source"] = source
result["in_title"] = stat[name][0]
result["title_count"] = max(stat[name][1],title.lower().count(name))
result["total_count"] = max(stat[name][2],text.lower().count(name))
result["title"] = title
result["authors"] = authors
result["tags"] = tags
results.append((name,json.dumps(result)))
return results
def real_main():
sc = pyspark.SparkContext()
company = _init_list(sc)
dataRDD1 = sc.textFile("gs://group688/nytimes",5)
dataRDD1 = dataRDD1.mapPartitions(lambda x:_data_filter(x,company,"nytimes"))
dataRDD2 = sc.textFile("gs://group688/wsj",10)
dataRDD2 = dataRDD2.mapPartitions(lambda x:_data_filter(x,company,"wsj"))
dataRDD3 = sc.textFile("gs://group688/reuters.dat",10)
dataRDD3 = dataRDD3.mapPartitions(lambda x:_data_filter(x,company,"reuters"))
dataRDD = dataRDD3.union(dataRDD2).union(dataRDD1)
dataRDD.sortByKey().map(lambda x:x[1]).saveAsTextFile("gs://group688/688v1")
if __name__=="__main__":
real_main() | 0.155848 | 0.137677 |
from logging import StreamHandler
from unittest import TestCase, main
from expects import expect, contain, equal
from twin_sister import open_dependency_context
from questions_three.constants import TestEvent
from questions_three.event_broker import EventBroker, subscribe_event_handlers
from questions_three.exceptions import TestSkipped
from questions_three.scaffolds.xunit import TestSuite
from twin_sister.fakes import EmptyFake
class TestSkipAllTests(TestCase):
    """
    As a test developer,
    I would like to mark an entire suite as skipped
    So I can skip suites in a systematic way and avoid side effects
    from needless suite setup
    """
    def run_suite(self):
        # NOTE(review): no explicit "run" call follows the class statement,
        # so defining SkippedSuite appears to execute the suite at
        # class-definition time -- confirm against the questions_three
        # xunit scaffold before relying on this.
        class SkippedSuite(TestSuite):
            def setup_suite(suite):
                # The behavior under test: raising TestSkipped from suite
                # setup should mark every test in the suite as skipped.
                raise TestSkipped(self.skip_msg)
            def teardown_suite(suite):
                # The flags set by these hooks let the test methods assert
                # which fixture hooks did (not) execute.
                self.suite_teardown_ran = True
            def setup(suite):
                self.test_setup_ran = True
            def teardown(suite):
                self.test_teardown_ran = True
            def test_one(suite):
                pass
            def test_two(suite):
                pass
            def test_three(suite):
                pass
    def setUp(self):
        # Isolate environment/filesystem/logging and silence log output.
        self.context = open_dependency_context(supply_env=True, supply_fs=True, supply_logging=True)
        self.context.inject(StreamHandler, EmptyFake())
        EventBroker.reset()
        # Wires self.on_test_skipped (below) to the matching broker event.
        subscribe_event_handlers(self)
        self.skip_events = 0
        self.skip_msg = "intentional"
        self.suite_teardown_ran = False
        self.test_setup_ran = False
        self.test_teardown_ran = False
    def tearDown(self):
        self.context.close()
    def on_test_skipped(self, **kwargs):
        # Event handler: counts one skip event per skipped test.
        self.skip_events += 1
    def test_skips_all_tests(self):
        self.run_suite()
        expect(self.skip_events).to(equal(3))  # number of tests
    def test_does_not_run_test_setup(self):
        self.run_suite()
        assert not self.test_setup_ran, "Setup ran"
    def test_does_not_run_test_teardown(self):
        self.run_suite()
        assert not self.test_teardown_ran, "Teardown ran"
    def test_does_not_run_suite_teardown(self):
        self.run_suite()
        assert not self.suite_teardown_ran, "Teardown ran"
    def test_repeats_skip_message_for_tests(self):
        caught = None
        def on_test_skipped(*, exception, **kwargs):
            # Capture the exception object delivered with the skip event.
            nonlocal caught
            caught = exception
        EventBroker.subscribe(event=TestEvent.test_skipped, func=on_test_skipped)
        self.skip_msg = "Sometimes you feel like a nut. Sometimes you don't."
        self.run_suite()
        expect(str(caught)).to(contain(self.skip_msg))
if "__main__" == __name__:
    main()
from unittest import TestCase, main
from expects import expect, contain, equal
from twin_sister import open_dependency_context
from questions_three.constants import TestEvent
from questions_three.event_broker import EventBroker, subscribe_event_handlers
from questions_three.exceptions import TestSkipped
from questions_three.scaffolds.xunit import TestSuite
from twin_sister.fakes import EmptyFake
class TestSkipAllTests(TestCase):
"""
As a test developer,
I would like to mark an entire suite as skipped
So I can skip suites in a systematic way and avoid side effects
from needless suite setup
"""
def run_suite(self):
class SkippedSuite(TestSuite):
def setup_suite(suite):
raise TestSkipped(self.skip_msg)
def teardown_suite(suite):
self.suite_teardown_ran = True
def setup(suite):
self.test_setup_ran = True
def teardown(suite):
self.test_teardown_ran = True
def test_one(suite):
pass
def test_two(suite):
pass
def test_three(suite):
pass
def setUp(self):
self.context = open_dependency_context(supply_env=True, supply_fs=True, supply_logging=True)
self.context.inject(StreamHandler, EmptyFake())
EventBroker.reset()
subscribe_event_handlers(self)
self.skip_events = 0
self.skip_msg = "intentional"
self.suite_teardown_ran = False
self.test_setup_ran = False
self.test_teardown_ran = False
def tearDown(self):
self.context.close()
def on_test_skipped(self, **kwargs):
self.skip_events += 1
def test_skips_all_tests(self):
self.run_suite()
expect(self.skip_events).to(equal(3)) # number of tests
def test_does_not_run_test_setup(self):
self.run_suite()
assert not self.test_setup_ran, "Setup ran"
def test_does_not_run_test_teardown(self):
self.run_suite()
assert not self.test_teardown_ran, "Teardown ran"
def test_does_not_run_suite_teardown(self):
self.run_suite()
assert not self.suite_teardown_ran, "Teardown ran"
def test_repeats_skip_message_for_tests(self):
caught = None
def on_test_skipped(*, exception, **kwargs):
nonlocal caught
caught = exception
EventBroker.subscribe(event=TestEvent.test_skipped, func=on_test_skipped)
self.skip_msg = "Sometimes you feel like a nut. Sometimes you don't."
self.run_suite()
expect(str(caught)).to(contain(self.skip_msg))
if "__main__" == __name__:
main() | 0.739705 | 0.432183 |
import json
from datetime import datetime
from werkzeug.security import generate_password_hash, check_password_hash
from flask import json, jsonify, make_response, g, current_app as app
from flask.ext.sqlalchemy import SQLAlchemy
from sqlalchemy.types import TypeDecorator, Text
from utils import *
class JSONEncodedDict(TypeDecorator):
    """Column type that stores a Python object as JSON text.

    Values are serialized with json.dumps on the way into the database
    and parsed back with json.loads on the way out; None passes through
    unchanged in both directions.
    """
    impl = Text

    def process_bind_param(self, value, dialect):
        return value if value is None else json.dumps(value)

    def process_result_value(self, value, dialect):
        return value if value is None else json.loads(value)
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login hook: fetch the User for a session's stored id."""
    return User.query.get(user_id)
class User(db.Model):
    """Registered account that owns gerber projects."""
    __tablename__ = 'users'
    id = db.Column(db.Integer(), primary_key=True, nullable=False)
    created = db.Column(db.DateTime(), default=datetime.utcnow, nullable=False)
    updated = db.Column(db.DateTime(), onupdate=datetime.utcnow)
    username = db.Column(db.String(50), nullable=False, unique=True)
    password = db.Column(db.String(100), nullable=False)  # stores the werkzeug hash, never plaintext
    email = db.Column(db.String(200), nullable=False, unique=True)
    enabled = db.Column(db.Boolean(), default=True, nullable=False)
    verified = db.Column(db.Boolean(), default=False, nullable=False)
    projects = db.relationship('Project', order_by=db.desc('projects.created'), backref='user')
    def __str__(self):
        # BUG FIX: User has no ``name`` attribute (that column lives on
        # Project); ``self.name`` raised AttributeError. Use ``username``.
        return self.username or self.email
    def __repr__(self):
        return 'User(%s)' % self.id
    # Flask-Login protocol methods -------------------------------------
    def is_authenticated(self):
        return True
    def is_active(self):
        # A disabled account cannot log in.
        return self.enabled
    def is_anonymous(self):
        return False
    def get_id(self):
        # unicode() kept deliberately: the file targets Python 2
        # (flask.ext.* imports elsewhere in this module).
        return unicode(self.id)
    def set_password(self, password):
        """Hash *password* and store the hash on this user."""
        # BUG FIX: the method string contained a '<PASSWORD>' redaction
        # placeholder, which is not a valid iteration count and makes
        # werkzeug raise. 'pbkdf2:sha256' uses werkzeug's default rounds.
        self.password = generate_password_hash(password, method='pbkdf2:sha256')
    def check_password(self, password):
        """Return True if *password* matches the stored hash."""
        return check_password_hash(self.password, password)
class Project(db.Model):
    """A rendered gerber project and its display settings."""
    __tablename__ = 'projects'

    # Column definition order is preserved: SQLAlchemy emits table
    # columns in declaration order.
    id = db.Column(db.String(), primary_key=True, nullable=False)
    created = db.Column(db.DateTime(), default=datetime.utcnow, nullable=False)
    updated = db.Column(db.DateTime(), onupdate=datetime.utcnow)
    name = db.Column(db.String(100))
    public = db.Column(db.Boolean(), default=True, nullable=False)
    expires = db.Column(db.DateTime())
    rendered = db.Column(db.Boolean(), default=False, nullable=False)
    store_gerbers = db.Column(db.Boolean(), default=True, nullable=False)
    width = db.Column(db.Float())
    height = db.Column(db.Float())
    layer_info = db.Column(JSONEncodedDict())
    # NOTE(review): "backgorund" is a typo for "background", but renaming
    # would change the attribute/column name used elsewhere -- left as-is.
    color_backgorund = db.Column(db.String(20))
    color_copper = db.Column(db.String(20))
    color_silkscreen = db.Column(db.String(20))
    user_id = db.Column(db.Integer(),
                        db.ForeignKey('users.id', onupdate='CASCADE', ondelete='CASCADE'))

    def __repr__(self):
        return 'Project(%s)' % self.id
from datetime import datetime
from werkzeug.security import generate_password_hash, check_password_hash
from flask import json, jsonify, make_response, g, current_app as app
from flask.ext.sqlalchemy import SQLAlchemy
from sqlalchemy.types import TypeDecorator, Text
from utils import *
class JSONEncodedDict(TypeDecorator):
impl = Text
def process_bind_param(self, value, dialect):
if value is not None:
value = json.dumps(value)
return value
def process_result_value(self, value, dialect):
if value is not None:
value = json.loads(value)
return value
@login_manager.user_loader
def load_user(id):
return User.query.get(id)
class User(db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer(), primary_key=True, nullable=False)
created = db.Column(db.DateTime(), default=datetime.utcnow, nullable=False)
updated = db.Column(db.DateTime(), onupdate=datetime.utcnow)
username = db.Column(db.String(50), nullable=False, unique=True)
password = db.Column(db.String(100), nullable=False)
email = db.Column(db.String(200), nullable=False, unique=True)
enabled = db.Column(db.Boolean(), default=True, nullable=False)
verified = db.Column(db.Boolean(), default=False, nullable=False)
projects = db.relationship('Project', order_by=db.desc('projects.created'), backref='user')
def __str__(self):
return self.name or self.email
def __repr__(self):
return 'User(%s)' % self.id
def is_authenticated(self):
return True
def is_active(self):
return self.enabled
def is_anonymous(self):
return False
def get_id(self):
return unicode(self.id)
def set_password(self, password):
self.password = generate_password_hash(password, method='pbkdf2:sha256:<PASSWORD>')
def check_password(self, password):
return check_password_hash(self.password, password)
class Project(db.Model):
__tablename__ = 'projects'
def __repr__(self):
return 'Project(%s)' % self.id
id = db.Column(db.String(), primary_key=True, nullable=False)
created = db.Column(db.DateTime(), default=datetime.utcnow, nullable=False)
updated = db.Column(db.DateTime(), onupdate=datetime.utcnow)
name = db.Column(db.String(100))
public = db.Column(db.Boolean(), default=True, nullable=False)
expires = db.Column(db.DateTime())
rendered = db.Column(db.Boolean(), default=False, nullable=False)
store_gerbers = db.Column(db.Boolean(), default=True, nullable=False)
width = db.Column(db.Float())
height = db.Column(db.Float())
layer_info = db.Column(JSONEncodedDict())
color_backgorund = db.Column(db.String(20))
color_copper = db.Column(db.String(20))
color_silkscreen = db.Column(db.String(20))
user_id = db.Column(db.Integer(),
db.ForeignKey('users.id', onupdate='CASCADE', ondelete='CASCADE')) | 0.381335 | 0.066146 |
from __future__ import division, print_function
import os
import sys
import time
from collections import defaultdict, deque
from functools import partial
from builtins import map, range
import re
import six
import pandas as pd
from tgirt_map.trim_function import fastp_trimming, atropos_trimming
class sample_object():
    """Read-mapping pipeline driver for one TGIRT-seq sample.

    Each method assembles a shell command string (trimming, contaminant
    filtering, HISAT2/Bowtie2 mapping, UMI deduplication, feature
    counting) and executes it through ``system_run``; when ``args.dry``
    is set, the commands are printed but not run.
    """
    def __init__(self, args):
        """Capture CLI options and precompute paths and command templates.

        args: argparse-style namespace produced by the tgirt_map CLI.
        """
        # read input
        self.fastq1 = args.fastq1
        self.fastq2 = args.fastq2
        self.outpath = args.outdir
        self.hisat_index = args.hisat_index
        self.bowtie2_index = args.bowtie2_index
        self.rRNA_mt_index = args.rRNA_mt_index
        self.rmsk_index = args.repeats_index
        self.univec_index = args.univec_index
        self.smRNA_index = args.smRNA_index
        self.bedpath = args.bedpath
        self.splicesite = args.splicesite
        self.threads = args.threads
        self.rmsk = args.repeats
        self.UMI = args.umi
        self.TTN = args.TTN
        self.trim_hard = args.trim_aggressive
        self.dry = args.dry
        self.count_all = args.count_all
        self.novel_splice = args.novel_splice
        self.polyA = args.polyA
        self.multi = args.multi
        self.use_fastp = args.fastp
        #### shared (sample-independent) output folders
        self.trim_folder = self.outpath + '/Trim'
        self.count_folder = self.outpath + '/Counts'
        self.count_raw = self.count_folder + '/RAW'
        self.count_rmsk = self.count_folder + '/repeat_RAW'
        # per-sample folders
        self.samplename = args.samplename
        self.sample_folder = self.outpath + '/' + self.samplename
        self.hisat_out = self.sample_folder + '/Hisat'
        self.rRNA_mt_out = self.sample_folder + '/rRNA_mt'
        self.bowtie_out = self.sample_folder + '/Bowtie'
        self.combined_out = self.sample_folder + '/Combined'
        self.repeat_out = self.sample_folder + '/repeats'
        self.univec_contaminants = self.sample_folder + '/UniVec'
        self.smRNA_out = self.sample_folder + '/smallRNA'
        # output file names
        self.trimed1 = '%s/%s.1.fq.gz' %(self.trim_folder, self.samplename)
        self.trimed2 = self.trimed1.replace('.1.fq.gz', '.2.fq.gz')
        # count from the deduplicated BAM only when UMIs are in use and
        # per-read counting was not explicitly requested
        if self.UMI == 0 or self.count_all:
            self.count_bam = self.combined_out + '/primary.bam'
        else:
            self.count_bam = self.combined_out + '/primary.dedup.bam'
        # every shell command goes through system_run (honors dry-run)
        self.run_process = partial(system_run, args.dry, self.samplename)
        self.HISAT2 = 'hisat2 '\
                    '--no-mixed --no-discordant '\
                    ' --new-summary --dta --mp 4,2 '\
                    '-p {threads} '.format(threads = self.threads)
        self.BOWTIE2 = ' bowtie2 ' \
                '--very-sensitive-local ' \
                '-L 8 --mp 4,2 -N 1 '\
                '--no-mixed --no-discordant --dovetail '\
                '-p {threads}'.format(threads = self.threads)
    def make_result_dir(self):
        """Create the output directory tree (idempotent)."""
        print('Checking output folders', file=sys.stderr)
        folders = [self.outpath, self.trim_folder, self.count_folder, self.count_raw,
                self.sample_folder, self.hisat_out, self.rRNA_mt_out,
                self.bowtie_out, self.combined_out, self.smRNA_out,
                self.univec_contaminants]
        # plain loop instead of deque(map(...)): same effect, clearer intent
        for folder in folders:
            makeFolder(folder)
        if self.rmsk:
            makeFolder(self.repeat_out)
            makeFolder(self.count_rmsk)
    def trimming(self):
        """Adapter/quality-trim the raw FASTQ pair (fastp or atropos)."""
        # local renamed from 'trimming' (shadowed the method name) and
        # the unused 'params' dict dropped
        trim_func = fastp_trimming if self.use_fastp else atropos_trimming
        trim_config = {
            'TTN': self.TTN,
            'threads': self.threads,
            'umi': self.UMI,
            'trim_aggressive': self.trim_hard,
            'polyA': self.polyA,
        }
        trim_input = {'FQ1': self.fastq1, 'FQ2': self.fastq2}
        trim_output = {'FQ1': self.trimed1, 'FQ2': self.trimed2}
        command = trim_func(trim_config, trim_input, trim_output)
        self.run_process(command)
    def RNA_filter(self, RNA='univec'):
        """Align trimmed reads to a contaminant/small-RNA index.

        Aligned fragments are counted (BED -> .count); unaligned pairs are
        written out as the "filtered" FASTQ pair for downstream steps.

        RNA: one of 'univec', 'rRNA_mt' or 'smallRNA'.
        """
        if RNA=="univec":
            RNA_filter_out = self.univec_contaminants
            index = self.univec_index
        elif RNA == 'rRNA_mt':
            RNA_filter_out = self.rRNA_mt_out
            index = self.rRNA_mt_index
        elif RNA == 'smallRNA':
            RNA_filter_out = self.smRNA_out
            index = self.smRNA_index
        else:
            # previously fell through to an UnboundLocalError; fail clearly
            raise ValueError('Unknown RNA filter: %s' % RNA)
        self.filtered_fq1 = RNA_filter_out + '/filtered.1.fq.gz'
        self.filtered_fq2 = RNA_filter_out + '/filtered.2.fq.gz'
        _input = '-1 {trimmed1} -2 {trimmed2}'.format(trimmed1 = self.trimed1, trimmed2 = self.trimed2)
        _out_bam = RNA_filter_out + '/aligned.bam'
        _out_bed = RNA_filter_out + '/aligned.bed'
        _out_count = RNA_filter_out + '/aligned.count'
        # align; unmapped pairs (-f4) become the filtered FASTQs; primary
        # aligned pairs are converted to fragments (BED) for counting
        command = self.BOWTIE2 + \
                ' -k 1 -x {index} {input} '\
                '| samtools view -bS@{threads} - '\
                '> {out_bam} ' \
                '; samtools fastq -nf4 -1 {filtered_fq1} -2 {filtered_fq2} {out_bam}'\
                '; cat {out_bam} '\
                '| samtools view -bF2048 -F256 -F4 '\
                '| bam_to_bed.py -i - -o {out_bed} '\
                '-m 5 -M 10000 -p ' \
                .format(filtered_fq1 = self.filtered_fq1,
                        filtered_fq2 = self.filtered_fq2,
                        index = index,
                        threads = self.threads,
                        input = _input,
                        out_bam = _out_bam,
                        out_bed = _out_bed)
        self.run_process(command)
        if not self.dry:
            count_bed(_out_bed, _out_count)
    def hisat_map(self):
        """Spliced genome alignment of the trimmed pair with HISAT2."""
        _input = '-1 {PREMAP_FASTQ1} -2 {PREMAP_FASTQ2} '\
            .format(PREMAP_FASTQ1=self.trimed1, PREMAP_FASTQ2=self.trimed2)
        _split_option = ' '
        _unaligned = '| bamToFastq -i - -fq {bowtie_out}/unmapped.1.fq -fq2 {bowtie_out}/unmapped.2.fq'\
                .format(bowtie_out=self.bowtie_out)
        _zip_command = 'gzip -f {bowtie_out}/unmapped.1.fq;gzip -f {bowtie_out}/unmapped.2.fq'\
                .format(bowtie_out=self.bowtie_out)
        splice_option = ' '
        if self.novel_splice:
            # relax splice penalties to allow novel junction discovery
            splice_option = '--pen-canintronlen C,0,0 --pen-noncanintronlen C,1,0 ' \
                        '--pen-cansplice 0 --pen-noncansplice 2 --max-intronlen 1000000 '\
                        '--rna-strandness FR '
        # map reads
        command = self.HISAT2 +\
                ' -k {multi} --known-splicesite-infile {splicesite} {splice_option} '\
                '--novel-splicesite-outfile {hisat_out}/novelsite.txt -x {ref} {input}'\
                '| samtools view -bS -@ {threads} - > {hisat_out}/hisat.bam'\
                .format(threads=self.threads,
                        multi = self.multi,
                        splicesite=self.splicesite,
                        splice_option = splice_option,
                        hisat_out=self.hisat_out,
                        ref=self.hisat_index,
                        input=_input)
        self.run_process(command)
        # split into uniquely- and multi-mapped BAMs
        uniq_command = 'split_uniq_bam.py -i {hisat_out}/hisat.bam '\
                    '-o {hisat_out}/hisat -a hisat2 {option}'\
                    .format(hisat_out=self.hisat_out, option=_split_option)
        self.run_process(uniq_command)
        # extract unaligned reads as FASTQ for the bowtie2 rescue pass
        command = 'samtools view -@ {threads} -bf4 {hisat_out}/hisat.bam {unaligned_op}'\
                .format(threads=self.threads,
                        hisat_out=self.hisat_out,
                        unaligned_op = _unaligned)
        self.run_process(command)
        self.run_process(_zip_command)
    def bowtie_map(self):
        """Local-mode Bowtie2 rescue mapping of HISAT2-unaligned reads."""
        _input = '-1 {fq_path}/unmapped.1.fq.gz '\
                '-2 {fq_path}/unmapped.2.fq.gz'\
                .format(fq_path = self.bowtie_out)
        _split_option = ' '
        # map reads
        command = self.BOWTIE2 + ' -k {multi} -x {index} {input} '\
                '| samtools view -@{threads} -bS - > {bowtie_out}/bowtie2.bam'\
                .format(threads=self.threads,
                        multi = self.multi,
                        index=self.bowtie2_index,
                        input=_input,
                        bowtie_out=self.bowtie_out)
        self.run_process(command)
        # split into uniquely- and multi-mapped BAMs
        command = 'split_uniq_bam.py '\
                '-i {bowtie_out}/bowtie2.bam '\
                '-o {bowtie_out}/bowtie '\
                '-a bowtie2 {option}'\
                .format(bowtie_out=self.bowtie_out, option=_split_option)
        self.run_process(command)
    def combined_aligned(self):
        """Merge HISAT2 and Bowtie2 alignments into one name-sorted BAM."""
        command = 'samtools cat {hisat}/hisat.multi.bam '\
                '{bowtie}/bowtie.multi.bam '\
                ' > {out}/multi.bam' \
                .format(hisat = self.hisat_out,
                        bowtie = self.bowtie_out,
                        out = self.combined_out)
        self.run_process(command)
        # pick one representative alignment per multi-mapped pair
        command = 'reduce_multi_reads.py --infile {combined_out}/multi.bam '\
                '--outfile {combined_out}/multi_filtered.bam '\
                ' --bam_in --bam_out '\
                .format(combined_out = self.combined_out)
        self.run_process(command)
        command = 'samtools cat {combined}/multi_filtered.bam {hisat}/hisat.unique.bam {bowtie}/bowtie.unique.bam' \
                '| filter_soft_clip.py -s 0.1 -b 0.2 -i - -o - --pe ' \
                '| samtools sort -n -@ {threads} -O bam -T {combined}/temp '\
                '> {combined}/primary.bam' \
                .format(combined = self.combined_out,
                        hisat = self.hisat_out,
                        bowtie = self.bowtie_out,
                        threads = self.threads)
        self.run_process(command)
    def dedup_bam(self):
        """UMI-aware duplicate marking of the combined primary BAM.

        Pipeline: move the UMI into the RX tag -> coordinate sort + index
        -> picard UmiAwareMarkDuplicatesWithMateCigar -> name re-sort into
        primary.dedup.bam. Note the picard step deliberately reuses
        ``tag_bam`` as its OUTPUT, overwriting the intermediate tag file.
        """
        bam_file = self.combined_out + '/primary.bam'
        tag_bam = bam_file.replace('.bam','.tag.bam')
        sorted_bam = bam_file.replace('.bam','.sorted.bam')
        dedup_bam = bam_file.replace('.bam','.dedup.bam')
        umi_text = self.combined_out + '/primary.umi_metrics'
        duplicate_text = self.combined_out + '/primary.duplicate_metrics'
        umi_command = 'bam_umi_tag.py --in_bam {inbam} --out_bam - --tag RX '\
                '| picard SortSam I=/dev/stdin O=/dev/stdout SORT_ORDER=queryname '\
                '| picard FixMateInformation ADD_MATE_CIGAR=true ASSUME_SORTED=true '\
                'INPUT=/dev/stdin OUTPUT=/dev/stdout '\
                '> {outbam}'\
                .format(inbam = bam_file, outbam = tag_bam)
        sort_command = 'samtools sort -@ %i -O bam -T %s/temp %s > %s' \
                %(self.threads, self.combined_out, tag_bam, sorted_bam)
        index_command = 'samtools index %s' %(sorted_bam)
        # BUG FIX: the concatenation below was missing a space after
        # 'TAG_DUPLICATE_SET_MEMBERS=true', producing the malformed
        # argument 'TAG_DUPLICATE_SET_MEMBERS=trueUMI_TAG_NAME=RX'.
        dedup_command = 'picard UmiAwareMarkDuplicatesWithMateCigar UMI_METRICS_FILE=%s ' %(umi_text)+\
                'MAX_EDIT_DISTANCE_TO_JOIN=1 TAG_DUPLICATE_SET_MEMBERS=true ' +\
                'UMI_TAG_NAME=RX INPUT=%s OUTPUT=%s ' %(sorted_bam, tag_bam) +\
                'METRICS_FILE=%s REMOVE_DUPLICATES=false ASSUME_SORT_ORDER=coordinate' %(duplicate_text)
        resort_command = 'samtools sort -n@ %i -O bam -T %s/temp %s > %s ' \
                %(self.threads, self.combined_out, tag_bam, dedup_bam)
        self.run_process(umi_command)
        self.run_process(sort_command)
        self.run_process(index_command)
        self.run_process(dedup_command)
        self.run_process(resort_command)
    def combined_filter(self):
        """Partition the combined BAM by annotation class with bedtools.

        Produces tRNA/rRNA/sncRNA BAMs plus a "long RNA" BAM with those
        classes removed; optionally also separates repeat-masked regions.
        """
        _verb = 'pairtobed'
        _option = '-type neither'
        ### filter out tRNA
        command = 'bedtools {verb} -s -f 0.01 -abam {combined_path}/primary.bam'\
                ' -b {bed_path}/tRNA.bed '\
                '> {out}/tRNA_primary.bam' \
                .format(combined_path = self.combined_out,
                        bed_path = self.bedpath,
                        out=self.combined_out,
                        verb = _verb)
        self.run_process(command)
        ### filter out rRNA
        command = 'bedtools {verb} -s -f 0.01 '\
                '-abam {combined_path}/primary.bam '\
                ' -b {bed_path}/rRNA_for_bam_filter.bed '\
                '> {out}/rRNA_primary.bam' \
                .format(combined_path = self.combined_out,
                        bed_path = self.bedpath,
                        out=self.combined_out,
                        verb = _verb)
        self.run_process(command)
        ### filter out smallRNA
        command = 'bedtools {verb} -s -f 0.01 -abam {count_bam} '\
                '-b {bed_path}/sncRNA_no_tRNA.bed '\
                ' > {combined_path}/sncRNA.bam' \
                .format(count_bam = self.count_bam,
                        bed_path = self.bedpath,
                        combined_path = self.combined_out,
                        verb = _verb)
        self.run_process(command)
        ### keep long RNA only ('-type neither' drops overlapping pairs)
        command = 'bedtools {verb} -s -f 0.01 {option} '\
                '-abam {count_bam} '\
                '-b {bed_path}/sncRNA_rRNA_for_bam_filter.bed '\
                '> {combined_path}/primary_no_sncRNA_tRNA_rRNA.bam' \
                .format(count_bam = self.count_bam,
                        bed_path = self.bedpath,
                        combined_path = self.combined_out,
                        option=_option,
                        verb = _verb)
        self.run_process(command)
        if self.rmsk:
            command = 'bedtools {verb} -f 0.01 {option} '\
                    '-abam {combined_path}/primary_no_sncRNA_tRNA_rRNA.bam '\
                    '-b {rmsk_bed} '\
                    '> {combined_path}/primary_no_sncRNA_tRNA_rRNA_repeats.bam' \
                    .format(count_bam = self.count_bam,
                            option=_option,
                            bed_path = self.bedpath,
                            combined_path = self.combined_out,
                            verb = _verb,
                            rmsk_bed = self.rmsk)
            self.run_process(command)
            command = 'bedtools {verb} -f 0.5 '\
                    '-abam {combined_path}/primary_no_sncRNA_tRNA_rRNA.bam '\
                    '-b {rmsk_bed} -type both '\
                    '> {combined_path}/repeats.bam' \
                    .format(count_bam = self.count_bam,
                            bed_path = self.bedpath,
                            combined_path = self.combined_out,
                            verb = _verb,
                            rmsk_bed = self.rmsk)
            self.run_process(command)
    def make_alignment_bed(self):
        """Convert the sncRNA and long-RNA BAMs to fragment BED files."""
        command = 'bam_to_bed.py -m 5 -M 1000000 '\
                '-i {combined_out}/sncRNA.bam > {combined_out}/sncRNA.bed'\
                .format(combined_out=self.combined_out)
        self.run_process(command)
        command = 'bam_to_bed.py '\
                '-i {combined_out}/primary_no_sncRNA_tRNA_rRNA.bam '\
                '-m 5 -M 1000000 '\
                '> {combined_out}/primary_no_sRNAs.bed'\
                .format(combined_out=self.combined_out)
        self.run_process(command)
    def generate_all_count(self):
        """Count fragments per gene and write the per-sample count table."""
        command = 'bedtools coverage -s -counts -F 0.1 '\
                '-a {bed_path}/sncRNA_no_tRNA.bed '\
                '-b {combined}/sncRNA.bed '\
                '> {combined}/sncRNA.counts'\
                .format(combined=self.combined_out,
                        bed_path=self.bedpath)
        self.run_process(command)
        command = 'bedtools coverage -s -counts -F 0.1 '\
                '-a {bed_path}/genes_no_sncRNA_rRNA_tRNA.bed '\
                '-b {combined}/primary_no_sRNAs.bed '\
                '> {combined}/non_sRNAs.counts'\
                .format(combined=self.combined_out,
                        bed_path=self.bedpath)
        self.run_process(command)
        command = 'cat {combined}/non_sRNAs.counts '\
                '{combined}/sncRNA.counts '\
                '> {count_path}/{samplename}.counts'\
                .format(count_path=self.count_raw,
                        samplename = self.samplename,
                        combined=self.combined_out)
        self.run_process(command)
    def generate_repeat_count(self):
        """Remap repeat-overlapping pairs to the repeat index and count them."""
        command = 'samtools fastq -N@ {threads} {combined_path}/repeats.bam '\
                '-1 {repeat_path}/repeats_1.fq.gz -2 {repeat_path}/repeats_2.fq.gz'\
                .format(repeat_path=self.repeat_out,
                        combined_path =self.combined_out,
                        threads = self.threads)
        self.run_process(command)
        fq_input = ' -1 {repeat_path}/repeats_1.fq.gz -2 {repeat_path}/repeats_2.fq.gz '\
            .format(repeat_path = self.repeat_out)
        command = self.BOWTIE2 + \
                ' -x {repeat_index} {input} '\
                '| samtools view -bS@ {threads} - > {repeat_path}/repeat_remap.bam'\
                .format(threads=self.threads,
                        repeat_index=self.rmsk_index,
                        input = fq_input,
                        repeat_path=self.repeat_out)
        command += '; filter_umi.py -i {repeat_path}/repeat_remap.bam '\
                    '--consecutive_bases 3 '\
                    '| bam_to_bed.py -i - -o {repeat_path}/repeat.bed '\
                    '-m 5 -M 10000'\
                    .format(repeat_path=self.repeat_out)
        self.run_process(command)
        # tally fragments per repeat element and strand
        repeat_count = defaultdict(lambda: defaultdict(int))
        repeat_bed = self.repeat_out + '/repeat.bed'
        repeat_count_file = self.count_rmsk + '/' + self.samplename + '.repeat'
        print('Reading from %s' %repeat_bed, file=sys.stderr)
        if not self.dry:
            with open(repeat_bed,'r') as bed:
                for line in bed:
                    fields = line.strip().split('\t')
                    repeat = fields[0]
                    strand = fields[5]
                    repeat_count[repeat][strand] += 1
            with open(repeat_count_file, 'w') as count_file:
                for repeat_name, strand_dict in six.iteritems(repeat_count):
                    for strand, value in six.iteritems(strand_dict):
                        print('%s\t%s\t%i' %(repeat_name, strand, value), file=count_file)
            print('Written %s' %repeat_count_file, file=sys.stderr)
def count_rRNA(RNA, start, end):
    """Translate an rRNA-reference alignment interval to a gene label.

    Returns '5S_rRNA', '18S_rRNA', '5.8S_rRNA' or '28S_rRNA' when the
    [start, end) interval overlaps the corresponding region on the known
    rRNA contigs, otherwise the generic label 'rDNA'.
    """
    regions = (
        ('gi|23898|emb|X12811.1|', 274, 394, '5S_rRNA'),
        ('gi|555853|gb|U13369.1|HSU13369', 3657, 5527, '18S_rRNA'),
        ('gi|555853|gb|U13369.1|HSU13369', 6623, 6779, '5.8S_rRNA'),
        ('gi|555853|gb|U13369.1|HSU13369', 7935, 12969, '28S_rRNA'),
    )
    for contig, lower, upper, label in regions:
        # overlap test: interval must extend past the region start and
        # begin before the region end (same order as the original chain)
        if RNA == contig and end > lower and start < upper:
            return label
    return 'rDNA'
def count_bed(inbed, out_count):
    """Tally plus-strand fragments per reference name from a BED file.

    Reference names beginning with 'gi' (rRNA contigs) are first mapped
    to a gene label via count_rRNA; the resulting gene/count table is
    written tab-separated, without header or index, to out_count.
    """
    counts = defaultdict(int)
    with open(inbed, 'r') as bed_handle:
        for record in bed_handle:
            cols = record.rstrip().split('\t')
            name, strand = cols[0], cols[5]
            if strand != '+':
                # only plus-strand fragments are counted
                continue
            if name.startswith('gi'):
                name = count_rRNA(name, int(cols[1]), int(cols[2]))
            counts[name] += 1
    table = pd.DataFrame({'gene': list(counts.keys()),
                          'count': list(counts.values())})
    table.filter(['gene', 'count'])\
        .to_csv(out_count, index=False, sep='\t', header=False)
    print('Written %s\n' %out_count, file=sys.stderr)
def system_run(dry, samplename, command):
    """Echo *command* to stderr and execute it with os.system.

    dry: when truthy, only print the command (nothing is executed).
    Returns 0 unconditionally; the shell exit status is not checked.
    """
    print('[%s] Running: %s' %(samplename, command), file=sys.stderr)
    if not dry:
        began = time.time()
        os.system(command)
        elapsed = time.time() - began
        print('[%s] Used time %.3f min\n' %(samplename, elapsed/60), file=sys.stderr)
    return 0
def makeFolder(folder):
    """
    Create *folder* unless it already exists; progress goes to stderr.
    Returns 0 in both cases.
    """
    print('Creating %s....' %folder, file = sys.stderr)
    if not os.path.isdir(folder):
        os.mkdir(folder)
        print('Created %s.' %folder, file = sys.stderr)
    else:
        print('%s exists.' %folder, file = sys.stderr)
    return 0
import os
import sys
import time
from collections import defaultdict, deque
from functools import partial
from builtins import map, range
import re
import six
import pandas as pd
from tgirt_map.trim_function import fastp_trimming, atropos_trimming
class sample_object():
def __init__(self, args):
# read input
self.fastq1 = args.fastq1
self.fastq2 = args.fastq2
self.outpath = args.outdir
self.hisat_index = args.hisat_index
self.bowtie2_index = args.bowtie2_index
self.rRNA_mt_index = args.rRNA_mt_index
self.rmsk_index = args.repeats_index
self.univec_index = args.univec_index
self.smRNA_index = args.smRNA_index
self.bedpath = args.bedpath
self.splicesite = args.splicesite
self.threads = args.threads
self.rmsk = args.repeats
self.UMI = args.umi
self.TTN = args.TTN
self.trim_hard = args.trim_aggressive
self.dry = args.dry
self.count_all = args.count_all
self.novel_splice = args.novel_splice
self.polyA = args.polyA
self.multi = args.multi
self.use_fastp = args.fastp
#### make folder
self.trim_folder = self.outpath + '/Trim'
self.count_folder= self.outpath + '/Counts'
self.count_raw = self.count_folder + '/RAW'
self.count_rmsk = self.count_folder + '/repeat_RAW'
#define sample folder
self.samplename = args.samplename
self.sample_folder = self.outpath + '/' + self.samplename
self.hisat_out = self.sample_folder + '/Hisat'
self.rRNA_mt_out = self.sample_folder + '/rRNA_mt'
self.bowtie_out = self.sample_folder + '/Bowtie'
self.combined_out = self.sample_folder + '/Combined'
self.repeat_out = self.sample_folder + '/repeats'
self.univec_contaminants = self.sample_folder + '/UniVec'
self.smRNA_out = self.sample_folder + '/smallRNA'
#make output file names
self.trimed1= '%s/%s.1.fq.gz' %(self.trim_folder, self.samplename)
self.trimed2= self.trimed1.replace('.1.fq.gz','.2.fq.gz')
if self.UMI == 0 or self.count_all:
self.count_bam = self.combined_out + '/primary.bam'
else:
self.count_bam = self.combined_out + '/primary.dedup.bam'
self.run_process = partial(system_run, args.dry, self.samplename)
self.HISAT2 = 'hisat2 '\
'--no-mixed --no-discordant '\
' --new-summary --dta --mp 4,2 '\
'-p {threads} '.format(threads = self.threads)
self.BOWTIE2 = ' bowtie2 ' \
'--very-sensitive-local ' \
'-L 8 --mp 4,2 -N 1 '\
'--no-mixed --no-discordant --dovetail '\
'-p {threads}'.format(threads = self.threads)
def make_result_dir(self):
print('Checking output folders', file=sys.stderr)
folders = [self.outpath, self.trim_folder, self.count_folder, self.count_raw,
self.sample_folder, self.hisat_out, self.rRNA_mt_out,
self.bowtie_out, self.combined_out, self.smRNA_out,
self.univec_contaminants]
mf = deque(map(makeFolder, folders))
if self.rmsk:
makeFolder(self.repeat_out)
makeFolder(self.count_rmsk)
def trimming(self):
trimming = fastp_trimming if self.use_fastp else atropos_trimming
config, input, output, params = {}, {}, {}, {}
config['TTN'] = self.TTN
config['threads'] = self.threads
config['umi'] = self.UMI
config['trim_aggressive'] = self.trim_hard
config['polyA'] = self.polyA
input['FQ1'] = self.fastq1
input['FQ2'] = self.fastq2
output['FQ1'] = self.trimed1
output['FQ2'] = self.trimed2
command = trimming(config, input, output)
self.run_process(command)
def RNA_filter(self, RNA='univec'):
if RNA=="univec":
RNA_filter_out = self.univec_contaminants
index = self.univec_index
elif RNA == 'rRNA_mt':
RNA_filter_out = self.rRNA_mt_out
index = self.rRNA_mt_index
elif RNA == 'smallRNA':
RNA_filter_out = self.smRNA_out
index = self.smRNA_index
self.filtered_fq1 = RNA_filter_out + '/filtered.1.fq.gz'
self.filtered_fq2 = RNA_filter_out + '/filtered.2.fq.gz'
_input = '-1 {trimmed1} -2 {trimmed2}'.format(trimmed1 = self.trimed1, trimmed2 = self.trimed2)
_out_bam = RNA_filter_out + '/aligned.bam'
_out_bed = RNA_filter_out + '/aligned.bed'
_out_count = RNA_filter_out + '/aligned.count'
command = self.BOWTIE2 + \
' -k 1 -x {index} {input} '\
'| samtools view -bS@{threads} - '\
'> {out_bam} ' \
'; samtools fastq -nf4 -1 {filtered_fq1} -2 {filtered_fq2} {out_bam}'\
'; cat {out_bam} '\
'| samtools view -bF2048 -F256 -F4 '\
'| bam_to_bed.py -i - -o {out_bed} '\
'-m 5 -M 10000 -p ' \
.format(filtered_fq1 = self.filtered_fq1,
filtered_fq2 = self.filtered_fq2,
index = index,
threads = self.threads,
input = _input,
out_bam = _out_bam,
out_bed = _out_bed)
self.run_process(command)
if not self.dry:
count_bed(_out_bed, _out_count)
def hisat_map(self):
_input = '-1 {PREMAP_FASTQ1} -2 {PREMAP_FASTQ2} '\
.format(PREMAP_FASTQ1=self.trimed1, PREMAP_FASTQ2=self.trimed2)
_split_option = ' '
_unaligned = '| bamToFastq -i - -fq {bowtie_out}/unmapped.1.fq -fq2 {bowtie_out}/unmapped.2.fq'\
.format(bowtie_out=self.bowtie_out)
_zip_command = 'gzip -f {bowtie_out}/unmapped.1.fq;gzip -f {bowtie_out}/unmapped.2.fq'\
.format(bowtie_out=self.bowtie_out)
splice_option = ' '
if self.novel_splice:
splice_option = '--pen-canintronlen C,0,0 --pen-noncanintronlen C,1,0 ' \
'--pen-cansplice 0 --pen-noncansplice 2 --max-intronlen 1000000 '\
'--rna-strandness FR '
# map reads
command = self.HISAT2 +\
' -k {multi} --known-splicesite-infile {splicesite} {splice_option} '\
'--novel-splicesite-outfile {hisat_out}/novelsite.txt -x {ref} {input}'\
'| samtools view -bS -@ {threads} - > {hisat_out}/hisat.bam'\
.format(threads=self.threads,
multi = self.multi,
splicesite=self.splicesite,
splice_option = splice_option,
hisat_out=self.hisat_out,
ref=self.hisat_index,
input=_input)
self.run_process(command)
#split to uniq and multimap
uniq_command = 'split_uniq_bam.py -i {hisat_out}/hisat.bam '\
'-o {hisat_out}/hisat -a hisat2 {option}'\
.format(hisat_out=self.hisat_out, option=_split_option)
self.run_process(uniq_command)
#extract unaligned
command = 'samtools view -@ {threads} -bf4 {hisat_out}/hisat.bam {unaligned_op}'\
.format(threads=self.threads,
hisat_out=self.hisat_out,
unaligned_op = _unaligned)
self.run_process(command)
self.run_process(_zip_command)
def bowtie_map(self):
_input = '-1 {fq_path}/unmapped.1.fq.gz '\
'-2 {fq_path}/unmapped.2.fq.gz'\
.format(fq_path = self.bowtie_out)
_split_option = ' '
# map reads
command= self.BOWTIE2 + ' -k {multi} -x {index} {input} '\
'| samtools view -@{threads} -bS - > {bowtie_out}/bowtie2.bam'\
.format(threads=self.threads,
multi = self.multi,
index=self.bowtie2_index,
input=_input,
bowtie_out=self.bowtie_out)
self.run_process(command)
# split to uniq and multimap
command = 'split_uniq_bam.py '\
'-i {bowtie_out}/bowtie2.bam '\
'-o {bowtie_out}/bowtie '\
'-a bowtie2 {option}'\
.format(bowtie_out=self.bowtie_out, option=_split_option)
self.run_process(command)
def combined_aligned(self):
    """Merge HISAT2 and Bowtie2 alignments into one primary BAM.

    Pools the multi-mapped BAMs from both aligners, reduces each
    multi-mapped fragment to a representative alignment, then merges
    with both unique BAMs, filters soft-clipped pairs and name-sorts
    the result into <combined_out>/primary.bam.
    """
    # pool multi-mapping alignments from both aligners
    command = 'samtools cat {hisat}/hisat.multi.bam '\
            '{bowtie}/bowtie.multi.bam '\
            ' > {out}/multi.bam' \
            .format(hisat = self.hisat_out,
                    bowtie = self.bowtie_out,
                    out = self.combined_out)
    self.run_process(command)
    # collapse multi-mapped fragments to a single alignment each
    command = 'reduce_multi_reads.py --infile {combined_out}/multi.bam '\
            '--outfile {combined_out}/multi_filtered.bam '\
            ' --bam_in --bam_out '\
            .format(combined_out = self.combined_out)
    self.run_process(command)
    # merge filtered-multi + unique alignments, drop heavily soft-clipped
    # pairs (-s 0.1 -b 0.2 are filter_soft_clip.py thresholds -- semantics
    # per that script), then name-sort so mates are adjacent downstream
    command ='samtools cat {combined}/multi_filtered.bam {hisat}/hisat.unique.bam {bowtie}/bowtie.unique.bam' \
            '| filter_soft_clip.py -s 0.1 -b 0.2 -i - -o - --pe ' \
            '| samtools sort -n -@ {threads} -O bam -T {combined}/temp '\
            '> {combined}/primary.bam' \
            .format(combined = self.combined_out,
                    hisat = self.hisat_out,
                    bowtie = self.bowtie_out,
                    threads = self.threads)
    self.run_process(command)
def dedup_bam(self):
    """UMI-aware duplicate marking of the combined primary alignments.

    Pipeline: move the UMI into the RX tag, queryname-sort and fix mate
    information, coordinate-sort + index, run picard
    UmiAwareMarkDuplicatesWithMateCigar, then name-sort the dup-marked
    BAM into <combined_out>/primary.dedup.bam.
    """
    bam_file = self.combined_out + '/primary.bam'
    tag_bam = bam_file.replace('.bam','.tag.bam')
    sorted_bam = bam_file.replace('.bam','.sorted.bam')
    dedup_bam = bam_file.replace('.bam','.dedup.bam')
    umi_text = self.combined_out + '/primary.umi_metrics'
    duplicate_text = self.combined_out + '/primary.duplicate_metrics'
    # tag UMIs, then restore mate info after the queryname sort
    umi_command = 'bam_umi_tag.py --in_bam {inbam} --out_bam - --tag RX '\
            '| picard SortSam I=/dev/stdin O=/dev/stdout SORT_ORDER=queryname '\
            '| picard FixMateInformation ADD_MATE_CIGAR=true ASSUME_SORTED=true '\
            'INPUT=/dev/stdin OUTPUT=/dev/stdout '\
            '> {outbam}'\
            .format(inbam = bam_file, outbam = tag_bam)
    sort_command = 'samtools sort -@ %i -O bam -T %s/temp %s > %s' \
            %(self.threads, self.combined_out, tag_bam, sorted_bam)
    index_command = 'samtools index %s' %(sorted_bam)
    # BUG FIX: a space was missing between '...SET_MEMBERS=true' and
    # 'UMI_TAG_NAME=RX', handing picard the bogus argument
    # 'TAG_DUPLICATE_SET_MEMBERS=trueUMI_TAG_NAME=RX'.
    dedup_command = 'picard UmiAwareMarkDuplicatesWithMateCigar UMI_METRICS_FILE=%s ' %(umi_text)+\
            'MAX_EDIT_DISTANCE_TO_JOIN=1 TAG_DUPLICATE_SET_MEMBERS=true ' +\
            'UMI_TAG_NAME=RX INPUT=%s OUTPUT=%s ' %(sorted_bam, tag_bam) +\
            'METRICS_FILE=%s REMOVE_DUPLICATES=false ASSUME_SORT_ORDER=coordinate' %(duplicate_text)
    # name-sort the duplicate-marked BAM (picard overwrote tag_bam above)
    resort_command = 'samtools sort -n@ %i -O bam -T %s/temp %s > %s ' \
            %(self.threads, self.combined_out, tag_bam, dedup_bam)
    self.run_process(umi_command)
    self.run_process(sort_command)
    self.run_process(index_command)
    self.run_process(dedup_command)
    self.run_process(resort_command)
def combined_filter(self):
    """Partition the primary alignments by annotation class.

    Uses ``bedtools pairtobed`` so both mates of a pair are routed
    together: pairs overlapping (>=1% of the read, same strand) the
    tRNA / rRNA / sncRNA BED annotations go to dedicated BAMs, and
    pairs overlapping none of them ('-type neither') become the
    long-RNA BAM, optionally split further by RepeatMasker regions.
    """
    _verb = 'pairtobed'
    _option = '-type neither'
    ### extract tRNA-overlapping pairs
    command = 'bedtools {verb} -s -f 0.01 -abam {combined_path}/primary.bam'\
            ' -b {bed_path}/tRNA.bed '\
            '> {out}/tRNA_primary.bam' \
            .format(combined_path = self.combined_out,
                    bed_path = self.bedpath,
                    out=self.combined_out,
                    verb = _verb)
    self.run_process(command)
    ### extract rRNA-overlapping pairs
    command = 'bedtools {verb} -s -f 0.01 '\
            '-abam {combined_path}/primary.bam '\
            ' -b {bed_path}/rRNA_for_bam_filter.bed '\
            '> {out}/rRNA_primary.bam' \
            .format(combined_path = self.combined_out,
                    bed_path = self.bedpath,
                    out=self.combined_out,
                    verb = _verb)
    self.run_process(command)
    ### extract small-RNA pairs
    # NOTE(review): this and the following steps read self.count_bam
    # rather than primary.bam -- confirm count_bam is set upstream.
    command = 'bedtools {verb} -s -f 0.01 -abam {count_bam} '\
            '-b {bed_path}/sncRNA_no_tRNA.bed '\
            ' > {combined_path}/sncRNA.bam' \
            .format(count_bam = self.count_bam,
                    bed_path = self.bedpath,
                    combined_path = self.combined_out,
                    verb = _verb)
    self.run_process(command)
    ### keep long-RNA pairs: those overlapping no sncRNA/rRNA annotation
    command = 'bedtools {verb} -s -f 0.01 {option} '\
            '-abam {count_bam} '\
            '-b {bed_path}/sncRNA_rRNA_for_bam_filter.bed '\
            '> {combined_path}/primary_no_sncRNA_tRNA_rRNA.bam' \
            .format(count_bam = self.count_bam,
                    bed_path = self.bedpath,
                    combined_path = self.combined_out,
                    option=_option,
                    verb = _verb)
    self.run_process(command)
    if self.rmsk:
        # split long-RNA BAM by RepeatMasker annotation (strand-agnostic:
        # no -s here, unlike the steps above)
        command = 'bedtools {verb} -f 0.01 {option} '\
                '-abam {combined_path}/primary_no_sncRNA_tRNA_rRNA.bam '\
                '-b {rmsk_bed} '\
                '> {combined_path}/primary_no_sncRNA_tRNA_rRNA_repeats.bam' \
                .format(count_bam = self.count_bam,
                        option=_option,
                        bed_path = self.bedpath,
                        combined_path = self.combined_out,
                        verb = _verb,
                        rmsk_bed = self.rmsk)
        self.run_process(command)
        # repeats.bam: pairs where BOTH mates overlap a repeat by >=50%
        command = 'bedtools {verb} -f 0.5 '\
                '-abam {combined_path}/primary_no_sncRNA_tRNA_rRNA.bam '\
                '-b {rmsk_bed} -type both '\
                '> {combined_path}/repeats.bam' \
                .format(count_bam = self.count_bam,
                        bed_path = self.bedpath,
                        combined_path = self.combined_out,
                        verb = _verb,
                        rmsk_bed = self.rmsk)
        self.run_process(command)
def make_alignment_bed(self):
    """Convert the sncRNA and long-RNA BAMs into fragment BED files,
    keeping fragments between 5 and 1,000,000 nt long."""
    out = self.combined_out
    self.run_process('bam_to_bed.py -m 5 -M 1000000 '
                     '-i {0}/sncRNA.bam > {0}/sncRNA.bed'.format(out))
    self.run_process('bam_to_bed.py -i {0}/primary_no_sncRNA_tRNA_rRNA.bam '
                     '-m 5 -M 1000000 > {0}/primary_no_sRNAs.bed'.format(out))
def generate_all_count(self):
    """Count strand-aware fragment coverage over the sncRNA and long-gene
    annotations, then concatenate both tables into the per-sample
    counts file under ``self.count_raw``."""
    annotation_jobs = (
        ('sncRNA_no_tRNA.bed', 'sncRNA.bed', 'sncRNA.counts'),
        ('genes_no_sncRNA_rRNA_tRNA.bed', 'primary_no_sRNAs.bed',
         'non_sRNAs.counts'),
    )
    for annotation, fragments, counts in annotation_jobs:
        # -s: same strand only; -F 0.1: fragment must overlap >=10%
        self.run_process(
            'bedtools coverage -s -counts -F 0.1 '
            '-a {bed}/{annotation} -b {out}/{fragments} '
            '> {out}/{counts}'.format(bed=self.bedpath,
                                      annotation=annotation,
                                      out=self.combined_out,
                                      fragments=fragments,
                                      counts=counts))
    # merge the two count tables into <count_raw>/<sample>.counts
    self.run_process(
        'cat {out}/non_sRNAs.counts {out}/sncRNA.counts '
        '> {raw}/{sample}.counts'.format(out=self.combined_out,
                                         raw=self.count_raw,
                                         sample=self.samplename))
def generate_repeat_count(self):
    """Remap repeat-derived read pairs to the repeat index and count
    alignments per repeat element and strand.

    Writes <count_rmsk>/<samplename>.repeat as a three-column
    (repeat, strand, count) tab-separated file. The counting step is
    skipped on dry runs (``self.dry``).
    """
    # dump repeats.bam back to FASTQ for remapping
    command = 'samtools fastq -N@ {threads} {combined_path}/repeats.bam '\
            '-1 {repeat_path}/repeats_1.fq.gz -2 {repeat_path}/repeats_2.fq.gz'\
            .format(repeat_path=self.repeat_out,
                    combined_path =self.combined_out,
                    threads = self.threads)
    self.run_process(command)
    fq_input = ' -1 {repeat_path}/repeats_1.fq.gz -2 {repeat_path}/repeats_2.fq.gz '\
            .format(repeat_path = self.repeat_out)
    # remap against the RepeatMasker consensus index
    command = self.BOWTIE2 + \
            ' -x {repeat_index} {input} '\
            '| samtools view -bS@ {threads} - > {repeat_path}/repeat_remap.bam'\
            .format(threads=self.threads,
                    repeat_index=self.rmsk_index,
                    input = fq_input,
                    repeat_path=self.repeat_out)
    # UMI-filter the remapped BAM and convert to fragment BED (5-10000 nt)
    command += '; filter_umi.py -i {repeat_path}/repeat_remap.bam '\
            '--consecutive_bases 3 '\
            '| bam_to_bed.py -i - -o {repeat_path}/repeat.bed '\
            '-m 5 -M 10000'\
            .format(repeat_path=self.repeat_out)
    self.run_process(command)
    # tally fragments per (repeat reference, strand)
    repeat_count = defaultdict(lambda: defaultdict(int))
    repeat_bed = self.repeat_out + '/repeat.bed'
    repeat_count_file = self.count_rmsk + '/' + self.samplename + '.repeat'
    print('Reading from %s' %repeat_bed, file=sys.stderr)
    if not self.dry:
        with open(repeat_bed,'r') as bed:
            for line in bed:
                fields = line.strip().split('\t')
                repeat = fields[0]   # BED chrom column = repeat element name
                strand = fields[5]
                repeat_count[repeat][strand] += 1
        with open(repeat_count_file, 'w') as count_file:
            for repeat_name, strand_dict in six.iteritems(repeat_count):
                for strand, value in six.iteritems(strand_dict):
                    print('%s\t%s\t%i' %(repeat_name, strand, value), file=count_file)
    print('Written %s' %repeat_count_file, file=sys.stderr)
def count_rRNA(RNA, start, end):
    """Classify an rRNA alignment interval into a named rRNA species.

    Coordinates are matched against the 5S locus (GenBank X12811.1) and
    the 45S rDNA repeat (GenBank U13369.1); anything that does not fall
    in a known subunit window is reported as generic 'rDNA'.
    """
    rDNA_45S = 'gi|555853|gb|U13369.1|HSU13369'
    if RNA == 'gi|23898|emb|X12811.1|' and end > 274 and start < 394:
        return '5S_rRNA'
    if RNA == rDNA_45S:
        if end > 3657 and start < 5527:
            return '18S_rRNA'
        if end > 6623 and start < 6779:
            return '5.8S_rRNA'
        if end > 7935 and start < 12969:
            return '28S_rRNA'
    return 'rDNA'
def count_bed(inbed, out_count):
    """Count '+'-strand alignments per gene from a BED file.

    'gi'-prefixed references are first mapped to an rRNA species via
    count_rRNA(). The result is written to *out_count* as a two-column
    (gene, count) tab-separated table with no header.
    """
    tally = defaultdict(int)
    with open(inbed, 'r') as bed_handle:
        for record in bed_handle:
            cols = record.rstrip().split('\t')
            ref, strand = cols[0], cols[5]
            if strand != '+':
                continue  # only sense-strand alignments are counted
            if ref.startswith('gi'):
                tally[count_rRNA(ref, int(cols[1]), int(cols[2]))] += 1
            else:
                tally[ref] += 1
    table = pd.DataFrame({'gene': list(tally.keys()),
                          'count': list(tally.values())})
    table.filter(['gene', 'count']) \
        .to_csv(out_count, index=False, sep='\t', header=False)
    print('Written %s\n' % out_count, file=sys.stderr)
def system_run(dry, samplename, command):
    """Echo *command* to stderr and run it through the shell.

    When *dry* is true the command is only printed. Always returns 0;
    timing information is reported to stderr after a real run.
    """
    print('[%s] Running: %s' % (samplename, command), file=sys.stderr)
    if dry:
        return 0
    started = time.time()
    os.system(command)
    elapsed = time.time() - started
    print('[%s] Used time %.3f min\n' % (samplename, elapsed / 60), file=sys.stderr)
    return 0
def makeFolder(folder):
    """Create *folder* if it does not already exist.

    Progress is reported to stderr; returns 0 whether or not the
    directory was newly created.
    """
    print('Creating %s....' % folder, file=sys.stderr)
    if os.path.isdir(folder):
        print('%s exists.' % folder, file=sys.stderr)
    else:
        os.mkdir(folder)
        print('Created %s.' % folder, file=sys.stderr)
    # BUG FIX: the original return line was fused with dataset-export
    # residue ("return 0 | 0.144752 | 0.078325 |"), which is a syntax
    # error; the residue is preserved here as a comment.
    return 0
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for CustomerImage.

    Every operation is metadata-only (verbose names / labels); no
    columns are added or removed.
    """

    dependencies = [
        ('image_info', '0001_initial'),
    ]

    operations = [
        # NOTE(review): verbose_name_plural equals the singular form
        # ('customer image') -- possibly intended 'customer images'; confirm.
        migrations.AlterModelOptions(
            name='customerimage',
            options={'verbose_name': 'customer image', 'verbose_name_plural': 'customer image'},
        ),
        migrations.AlterField(
            model_name='customerimage',
            name='damage_grad',
            field=models.CharField(max_length=64, verbose_name='damage grade'),
        ),
        migrations.AlterField(
            model_name='customerimage',
            name='id',
            field=models.IntegerField(auto_created=True, primary_key=True, serialize=False, verbose_name='id (primary key)'),
        ),
        migrations.AlterField(
            model_name='customerimage',
            name='img_file',
            field=models.ImageField(blank=True, upload_to='cust_upload/%Y%m', verbose_name='original image'),
        ),
        migrations.AlterField(
            model_name='customerimage',
            name='img_name',
            field=models.CharField(max_length=64, verbose_name='image name'),
        ),
        migrations.AlterField(
            model_name='customerimage',
            name='is_right_estimation',
            field=models.BooleanField(default=True, verbose_name='verified result'),
        ),
        migrations.AlterField(
            model_name='customerimage',
            name='pred_file',
            field=models.ImageField(blank=True, upload_to='ml_estimated/%Y%m', verbose_name='estimated image'),
        ),
        migrations.AlterField(
            model_name='customerimage',
            name='recording',
            field=models.CharField(max_length=64, verbose_name='type of recording'),
        ),
        migrations.AlterField(
            model_name='customerimage',
            name='username',
            field=models.CharField(max_length=64, verbose_name='username'),
        ),
    ] | apps/image_info/migrations/0002_auto_20190305_1247.py | from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('image_info', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='customerimage',
options={'verbose_name': 'customer image', 'verbose_name_plural': 'customer image'},
),
migrations.AlterField(
model_name='customerimage',
name='damage_grad',
field=models.CharField(max_length=64, verbose_name='damage grade'),
),
migrations.AlterField(
model_name='customerimage',
name='id',
field=models.IntegerField(auto_created=True, primary_key=True, serialize=False, verbose_name='id (primary key)'),
),
migrations.AlterField(
model_name='customerimage',
name='img_file',
field=models.ImageField(blank=True, upload_to='cust_upload/%Y%m', verbose_name='original image'),
),
migrations.AlterField(
model_name='customerimage',
name='img_name',
field=models.CharField(max_length=64, verbose_name='image name'),
),
migrations.AlterField(
model_name='customerimage',
name='is_right_estimation',
field=models.BooleanField(default=True, verbose_name='verified result'),
),
migrations.AlterField(
model_name='customerimage',
name='pred_file',
field=models.ImageField(blank=True, upload_to='ml_estimated/%Y%m', verbose_name='estimated image'),
),
migrations.AlterField(
model_name='customerimage',
name='recording',
field=models.CharField(max_length=64, verbose_name='type of recording'),
),
migrations.AlterField(
model_name='customerimage',
name='username',
field=models.CharField(max_length=64, verbose_name='username'),
),
] | 0.694821 | 0.121634 |
import random
import string
import requests
import urllib3
from pprint import pprint
import json
import getpass
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

# Log destinations for the three tasks below.
log_file = "fwbackuplog.log"
link_log_file = "lnk.log"
user_log = "user.log"

headers = {'Content-Type': 'application/json', 'User-Agent': 'REST API Agent'}
# Cisco ASA REST API endpoints.
cli_path = "/api/cli/"
api_path = "/api/mgmtaccess/hosts/"
user_path = "/api/objects/localusers/"

# Opened in "w" once at import time to truncate any previous backup log.
openlog = open(log_file, "w")

payload = {"commands": ["show run user"]}
backup_payload = {"commands": ["show run"]}

fw_user = input("Username: ")
try:
    # BUG FIX: the password prompt line was corrupted to
    # "fw_pwd=get<PASSWORD>(...)" (a secret-redaction artifact and a
    # syntax error); restore the intended getpass call.
    fw_pwd = getpass.getpass(prompt='Password: ', stream=None)
    # en_pwd = getpass.getpass(prompt='Enable Password: ', stream=None)
except Exception as error:
    print('ERROR', error)
def get_backups():
    """Fetch 'show run' from every ASA listed in hosts.txt and append
    each JSON response to the backup log.

    Uses the module-level fw_user/fw_pwd credentials; TLS verification
    is disabled (self-signed ASA certificates).
    """
    openlog = open(log_file, "a")
    # BUG FIX: removed a stray `open('hosts.txt', 'r+')` whose handle was
    # immediately shadowed by the `with` statement and leaked.
    with open('hosts.txt') as hosts_file:
        hosts_array = hosts_file.read().splitlines()
    for host in hosts_array:
        url = "https://" + host + cli_path
        print(" ")
        backupresponse = requests.post(url, auth=(fw_user, fw_pwd),
                                       data=json.dumps(backup_payload),
                                       headers=headers, verify=False)
        backup_data = json.loads(backupresponse.text)
        pprint(backup_data)
        openlog.write(backupresponse.text)
        openlog.write("\n\n")
    openlog.close()
    print(" ")
def get_random_password_string(length):
    """Return a random password of *length* characters drawn from
    letters, digits and punctuation.

    SECURITY FIX: uses the ``secrets`` module (a CSPRNG) instead of
    ``random``, which is not suitable for generating credentials.
    """
    import secrets  # local import: keeps this fix self-contained
    password_characters = string.ascii_letters + string.digits + string.punctuation
    return ''.join(secrets.choice(password_characters) for _ in range(length))
def get_mgmtdata():
    """Record each ASA's management-access host entries into the link log.

    For every host in hosts.txt, GETs the mgmtaccess/hosts collection and
    writes one "Type/IP/Netmask/Interface" line per entry.
    """
    openlog = open(link_log_file, "a")
    # BUG FIX: removed a stray `open('hosts.txt', 'r+')` whose handle was
    # immediately shadowed by the `with` statement and leaked.
    with open('hosts.txt') as hosts_file:
        hosts_array = hosts_file.read().splitlines()
    for host in hosts_array:
        url = "https://" + host + api_path
        print(" ")
        mgmtresponse = requests.get(url, auth=(fw_user, fw_pwd), verify=False)
        data = json.loads(mgmtresponse.text)
        print(data['selfLink'])
        for i in data['items']:
            print("type : " + i["type"], i["ip"]["value"], i["netmask"]["value"], i["interface"]["name"])
            strType = i["type"]
            strIP = i["ip"]["value"]
            strNM = i["netmask"]["value"]
            strInt = i["interface"]["name"]
            openlog.write("Type: %s\tIP: %s\tNetmask: %s\tInterface: %s \n" % (strType, strIP, strNM, strInt))
        openlog.write("\n")
    openlog.close()
    print(" ")
def update_passwords():
    """Rotate every local ASA user's password to a random 16-char value.

    For each user found on each host, a fresh password is generated and
    the corresponding `username ...` CLI command is POSTed to EVERY host
    in hosts.txt (all devices end up with the same credential set --
    this mirrors the original behaviour).
    """
    openlog = open(user_log, "a")
    # BUG FIX: removed a stray `open('hosts.txt', 'r+')` whose handle was
    # immediately shadowed by the `with` statement and leaked.
    with open('hosts.txt') as hosts_file:
        hosts_array = hosts_file.read().splitlines()
    for host in hosts_array:
        url = "https://" + host + user_path
        print("")
        print(url)
        userreq = requests.get(url, auth=(fw_user, fw_pwd), headers=headers, verify=False)
        usernameres = json.loads(userreq.text)
        for i in usernameres['items']:
            print("Username : " + i["name"], ",Privilege Level : ", i["privilegeLevel"])
            openlog.write("Url: %s \tUsername: %s\tPrivilege Level: %s \n"
                          % (url, i["name"], i["privilegeLevel"]))
        print("")
        for j in usernameres['items']:
            username = j["name"]
            privilege = j["privilegeLevel"]
            password = get_random_password_string(16)
            cmdline = f"username {username} password {password} privilege {privilege}"
            # BUG FIX: build the payload with json.dumps instead of manual
            # string concatenation -- the generated passwords include
            # punctuation (quotes, backslashes) that previously produced
            # invalid JSON and a failed/garbled password change.
            _jsonpayload = json.dumps({"commands": [cmdline]})
            print(_jsonpayload)
            openlog.write(_jsonpayload)
            # `target` avoids shadowing the outer `host` loop variable
            for target in hosts_array:
                pwdurl = "https://" + target + cli_path
                print(pwdurl)
                requests.post(pwdurl, auth=(fw_user, fw_pwd), data=_jsonpayload,
                              headers=headers, verify=False)
            openlog.write("\n")
            print("")
    openlog.close()
    print(" ")
if __name__ == "__main__":
    # get_creds()  # (disabled) gather credentials for the run
    get_backups()       # back up each device's running config first
    get_mgmtdata()      # record management-access host entries
    update_passwords()  # WARNING: rotates ALL local account passwords on the ASAs!
import random
import string
import requests
import urllib3
from pprint import pprint
import json
import getpass
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
log_file="fwbackuplog.log"
link_log_file="lnk.log"
user_log="user.log"
headers = {'Content-Type': 'application/json', 'User-Agent':'REST API Agent'}
cli_path = "/api/cli/"
api_path = "/api/mgmtaccess/hosts/"
user_path="/api/objects/localusers/"
openlog=open(log_file, "w")
payload={"commands":["show run user"]}
backup_payload={"commands": ["show run"]}
fw_user=input("Username: ")
try:
fw_pwd=get<PASSWORD>(prompt='Password: ', stream=None)
#en_pwd=getpass.<PASSWORD>pass(prompt='Enable Password: ', stream=None)
except Exception as error:
print ('ERROR',error)
def get_backups():
openlog=open(log_file, "a")
hosts_file = open('hosts.txt', 'r+')
with open('hosts.txt') as hosts_file:
hosts_array = hosts_file.read().splitlines()
for host in hosts_array:
url = "https://"+ host + cli_path
print(" ")
backupresponse=requests.post(url,auth=(fw_user, fw_pwd),data=json.dumps(backup_payload),headers=headers,verify=False)
backup_data=json.loads(backupresponse.text)
pprint(backup_data)
openlog.write(backupresponse.text)
openlog.write("\n\n")
openlog.close()
print(" ")
pass
def get_random_password_string(length):
password_characters = string.ascii_letters + string.digits + string.punctuation
password = ''.join(random.choice(password_characters) for p in range(length))
return password
def get_mgmtdata():
openlog=open(link_log_file, "a")
hosts_file = open('hosts.txt', 'r+')
with open('hosts.txt') as hosts_file:
hosts_array = hosts_file.read().splitlines()
for host in hosts_array:
url = "https://"+ host + api_path
print(" ")
mgmtresponse=requests.get(url,auth=(fw_user, fw_pwd),verify=False)
data=json.loads(mgmtresponse.text)
print(data['selfLink'])
for i in data['items']:
print("type : " + i["type"],i["ip"]["value"],i["netmask"]["value"],i["interface"]["name"])
strType=i["type"]
strIP=i["ip"]["value"]
strNM=i["netmask"]["value"]
strInt=i["interface"]["name"]
openlog.write("Type: %s\tIP: %s\tNetmask: %s\tInterface: %s \n" % (strType,strIP,strNM,strInt))
openlog.write("\n")
openlog.close()
print(" ")
def update_passwords():
openlog=open(user_log, "a")
hosts_file = open('hosts.txt', 'r+')
with open('hosts.txt') as hosts_file:
hosts_array = hosts_file.read().splitlines()
for host in hosts_array:
url = "https://"+ host + user_path
print("")
print(url)
userreq=requests.get(url,auth=(fw_user, fw_pwd),headers=headers,verify=False)
usernameres = json.loads(userreq.text)
for i in usernameres['items']:
print("Username : " + i["name"],",Privilege Level : ",i["privilegeLevel"])
str_username=i["name"]
str_privilegeLevel=i["privilegeLevel"]
openlog.write("Url: %s \tUsername: %s\tPrivilege Level: %s \n" % (url,str_username,str_privilegeLevel))
print("")
for j in usernameres['items']:
username=j["name"]
privilege=j["privilegeLevel"]
password=get_random_password_string(16)
cmdline=f"username {username} password {password} privilege {privilege}"
newcli='"'+ cmdline + '"'
_jsonpayload="{"+ '"'+"commands"+'"'+':'+"[" + newcli +"]}"
print(_jsonpayload)
openlog.write(_jsonpayload)
for host in hosts_array:
pwdurl = "https://"+ host + cli_path
print(pwdurl)
requests.post(pwdurl,auth=(fw_user, fw_pwd),data=_jsonpayload,headers=headers,verify=False)
openlog.write("\n")
print("")
openlog.close()
print(" ")
if __name__ == "__main__":
#get_creds()#Get credentials for Login and access to your script to run
get_backups()# Back it up before you start.
get_mgmtdata()#Get Management Access Information
update_passwords()#This will change all passwords returned for any local accounts on the ASA! | 0.076604 | 0.037538 |
import os
import csv
import json
from unet3d.data import write_data_to_file, open_data_file
from unet3d.generator import get_training_and_validation_generators
from unet3d.prediction import run_validation_cases
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"  # pin the run to GPU 0

# Run relative to this script's directory so the relative data/model
# paths below resolve regardless of the caller's cwd.
cur_dir = os.path.dirname(os.path.realpath(__file__))
os.chdir(cur_dir)

# Load the base experiment configuration, then override the file paths
# and split for this test run.
config_json = os.path.abspath('models/T1/51U_tissue_w_template_init_pennmodel.json')
with open(config_json) as jf:
    config = json.load(jf)
config["data_file"] = os.path.abspath("data/51U_T1_tissue_w_template_test.h5")
config["training_file"] = os.path.abspath("data/T1tissue_w_template_test1.pkl")
config["validation_file"] = os.path.abspath("data/T1tissue_w_template_test2.pkl")
config["validation_split"] = 1  # portion of the data that will be used for training

test_data_files = list()
subject_ids = list()
def read_split(splitFile):
    """Read a two-column CSV of (subject_id, 'Train'|'Test') rows and
    return the (train_ids, test_ids) lists.

    Rows with fewer than two columns (e.g. trailing blank lines) are
    skipped instead of raising IndexError; rows with any other label
    are ignored, as before.
    """
    trainID = []
    testID = []
    with open(splitFile, 'r') as csvfile:
        for row in csv.reader(csvfile):
            if len(row) < 2:
                continue  # tolerate blank/short rows
            if row[1] == "Train":
                trainID.append(row[0])
            elif row[1] == "Test":
                testID.append(row[0])
    return trainID, testID
trainID, testID = read_split(config["split_file"])

baseDir = config["image_dir"]
trainDirs = ["%s/%s" % (baseDir, x) for x in trainID]
testDirs = ["%s/%s" % (baseDir, x) for x in testID]

# Assemble (T1, spatial prior, segmentation) file triplets per subject.
# NOTE(review): the loop ranges over len(testDirs) but reads trainDirs[i]
# while labelling the data with testID below -- this looks like a
# train/test mix-up; confirm whether trainDirs should be testDirs here.
for i in range(len(testDirs)):
    subject_files = list()
    subject_files.append("%s_%s" % (trainDirs[i],"T1.nii.gz"))
    subject_files.append("%s_%s" % (trainDirs[i],"spatial_prior.nii.gz"))
    subject_files.append("%s_%s" % (trainDirs[i],"seg.nii.gz"))
    test_data_files.append(tuple(subject_files))

# Convert the image files into a single HDF5 data file; the last two
# channels of each tuple are treated as label volumes.
if not(os.path.exists(config["data_file"])):
    write_data_to_file(test_data_files, config["data_file"],
                       image_shape=config["image_shape"],
                       subject_ids=testID,
                       label_indices=[len(subject_files)-1, len(subject_files)-2])

data_file_opened = open_data_file(config["data_file"])

# NOTE(review): the generators themselves are unused below; this call
# appears to run only for its side effect of (re)writing the
# training/validation key files -- confirm.
train_generator, validation_generator, n_train_steps, n_validation_steps = get_training_and_validation_generators(
    data_file_opened,
    batch_size=config["batch_size"],
    data_split=config["validation_split"],
    overwrite=False,
    validation_keys_file=config["validation_file"],
    training_keys_file=config["training_file"],
    n_labels=config["n_labels"],
    labels=config["labels"],
    patch_shape=config["patch_shape"],
    validation_batch_size=config["validation_batch_size"],
    validation_patch_overlap=config["validation_patch_overlap"],
    training_patch_start_offset=config["training_patch_start_offset"],
    permute=config["permute"],
    augment=config["augment"],
    skip_blank=config["skip_blank"],
    augment_flip=config["flip"],
    augment_distortion_factor=config["distort"])

# Predict label maps for all cases listed in the training key file.
prediction_dir = os.path.abspath("predictions/T1/test")
run_validation_cases(validation_keys_file=config["training_file"],
                     model_file=config["model_file"],
                     training_modalities=config["training_modalities"],
                     labels=config["labels"],
                     hdf5_file=config["data_file"],
                     output_label_map=True,
                     output_dir=prediction_dir,
                     threshold=0.5) | run_test_cases.py | import os
import csv
import json
from unet3d.data import write_data_to_file, open_data_file
from unet3d.generator import get_training_and_validation_generators
from unet3d.prediction import run_validation_cases
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
cur_dir = os.path.dirname(os.path.realpath(__file__))
os.chdir(cur_dir)
config_json = os.path.abspath('models/T1/51U_tissue_w_template_init_pennmodel.json')
with open(config_json) as jf:
config = json.load(jf)
config["data_file"] = os.path.abspath("data/51U_T1_tissue_w_template_test.h5")
config["training_file"] = os.path.abspath("data/T1tissue_w_template_test1.pkl")
config["validation_file"] = os.path.abspath("data/T1tissue_w_template_test2.pkl")
config["validation_split"] = 1 # portion of the data that will be used for training
test_data_files = list()
subject_ids = list()
def read_split(splitFile):
trainID = []
testID = []
with open(splitFile, 'r') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
if row[1] == "Train":
trainID.append(row[0])
elif row[1] == "Test":
testID.append(row[0])
return trainID, testID
trainID, testID = read_split(config["split_file"])
baseDir = config["image_dir"]
trainDirs = ["%s/%s" % (baseDir, x) for x in trainID]
testDirs = ["%s/%s" % (baseDir, x) for x in testID]
for i in range(len(testDirs)):
subject_files = list()
subject_files.append("%s_%s" % (trainDirs[i],"T1.nii.gz"))
subject_files.append("%s_%s" % (trainDirs[i],"spatial_prior.nii.gz"))
subject_files.append("%s_%s" % (trainDirs[i],"seg.nii.gz"))
test_data_files.append(tuple(subject_files))
if not(os.path.exists(config["data_file"])):
write_data_to_file(test_data_files, config["data_file"],
image_shape=config["image_shape"],
subject_ids=testID,
label_indices=[len(subject_files)-1, len(subject_files)-2])
data_file_opened = open_data_file(config["data_file"])
train_generator, validation_generator, n_train_steps, n_validation_steps = get_training_and_validation_generators(
data_file_opened,
batch_size=config["batch_size"],
data_split=config["validation_split"],
overwrite=False,
validation_keys_file=config["validation_file"],
training_keys_file=config["training_file"],
n_labels=config["n_labels"],
labels=config["labels"],
patch_shape=config["patch_shape"],
validation_batch_size=config["validation_batch_size"],
validation_patch_overlap=config["validation_patch_overlap"],
training_patch_start_offset=config["training_patch_start_offset"],
permute=config["permute"],
augment=config["augment"],
skip_blank=config["skip_blank"],
augment_flip=config["flip"],
augment_distortion_factor=config["distort"])
prediction_dir = os.path.abspath("predictions/T1/test")
run_validation_cases(validation_keys_file=config["training_file"],
model_file=config["model_file"],
training_modalities=config["training_modalities"],
labels=config["labels"],
hdf5_file=config["data_file"],
output_label_map=True,
output_dir=prediction_dir,
threshold=0.5) | 0.196595 | 0.16388 |
import requests
from requests_oauthlib import OAuth2Session
from urllib.parse import urljoin
from .models import DayliteData, Contact, Thin_Contact, Company, Opportunity
# Marketcircle OAuth2 endpoints (user authorisation + token exchange).
USER_AUTHORISE_URL="https://www.marketcircle.com/account/oauth/authorize"
OAUTH_TOKEN_URL="https://www.marketcircle.com/account/oauth/token"
# API base; AUTH_URL doubles as a cheap authenticated "ping" endpoint
# (see Daylite.check_session).
AUTH_URL = "https://api.marketcircle.net/v1"
URL_ROOT = "https://api.marketcircle.net/"
class Daylite:
    """Thin OAuth2 client for the Daylite (Marketcircle) REST API."""

    def __init__(self, client_id, token):
        # Wrap the raw bearer token in the dict shape OAuth2Session expects.
        _token = {"access_token": token}
        self.session = OAuth2Session(client_id, token=_token)

    def check_session(self) -> bool:
        """Return True if the API root answers 200 for this session."""
        res = self.session.get(AUTH_URL)
        return res.status_code == 200

    def save(self, method: str, url: str, data: dict):
        """Send *data* as JSON with the given HTTP *method*; raises on HTTP error."""
        url = urljoin(URL_ROOT, url)
        # print("save URL {}".format(url))
        # print("saving {}".format(data))
        res = self.session.request(method, url, json=data)
        res.raise_for_status()
        # For now, return the straight result
        return res

    def fetch(self, schema, ref) -> DayliteData:
        """GET *ref* and wrap the JSON payload in a DayliteData of *schema*."""
        res = self.session.get(urljoin(URL_ROOT, ref))
        res.raise_for_status()
        return DayliteData._server(schema, res.json(), self)

    def contacts(self) -> [DayliteData]:
        """
        Returns a list of all contacts,
        in the "tiny contacts" variant.
        """
        res = self.session.get(urljoin(URL_ROOT, "/v1/contacts"))
        res.raise_for_status()
        body = res.json()
        # Thin_Contact doesn't have the normal server-provided
        # values in it
        # And shouldn't be used commonly anyway
        return [DayliteData(Thin_Contact, row) for row in body]

    def contact(self, id_) -> DayliteData:
        """Fetch a single full contact by reference."""
        return self.fetch(Contact, id_)

    def companies(self) -> [DayliteData]:
        """
        Returns a list of all companies
        (docstring previously said "contacts" -- copy-paste fix).
        """
        res = self.session.get(urljoin(URL_ROOT, "/v1/companies"))
        res.raise_for_status()
        body = res.json()
        return [DayliteData(Company, row) for row in body]

    def company(self, id_):
        """Fetch a single company by reference."""
        return self.fetch(Company, id_)

    def opportunity(self, id_):
        """Fetch a single opportunity by reference."""
        return self.fetch(Opportunity, id_)

    # The remaining resource types are not implemented yet.
    def project(self):
        pass

    def resource(self):
        pass

    def appointment(self):
        pass

    def task(self):
        pass

    def user(self):
        pass

    def note(self):
        pass

    def form(self):
        pass

    def group(self):
        pass

    def subscription(self):
        pass | daylite/__init__.py | import requests
from requests_oauthlib import OAuth2Session
from urllib.parse import urljoin
from .models import DayliteData, Contact, Thin_Contact, Company, Opportunity
USER_AUTHORISE_URL="https://www.marketcircle.com/account/oauth/authorize"
OAUTH_TOKEN_URL="https://www.marketcircle.com/account/oauth/token"
AUTH_URL = "https://api.marketcircle.net/v1"
URL_ROOT = "https://api.marketcircle.net/"
class Daylite:
def __init__(self, client_id, token):
_token = {"access_token": token}
self.session = OAuth2Session(client_id, token=_token)
def check_session(self) -> int:
res = self.session.get(AUTH_URL)
return res.status_code == 200
def save(self, method: str, url: str, data: dict):
url = urljoin(URL_ROOT, url)
# print("save URL {}".format(url))
# print("saving {}".format(data))
res = self.session.request(method, url, json=data)
res.raise_for_status()
# For now, return the straight result
return res
def fetch(self, schema, ref) -> DayliteData:
res = self.session.get(urljoin(URL_ROOT, ref))
res.raise_for_status()
return DayliteData._server(schema, res.json(), self)
def contacts(self) -> [DayliteData]:
"""
Returns a list of all contacts,
in the "tiny contacts" variant.
"""
res = self.session.get(urljoin(URL_ROOT, "/v1/contacts"))
res.raise_for_status()
body = res.json()
# Thin_Contact doesn't have the normal server-provided
# values in it
# And shouldn't be used commonly anyway
return [DayliteData(Thin_Contact, row) for row in body]
def contact(self, id_) -> DayliteData:
return self.fetch(Contact, id_)
def companies(self) -> [DayliteData]:
"""
Returns a list of all contacts,
in the "tiny contacts" variant.
"""
res = self.session.get(urljoin(URL_ROOT, "/v1/companies"))
res.raise_for_status()
body = res.json()
return [DayliteData(Company, row) for row in body]
def company(self, id_):
return self.fetch(Company, id_)
def opportunity(self, id_):
return self.fetch(Opportunity, id_)
def project(self):
pass
def resource(self):
pass
def appointment(self):
pass
def task(self):
pass
def user(self):
pass
def note(self):
pass
def form(self):
pass
def group(self):
pass
def subscription(self):
pass | 0.277277 | 0.143638 |
from django.db import models
from django.conf import settings
from django.utils.functional import cached_property
from django.urls import reverse
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from ..utils import EnhancedTextField
from tagging.fields import TagField
from tagging import registry
class CategoryQueryset(models.QuerySet):
    """Extension point for Category querysets; no custom filters yet."""
    pass
class CategoryManager(models.Manager):
    """Extension point for the Category manager; no custom behaviour yet."""
    pass
class Category(models.Model):
    """A blog entry category, identified by a unique slug."""

    name = models.CharField(_(u'Nazwa'), max_length=255)
    slug = models.SlugField(_(u'Adres/Slug'), unique=True)

    # BUG FIX: from_queryset() returns a manager *class*; it must be
    # instantiated (cf. Entry.objects) or the class object itself would
    # sit in the manager attribute.
    objects = CategoryManager.from_queryset(CategoryQueryset)()

    class Meta:
        verbose_name = _(u'Kategoria')
        verbose_name_plural = _(u'Kategorie')
        ordering = ['name']

    def __str__(self):
        return self.name

    # BUG FIX: __unicode__ is never called on Python 3 (this file uses
    # django.urls, i.e. Django 2+), so categories rendered as
    # "Category object"; keep the old name as an alias.
    __unicode__ = __str__

    @staticmethod
    def autocomplete_search_fields():
        # Used by grappelli/admin autocomplete widgets.
        return 'id__iexact', 'name__icontains'
class EntryQuerySet(models.QuerySet):
    """Chainable publication filters shared by both Entry managers."""

    def active(self):
        # Entries flagged ready for publication.
        return self.filter(active=True)

    def before(self, date):
        # Publish time at or before *date*.
        return self.filter(publish_time__lte=date)

    def after(self, date):
        # Publish time at or after *date*.
        return self.filter(publish_time__gte=date)

    def published(self):
        # Active entries whose publish time has already passed.
        return self.active().before(timezone.now())
class EntryManager(models.Manager):
    """Default Entry manager; combined with EntryQuerySet via from_queryset."""
    pass
class PublishedEntryManager(models.Manager):
    """Manager whose base queryset contains only published entries.

    Relies on being combined with EntryQuerySet through from_queryset()
    (see Entry.published) so that .active() and .before() exist on the
    queryset returned by super().get_queryset().
    """

    def get_queryset(self):
        return super(PublishedEntryManager, self).get_queryset().active().before(timezone.now())
class Entry(models.Model):
    """A blog post with scheduled publication, tagging and categories."""

    ALIGN_CHOICES = (
        ('alignleft', _(u'Do lewej')),
        ('alignright', _(u'Do prawej')),
        ('aligncenter', _(u'Do środka'))
    )

    title = models.CharField(_(u'Tytuł'), max_length=255)
    slug = models.SlugField(_(u'Adres/Slug'), unique=True)
    text = EnhancedTextField(_(u'Tresc'))
    new_tags = TagField(_('Tagi'), blank=True, null=True)
    categories = models.ManyToManyField(Category, verbose_name=_('Kategorie'))
    posted = models.DateTimeField(_(u'Utworzono'), auto_now_add=True)
    modified = models.DateTimeField(_(u'Zmieniono'), auto_now=True)
    active = models.BooleanField(_(u'Aktywny'),
                                 help_text=_(u'Zaznacz, jeżeli tekst jest gotowy do publikacji (nie notka)'),
                                 default=False)
    publish_time = models.DateTimeField(_('Czas publikacji'), default=timezone.now)
    image = models.ImageField(_(u'Obraz szeroki (cover)'), blank=True, null=True, upload_to='blog-images')
    image_right = models.ImageField(_(u'Obraz wąski/pływający'), blank=True, null=True, upload_to='blog-images')
    related_event = models.ForeignKey('partners.MediaPatronage', blank=True, null=True, related_name='blog_entries', on_delete=models.CASCADE)

    # Default manager, plus a manager pre-filtered to published entries.
    objects = EntryManager.from_queryset(EntryQuerySet)()
    published = PublishedEntryManager.from_queryset(EntryQuerySet)()

    class Meta:
        verbose_name = _(u'Wpis')
        verbose_name_plural = _(u'Wpisy')
        ordering = ['-publish_time', '-posted', '-pk']

    def __str__(self):
        return self.title

    # BUG FIX: __unicode__ is never called on Python 3 (this file uses
    # django.urls, i.e. Django 2+), so entries rendered as
    # "Entry object"; keep the old name as an alias.
    __unicode__ = __str__

    def get_text_length(self):
        return len(self.text)

    def get_absolute_url(self):
        # NOTE(review): the URL is built from `posted` (creation time)
        # while ordering uses publish_time -- confirm this is intentional.
        return reverse('blog_entry', kwargs={'year': str(self.posted.year),
                                             'month': str(self.posted.month),
                                             'slug': str(self.slug)})

    @cached_property
    def is_long(self):
        # Cached: entries longer than the configured threshold get the
        # "long entry" treatment in templates.
        return len(self.text) > settings.LONG_ENTRY_LENGTH
registry.register(Entry) | rotv_apps/blog/models.py | from django.db import models
from django.conf import settings
from django.utils.functional import cached_property
from django.urls import reverse
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from ..utils import EnhancedTextField
from tagging.fields import TagField
from tagging import registry
class CategoryQueryset(models.QuerySet):
    # Placeholder queryset; no custom filters yet.
    pass

class CategoryManager(models.Manager):
    # Placeholder manager; combined with CategoryQueryset on Category below.
    pass
class Category(models.Model):
    """A blog category; entries reference it via Entry.categories."""

    name = models.CharField(_(u'Nazwa'), max_length=255)
    slug = models.SlugField(_(u'Adres/Slug'), unique=True)

    # BUG FIX: from_queryset() returns a Manager *subclass*; it must be
    # instantiated with () to serve as the model's manager (cf. Entry.objects
    # below, which does call it). Previously the class itself was assigned.
    objects = CategoryManager.from_queryset(CategoryQueryset)()

    class Meta:
        verbose_name = _(u'Kategoria')
        verbose_name_plural = _(u'Kategorie')
        # Alphabetical default ordering.
        ordering = ['name']

    def __unicode__(self):
        # Python 2 string representation.
        return self.name

    @staticmethod
    def autocomplete_search_fields():
        # Fields searched by the admin autocomplete widget.
        return 'id__iexact', 'name__icontains'
class EntryQuerySet(models.QuerySet):
    """Chainable Entry filters; attached to managers via from_queryset()."""
    def active(self):
        # Entries flagged ready for publication.
        return self.filter(active=True)
    def before(self, date):
        # publish_time at or before *date*.
        return self.filter(publish_time__lte=date)
    def after(self, date):
        # publish_time at or after *date*.
        return self.filter(publish_time__gte=date)
    def published(self):
        # Active entries whose publication moment has passed.
        return self.active().before(timezone.now())
class EntryManager(models.Manager):
    # Default manager; combined with EntryQuerySet via from_queryset() on Entry.
    pass

class PublishedEntryManager(models.Manager):
    # Manager whose base queryset is restricted to currently-live entries.
    def get_queryset(self):
        # NOTE: timezone.now() is evaluated per queryset creation.
        return super(PublishedEntryManager, self).get_queryset().active().before(timezone.now())
class Entry(models.Model):
    """A blog post; visibility = ``active`` flag + ``publish_time``.

    The ``published`` manager below exposes only currently-live entries.
    """

    # NOTE(review): ALIGN_CHOICES is not referenced by any field in this
    # chunk; presumably consumed elsewhere -- confirm.
    ALIGN_CHOICES = (
        ('alignleft', _(u'Do lewej')),
        ('alignright', _(u'Do prawej')),
        ('aligncenter', _(u'Do środka'))
    )

    title = models.CharField(_(u'Tytuł'), max_length=255)
    slug = models.SlugField(_(u'Adres/Slug'), unique=True)
    text = EnhancedTextField(_(u'Tresc'))
    # NOTE(review): null=True on a char-based tag field is unconventional.
    new_tags = TagField(_('Tagi'), blank=True, null=True)
    categories = models.ManyToManyField(Category, verbose_name=_('Kategorie'))
    posted = models.DateTimeField(_(u'Utworzono'), auto_now_add=True)   # creation timestamp
    modified = models.DateTimeField(_(u'Zmieniono'), auto_now=True)     # last-save timestamp
    active = models.BooleanField(_(u'Aktywny'),
                                 help_text=_(u'Zaznacz, jeżeli tekst jest gotowy do publikacji (nie notka)'),
                                 default=False)
    # Default is the *callable* timezone.now, evaluated at instance creation.
    publish_time = models.DateTimeField(_('Czas publikacji'), default=timezone.now)
    image = models.ImageField(_(u'Obraz szeroki (cover)'), blank=True, null=True, upload_to='blog-images')
    image_right = models.ImageField(_(u'Obraz wąski/pływający'), blank=True, null=True, upload_to='blog-images')
    related_event = models.ForeignKey('partners.MediaPatronage', blank=True, null=True, related_name='blog_entries', on_delete=models.CASCADE)

    # Default manager, and a manager pre-filtered to live entries.
    objects = EntryManager.from_queryset(EntryQuerySet)()
    published = PublishedEntryManager.from_queryset(EntryQuerySet)()

    class Meta:
        verbose_name = _(u'Wpis')
        verbose_name_plural = _(u'Wpisy')
        # Newest-first, pk as deterministic tie-breaker.
        ordering = ['-publish_time', '-posted', '-pk']

    def __unicode__(self):
        # Python 2 string representation.
        return self.title

    def get_text_length(self):
        """Length of the raw entry text in characters."""
        return len(self.text)

    def get_absolute_url(self):
        """Canonical URL of this entry.

        NOTE(review): year/month come from ``posted`` (creation time), not
        ``publish_time`` -- verify the 'blog_entry' URL pattern expects that.
        """
        return reverse('blog_entry', kwargs={'year': str(self.posted.year),
                                             'month': str(self.posted.month),
                                             'slug': str(self.slug)})

    @cached_property
    def is_long(self):
        """True when the text exceeds settings.LONG_ENTRY_LENGTH (cached per instance)."""
        return len(self.text) > settings.LONG_ENTRY_LENGTH
# Register the Entry model with the tagging registry (django-tagging).
registry.register(Entry)
__author__ = '<EMAIL> (<NAME>)'
import re
from base_parser import BaseParser, Identity
class HttpOutgoingParser(BaseParser):
    """Extracts identity hints from outgoing (client-side) HTTP payloads.

    Yields Identity records for browser fingerprints and logins leaked by
    several services (Wordpress admin, Google Talk/GMail, Gravatar, Brizzly,
    generic e-mail form POSTs).
    """

    # TCP destination ports this parser is applied to.
    DPORTS = [80, 8000, 8080]
    PROTOCOL = 'HTTP'
    # NOTE(review): the '%s' placeholder here is never interpolated, so this
    # pattern matches the literal text "%s" and can never capture a real
    # login. It likely should be built with '% EMAIL_REGEXP' like the ad-hoc
    # patterns inside parse() -- confirm against base_parser. Also consider
    # raw strings (r'...') for these regexes to avoid invalid-escape issues.
    GMAIL_CHAT_RE = re.compile('\; gmailchat=(%s)\/')
    # Gravatar cookie: the value before the URL-encoded '|' (%7C) separator.
    GRAVATAR_RE = re.compile('Cookie: gravatar=([\w]+)%7C')
    # Everything on the User-Agent header line up to the CR.
    AGENT_RE = re.compile('User-Agent: (\w.*?)\r')

    def parse(self, pkt, payload):
        """Yield Identity records found in one packet's payload.

        pkt: packet object (not inspected here; part of the parser
        interface). payload: raw HTTP request text; may be empty/None,
        in which case a single None is yielded. Each Identity carries a
        0..1 certainty score assigned per heuristic.
        """
        if not payload:
            yield None
        else:
            # Browser fingerprint from the User-Agent header.
            match = self.AGENT_RE.search(payload)
            if match:
                yield Identity(service='Browser', event='Request', type='browser_version',
                               value=match.group(1), certainty=0.5)
            # Wordpress admin access: record the target host.
            if 'POST /wp-admin/' in payload:
                match = re.search('Host: ([\w\.]+)', payload)
                if match:
                    yield Identity(service='Wordpress', event='Admin', type='url',
                                   value=match.group(1), certainty=0.7)
            # Google Talk cookie (see NOTE on GMAIL_CHAT_RE above: as written
            # this pattern cannot match real traffic).
            match = self.GMAIL_CHAT_RE.search(payload)
            if match:
                yield Identity(service='Google Talk', event='Update', type='login',
                               value=match.group(1), certainty=0.8)
                yield Identity(service='Google Account', event='Update', type='login',
                               value=match.group(1), certainty=0.5)
            # GMail web client: login leaked via the gausr query parameter.
            elif 'GET /mail/' in payload:
                match = re.search('\&gausr=(%s)' % self.EMAIL_REGEXP, payload)
                if match:
                    yield Identity(service='Google Account', event='Access', type='login',
                                   value=match.group(1), certainty=0.8)
                    yield Identity(service='Gmail', event='Access', type='login',
                                   value=match.group(1), certainty=0.8)
                    yield Identity(service='Gmail', event='Access', type='email',
                                   value=match.group(1), certainty=0.5)
            # Gravatar login from its cookie.
            match = self.GRAVATAR_RE.search(payload)
            if match:
                yield Identity(service='Gravatar', event='Access', type='login',
                               value=match.group(1), certainty=1)
            # brizzly.com: login embedded in a URL-encoded "Brizzly  / <user>" blob.
            if 'Brizzly%20%20%2F%20' in payload:
                match = re.search('Brizzly%20%20%2F%20(\w+)%0A', payload)
                if match:
                    yield Identity(service='Brizzly', event='Access', type='login',
                                   value=match.group(1), certainty=1)
            # Generic form POST carrying an email field.
            elif '&email=' in payload:
                match = re.search('&email=(%s)' % self.EMAIL_REGEXP, payload)
                if match:
                    yield Identity(service='E-Mail', event='POST', type='email',
value=match.group(1), certainty=0.5) | parsers/http_outgoing.py | __author__ = '<EMAIL> (<NAME>)'
import re
from base_parser import BaseParser, Identity
class HttpOutgoingParser(BaseParser):
    """Extracts identity hints from outgoing (client-side) HTTP payloads."""

    # TCP destination ports this parser is applied to.
    DPORTS = [80, 8000, 8080]
    PROTOCOL = 'HTTP'
    # NOTE(review): the '%s' placeholder is never interpolated, so this
    # pattern matches the literal text "%s" and can never capture a real
    # login; likely should be built with '% EMAIL_REGEXP' -- confirm.
    GMAIL_CHAT_RE = re.compile('\; gmailchat=(%s)\/')
    # Gravatar cookie: the value before the URL-encoded '|' (%7C) separator.
    GRAVATAR_RE = re.compile('Cookie: gravatar=([\w]+)%7C')
    # Everything on the User-Agent header line up to the CR.
    AGENT_RE = re.compile('User-Agent: (\w.*?)\r')

    def parse(self, pkt, payload):
        """Yield Identity records found in one packet's payload.

        Yields None for an empty payload; otherwise runs each service
        heuristic in turn, attaching a 0..1 certainty score per match.
        """
        if not payload:
            yield None
        else:
            # Browser fingerprint from the User-Agent header.
            match = self.AGENT_RE.search(payload)
            if match:
                yield Identity(service='Browser', event='Request', type='browser_version',
                               value=match.group(1), certainty=0.5)
            # Wordpress admin access: record the target host.
            if 'POST /wp-admin/' in payload:
                match = re.search('Host: ([\w\.]+)', payload)
                if match:
                    yield Identity(service='Wordpress', event='Admin', type='url',
                                   value=match.group(1), certainty=0.7)
            # Google Talk cookie (see NOTE on GMAIL_CHAT_RE above).
            match = self.GMAIL_CHAT_RE.search(payload)
            if match:
                yield Identity(service='Google Talk', event='Update', type='login',
                               value=match.group(1), certainty=0.8)
                yield Identity(service='Google Account', event='Update', type='login',
                               value=match.group(1), certainty=0.5)
            # GMail web client: login leaked via the gausr query parameter.
            elif 'GET /mail/' in payload:
                match = re.search('\&gausr=(%s)' % self.EMAIL_REGEXP, payload)
                if match:
                    yield Identity(service='Google Account', event='Access', type='login',
                                   value=match.group(1), certainty=0.8)
                    yield Identity(service='Gmail', event='Access', type='login',
                                   value=match.group(1), certainty=0.8)
                    yield Identity(service='Gmail', event='Access', type='email',
                                   value=match.group(1), certainty=0.5)
            # Gravatar login from its cookie.
            match = self.GRAVATAR_RE.search(payload)
            if match:
                yield Identity(service='Gravatar', event='Access', type='login',
                               value=match.group(1), certainty=1)
            # brizzly.com: login embedded in a URL-encoded "Brizzly  / <user>" blob.
            if 'Brizzly%20%20%2F%20' in payload:
                match = re.search('Brizzly%20%20%2F%20(\w+)%0A', payload)
                if match:
                    yield Identity(service='Brizzly', event='Access', type='login',
                                   value=match.group(1), certainty=1)
            # Generic form POST carrying an email field.
            elif '&email=' in payload:
                match = re.search('&email=(%s)' % self.EMAIL_REGEXP, payload)
                if match:
                    yield Identity(service='E-Mail', event='POST', type='email',
value=match.group(1), certainty=0.5) | 0.296451 | 0.085327 |
from skimage.metrics import structural_similarity
from skimage.metrics import peak_signal_noise_ratio as psnr
from skimage import img_as_float32
import numpy as np
def normalize(x, pmin=3, pmax=99.8, axis=None, clip=False, eps=1e-20, dtype=np.float32):
    """Percentile-based image normalization (adapted from Martin Weigert).

    Maps the pmin-th percentile of *x* to 0 and the pmax-th to 1 by
    delegating to normalize_mi_ma; clip/eps/dtype are forwarded unchanged.
    """
    lo = np.percentile(x, pmin, axis=axis, keepdims=True)
    hi = np.percentile(x, pmax, axis=axis, keepdims=True)
    return normalize_mi_ma(x, lo, hi, clip=clip, eps=eps, dtype=dtype)
def normalize_mi_ma(x, mi, ma, clip=False, eps=1e-20, dtype=np.float32):
    """Affinely map *x* so that mi -> 0 and ma -> 1 (adapted from Martin Weigert).

    mi/ma may be scalars or arrays broadcastable against x; eps guards the
    denominator when ma == mi. With clip=True the output is clamped to
    [0, 1]. dtype=None skips all casting.
    """
    if dtype is not None:
        def _cast(v):
            # Scalars go through the dtype constructor, arrays through astype.
            return dtype(v) if np.isscalar(v) else v.astype(dtype, copy=False)
        x = x.astype(dtype, copy=False)
        mi, ma, eps = _cast(mi), _cast(ma), _cast(eps)
    try:
        # Use numexpr for a single multi-threaded pass when it is installed.
        import numexpr
        x = numexpr.evaluate("(x - mi) / ( ma - mi + eps )")
    except ImportError:
        x = (x - mi) / (ma - mi + eps)
    return np.clip(x, 0, 1) if clip else x
def norm_minmse(gt, x, normalize_gt=True):
    """Normalize and affinely rescale an image pair to minimize their MSE.

    (Adapted from Martin Weigert.)

    Parameters
    ----------
    gt : ndarray
        the ground truth image
    x : ndarray
        the image that will be affinely scaled onto gt
    normalize_gt : bool
        percentile-normalize gt first (default True)

    Returns
    -------
    gt_scaled, x_scaled
    """
    if normalize_gt:
        gt = normalize(gt, 0.1, 99.9, clip=False).astype(np.float32, copy=False)
    # Zero-center both images before estimating the affine scale.
    x = x.astype(np.float32, copy=False) - np.mean(x)
    gt = gt.astype(np.float32, copy=False) - np.mean(gt)
    # NOTE(review): np.cov defaults to ddof=1 while np.var defaults to ddof=0,
    # so this scale carries an n/(n-1) factor versus the exact least-squares
    # solution sum(x*gt)/sum(x*x). Negligible for whole images; kept as-is to
    # stay numerically identical to the upstream (CSBDeep) implementation.
    xf = x.flatten()
    scale = np.cov(xf, gt.flatten())[0, 1] / np.var(xf)
    return gt, scale * x
#--------------------------------------------------------------
def get_full_file_paths(folder):
    """Recursively collect the joined (root, name) path of every file under *folder*."""
    import os
    paths = []
    for root, _dirs, names in os.walk(folder):
        paths.extend(os.path.join(root, name) for name in names)
    return paths
def create_shift_image(img, y_shift, x_shift):
    """Shift a 2D image by (y_shift, x_shift) pixels, replicating one edge line.

    Used to undo the constant spatial shift introduced by the network.
    Positive x_shift moves content right (left column replicated), negative
    moves it left (right column replicated); positive y_shift moves content
    down, negative up. The output keeps the input's height and width.
    NOTE(review): the per-pixel concatenate loop is O(shift) array copies;
    np.pad(mode='edge') plus a crop would do this in one step -- confirm
    exact equivalence before changing.
    """
    y_dim = img.shape[-2]
    x_dim = img.shape[-1]
    if x_shift < 0:
        # Replicate the rightmost column |x_shift| times, then crop the left.
        line = img[:, -1:]
        for i in range(0, x_shift, -1):
            img = np.concatenate((img, line), axis=1)  # pad on the right
        img = img[:, abs(x_shift):]
    if x_shift > 0:
        # Replicate the leftmost column x_shift times, then crop to width.
        line = img[:, :1]
        for i in range(0, x_shift):
            img = np.concatenate((line, img), axis=1)  # pad on the left
        img = img[:, :x_dim]
    if y_shift < 0:
        # Replicate the bottom row |y_shift| times, then crop the top.
        line = img[-1:, :]
        for i in range(0, y_shift, -1):
            img = np.concatenate((img, line), axis=0)  # pad at the bottom
        img = img[abs(y_shift):]
    if y_shift > 0:
        # Replicate the top row y_shift times, then crop to height.
        line = img[:1, :]
        for i in range(0, y_shift):
            img = np.concatenate((line, img), axis=0)  # pad at the top
        img = img[:y_dim, :]
return img | ZS4Mic/load_functions/quality_metrics_estimation.py | from skimage.metrics import structural_similarity
from skimage.metrics import peak_signal_noise_ratio as psnr
from skimage import img_as_float32
import numpy as np
def normalize(x, pmin=3, pmax=99.8, axis=None, clip=False, eps=1e-20, dtype=np.float32):
    """Percentile-based image normalization (adapted from Martin Weigert).

    Maps the pmin-th percentile of x to 0 and the pmax-th to 1 via
    normalize_mi_ma; clip/eps/dtype are forwarded unchanged.
    """
    mi = np.percentile(x, pmin, axis=axis, keepdims=True)
    ma = np.percentile(x, pmax, axis=axis, keepdims=True)
    return normalize_mi_ma(x, mi, ma, clip=clip, eps=eps, dtype=dtype)
def normalize_mi_ma(x, mi, ma, clip=False, eps=1e-20, dtype=np.float32):
    """Affinely map x so that mi -> 0 and ma -> 1 (adapted from Martin Weigert).

    mi/ma may be scalars or arrays broadcastable against x; eps guards the
    denominator when ma == mi. With clip=True the output is clamped to
    [0, 1]. dtype=None skips all casting.
    """
    if dtype is not None:
        x = x.astype(dtype, copy=False)
        # Scalars go through the dtype constructor, arrays through astype.
        mi = dtype(mi) if np.isscalar(mi) else mi.astype(dtype, copy=False)
        ma = dtype(ma) if np.isscalar(ma) else ma.astype(dtype, copy=False)
        eps = dtype(eps)
    try:
        # Use numexpr for a single multi-threaded pass when it is installed.
        import numexpr
        x = numexpr.evaluate("(x - mi) / ( ma - mi + eps )")
    except ImportError:
        x = (x - mi) / (ma - mi + eps)
    if clip:
        x = np.clip(x, 0, 1)
    return x
def norm_minmse(gt, x, normalize_gt=True):
    """Normalize and affinely rescale an image pair to minimize their MSE.

    (Adapted from Martin Weigert.)

    Parameters
    ----------
    gt: ndarray
        the ground truth image
    x: ndarray
        the image that will be affinely scaled
    normalize_gt: bool
        set to True of gt image should be normalized (default)

    Returns
    -------
    gt_scaled, x_scaled
    """
    if normalize_gt:
        gt = normalize(gt, 0.1, 99.9, clip=False).astype(np.float32, copy=False)
    # Zero-center both images before estimating the affine scale.
    x = x.astype(np.float32, copy=False) - np.mean(x)
    gt = gt.astype(np.float32, copy=False) - np.mean(gt)
    # NOTE(review): np.cov defaults to ddof=1 while np.var defaults to ddof=0,
    # so this scale carries an n/(n-1) factor versus the exact least-squares
    # solution sum(x*gt)/sum(x*x). Negligible for whole images; kept as-is to
    # match the upstream (CSBDeep) implementation.
    scale = np.cov(x.flatten(), gt.flatten())[0, 1] / np.var(x.flatten())
    return gt, scale * x
#--------------------------------------------------------------
def get_full_file_paths(folder):
    """Return the joined (root, name) path of every file found recursively under folder."""
    import os
    list_files = []
    for root, dirs, files in os.walk(folder):
        for file in files:
            file_path = os.path.join(root, file)
            list_files.append(file_path)
    return list_files
def create_shift_image(img, y_shift, x_shift):
    """Shift a 2D image by (y_shift, x_shift) pixels, replicating one edge line.

    Used to undo the constant spatial shift introduced by the network.
    Positive x_shift moves content right, negative left; positive y_shift
    moves content down, negative up. Output keeps the input's height/width.
    NOTE(review): np.pad(mode='edge') plus a crop would replace the O(shift)
    concatenate loop -- confirm exact equivalence before changing.
    """
    y_dim = img.shape[-2]
    x_dim = img.shape[-1]
    if x_shift < 0:
        # Replicate the rightmost column |x_shift| times, then crop the left.
        line = img[:, -1:]
        for i in range(0, x_shift, -1):
            img = np.concatenate((img, line), axis=1)  # pad on the right
        img = img[:, abs(x_shift):]
    if x_shift > 0:
        # Replicate the leftmost column x_shift times, then crop to width.
        line = img[:, :1]
        for i in range(0, x_shift):
            img = np.concatenate((line, img), axis=1)  # pad on the left
        img = img[:, :x_dim]
    if y_shift < 0:
        # Replicate the bottom row |y_shift| times, then crop the top.
        line = img[-1:, :]
        for i in range(0, y_shift, -1):
            img = np.concatenate((img, line), axis=0)  # pad at the bottom
        img = img[abs(y_shift):]
    if y_shift > 0:
        # Replicate the top row y_shift times, then crop to height.
        line = img[:1, :]
        for i in range(0, y_shift):
            img = np.concatenate((line, img), axis=0)  # pad at the top
        img = img[:y_dim, :]
return img | 0.865679 | 0.472379 |