text stringlengths 957 885k |
|---|
<reponame>smehdia/NTIRE2021-IQA-MACS<filename>model_attention.py
import tensorflow as tf
from tensorflow.keras.layers import *
from tensorflow.keras.models import Model
'''
model architecture with Batch Normalization, Attention and Residual Blocks
'''
def normalize_tensor_image(inp):
    """Convert `inp` to a float32 tensor and rescale [0, 255] -> [-1, 1]."""
    tensor = tf.convert_to_tensor(inp)
    tensor = tf.dtypes.cast(tensor, tf.float32)
    return (tensor - 127.5) / 127.5
def res_block(x_in):
    """Residual block: two same-padded 3x3 convs plus an identity skip."""
    n_filters = x_in.shape[-1]
    branch = Conv2D(n_filters, 3, padding='same', activation='relu')(x_in)
    branch = Conv2D(n_filters, 3, padding='same')(branch)
    return Add()([x_in, branch])
def get_shared_model_bn(nc, nfs, kss, l2regconv, alpha):
    """Build the shared 4-stage conv encoder (Conv -> LeakyReLU -> BN,
    with 2x max-pooling between stages).

    Returns a Model mapping a (288, 288, nc) input to the list of the four
    stage outputs, from shallowest to deepest.
    """
    reg = tf.keras.regularizers.l2(l2regconv)
    inp_tensor = Input(shape=(288, 288, nc))
    # initial 3-channel projection before the first encoder stage
    x = Conv2D(3, (kss[0], kss[0]), padding='same', kernel_regularizer=reg)(inp_tensor)
    stage_outputs = []
    for stage, (n_filters, k) in enumerate(zip(nfs[:4], kss)):
        if stage > 0:
            x = MaxPooling2D(2)(x)
        x = Conv2D(n_filters, (k, k), padding='same', kernel_regularizer=reg)(x)
        x = LeakyReLU(alpha)(x)
        x = BatchNormalization()(x)
        stage_outputs.append(x)
    model = Model(inp_tensor, stage_outputs)
    model.summary()
    return model
def attention(inp_tensor, filters1):
    """Channel + spatial attention over a shared feature map, concatenated
    with the input and projected back to `filters1` channels."""
    features = Conv2D(filters1, 3, padding='same', activation='relu')(inp_tensor)
    features = BatchNormalization()(features)
    features = Conv2D(filters1, 3, padding='same')(features)
    # channel attention: global squeeze, two 1x1 convs, sigmoid gate
    squeeze = GlobalAveragePooling2D()(features)
    squeeze = Reshape([1, 1, filters1])(squeeze)
    squeeze = Conv2D(filters1, 1, activation='relu')(squeeze)
    squeeze = Conv2D(filters1, 1, activation='sigmoid')(squeeze)
    channel_attention = Lambda(lambda t: tf.multiply(t[0], t[1]))([features, squeeze])
    # spatial attention: per-pixel sigmoid gate
    gate = Conv2D(filters1, 1, activation='relu')(features)
    gate = Conv2D(filters1, 1, activation='sigmoid')(gate)
    spatial_attention = Lambda(lambda t: tf.multiply(t[0], t[1]))([features, gate])
    merged = Concatenate()([inp_tensor, channel_attention, spatial_attention])
    return Conv2D(filters1, 1)(merged)
def get_model_attention(num_channels, nfs, kss, l2regfactors, alpha, dropout_factor, num_dense, num_resblocks,
                        attention_flag):
    """Build the full two-input IQA model with BN, attention and residual blocks.

    Both 288x288 inputs are encoded by one shared (siamese) encoder; a decoder
    upsamples the deepest features while concatenating the absolute feature
    differences of the two streams at every scale, then reduces back down with
    strided convs, optional attention, residual blocks and a dense head that
    outputs a single score.
    """
    inp_tensor1 = Input(shape=(288, 288, num_channels))
    inp_tensor2 = Input(shape=(288, 288, num_channels))
    # scale raw [0, 255] images to [-1, 1]
    n_img1 = normalize_tensor_image(inp_tensor1)
    n_img2 = normalize_tensor_image(inp_tensor2)
    total_inp_1 = n_img1
    total_inp_2 = n_img2
    l2reg_dense, l2regconv = l2regfactors
    # single encoder applied to both inputs -> shared weights
    shared_model = get_shared_model_bn(total_inp_1.shape[-1], nfs, kss, l2regconv, alpha)
    [y1_1, y2_1, y3_1, y4_1] = shared_model(total_inp_1)
    [y1_2, y2_2, y3_2, y4_2] = shared_model(total_inp_2)
    # NOTE(review): the decoder seed is built from the second stream only
    # (y4_2) — confirm this asymmetry between the two inputs is intended.
    branch = MaxPooling2D(2)(y4_2)
    branch = Conv2D(nfs[4], (3, 3), padding='same', kernel_regularizer=tf.keras.regularizers.l2(l2regconv))(branch)
    branch = LeakyReLU(alpha)(branch)
    branch = BatchNormalization()(branch)
    # upsample and fuse the |difference| of encoder features, deepest first
    branch = UpSampling2D(2)(branch)
    diff_1 = tf.math.abs(y4_1 - y4_2)
    branch = Concatenate()([branch, diff_1])
    branch = Conv2D(nfs[5], (3, 3), padding='same', kernel_regularizer=tf.keras.regularizers.l2(l2regconv))(branch)
    branch = LeakyReLU(alpha)(branch)
    branch = BatchNormalization()(branch)
    branch = UpSampling2D(2)(branch)
    diff_2 = tf.math.abs(y3_1 - y3_2)
    branch = Concatenate()([branch, diff_2])
    branch = Conv2D(nfs[6], (3, 3), padding='same', kernel_regularizer=tf.keras.regularizers.l2(l2regconv))(branch)
    branch = LeakyReLU(alpha)(branch)
    branch = BatchNormalization()(branch)
    branch = UpSampling2D(2)(branch)
    diff_3 = tf.math.abs(y2_1 - y2_2)
    branch = Concatenate()([branch, diff_3])
    branch = Conv2D(nfs[7], (3, 3), padding='same', kernel_regularizer=tf.keras.regularizers.l2(l2regconv))(branch)
    branch = LeakyReLU(alpha)(branch)
    branch = BatchNormalization()(branch)
    branch = UpSampling2D(2)(branch)
    diff_4 = tf.math.abs(y1_1 - y1_2)
    branch = Concatenate()([branch, diff_4])
    # shrink back down with strided convs.
    # NOTE(review): nfs[9] sizes both strided convs while nfs[8] is used only
    # by the attention block below — confirm the index mapping is intended.
    branch = Conv2D(nfs[9], (3, 3), padding='same', strides=(4, 4),
                    kernel_regularizer=tf.keras.regularizers.l2(l2regconv))(branch)
    branch = LeakyReLU(alpha)(branch)
    branch = BatchNormalization()(branch)
    branch = Conv2D(nfs[9], (3, 3), padding='same', strides=(2, 2),
                    kernel_regularizer=tf.keras.regularizers.l2(l2regconv))(branch)
    branch = LeakyReLU(alpha)(branch)
    branch = BatchNormalization()(branch)
    if attention_flag:
        branch = attention(branch, nfs[8])
    for i in range(num_resblocks):
        branch = res_block(branch)
    # regression head: ReLU dense, dropout, tanh bottleneck, linear output
    branch = Flatten()(branch)
    branch = Dense(num_dense, activation='relu', kernel_regularizer=tf.keras.regularizers.l2(l2reg_dense))(branch)
    branch = Dropout(dropout_factor)(branch)
    branch = Dense(1, activation='tanh')(branch)
    branch = Dense(1)(branch)
    model = Model([inp_tensor1, inp_tensor2], branch)
    # tf.keras.utils.plot_model(
    #     model, to_file='model.png',
    #     expand_nested=True)
    model.summary()
    return model
|
<filename>nucleotides/filesystem.py
"""\
Module for interacting with the filesystem relative to the current nucleotides task
directory. Each nucleotides benchmarking task takes place in a directory named for
the nucleotides task ID. This module functions to simplify getting the location of
where input files can be found, and where output files should be created.
"""
import os.path, json, funcy, sys
import ruamel.yaml as yaml
import boltons.fileutils as fu
import nucleotides.util as util
#########################################
#
# Files and directories with the nucleotides task
#
#########################################
def get_task_dir_path(app, location):
    """
    Return the path to the given sub directory within the nucleotides task
    directory. Creates the directory if it does not already exist.
    """
    dir_path = os.path.join(app['path'], location)
    fu.mkdir_p(dir_path)
    # return the already-computed path instead of recomputing the join
    return dir_path
def get_task_file_path(app, location):
    """
    Return the path to the given file within the given nucleotides task
    directory. Creates the parent directory if it does not already exist.
    """
    full_path = os.path.join(app['path'], location)
    fu.mkdir_p(os.path.dirname(full_path))
    return full_path
def get_task_path_file_without_name(app, name):
    """
    Return the path for the first file listed within the given nucleotides task
    directory. Used to get file paths from directories when the file name is
    not known. Exits the process when the directory is empty or ambiguous.
    """
    path = get_task_dir_path(app, name)
    entries = os.listdir(path)
    if len(entries) == 1:
        return os.path.join(path, entries[0])
    if not entries:
        app['logger'].fatal("No files found in {}".format(path))
    else:
        app['logger'].fatal("Multiple files found in path {}".format(path))
    sys.exit(1)
def biobox_yaml_exists(app):
    """
    Was the biobox.yaml file created by the container?
    """
    biobox_path = get_task_file_path(app, 'tmp/biobox.yaml')
    return os.path.isfile(biobox_path)
def get_output_biobox_file_contents(app):
    """
    Return the contents of the biobox.yaml file generated by the Docker
    container (only the 'arguments' section of the parsed YAML).
    """
    # NOTE(review): ruamel's yaml.load() without an explicit Loader is
    # deprecated and unsafe on untrusted input — confirm the file is trusted
    # or switch to a safe loader.
    with open(get_task_file_path(app, 'tmp/biobox.yaml')) as f:
        return yaml.load(f.read())['arguments']
def get_biobox_yaml_value(app, yaml_path):
    """
    Given an xpath-type look up, returns the value in the biobox.yaml file
    """
    contents = get_output_biobox_file_contents(app)
    lookup = yaml_path + ['value']
    return funcy.get_in(contents, lookup)
#########################################
#
# Misc file operations
#
#########################################
# http://stackoverflow.com/a/4213255/91144
def sha_digest(filename):
    """
    Returns the sha256sum for a given file path, reading in small chunks so
    large files are never held in memory at once.
    """
    import hashlib
    digest = hashlib.sha256()
    with open(filename, 'rb') as handle:
        while True:
            chunk = handle.read(digest.block_size)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
def copy_file_to_outputs(app, src_file, dst_dir):
    """
    Copies a Docker container generated file to the output directory. The name
    of the file will be the 10-character truncated sha256sum of the file
    contents.
    """
    import shutil
    src = get_task_file_path(app, src_file)
    digest_prefix = sha_digest(src)[:10]
    dst = get_task_file_path(app, 'outputs/{}/{}'.format(dst_dir, digest_prefix))
    fu.mkdir_p(os.path.dirname(dst))
    shutil.copy(src, dst)
def create_runtime_metric_file(app, metrics):
    """
    Parses the raw cgroup data collected from the Docker container into a new
    file containing a JSON dictionary of key/value nucleotides metrics
    suitable for upload to the nucleotides API.
    """
    import gzip
    dst = get_task_file_path(app, 'outputs/container_runtime_metrics/metrics.json.gz')
    with gzip.open(dst, 'w') as f:
        # BUG FIX: gzip.open(..., 'w') yields a binary stream, so writing the
        # str from json.dumps raises TypeError on Python 3; encode to bytes
        # (also valid on Python 2).
        f.write(json.dumps(metrics).encode('utf-8'))
def copy_container_output_files(app, paths):
    """
    Given a dictionary of files that are expected to be generated by the
    Docker container, copies them to the corresponding destination
    subdirectory under ./outputs/. Skips files that do not exist.
    """
    for dir_name, src_path in paths.items():
        if not os.path.isfile(get_task_file_path(app, src_path)):
            app['logger'].warn("Expected {} file not found '{}'".format(dir_name, src_path))
            continue
        copy_file_to_outputs(app, src_path, dir_name)
        app['logger'].info("Copied generated {} file '{}'".format(dir_name, src_path))
|
<reponame>susuhahnml/xls2asp
#!/usr/bin/env python3
"""
Converts an instance given as a set of excel tables
into a set of asp facts.
Input: Excel xlsx file
Output: Logic program instance file
"""
import warnings
import csv
import argparse
import sys
import traceback
import openpyxl as xls
import math
import warnings
import re
import datetime
from operator import itemgetter
# list all styles and types
# Sheet layouts accepted by the template (see Instance.write_* methods) and
# the cell types accepted per column (see Instance.get_test).
# NOTE(review): Instance.get_test also recognises "time2time", which is not
# listed here — confirm whether it should be accepted in templates.
list_of_styles = ["sparse_matrix_xy", "matrix_xy", "row", "row_indexed"]
list_of_types = ["auto_detect", "skip", "int",
                 "constant", "time", "date", "datetime", "string"]
def write_category_comment(output, pred):
    """Write a three-line '%' banner naming `pred` to the `output` stream."""
    banner = '%' * (len(pred) + 6)
    output.write(banner + '\n')
    output.write('%% ' + pred + ' %%\n')
    output.write(banner + '\n')
class Xls2AspError(ValueError):
    """ValueError carrying the sheet and (row, column) where parsing failed."""

    def __init__(self, msg, sheet="?", cell=(1, 0)):
        super().__init__(msg)
        # location information for the error report in main()
        self.sheet = sheet
        self.cell = cell
class TableNameError(ValueError):
    """Raised when a sheet name cannot be used as a gringo constant."""

    def __init__(self, table):
        super().__init__(
            "Name of a tables must respect the syntax of gringo constants", table)
class SheetRowColumnWrongTypeValueError(ValueError):
    """Raised when a cell value fails the type check for its column."""

    def __init__(self, table, row, col, msg, value=None):
        column_letter = xls.utils.cell.get_column_letter(col+1)
        message = 'Wrong type in sheet "{}" row "{}" column "{}": {}'.format(
            table, row, column_letter, msg)
        super().__init__(message, value)
class Conversion:
    """Static helpers for validating cell values and rendering them as ASP
    terms.

    Values may be single items or ";"-separated pools; pools are rendered as
    gringo term pools "(a;b;...)".
    """

    @staticmethod
    def col2letter(col):
        """Return the spreadsheet column letter for a 1-based column index."""
        return xls.utils.cell.get_column_letter(col)

    @staticmethod
    def date2tuple(value):
        """Render a datetime.date as the term "(day,month,year)"."""
        return "("+str(value.day)+","+str(value.month)+","+str(value.year)+")"

    @staticmethod
    def datetime2tuple(value):
        """Render a datetime.datetime as "((d,m,y),(h,m,s))"."""
        return "("+Conversion.date2tuple(value)+","+Conversion.time2tuple(value)+")"

    @staticmethod
    def time2tuple(value):
        """Render a datetime.time as "(hour,minute,second)"."""
        return "("+str(value.hour)+","+str(value.minute)+","+str(value.second)+")"

    @staticmethod
    def is_int(value):
        """True if value is an integer or a ";"-separated pool of integers."""
        return Conversion.is_single_int(value) or Conversion.is_set_of_int(value)

    @staticmethod
    def is_single_int(value):
        """True when value converts cleanly to a single integer."""
        try:
            return int(value) == float(value)
        except (TypeError, ValueError, AttributeError):
            return False

    @staticmethod
    def is_set_of_int(value):
        """True when every ";"-separated element of value is an integer.

        BUG FIX: the previous version returned True after checking only the
        first element (the return sat inside the loop), so pools like "1;x"
        were accepted and emitted as malformed facts. All elements are now
        validated.
        """
        try:
            return all(int(i) == float(i) for i in value.split(";"))
        except (TypeError, ValueError, AttributeError):
            return False

    @staticmethod
    def normalize_int(value):
        """Render an int value, wrapping ";"-pools in parentheses."""
        if Conversion.is_single_int(value):
            return value
        else:
            return "("+value+")"

    @staticmethod
    def is_single_string(value):
        """True when value contains no ";" pool separator."""
        return len(value.split(";")) < 2

    @staticmethod
    def normalize_string(value):
        """Quote a string; render ";"-pools as ("a";"b";...)."""
        if Conversion.is_single_string(value):
            return "\""+value+"\""
        else:
            split = value.split(";")
            r = "(" + "\""+split[0]+"\""
            for s in split[1:]:
                r += ";\""+s+"\""
            r += ")"
            return r

    @staticmethod
    def make_predicate(value):
        """Return value as a valid predicate name, lower-casing the first
        letter when needed; raises TableNameError when impossible."""
        if Conversion.is_single_constant(value):
            return value
        val = value[0].lower()+value[1:]
        if Conversion.is_single_constant(val):
            return val
        raise TableNameError(value)

    @staticmethod
    def is_single_constant(value):
        """
        ensures gringo constant syntax
        """
        const_regex = r"_*[a-z][A-Za-z0-9_']*"
        return isinstance(value, str) and re.fullmatch(const_regex, value) is not None

    @staticmethod
    def is_set_of_constant(value):
        """True when every ";"-separated element is a gringo constant."""
        if not isinstance(value, str):
            return False
        return all(Conversion.is_single_constant(s) for s in value.split(";"))

    @staticmethod
    def is_asp_constant(value):
        """
        ensures lowercase and no leading or trailing blanks, no whitespace in
        between; accepts single constants and ";"-pools of constants
        """
        return Conversion.is_single_constant(value) or Conversion.is_set_of_constant(value)

    @staticmethod
    def normalize_constant(value):
        """Render a constant value, wrapping ";"-pools in parentheses."""
        if Conversion.is_single_constant(value):
            return value
        else:
            return "("+value+")"
class Template:
    """
    Class for reading template files.

    A template is a CSV-like file with one line per table:
        table, style, type[=default], type[=default], ...
    '%' starts a comment. The parsed result lives in self.template as
    table -> {"style": ..., "types": [...], "default": [...]}.
    """

    def __init__(self):
        self.template = {}

    def read(self, fileName):
        """Parse the template file at `fileName` into self.template."""
        with open(fileName, "r") as f:
            for line in f:
                line = line.split("%")[0]  # strip comments
                reader = csv.reader([line], skipinitialspace=True)
                fields = next(reader)
                if len(fields) == 0:
                    continue
                table = fields[0]
                self.add_table(table)
                style = fields[1].strip()
                if style not in list_of_styles:
                    raise ValueError('style not valid: '+style)
                self.add_style(table, style)
                types = fields[2:]
                if style == "matrix_xy" and len(types) != 3:
                    raise ValueError(
                        '3 types are needed to read in matrix style')
                default = []
                for pos, spec in enumerate(types):
                    parts = spec.split("=")
                    # record the per-column default (None when absent)
                    default.append(parts[1].strip() if len(parts) > 1 else None)
                    col_type = parts[0].strip()
                    # BUG FIX: replace by position; the old list.index(t)
                    # found the first occurrence and could corrupt the list
                    # when two columns shared an identical "type=default"
                    types[pos] = col_type
                    if col_type not in list_of_types:
                        raise ValueError('type not valid: '+spec)
                self.add_types(table, types)
                self.add_default(table, default)
        # (the old explicit f.close() was redundant inside the with-block)

    def add_table(self, table):
        """
        Adds a table and ensures it is unique
        """
        assert table not in self.template, "Duplicate table '%r' in template" % table
        self.template.setdefault(table, {})

    def add_types(self, table, types):
        """
        Adds a predicate types to a table
        """
        self.template.setdefault(table, {}).setdefault("types", types)

    def add_style(self, table, style):
        """Record the output style for `table`."""
        self.template.setdefault(table, {}).setdefault("style", style)

    def add_default(self, table, value):
        """Record the per-column default values for `table`."""
        self.template.setdefault(table, {}).setdefault("default", value)
class Instance:
    """
    Class for maintaining data of an instance file and writing it as ASP
    facts.

    self.data maps a table (sheet) name to a dict with keys:
      "style" - output style name,
      "skip"  - list of column indices to omit,
      "rows"  - dict mapping a 1-based row id to the list of cell values.
    self.template is the parsed Template.template dict.
    """

    def __init__(self, template):
        self.data = {}
        self.template = template

    def add_table(self, table):
        """
        Adds a table and ensures it is unique
        """
        assert table not in self.data, "Duplicate table '%r'" % table
        self.data.setdefault(table, {})

    def correct_table_name(self, table, newname):
        """Register the data and template of `table` under `newname` too."""
        assert newname not in self.data, "Duplicate table '%r' in template" % table
        self.data.setdefault(newname, self.data[table])
        assert newname not in self.template, "Duplicate table '%r' in template" % table
        self.template.setdefault(newname, self.template[table])

    def add_skip(self, table, col=None):
        """
        Adds the index of a column to skip (col=None just initialises the
        skip list)
        """
        if col is None:
            self.data.setdefault(table, {}).setdefault("skip", [])
        else:
            self.data.setdefault(table, {}).setdefault("skip", []).append(col)

    def is_skip(self, table, col):
        """True when column `col` of `table` is marked as skipped."""
        return col in self.data[table].get("skip", [])

    def add_style(self, table, style):
        """
        Adds style to a table
        """
        self.data.setdefault(table, {}).setdefault("style", style)

    def add_row(self, table, id, row):
        """Store `row` (a list of cell values) under row id `id`."""
        self.data.setdefault(table, {}).setdefault(
            "rows", {}).setdefault(id, row)

    def write(self, file):
        """Write every table to `file` in its configured style."""
        for table in self.data:
            style = self.data[table]["style"]
            if style in ["row", "row_indexed"]:
                self.write_table_row_style(table, file, style == 'row_indexed')
            elif style in ["matrix_xy", "sparse_matrix_xy"]:
                self.write_table_matrix_xy_style(
                    table, file, style == 'sparse_matrix_xy')

    def write_table_row_style(self, table, file, prefix_index_argument=False):
        """
        Writes table content to facts row by row
        """
        write_category_comment(file, table)
        for index, row in enumerate(self.data[table]["rows"], 0):
            pred = table+'('
            if prefix_index_argument:
                pred += str(index) + ','
            for col in range(len(self.data[table]["rows"][row])):
                if not self.is_skip(table, col):
                    pred += str(self.data[table]["rows"][row][col])+','
            pred = pred[0:len(pred)-1]  # drop the trailing ','
            pred += ').\n'
            file.write(pred)
        file.write('\n')
        file.write('\n')

    def write_table_matrix_xy_style(self, table, file, sparse=False):
        """
        Writes table content to facts table(x_header, y_header, value).
        Row 1 holds the x headers and column 0 the y headers.
        """
        write_category_comment(file, table)
        for r in self.data[table]["rows"]:
            if r != 1:
                y = self.data[table]["rows"][r][0]
                for col in range(1, len(self.data[table]["rows"][r])):
                    if not self.is_skip(table, col):
                        if not sparse or self.data[table]["rows"][r][col] is not None:
                            pred = table + \
                                '('+str(self.data[table]["rows"][1][col])+','
                            pred += str(y)+','
                            pred += str(self.data[table]
                                        ["rows"][r][col])+').\n'
                            file.write(pred)
        file.write('\n')
        file.write('\n')

    def get_test(self, type):
        """Return the bound checker/normalizer method for a type name."""
        dispatch = {
            "int": self.test_int,
            "constant": self.test_constant,
            "string": self.test_string,
            "time": self.test_time,
            "time2time": self.test_time,
            "date": self.test_date,
            "datetime": self.test_datetime,
            "skip": None,
            "auto_detect": self.test_auto_detect,
        }
        try:
            return dispatch[type]
        except KeyError:
            raise ValueError('Type not valid: '+type)

    def test_string(self, table, row, col, value, default):
        """Check/normalize a string cell, falling back to `default`."""
        if value is None and default is not None:
            return default
        if isinstance(value, str):
            return Conversion.normalize_string(value)
        else:
            raise SheetRowColumnWrongTypeValueError(
                table, row, col, "Expecting a string, getting:", value)

    def test_int(self, table, row, col, value, default):
        """Check/normalize an int cell, falling back to `default`."""
        if value is None and default is not None:
            return default
        if Conversion.is_int(value):
            return Conversion.normalize_int(value)
        else:
            raise SheetRowColumnWrongTypeValueError(
                table, row, col, "Expecting an int, getting:", value)

    def test_constant(self, table, row, col, value, default=None):
        """Check/normalize a constant cell, falling back to `default`."""
        if value is None and default is not None:
            return default
        if Conversion.is_asp_constant(value):
            return Conversion.normalize_constant(value)
        else:
            raise SheetRowColumnWrongTypeValueError(
                table, row, col, "Expecting a time, getting:", value) if False else SheetRowColumnWrongTypeValueError(
                table, row, col, "Expecting a constant, getting:", value)

    def test_time(self, table, row, col, value, default):
        """Check/normalize a time cell, falling back to `default`."""
        if value is None and default is not None:
            return default
        if isinstance(value, datetime.time):
            return Conversion.time2tuple(value)
        else:
            # NOTE(review): a successfully parsed ISO string still falls
            # through to the error branch below — confirm whether parsed
            # times should be returned here.
            try:
                value = datetime.time.fromisoformat(value)
            except Exception:
                pass
            if value == datetime.datetime(1899, 12, 30, 0, 0):
                print(
                    "Warning in table", table, "row ", row, "col ", col)
                print("Expected a time, getting:", value)
                print(
                    "This could a know XLS error for times like 00:00:00, treating this as datetime.time(00:00:00).")
                return "(0,0,0)"
            else:
                raise SheetRowColumnWrongTypeValueError(
                    table, row, col, "Expecting a time, getting:", value)

    def test_time2min(self, table, row, col, value, default=None):
        """Render a time cell as an arithmetic term in minutes."""
        if value is None and default is not None:
            return default
        if isinstance(value, datetime.time):
            return str(value.hour)+"*60+"+str(value.minute)
        else:
            raise SheetRowColumnWrongTypeValueError(
                table, row, col, "Expecting a time, getting:", value)

    def test_datetime(self, table, row, col, value, default=None):
        """Check/normalize a datetime cell, falling back to `default`."""
        if value is None and default is not None:
            return default
        if isinstance(value, datetime.datetime):
            return Conversion.datetime2tuple(value)
        else:
            raise SheetRowColumnWrongTypeValueError(
                table, row, col, "Expecting a datetime, getting:", value)

    def test_date(self, table, row, col, value, default=None):
        """Check/normalize a date cell, falling back to `default`."""
        if value is None and default is not None:
            return default
        if isinstance(value, datetime.date):
            return Conversion.date2tuple(value)
        else:
            raise SheetRowColumnWrongTypeValueError(
                table, row, col, "Expecting a date, getting:", value)

    def test_auto_detect(self, table, row, col, value, default=None):
        """Guess the cell type (int, time, datetime, date, constant,
        string — in that order) and normalize accordingly."""
        if value is None and default is not None:
            return default
        if Conversion.is_int(value):
            return Conversion.normalize_int(value)
        elif isinstance(value, datetime.time):
            return Conversion.time2tuple(value)
        elif isinstance(value, datetime.datetime):
            return Conversion.datetime2tuple(value)
        elif isinstance(value, datetime.date):
            return Conversion.date2tuple(value)
        if Conversion.is_asp_constant(value):
            return Conversion.normalize_constant(value)
        elif isinstance(value, str):
            return "\""+value+"\""
        else:
            raise SheetRowColumnWrongTypeValueError(
                table, row, col, "A value is expected", value)

    def correct(self):
        """Normalize table names and cell values, then type-check every
        table according to its template entry."""
        # correct table names
        data = {}
        template = {}
        for table in self.data:
            newname = Conversion.make_predicate(table)
            # BUG FIX: tables whose name was already a valid predicate were
            # previously dropped here (only renamed tables were copied into
            # the new dicts); every table is now carried over.
            assert newname not in data, "Duplicate table '%r' in template" % table
            data.setdefault(newname, self.data[table])
            assert newname not in template, "Duplicate table '%r' in template" % table
            template.setdefault(newname, self.template[table])
        self.data = data
        self.template = template
        # remove leading or trailing blanks from each value in every table
        for table in self.data:
            for r in self.data[table]["rows"]:
                row = self.data[table]["rows"][r]
                for pos, value in enumerate(row):
                    try:
                        # strip in place by position; the old row.index(value)
                        # hit the first duplicate instead of the current cell
                        row[pos] = value.strip()
                    except AttributeError:
                        pass  # non-string cells have no strip()
        for table in self.data:
            style = self.template[table]["style"]
            if style in ["row", "row_indexed"]:
                self.correct_row_style(table)
            elif style in ["matrix_xy", "sparse_matrix_xy"]:
                self.correct_matrix_xy_style(
                    table, style == "sparse_matrix_xy")
            else:
                raise ValueError('style not valid: '+style)

    def correct_row_style(self, table):
        """Drop the header row, trim extra columns and type-check cells."""
        unexpected = 0
        nb_col = len(self.template[table]["types"])
        self.data[table]["rows"].pop(1)  # ignore first line (the header)
        self.ignore_empty_row(table)
        for row in self.data[table]["rows"]:
            if len(self.data[table]["rows"][row]) > nb_col:
                unexpected = 1
                self.data[table]["rows"][row] = self.data[table]["rows"][row][0:nb_col]
        if unexpected:
            sys.stderr.write(
                "WARNING: Undefined column in sheet \""+table+"\", ignoring it\n")
        for col, (col_type, default) in enumerate(
                zip(self.template[table]["types"], self.template[table]["default"])):
            if col_type == "skip":
                self.add_skip(table, col)
            else:
                test = self.get_test(col_type)
                for row in self.data[table]["rows"]:
                    value = self.data[table]["rows"][row][col]
                    self.data[table]["rows"][row][col] = test(
                        table, row, col, value, default)

    def correct_matrix_xy_style(self, table, sparse=False):
        """Type-check the x header row (row 1), y header column (col 0) and
        the inner matrix cells."""
        type_x = self.template[table]["types"][0]
        default_x = self.template[table]["default"][0]
        type_y = self.template[table]["types"][1]
        default_y = self.template[table]["default"][1]
        type_v = self.template[table]["types"][2]
        default_v = self.template[table]["default"][2]
        self.locate_empty_column(table)
        self.add_skip(table, 0)
        self.ignore_empty_row(table)
        # test type for x (= first line)
        test = self.get_test(type_x)
        row_x = self.data[table]["rows"][1]
        for col in range(1, len(row_x)):
            if not self.is_skip(table, col):
                row_x[col] = test(table, 1, col, row_x[col], default_x)
        # test type for y (= first column)
        test = self.get_test(type_y)
        for r in self.data[table]["rows"]:
            if r != 1:
                self.data[table]["rows"][r][0] = test(
                    table, r, 0, self.data[table]["rows"][r][0], default_y)
        # test type for the inner matrix
        test = self.get_test(type_v)
        for r in self.data[table]["rows"]:
            if r != 1:
                for col in range(1, len(self.data[table]["rows"][r])):
                    if not self.is_skip(table, col):
                        if not sparse or self.data[table]["rows"][r][col] is not None:
                            self.data[table]["rows"][r][col] = test(
                                table, r, col, self.data[table]["rows"][r][col], default_v)

    def get_table_style(self, table):
        """Return the template style for `table`, or "skip" with a warning
        when the sheet is not described by the template."""
        if table not in self.template:
            sys.stderr.write("WARNING: Sheet \""+table +
                             "\" is not defined in the template\n")
            return "skip"
        style = self.template[table]["style"]
        return style

    def ignore_empty_row(self, table):
        """Remove rows whose cells are all None, warning on stderr."""
        list_empty = [row for row in self.data[table]["rows"]
                      if all(value is None for value in self.data[table]["rows"][row])]
        for row in list_empty:
            self.data[table]["rows"].pop(row)
            # (message previously lacked the space before "is")
            sys.stderr.write("WARNING: Row "+str(row) +
                             " in sheet \""+table+"\" is empty, ignoring it\n")

    def locate_empty_column(self, table):
        """Mark columns that contain no value in any row as skipped."""
        self.add_skip(table)
        rows = self.data[table]["rows"]
        for col in range(len(rows[1])):
            if all(rows[row][col] is None for row in rows):
                self.add_skip(table, col)
        for col in self.data[table]["skip"]:
            sys.stderr.write("WARNING: Column "+Conversion.col2letter(col+1) +
                             " in sheet \""+table+"\" is empty, ignoring it\n")
class XlsReader:
    """Reads an xlsx workbook and fills an Instance with its sheet
    contents, wrapping parse failures with the offending sheet/cell."""

    def __init__(self, instance):
        # Expected worksheets xlsx file and their parsing functions
        self.instance = instance
        # (row, col) position used for error reporting
        self.active_cell = (1, 0)

    def parse(self, input):
        """
        Parses input excel table
        """
        wb = xls.load_workbook(input, read_only=True, data_only=True)
        # NOTE(review): for workbooks with large sheets the file is reloaded
        # without read_only — presumably to work around sheet-dimension
        # reporting in read-only mode; confirm against the openpyxl docs.
        if self.__update_dimensions(wb):
            wb.close()
            wb = xls.load_workbook(input, read_only=False, data_only=True)
        for sheet in wb:
            style = self.instance.get_table_style(sheet.title)
            if style == "skip":
                sys.stderr.write("Skipping Sheet: "+sheet.title+"\n")
            else:
                self.parse_table(sheet, style)
        # every table required by the template must exist; empty sheets are
        # dropped with a warning
        for table in self.instance.template:
            if table not in self.instance.data:
                raise ValueError("Sheet \""+table+"\" not found")
            if not self.instance.data[table].__contains__("rows"):
                sys.stderr.write("WARNING: Sheet \""+table +
                                 "\" is empty, ignoring it\n")
                self.instance.data.pop(table)
                sys.stderr.write("Skipping Sheet: "+table+"\n")

    def parse_table(self, sheet, style):
        """Parse one worksheet into the instance, row by row."""
        table = sheet.title
        sys.stderr.write("Parsing Sheet \""+table +
                         "\" with style \""+style+"\"\n")
        self.instance.add_table(sheet.title)
        self.instance.add_style(sheet.title, style)
        self.active_cell = (1, 0)
        self.active_sheet = sheet
        try:
            id = 1
            for r in sheet.iter_rows(min_row=1):
                row = self.parse_row(r)
                self.instance.add_row(table, id, row)
                id += 1
        except Exception as e:
            # re-raise with sheet/cell context for the report in main()
            raise Xls2AspError(str(e), self.active_sheet, self.active_cell)

    def parse_row(self, row, first=0):
        """Return the cell values of `row`, starting at index `first`."""
        cols = []
        for i in range(first, len(row)):
            cols.append(row[i].value)
        return cols

    def __update_dimensions(self, workbook):
        """True when any sheet exceeds 50 columns or 1000 rows."""
        for sheet in workbook.worksheets:
            if sheet.max_column > 50 or sheet.max_row > 1000:
                return True
        return False
def main():
    """Entry point: read template and xlsx, correct the data, emit facts."""
    # temporal solution, to be removed eventually
    if sys.version_info < (3, 5):
        raise SystemExit('Sorry, this code need Python 3.5 or higher')
    try:
        arg_parser = argparse.ArgumentParser(
            description="Converts an input table to facts"
        )
        arg_parser.add_argument('--output', '-o', metavar='<file>',
                                help='Write output into %(metavar)s', default=sys.stdout, required=False)
        arg_parser.add_argument('--xls', '-x', metavar='<file>',
                                help='Read xls file from %(metavar)s', required=True)
        arg_parser.add_argument('--template', '-t', metavar='<file>',
                                help='Read template from %(metavar)s', required=True)
        options = arg_parser.parse_args()
        template = Template()
        template.read(options.template)
        instance = Instance(template.template)
        XlsReader(instance).parse(options.xls)
        instance.correct()
        if options.output == sys.stdout:
            instance.write(options.output)
        else:
            with open(options.output, 'w', encoding="utf8") as out_file:
                instance.write(out_file)
        return 0
    except Xls2AspError as e:
        sys.stderr.write("*** Exception: {}\n".format(e))
        sys.stderr.write("*** In sheet={0}:{1}{2}\n".format(
            e.sheet, xls.utils.cell.get_column_letter(e.cell[1]), e.cell[0]))
        return 1
    except Exception:
        traceback.print_exception(*sys.exc_info())
        return 1
# Run only when executed as a script, propagating main()'s exit status.
if __name__ == '__main__':
    sys.exit(main())
|
<filename>cdbfunctions.py
"""cdbfunctions.py
Developer: <NAME>
Last Updated: September 12, 2014
This module consists of all functions that interact directly with the
cdbtabledef.py module. Functions include inserting, deleting, and
updating records in the database. There are also several class definitions
which are used to create objects that can be sent between different functions.
"""
import sqlalchemy
from sqlalchemy import Column, DateTime, String, Integer, ForeignKey, func
from sqlalchemy import desc
from sqlalchemy.orm import relationship, backref
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from datetime import datetime, timedelta, date
from cdbtabledef import Household, Person, Volunteer, Visit
#Class Definitions
class volunteerData:
    """This class is used for inserting/selecting a volunteer into/from
    the database.
    """

    def __init__(self, firstname, lastname, color, phone=None,
                 active=True):
        # NOTE: str(None) yields the literal string "None", matching the
        # long-standing storage behaviour when no phone is supplied
        self.firstname, self.lastname = str(firstname), str(lastname)
        self.color = color
        self.phone = str(phone)
        self.active = active
class newClientData:
    """This class is used for inserting a new client into the
    database. dateJoined defaults to the time of construction.
    """

    def __init__(self, firstname, lastname, dob, phone=None,
                 dateJoined=None):
        self.firstname = str(firstname)
        self.lastname = str(lastname)
        self.dob = dob
        self.phone = str(phone)
        # BUG FIX: the old default `dateJoined=datetime.now()` was evaluated
        # once at import time, so every client created without an explicit
        # date shared the module-load timestamp. A None sentinel resolved
        # here gives the actual construction time.
        self.dateJoined = dateJoined if dateJoined is not None else datetime.now()
class oldClientData:
    """This class is used for updating old clients and for
    returning information for a client. dateJoined defaults to the time
    of construction; age is derived from dob via age().
    """

    def __init__(self, id, firstname, lastname, dob, phone=None,
                 dateJoined=None):
        self.id = id
        self.firstname = str(firstname)
        self.lastname = str(lastname)
        self.dob = dob
        self.age = age(dob)
        self.phone = str(phone)
        # BUG FIX: the old default `dateJoined=datetime.now()` was evaluated
        # once at import time; None now means "joined right now".
        self.dateJoined = dateJoined if dateJoined is not None else datetime.now()
class houseData:
    """This class is used to hold data for inserting a household,
    updating a household, or returning household information.
    """

    def __init__(self, street, city='Troy', state='NY', zip='12180',
                 dateVerified=None, apt=None):
        # store every field verbatim on the instance
        for attr, value in (("street", street), ("city", city),
                            ("state", state), ("zip", zip),
                            ("dateVerified", dateVerified), ("apt", apt)):
            setattr(self, attr, value)
class visitData:
    """This class is used to hold data for inserting a visit.
    visitDate defaults to the time of construction.
    """

    def __init__(self, Vol_ID, visitDate=None, notes=None):
        self.Vol_ID = Vol_ID
        # BUG FIX: `visitDate=datetime.now()` was evaluated once at import
        # time; None now means "visited right now".
        self.visitDate = visitDate if visitDate is not None else datetime.now()
        self.notes = notes
class visitDataReturn:
    """This class is used for returning data for the list_visits function.
    """

    def __init__(self, visitDate, clientname, volname, notes=None,
                 vid=None):
        # map constructor arguments onto the published attribute names
        self.__dict__.update({
            "date": visitDate,
            "visitor": clientname,
            "volunteer": volname,
            "notes": notes,
            "visitID": vid,
        })
#functions for inserts
def insert_household(s, street, dateverified=None, Apt=None,
                     City='Troy', State='NY', Zip='12180'):
    """Create and commit a new Household row.

    Returns the new Household object (whose id can be used when inserting
    household members).
    """
    household = Household(street_address=street, apt=Apt, city=City,
                          state=State, zip=Zip,
                          date_verified=dateverified)
    s.add(household)
    s.commit()
    return household
def insert_person(s, firstname, lastname, dob, newhouse,
                  datejoined=None, phonenum=None):
    """Create and commit a new Person row attached to household `newhouse`.

    Returns the new Person object. BUG FIX: the old default
    `datejoined=datetime.now()` was evaluated once at import time; None now
    means "joined right now".
    """
    if datejoined is None:
        datejoined = datetime.now()
    newpers = Person(first_name=firstname, last_name=lastname, DOB=dob,
                     date_joined=datejoined, phone=phonenum)
    newpers.HH_ID = newhouse
    newpers.age = age(dob)
    s.add(newpers)
    s.commit()
    return newpers
def insert_volunteer(s, firstname, lastname, phonenum=None, active=True,
                     color='light blue'):
    """Create and commit a new Volunteer row holding a volunteer's data."""
    volunteer = Volunteer(first_name=firstname, last_name=lastname,
                          phone=phonenum, active=active, color=color)
    s.add(volunteer)
    s.commit()
def insert_visit(s, Vol_id, pers_id, house_id, date_of_visit=None,
                 notes=None):
    """Create and commit a new Visit row.

    BUG FIX: the old default `date_of_visit=datetime.now()` was evaluated
    once at import time; None now means "visited right now".
    """
    if date_of_visit is None:
        date_of_visit = datetime.now()
    new_visit = Visit(I_ID=pers_id, HH_ID=house_id, Vol_ID=Vol_id,
                      date=date_of_visit, visit_notes=notes)
    s.add(new_visit)
    s.commit()
#functions for updating records
def update_household(s, HH_ID, street, city, state, zip, apt=None,
                     date_verified=None):
    """Update the Household row with id HH_ID in place and commit."""
    house = s.query(Household).filter(Household.id == HH_ID).one()
    updates = {"street_address": street, "city": city, "state": state,
               "zip": zip, "apt": apt, "date_verified": date_verified}
    for attr, value in updates.items():
        setattr(house, attr, value)
    s.commit()
def update_person(s, I_ID, firstname, lastname, dob, phonenum=None):
    """Update the Person row with id I_ID in place (age is recomputed from
    dob) and commit."""
    pers = s.query(Person).filter(Person.id == I_ID).one()
    updates = {"first_name": firstname, "last_name": lastname,
               "DOB": dob, "phone": phonenum, "age": age(dob)}
    for attr, value in updates.items():
        setattr(pers, attr, value)
    s.commit()
def update_visit(s, vis_id, date_of_visit=None,
                 notes=None):
    """Update the date and notes of the visit with id vis_id.

    date_of_visit defaults to the current time, resolved when the
    function is called rather than when the module is imported.
    """
    # BUG FIX: the previous datetime.now() default was evaluated once at
    # import time, silently back-dating visits updated later.
    if date_of_visit is None:
        date_of_visit = datetime.now()
    visit = s.query(Visit).filter(Visit.id == vis_id).one()
    visit.date = date_of_visit
    visit.visit_notes = notes
    s.commit()
def update_volunteer(s, vol_id, firstname, lastname, phonenum, active, color=None):
    """Update a volunteer's record; color changes only when supplied."""
    vol = s.query(Volunteer).filter(Volunteer.id == vol_id).one()
    vol.first_name = firstname
    vol.last_name = lastname
    vol.phone = phonenum
    vol.active = active
    # Identity test is the correct None check; '!= None' also triggers
    # overloaded __ne__ implementations (e.g. SQLAlchemy columns).
    if color is not None:
        vol.color = color
    s.commit()
#functions for deleting records
def delete_household(s, HH_ID):
    """Remove the household row matching HH_ID from the database."""
    target = s.query(Household).filter(Household.id == HH_ID).one()
    s.delete(target)
    s.commit()
def delete_person(s, I_ID):
    """Remove the individual with id I_ID from the database."""
    target = s.query(Person).filter(Person.id == I_ID).one()
    s.delete(target)
    s.commit()
def delete_volunteer(s, Vol_ID):
    """Delete a volunteer who has never participated in a visit;
    otherwise "deactivate" the volunteer so visit history stays intact.
    """
    vol = s.query(Volunteer).filter(Volunteer.id == Vol_ID).one()
    # BUG FIX: the docstring promised deactivation for volunteers with
    # visit history, but the old code deleted unconditionally, which
    # would orphan (or cascade away) their Visit rows.
    has_visits = s.query(Visit).filter(Visit.Vol_ID == Vol_ID).count() > 0
    if has_visits:
        vol.active = False
    else:
        s.delete(vol)
    s.commit()
def delete_visit(s, Vi_ID):
    """Remove the visit with id Vi_ID from the database."""
    target = s.query(Visit).filter(Visit.id == Vi_ID).one()
    s.delete(target)
    s.commit()
#additional functions
def age(dob):
    """Return the person's age in whole years as of today."""
    today = datetime.now()
    # Subtract one year when this year's birthday has not yet happened;
    # tuple comparison covers the month and day checks in one step.
    before_birthday = (today.month, today.day) < (dob.month, dob.day)
    return today.year - dob.year - (1 if before_birthday else 0)
def list_visits(s, I_ID):
    """Return the visit history for the household that the person with
    id I_ID belongs to, newest first, as a list of visitDataReturn
    objects.
    """
    visits = []
    # Resolve person -> household so we can list household-wide visits.
    pers = s.query(Person).filter(Person.id == I_ID).one()
    house = s.query(Household).filter(Household.id == pers.HH_ID).one()
    #returns all visits for the household in descending order of date
    visithistory = s.query(Visit, Person, Volunteer).\
        filter(Visit.HH_ID == house.id).\
        filter(Visit.I_ID == Person.id).\
        filter(Visit.Vol_ID == Volunteer.id).\
        order_by(desc(Visit.date)).all()
    # Build one visitDataReturn per visit. (Every visit is returned,
    # not just the past three -- the old comment was stale.)
    for instance in visithistory:
        clientname = instance.Person.first_name + " " +\
            instance.Person.last_name
        volname = instance.Volunteer.first_name + " " +\
            instance.Volunteer.last_name
        visit = visitDataReturn(instance.Visit.date, clientname, volname,
                                notes=instance.Visit.visit_notes,
                                vid=instance.Visit.id)
        visits.append(visit)
    return visits
def get_age_breakdown(members):
    """Count household members by age bracket.

    Returns a dict with 'infants' (<2), 'children' (2-17), 'adults'
    (18-64), 'seniors' (65+) and 'total' (sum of all brackets).
    """
    counts = {'infants': 0, 'children': 0, 'adults': 0, 'seniors': 0}
    for member in members:
        if member.age < 2:
            bracket = 'infants'
        elif member.age < 18:
            bracket = 'children'
        elif member.age < 65:
            bracket = 'adults'
        else:
            bracket = 'seniors'
        counts[bracket] += 1
    counts['total'] = (counts['infants'] + counts['children']
                       + counts['adults'] + counts['seniors'])
    return counts
def generate_report(s, duration):
    """Write a CSV report of every visit within `duration` of today.

    The report lists each visit (client name, household age breakdown,
    city, visit date) followed by a count of individuals who joined in
    the window.  The file is named month-day-year-report.csv and the
    session is closed when done.
    """
    import csv
    now = datetime.now()
    report_name = str(now.month) + "-" + str(now.day) + "-" +\
                  str(now.year) + "-report.csv"
    # Dates are compared as strings, so render the cutoff the same way.
    cutoff = str(now - duration)
    query = sqlalchemy.sql.select([Person.first_name, Person.last_name,
                                   Household.seniors, Household.adults,
                                   Household.children, Household.infants,
                                   Household.total, Household.city,
                                   Visit.date])\
        .where(Visit.I_ID == Person.id)\
        .where(Visit.HH_ID == Household.id)\
        .where(Visit.date >= cutoff)
    with open(report_name, 'w', newline='') as csvfile:
        outcsv = csv.writer(csvfile)
        records = s.execute(query)
        outcsv.writerow(records.keys())
        outcsv.writerows(records)
        # Count of individuals who joined within the window.
        joined = s.query(func.count(Person.first_name))\
            .filter(Person.date_joined >= cutoff).all()
        outcsv.writerow(("New individuals:", joined[0]))
    s.close()
def generate_custom(s, start, end):
    """Write a CSV report of every visit between `start` and `end`.

    Same output shape as generate_report(); the file is named
    month-day-year-report.csv and the session is closed when done.
    """
    import csv
    now = datetime.now()
    report_name = str(now.month) + "-" + str(now.day) + "-" +\
                  str(now.year) + "-report.csv"
    # Dates are stored/compared as strings.
    start_s, end_s = str(start), str(end)
    query = sqlalchemy.sql.select([Person.first_name, Person.last_name,
                                   Household.seniors, Household.adults,
                                   Household.children, Household.infants,
                                   Household.total, Household.city,
                                   Visit.date])\
        .where(Visit.I_ID == Person.id)\
        .where(Visit.HH_ID == Household.id)\
        .where(Visit.date >= start_s)\
        .where(Visit.date <= end_s)
    with open(report_name, 'w', newline='') as csvfile:
        outcsv = csv.writer(csvfile)
        records = s.execute(query)
        outcsv.writerow(records.keys())
        outcsv.writerows(records)
        # Count of individuals who joined within the window.
        joined = s.query(func.count(Person.first_name))\
            .filter(Person.date_joined >= start_s)\
            .filter(Person.date_joined <= end_s).all()
        outcsv.writerow(("New individuals:", joined[0]))
    s.close()
|
"""Native adapter for serving CherryPy via its builtin server."""
import logging
import sys
import io
import cheroot.server
import cherrypy
from cherrypy._cperror import format_exc, bare_error
from cherrypy.lib import httputil
class NativeGateway(cheroot.server.Gateway):
    """Gateway that serves CherryPy applications through cheroot's
    builtin HTTP server, bypassing WSGI.
    """

    # When False, an InternalRedirect to an already-visited URL raises
    # instead of looping forever.
    recursive = False

    def respond(self):
        """Run the CherryPy request machinery for the current cheroot
        request and stream the response back through send_response().
        """
        req = self.req
        try:
            # Obtain a Request object from CherryPy
            local = req.server.bind_addr
            local = httputil.Host(local[0], local[1], '')
            remote = req.conn.remote_addr, req.conn.remote_port
            remote = httputil.Host(remote[0], remote[1], '')
            scheme = req.scheme
            sn = cherrypy.tree.script_name(req.uri or '/')
            if sn is None:
                # No mounted app matches this URI.
                self.send_response('404 Not Found', [], [''])
            else:
                app = cherrypy.tree.apps[sn]
                method = req.method
                path = req.path
                qs = req.qs or ''
                headers = req.inheaders.items()
                rfile = req.rfile
                prev = None
                try:
                    redirections = []
                    while True:
                        request, response = app.get_serving(
                            local, remote, scheme, 'HTTP/1.1')
                        request.multithread = True
                        request.multiprocess = False
                        request.app = app
                        request.prev = prev
                        # Run the CherryPy Request object and obtain the
                        # response
                        try:
                            request.run(method, path, qs,
                                        req.request_protocol, headers, rfile)
                            break
                        except cherrypy.InternalRedirect:
                            ir = sys.exc_info()[1]
                            app.release_serving()
                            prev = request
                            if not self.recursive:
                                if ir.path in redirections:
                                    raise RuntimeError(
                                        'InternalRedirector visited the same '
                                        'URL twice: %r' % ir.path)
                                else:
                                    # Add the *previous* path_info + qs to
                                    # redirections.
                                    if qs:
                                        qs = '?' + qs
                                    redirections.append(sn + path + qs)
                            # Munge environment and try again.
                            method = 'GET'
                            path = ir.path
                            qs = ir.query_string
                            rfile = io.BytesIO()
                    self.send_response(
                        response.output_status, response.header_list,
                        response.body)
                finally:
                    app.release_serving()
        except:
            # NOTE: deliberately broad -- any failure while serving must
            # still be logged and turned into a bare HTTP error response.
            tb = format_exc()
            # print tb
            cherrypy.log(tb, 'NATIVE_ADAPTER', severity=logging.ERROR)
            s, h, b = bare_error()
            self.send_response(s, h, b)

    def send_response(self, status, headers, body):
        """Copy status, headers and body segments onto the cheroot
        request object, flushing headers once they are complete.
        """
        req = self.req
        # Set response status
        req.status = str(status or '500 Server Error')
        # Set response headers
        for header, value in headers:
            req.outheaders.append((header, value))
        if (req.ready and not req.sent_headers):
            req.sent_headers = True
            req.send_headers()
        # Set response body
        for seg in body:
            req.write(seg)
class CPHTTPServer(cheroot.server.HTTPServer):
    """Wrapper for cheroot.server.HTTPServer.

    cheroot has been designed to not reference CherryPy in any way,
    so that it can be used in other frameworks and applications.
    Therefore, we wrap it here, so we can apply some attributes
    from config -> cherrypy.server -> HTTPServer.
    """

    def __init__(self, server_adapter=cherrypy.server):
        # `server_adapter` carries the cherrypy.server configuration
        # that is copied onto this HTTPServer instance below.
        self.server_adapter = server_adapter
        server_name = (self.server_adapter.socket_host or
                       self.server_adapter.socket_file or
                       None)
        cheroot.server.HTTPServer.__init__(
            self, server_adapter.bind_addr, NativeGateway,
            minthreads=server_adapter.thread_pool,
            maxthreads=server_adapter.thread_pool_max,
            server_name=server_name)
        # 0 means "unlimited" for both size caps.
        self.max_request_header_size = (
            self.server_adapter.max_request_header_size or 0)
        self.max_request_body_size = (
            self.server_adapter.max_request_body_size or 0)
        self.request_queue_size = self.server_adapter.socket_queue_size
        self.timeout = self.server_adapter.socket_timeout
        self.shutdown_timeout = self.server_adapter.shutdown_timeout
        self.protocol = self.server_adapter.protocol_version
        self.nodelay = self.server_adapter.nodelay
        ssl_module = self.server_adapter.ssl_module or 'pyopenssl'
        if self.server_adapter.ssl_context:
            # An explicit SSL context was supplied: attach it to the
            # adapter after construction.
            adapter_class = cheroot.server.get_ssl_adapter_class(ssl_module)
            self.ssl_adapter = adapter_class(
                self.server_adapter.ssl_certificate,
                self.server_adapter.ssl_private_key,
                self.server_adapter.ssl_certificate_chain)
            self.ssl_adapter.context = self.server_adapter.ssl_context
        elif self.server_adapter.ssl_certificate:
            # Certificate-only configuration: the adapter builds its own
            # context, so none is assigned here.
            adapter_class = cheroot.server.get_ssl_adapter_class(ssl_module)
            self.ssl_adapter = adapter_class(
                self.server_adapter.ssl_certificate,
                self.server_adapter.ssl_private_key,
                self.server_adapter.ssl_certificate_chain)
|
import json
import logging
from api.api_samples.python_client.api_client import CloudBoltAPIClient
from api.api_samples.python_client.samples.api_helpers import wait_for_order_completion, wait_for_job_completion
from common.methods import set_progress
from servicecatalog.models import ServiceBlueprint
from utilities.exceptions import CloudBoltException
from utilities.models import ConnectionInfo
from orders.models import Order
# suppress logging from requests module
# (logging.ERROR replaces the magic number 40 -- same value, readable)
logger = logging.getLogger('requests')
logger.setLevel(logging.ERROR)
logger = logging.getLogger('py.warnings')
logger.setLevel(logging.ERROR)

# Name of the ConnectionInfo record that stores the API credentials.
API_CLIENT_CI = "CIT API Client"

# BP specific variables - You should change these
BLUEPRINT = 230
NEW_RESOURCE_NAME = "ls-cittest"

# Order payload POSTed to /api/v2/orders/ to deploy the blueprint.
BP_PAYLOAD = """
{
    "group": "/api/v2/groups/GRP-o1omc6h3/",
    "items": {
        "deploy-items": [
            {
                "blueprint": "/api/v2/blueprints/BP-44z1h70u/",
                "blueprint-items-arguments": {
                    "build-item-Create Cisco UCS Service Profile": {
                        "parameters": {
                            "chassis-a663": "sys/chassis-6",
                            "create-sp-from-sp-template-a663": "False",
                            "mac-pool-name-a663": "macpool",
                            "organization-a663": "org-root",
                            "service-profile-description-a663": "a description",
                            "service-profile-name-a663": "cittest",
                            "ucs-server-dn-a663": "sys/chassis-6/blade-2",
                            "use-blade-servers-a663": "True"
                        }
                    }
                },
                "resource-name": "Cisco UCS",
                "resource-parameters": {}
            }
        ]
    },
    "submit-now": "true"
}
"""
# END of BP specific variables
def get_id_from_href(order_href):
    """Extract the numeric id from an API href like '/api/v2/orders/123/'."""
    parts = order_href.split('/')
    # The href ends with a trailing slash, so the id is second-to-last.
    return int(parts[-2])
def test_order_blueprint(client):
    """Submit BP_PAYLOAD as a new order and wait for it to finish.

    Raises CloudBoltException when the order fails AND provisioning did
    not leave an active resource; otherwise returns the resource.
    """
    order = json.loads(client.post('/api/v2/orders/', body=BP_PAYLOAD))
    order_id = get_id_from_href(order['_links']['self']['href'])
    set_progress("Current Running order: {}".format(order_id))
    result = wait_for_order_completion(client, order_id, 180, 10)
    order_object = Order.objects.filter(id=order_id).first()
    first_job = order_object.list_of_jobs()[0]
    resource = first_job.get_resource()
    provisioning_failed = not resource or resource.lifecycle == 'PROVFAILED'
    if result != 0 and provisioning_failed:
        raise CloudBoltException(
            "Blueprint Deployment order {} did not succeed.".format(order_id))
    set_progress(
        "Blueprint deployment order {} completed successfully.".format(order_id))
    return resource
def test_delete_resource(client, resource):
    """Invoke the delete action on `resource` through the API and wait
    for its job; raises CloudBoltException when the job fails.
    """
    action_url = '/api/v2/resources/{}/{}/actions/1/'.format(
        resource.resource_type.name, resource.id)
    response = json.loads(client.post(action_url, body="{}"))
    job_id = get_id_from_href(response['run-action-job']['self']['href'])
    result = wait_for_job_completion(client, job_id, 180, 10)
    if result != 0:
        raise CloudBoltException(
            "Resource deletion job {} did not succeed.".format(job_id))
    set_progress(
        "Resource deletion job {} completed successfully.".format(job_id))
def get_api_client():
    """Build a CloudBoltAPIClient from the stored ConnectionInfo record."""
    conn = ConnectionInfo.objects.get(name=API_CLIENT_CI)
    return CloudBoltAPIClient(
        conn.username, conn.password, conn.ip, conn.port,
        protocol=conn.protocol)
def run(job, *args, **kwargs):
    """Continuous-infrastructure-test entry point.

    Orders the blueprint, deletes the resulting resource from the DB
    only, verifies sync_resources() re-discovers it, then deletes it
    for real through the API.
    """
    bp = ServiceBlueprint.objects.get(id=BLUEPRINT)
    set_progress(
        "Running Continuous Infrastructure Test for blueprint {}".format(bp)
    )
    client = get_api_client()
    # Order the BP
    set_progress("### ORDERING BLUEPRINT ###", tasks_done=0, total_tasks=3)
    resource = test_order_blueprint(client)
    set_progress(f"RESOURCE {resource}")
    rce = bp.resource_set.last()
    set_progress(f"LAST RESOURCE {rce}")
    # Delete the resource from the database only
    resource.delete()
    set_progress("### DISCOVERING RESOURCES FOR BLUEPRINT ###", tasks_done=1)
    bp.sync_resources()
    # should be able to get the resource since the sync should have created it
    resource = bp.resource_set.get(name=NEW_RESOURCE_NAME, lifecycle='ACTIVE')
    set_progress("### DELETING RESOURCE FOR BLUEPRINT ###", tasks_done=2)
    test_delete_resource(client, resource)
    set_progress("ALL Tests completed!", tasks_done=3)
|
<filename>import_scripts/methylation.py
###ExonArray
#Copyright 2005-2008 <NAME> Institutes, San Francisco California
#Author <NAME> - <EMAIL>
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import os.path
import unique
import time
import export
def filepath(filename):
    """Resolve `filename` through the project's unique.filepath helper."""
    return unique.filepath(filename)
def read_directory(sub_dir):
    """Return only the .txt entries of `sub_dir` (folders excluded)."""
    return [entry for entry in unique.read_directory(sub_dir)
            if '.txt' in entry]
################# Begin Analysis
def cleanUpLine(line):
    """Strip newline, literal backslash-c, carriage-return and double
    quote characters from a raw input line.
    """
    cleaned = line.replace('\n', '').replace('\c', '')
    cleaned = cleaned.replace('\r', '').replace('"', '')
    return cleaned
def importAnnotations(filename):
    """Debug helper (Python 2): print the first ~10 tab-split rows of an
    annotation file, then exit the interpreter entirely.
    """
    firstLine = True
    fn = filepath(filename)
    rows = 0
    for line in open(fn,'rU').xreadlines():
        data = cleanUpLine(line);
        tab_delimited_data = string.split(data,'\t')
        # Hard stop after ~10 rows -- this routine is only for
        # eyeballing the file's structure.
        if rows > 10: sys.exit()
        print tab_delimited_data#;sys.exit()
        rows+=1
def correlateMethylationData(filename,betaLow=0.4,betaHigh=0.6,counts=-1):
    ### Takes a filtered pre-processed beta-value file as input
    # NOTE(review): this function references export_object and
    # annot_export_object, which are never defined in this scope (they
    # belong to importMethylationData); it also computes high/low counts
    # without using them.  It appears to be an unfinished copy of
    # importMethylationData -- confirm intent before calling it.
    firstLine = True
    rows=0; filtered=0
    for line in open(filename,'rU').xreadlines():
        data = cleanUpLine(line);
        t = string.split(data,'\t')
        if firstLine:
            header = t
            if len(t)>5 and 'Illumina_name' in header:
                # The last 50 columns are annotation fields.
                delimiter = -50
                annot_export_object.write(string.join([t[0]]+t[delimiter:],'\t')+'\n')
            else:
                delimiter = len(header)
            headers = t[1:delimiter]
            firstLine = False
            export_object.write(string.join([t[0]]+headers,'\t')+'\n')
        else:
            probeID = t[0]
            #try: beta_values = map(float,t[1:50])
            beta_values = map(lambda x: conFloat(x,t[1:delimiter]),t[1:delimiter])
            if '' in beta_values:
                print beta_values;sys.exit()
            high = sum(betaHighCount(x,betaHigh) for x in beta_values)
            low = sum(betaLowCount(x,betaLow) for x in beta_values)
def importMethylationData(filename,betaLow=0.4,betaHigh=0.6,counts=-1, filter=None):
annot_file = filepath('AltDatabase/ucsc/Hs/Illumina_methylation_genes.txt')
export_object = open(filename[:-4]+'-filtered.txt','w')
print filename[:-4]+'-filtered.txt', counts
firstLine = True
rows=0; filtered=0
for line in open(filename,'rU').xreadlines():
data = cleanUpLine(line);
t = string.split(data,'\t')
#export_object.write(string.join(t,'\t')+'\n')
#"""
if firstLine:
header = t
if len(t)>5 and 'Illumina_name' in header:
delimiter = -50
annot_export_object = open(annot_file,'w')
annot_export_object.write(string.join([t[0]]+t[delimiter:],'\t')+'\n')
else:
delimiter = len(header)
headers = t[1:delimiter]
firstLine = False
export_object.write(string.join([t[0]]+headers,'\t')+'\n')
else:
probeID = t[0]
#try: beta_values = map(float,t[1:50])
beta_values = map(lambda x: conFloat(x,t[1:delimiter]),t[1:delimiter])
if '' in beta_values:
print beta_values;sys.exit()
high = sum(betaHighCount(x,betaHigh) for x in beta_values)
low = sum(betaLowCount(x,betaLow) for x in beta_values)
#if rows<50: print high, low, max(beta_values), min(beta_values)
#else:sys.exit()
#export_object.write(string.join(t[:delimiter])+'\n')
if high>=counts and low>=counts:
#if (high-low) > 0.2:
#if rows<50: print 1
if filter!=None:
if probeID in filter: proceed=True; probeID = str(filter[probeID])+':'+probeID
else: proceed = False
else: proceed = True
if proceed:
filtered+=1
export_object.write(string.join([probeID]+map(str,beta_values),'\t')+'\n')
if 'Illumina_name' in header:
annot_export_object.write(string.join([t[0]]+t[delimiter:],'\t')+'\n')
rows+=1
#"""
export_object.close()
if delimiter == '-50':
annot_export_object.close()
print filtered, rows
def conFloat(x,betaValues):
    """Coerce one beta value to float; a missing ('') or zero entry is
    replaced by the smallest non-zero beta value in the same row.
    """
    try: x = float(x)
    except Exception: x=None
    if x== None or x == 0:
        floats=[]
        for i in betaValues:
            if i=='': pass
            elif float(i)==0: pass
            else: floats.append(float(i))
        # If every value in the row is missing/zero there is nothing to
        # impute from: dump the row and abort the program.
        try: return min(floats)
        except Exception: print betaValues;sys.exit()
    else:
        return x
def betaHighCount(x,betaHigh):
    """Return 1 when the beta value exceeds the high threshold, else 0."""
    return 1 if x > betaHigh else 0
def betaLowCount(x,betaLow):
    """Return 1 when the beta value is below the low threshold, else 0."""
    return 1 if x < betaLow else 0
def getIDsFromFile(filename):
    """Read the first tab-separated column of `filename` and return the
    values, lower-cased, as dict keys (the dict is used as a set).
    """
    filterIDs = {}
    fn = filepath(filename)
    for line in open(fn,'rU').xreadlines():
        data = cleanUpLine(line);
        t = string.split(data,'\t')
        filterIDs[string.lower(t[0])]=[]
    return filterIDs
def getRegionType(filename,featureType=None,chromosome=None,filterIDs=None):
    """Select probes from an Illumina 450K annotation CSV.

    Probes are kept when they match the requested chromosome, feature
    type ('promoter' = TSS-annotated, 'mir'/'micro' = microRNA genes),
    and/or a gene-symbol filter file.  Returns {probeID: coordinate}.
    """
    if filterIDs !=None:
        filterIDs = getIDsFromFile(filterIDs)
    firstLine = True
    fn = filepath(filename)
    count=0; filter_db={}
    for line in open(fn,'rU').xreadlines():
        data = cleanUpLine(line);
        t = string.split(data,',')
        if firstLine:
            # The real header row is recognized by a non-empty third
            # column; earlier preamble lines are skipped.
            if len(t[2]) >0:
                header = t
                firstLine=False
                chr_ind = header.index('CHR')
                pos_ind = header.index('Coordinate_36')
                tss_ind = header.index('UCSC_RefGene_Group')
                gene_name = header.index('UCSC_RefGene_Name')
        else:
            probeID = t[0]
            count+=1
            try: gene_names = string.split(t[gene_name],';')
            except Exception: gene_names = []
            try:
                if chromosome != None:
                    if t[chr_ind] == chromosome:
                        if filterIDs !=None:
                            for gene in gene_names:
                                if string.lower(gene) in filterIDs:
                                    filter_db[probeID]=t[pos_ind]
                        else:
                            filter_db[probeID]=t[pos_ind]
                if 'promoter' in string.lower(featureType):
                    if 'TSS' in t[tss_ind]:
                        if filterIDs !=None:
                            for gene in gene_names:
                                if string.lower(gene) in filterIDs:
                                    filter_db[probeID]=t[pos_ind]
                        else:
                            filter_db[probeID]=t[pos_ind]
                if 'mir' in string.lower(featureType) or 'micro' in string.lower(featureType):
                    if 'mir' in string.lower(t[gene_name]) or 'let' in string.lower(t[gene_name]):
                        if filterIDs !=None:
                            for gene in gene_names:
                                if string.lower(gene) in filterIDs:
                                    filter_db[probeID]=t[pos_ind]
                        else:
                            filter_db[probeID]=t[pos_ind]
                # NOTE(review): this final clause adds any gene-filter
                # match regardless of featureType/chromosome, so filter
                # matches are unioned with the criteria above -- confirm
                # that is intended.
                if filterIDs !=None:
                    for gene in gene_names:
                        if string.lower(gene) in filterIDs:
                            filter_db[probeID]=t[pos_ind]
            except Exception:
                # Rows with short/malformed columns are skipped silently.
                pass
    print len(filter_db), 'probes remaining'
    return filter_db
if __name__ == '__main__':
    import getopt
    # Defaults: the second assignment deliberately overrides the first.
    featureType = 'promoter'
    featureType = 'all'
    Species = 'Hs'
    filter_db=None
    chromosome=None
    numRegulated = -1
    analysis = 'filter'
    filterIDs = None
    ################  Command-line arguments ################
    if len(sys.argv[1:])<=1: ### Indicates that there are insufficient number of command-line arguments
        print "Warning! Please designate a methylation beta-value file as input in the command-line"
        print "Example: python methylation.py --i /Users/me/sample1.txt --g /Users/me/human.gtf"
        sys.exit()
    else:
        analysisType = []
        useMultiProcessing=False
        options, remainder = getopt.getopt(sys.argv[1:],'', ['i=','a=','t=','r=','c=','f='])
        for opt, arg in options:
            if opt == '--i': input_file=arg
            elif opt == '--a': analysis=arg
            elif opt == '--t': featureType=arg
            elif opt == '--r': numRegulated=int(arg)
            elif opt == '--c': chromosome=arg
            elif opt == '--f': filterIDs=arg
            else:
                print "Warning! Command-line argument: %s not recognized. Exiting..." % opt; sys.exit()
    if analysis == 'filter':
        # Probe annotation file used to build the optional region filter.
        filename = 'AltDatabase/ucsc/Hs/wgEncodeHaibMethyl450CpgIslandDetails.txt'
        #input_file = '/Volumes/SEQ-DATA/PCBC/Methylation/Methylome70allBValues_aronowAnnotations.txt'
        if featureType!= 'all' or chromosome != None or filterIDs!=None:
            filter_db = getRegionType(filename,featureType=featureType,chromosome=chromosome,filterIDs=filterIDs)
        importMethylationData(input_file,filter = filter_db,counts=numRegulated); sys.exit()
        #importAnnotations(methylation_file);sys.exit()
    if analysis == 'correlate':
        ### Performs all pairwise correlations between probes corresponding to a gene
        correlateMethylationData(input_file)
|
'''blox/compile.py
Creates an optimized programattically generated template from an html file
Copyright (C) 2015 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
'''
import json
from functools import partial
from xml.dom import minidom
from lxml.etree import HTMLParser
from lxml.html import fromstring
from short.compile import text as grow_short
from blox.all import factory
from blox.attributes import AccessorAttribute
from blox.base import Wildcard
from blox.containers import Container
from blox.text import Text
try:
import Cython
except ImportError:
Cython = False
# Source-code template rendered by _to_python(); the {indent},
# {accessors}, {attributes} and {build_steps} placeholders are filled
# in via str.format when a template is compiled.
SCRIPT_TEMPLATE = """# WARNING: DON'T EDIT AUTO-GENERATED
from blox.base import Blox, Wildcard
from blox.containers import Container
from blox.text import Text, UnsafeText
from blox.attributes import AccessorAttribute
class Template(Container):
{indent}__slots__ = tuple({accessors})
{indent}{attributes}
def build(factory):
{indent}template = Template()
{indent}{build_steps}
{indent}return template
"""
def string(html, start_on=None, ignore=(), use_short=True, **queries):
    """Build a blox template builder from an HTML string."""
    markup = grow_short(html) if use_short else html
    dom = fromstring(markup)
    return _to_template(dom, start_on=start_on, ignore=ignore, **queries)
def file(file_object, start_on=None, ignore=(), use_short=True, **queries):
    """Build a blox template from an open file stream object."""
    contents = file_object.read()
    return string(contents, start_on=start_on, ignore=ignore,
                  use_short=use_short, **queries)
def filename(file_name, start_on=None, ignore=(), use_short=True, **queries):
    """Build a blox template from a valid file path."""
    with open(file_name) as template_file:
        return file(template_file, start_on=start_on, ignore=ignore,
                    use_short=use_short, **queries)
def _to_python(dom, factory=factory, indent=' ', start_on=None, ignore=(), **queries):
    """Walk an lxml DOM and emit Python source (via SCRIPT_TEMPLATE)
    that rebuilds the page as a blox Template.

    start_on: optional CSS selector naming the subtree to compile.
    ignore: CSS selector(s) for nodes to leave out entirely.
    queries: accessor_name -> CSS selector; matching nodes are collected
    onto template.<accessor_name>.
    """
    if start_on:
        dom = dom.cssselect(start_on)[0]
    ignored = set()
    for query in ignore if type(ignore) in (list, tuple) else (ignore, ):
        ignored.update(dom.cssselect(query))
    # Mutable counter shared with the nested closure (py2-compatible
    # alternative to `nonlocal`).
    current = [0]
    def increment(element_name=''):
        # Return a unique variable name for this element plus the blok
        # class registered for its tag (None for unknown tags).
        current[0] += 1
        return ('{0}{1}'.format(element_name, current[0]), factory.get(element_name))
    lines = []
    accessors = list(queries.keys())
    attributes = []
    matches = {}
    for accessor, query in queries.items():
        lines.append('template.{0} = Blox()'.format(accessor))
        for match in dom.cssselect(query):
            matches[match] = accessor
    def compile_node(node, parent='template'):
        # Recursively append build statements for `node` under `parent`.
        if node in ignored or type(node.tag) != str:
            return
        blok_name, blok = increment(node.tag)
        lines.append("{0} = {1}({2}('{3}'))".format(blok_name, parent, 'factory' if blok else 'Wildcard', node.tag))
        if not blok:
            # Unregistered tags fall back to the Wildcard blok.
            blok = Wildcard
        if node in matches:
            lines.append('template.{0}.append({1})'.format(matches[node], blok_name))
        text = (node.text or "").strip().replace('"', '\\"')
        if text:
            if 'text' in dir(blok):
                lines.append('{0}.text = """{1}"""'.format(blok_name, text))
            else:
                lines.append('{0}(Text("""{1}"""))'.format(blok_name, text))
        # An id doubles as an accessor name unless one was given.
        if 'id' in node.keys() and not 'accessor' in node.keys():
            node.set('accessor', node.get('id'))
        for attribute_name, attribute_value in node.items():
            attribute_name = getattr(blok, 'attribute_map', {}).get(attribute_name, attribute_name)
            if attribute_name == 'accessor':
                attribute_value = attribute_value.replace('-', '_')
                attributes.append("{0} = AccessorAttribute(Text)".format(attribute_value))
                lines.append('template._{0}_parent = {1}'.format(attribute_value, parent))
                lines.append('template.{0} = {1}'.format(attribute_value, blok_name))
            else:
                lines.append('{0}["{1}"] = "{2}"'.format(blok_name, attribute_name.replace('"', '\\"'),
                                                         attribute_value.replace('"', '\\"')))
        for child_node in node:
            if child_node.tag in getattr(blok, 'blok_attributes', {}):
                # Child maps onto a named attribute of the parent blok;
                # compile its children onto that attribute instead.
                attached_child = "{0}.{1}".format(blok_name, blok.blok_attributes[child_node.tag].name)
                for nested_child_node in child_node:
                    compile_node(nested_child_node, parent=attached_child)
                attached_text = (child_node.text or "").strip().replace('"', '\\"')
                if attached_text:
                    if 'text' in dir(blok.blok_attributes[child_node.tag].type):
                        lines.append('{0}.text = """{1}"""'.format(attached_child, attached_text))
                    else:
                        lines.append('{0}(Text("""{1}"""))'.format(attached_child, attached_text))
            else:
                compile_node(child_node, parent=blok_name)
            # Trailing text after a child element belongs to this node.
            tail = (child_node.tail or "").strip().replace('"', '\\"')
            if tail:
                lines.append('{0}(Text("""{1}"""))'.format(blok_name, tail))
    compile_node(dom)
    return SCRIPT_TEMPLATE.format(accessors=json.dumps(accessors),
                                  attributes="\n{indent}".join(attributes).replace("{indent}", indent),
                                  build_steps="\n{indent}".join(lines).replace("{indent}", indent),
                                  indent=indent)
def _to_template(dom, factory=factory, start_on=None, ignore=(), **queries):
    """Generate template code for `dom` and return a ready-made builder.

    Compiles through Cython.inline when Cython is available, otherwise
    exec()s the generated source; either way the result is
    partial(build, factory).
    """
    code = _to_python(dom, factory, indent=' ', start_on=start_on,
                      ignore=ignore, **queries)
    if Cython and hasattr(Cython, 'inline'):
        namespace = Cython.inline(code)
    else:
        namespace = {}
        exec(compile(code, '<string>', 'exec'), namespace)
    return partial(namespace['build'], factory)
|
<gh_stars>0
#CREATING A NEW CSV FILE TAKING THE AVERAGE OF VARIOUS PARAMETERS FOR A SINGLE TIME PERIOD
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
def dataExtraction(path):
    """Load an OBD CSV log, dropping MIL/history columns and NaN rows.

    Prints the head and tail of the cleaned frame and returns it.
    """
    frame = pd.read_csv(path, index_col=False)
    unused = ['TIME_SINCE_TROUBLE_CODES_CLEARED ()',
              'DISTANCE_TRAVELED_WITH_MIL_ON ()',
              'WARM_UPS_SINCE_CODES_CLEARED ()']
    frame = frame.drop(columns=unused)
    frame.dropna(inplace=True)
    print(frame.head())
    print(frame.tail())
    return frame
def dataSmoothing(df):
    """Collapse consecutive rows sharing the same engine run time into a
    single row of per-column averages, preserving column order.

    Fixes in this revision:
      * the first row is no longer skipped (the old code started the
        first group at index 1);
      * the trailing group is averaged exactly once after the loop,
        instead of being re-appended on every repeated timestamp;
      * rows are collected and built into a DataFrame at the end rather
        than via the deprecated DataFrame.append.
    """
    times = df['ENGINE_RUN_TINE ()'].tolist()
    if not times:
        return df.copy()
    averaged = []
    start = 0
    for i in range(1, len(times) + 1):
        # Close the current group at the end of the data or whenever the
        # timestamp changes.
        if i == len(times) or times[i] != times[i - 1]:
            group = df.iloc[start:i]
            averaged.append(group.mean(axis=0))
            start = i
    new_df = pd.DataFrame(averaged).reset_index(drop=True)
    # Restore the original column order.
    return new_df[df.columns]
# Batch-processing loop kept for reference (disabled):
'''
for i in os.listdir('/home/rohit/UnsupervisedLearning/DataSet_2'):
obd = dataExtraction('/home/rohit/UnsupervisedLearning/DataSet_2/'+ i)
obd.to_csv(r'/home/rohit/UnsupervisedLearning/OBD_Dataset/'+ i, index = False)
'''
# Process a single log end-to-end: extract, smooth, and write the
# averaged-per-timestamp CSV.
#for i in os.listdir('/home/rohit/UnsupervisedLearning/DataSet_2'):
obd = dataExtraction('/home/rohit/UnsupervisedLearning/DataSet_2/' + 'live06.csv')
#try:
#print(f'{i}')
new_obd = dataSmoothing(obd)
new_obd.to_csv('/home/rohit/UnsupervisedLearning/final_OBD/' + 'live06.csv' ,index=False)
#except(Exception):
#    print(f"Error in {i}")
#obd.plot(x='ENGINE_RUN_TINE',y=['LONG_TERM_FUEL_TRIM_BANK_1', 'SHORT_TERM_FUEL_TRIM_BANK_1','COMMANDED_EVAPORATIVE_PURGE'])
|
#!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
import sys,os
import string
import math
import argparse
def read_plugmap(filename):
    """Parse an SDSS plugmap file (Python 2).

    Returns (doc, objects): `doc` maps header keywords to their
    (numeric when possible) values; `objects` maps the column names in
    `indices` to np.arrays with one entry per PLUGMAPOBJ line.
    """
    debug=False
    file=open(filename,"r")
    doc={}
    intypedef=False
    # Fixed column positions of the fields we extract from each
    # PLUGMAPOBJ record.
    indices={}
    indices["HOLETYPE"]=8
    indices["OBJECT"]=21
    indices["ra"]=9
    indices["dec"]=10
    indices["xfoc"]=22
    indices["yfoc"]=23
    objects={}
    for k in indices :
        objects[k]=[]
    for line in file.readlines() :
        line=line.strip().replace('\t',' ')
        if debug :
            print "line: ",line
        if len(line)==0 :
            continue
        if line[0]=="#" :
            continue
        # typedef { ... } blocks are skipped entirely.
        if line.find("typedef")>=0 :
            intypedef=True
            if debug :
                print "now in typedef"
            continue
        if intypedef and line.find("}")>=0 :
            intypedef=False
            if debug :
                print "end of typedef"
            continue
        if intypedef :
            continue
        if line.find("PLUGMAPOBJ")>=0 :
            # Split on spaces, dropping empty tokens from repeated blanks.
            tmp=line.split(" ")
            entries=[]
            for t in tmp :
                if len(t)>0 :
                    entries.append(t)
            for k in objects.keys() :
                i=indices[k]
                val=entries[i]
                #print k,i,val
                # Coerce to int, then float, else keep the raw string.
                tmp=None
                try :
                    tmp=string.atoi(val)
                except ValueError :
                    pass
                if tmp is None :
                    try :
                        val=string.atof(val)
                    except ValueError :
                        pass
                if tmp is not None :
                    val=tmp
                objects[k].append(val)
            if debug :
                print "added one PLUGMAPOBJ"
            continue
        # Any other "key value ..." line goes into the doc dictionary.
        tmp=line.strip().split(" ")
        entries=[]
        for t in tmp :
            if len(t)>0 :
                entries.append(t)
        if len(entries)>=2 :
            key=entries[0]
            val=entries[1]
            tmp=None
            try :
                tmp=string.atoi(val)
            except ValueError :
                pass
            if tmp is None :
                try :
                    val=string.atof(val)
                except ValueError :
                    pass
            if tmp is not None :
                val=tmp
            doc[key]=val
            if debug :
                print "added doc",key,val
    # convert objects into np.array
    for k in objects :
        objects[k]=np.array(objects[k])
    return doc,objects
class OpticalDistortion() :
    """Radial optical distortion model of the plate: an achromatic polynomial
    term plus a wavelength-dependent (chromatic) term interpolated from a
    tabulated grid. Python 2 only (print statement in __init__).
    """
    def __init__(self,platescale) :
        self.platescale=platescale # has units
        # see ~/software/platedesign/trunk/pro/plate/ad2xyfocal.pro
        coef=np.array([-0.000137627, -0.00125238, 1.5447e-09,
                       8.23673e-08, -2.74584e-13, -1.53239e-12,
                       6.04194e-18, 1.38033e-17, -2.97064e-23,
                       -3.58767e-23])
        # np.poly1d expects highest power first, hence the reversal.
        self.achromatic_distortion_pol=np.poly1d(coef[::-1])
        # see ~/software/platedesign/trunk/pro/plate/apo_rdistort.pro
        mm_per_rad =platescale*180/math.pi
        # 10 radial nodes from 0 to 90 arcmin, converted to mm on the plate.
        self.chromatic_distort_radii=np.arcsin(np.linspace(0,90,10)*math.pi/(60*180))*mm_per_rad
        print "RADII=",self.chromatic_distort_radii
        self.chromatic_distort_wave=np.array([5300,4000,5500,6000,8000,10000,15350,15950,16550])
        nw=self.chromatic_distort_wave.size
        nr=self.chromatic_distort_radii.size
        # Rows follow chromatic_distort_wave, columns follow
        # chromatic_distort_radii. NOTE(review): the first row (the 5300A
        # entry) appears to hold reference radii used only to derive the
        # per-column scaling below -- confirm against apo_rdistort.pro.
        self.chromatic_distort=np.array([
            [0.,36.26,72.53,108.84,145.18,181.53,217.90,254.29,290.77,327.44],
            [0.,-0.002,-0.003,-0.004,-0.005,-0.005,-0.005,-0.004,-0.002,0.003],
            [0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
            [0.,0.001,0.001,0.001,0.001,0.001,0.001,0.001,0.001,-0.001],
            [0.,0.001,0.003,0.003,0.004,0.004,0.004,0.003,0.002,-0.003],
            [0.,0.002,0.004,0.005,0.005,0.005,0.005,0.005,0.003,-0.004],
            [0.,0.003,0.006,0.007,0.008,0.008,0.008,0.008,0.004,-0.006],
            [0.,0.003,0.006,0.008,0.008,0.009,0.009,0.008,0.004,-0.006],
            [0.,0.004,0.006,0.008,0.009,0.009,0.009,0.008,0.004,-0.007]])
        # apply scaling
        scale=np.zeros((nr))
        scale[1:]=self.chromatic_distort_radii[1:]/self.chromatic_distort[0,1:]
        self.chromatic_distort[1:] *= scale
        self.chromatic_distort[0]=0.
        # sort wave (the table lists 5300 first, out of order)
        ii=np.argsort(self.chromatic_distort_wave)
        self.chromatic_distort_wave=self.chromatic_distort_wave[ii]
        for j in range(nr) :
            self.chromatic_distort[:,j]=self.chromatic_distort[ii,j]
        # in ad2xyfocal, a reference wavelength of 5000A instead of 5500A is used !!
        ref_distort = np.zeros((nr))
        for j in range(nr) :
            ref_distort[j]=np.interp(5000,self.chromatic_distort_wave,self.chromatic_distort[:,j])
        self.chromatic_distort -= ref_distort
        """
        plt.plot(self.chromatic_distort_wave,self.chromatic_distort[:,-1],"o-")
        ww=np.linspace(4000,8000,200)*u.angstrom
        r=self.chromatic_distort_radii[-1]
        dd=np.zeros((ww.size))
        for i in range(ww.size) :
            dd[i]=self.chromatic_distortion(r,ww[i]).to(u.mm).value
        plt.plot(ww,dd,c="r")
        plt.show()
        """
    def chromatic_distortion(self,radius,wavelength) : # with radius and wave with units , returns delta r to be added
        """Interpolate the chromatic term: first in radius at the two
        bracketing wavelength rows, then linearly in wavelength."""
        i=np.where(self.chromatic_distort_wave>=wavelength)[0]
        if i.size == 0 :
            i=1
        else :
            # NOTE(review): the clamp uses chromatic_distort_radii.size (10)
            # rather than chromatic_distort_wave.size (9); harmless today
            # because i[0] <= 8, but verify if the tables ever change shape.
            i=min(max(1,i[0]),self.chromatic_distort_radii.size-1)
        dist1=np.interp(radius,self.chromatic_distort_radii,self.chromatic_distort[i-1])
        dist2=np.interp(radius,self.chromatic_distort_radii,self.chromatic_distort[i])
        dist=np.interp(wavelength,[self.chromatic_distort_wave[i-1],self.chromatic_distort_wave[i]],[dist1,dist2])
        return dist
    def distortion(self,radius,wavelength) :
        """Total radial distortion: achromatic polynomial + chromatic term."""
        return self.achromatic_distortion_pol(radius) + self.chromatic_distortion(radius,wavelength)
# same result as idlutils/goddard/pro/astro/hadec2altaz.pro
# but with adr calibrated using astropy
def hadec2altaz(ha, dec, lat, wavelength=None):
    """Convert (hour angle, declination) to horizon coordinates.

    ha, dec, lat are in degrees; wavelength (Angstrom), when given, adds a
    differential atmospheric refraction correction to the altitude.
    Returns (alt, az) in degrees.
    """
    deg = math.pi / 180.
    ha_r = ha * deg
    dec_r = dec * deg
    lat_r = lat * deg
    # Components of the pointing unit vector rotated into the horizon frame.
    x0 = -math.cos(ha_r) * math.cos(dec_r) * math.sin(lat_r) + math.sin(dec_r) * math.cos(lat_r)
    x1 = -math.sin(ha_r) * math.cos(dec_r)
    x2 = math.cos(ha_r) * math.cos(dec_r) * math.cos(lat_r) + math.sin(dec_r) * math.sin(lat_r)
    horiz = math.sqrt(x0 ** 2 + x1 ** 2)
    az = math.atan2(-x1, -x0) / deg
    alt = math.atan2(x2, horiz) / deg
    if wavelength is not None:
        # Refraction coefficient (arcsec per unit tan(zenith distance)),
        # interpolated in wavelength; calibrated against astropy.
        fact = np.interp(wavelength,
                         [3000, 3500, 4000, 5000, 5400, 6000, 7000, 8000],
                         [44.166347, 43.365612, 42.8640697818, 42.292551282,
                          42.1507465805, 41.990386, 41.811009, 41.695723])
        alt += fact * (horiz / x2) / 3600.
    return alt, az
# exact same routine as altaz2rpa in idl, needed to get same platescale definition
def altaz2xy(alt, az, altcen, azcen, platescale):
    """Project horizon coordinates onto the focal plane.

    Exact same routine as altaz2rpa in IDL, needed to get the same
    platescale definition. Returns (x, y) in plate units.
    """
    d2r = math.pi / 180
    zdist = (90 - alt) * d2r  # zenith distance of the target
    # Horizon-frame unit vector of the target.
    vx = -np.sin(az * d2r) * np.sin(zdist)
    vy = -np.cos(az * d2r) * np.sin(zdist)
    vz = np.cos(zdist)
    # Rotate about the vertical so the field centre sits at azimuth zero.
    cos_az = np.cos(azcen * d2r)
    sin_az = np.sin(azcen * d2r)
    xi = -vx * cos_az + vy * sin_az
    yi = -vy * cos_az - vx * sin_az
    zi = vz
    # Tilt by the field-centre zenith distance.
    tilt = (90 - altcen) * d2r
    xl = xi
    yl = yi * np.sin(tilt) + zi * np.cos(tilt)
    zl = zi * np.sin(tilt) - yi * np.cos(tilt)
    # Angular offset -> focal-plane radius and position angle.
    rfocal = np.arcsin(np.sqrt(xl ** 2 + zl ** 2)) / d2r * platescale
    posang = np.arctan2(-xl, zl)
    return rfocal * np.cos(posang), rfocal * np.sin(posang)
def hadec2xy(ha, dec, alt0, az0, crot, srot, latitude, platescale, distortion, wavelength):
    """Full sky-to-focal-plane transform for one object.

    Chains hadec2altaz and altaz2xy, applies the radial optical distortion,
    then rotates the plane by (crot, srot). Returns (-xr, yr, alt, az).
    """
    alt, az = hadec2altaz(ha, dec, latitude, wavelength)
    x, y = altaz2xy(alt, az, alt0, az0, platescale)
    # Radial distortion (see ad2xyfocal.pro): rescale the plate radius.
    radius = np.sqrt(x ** 2 + y ** 2)
    rscale = 1
    if radius > 0:
        rscale = 1 + distortion.distortion(radius, wavelength) / radius
    # Rotate the focal plane so that +y points towards a point that is
    # offset from the plate center along DEC by +1.5 degrees.
    xr = rscale * (x * crot - y * srot)
    yr = rscale * (x * srot + y * crot)
    return -xr, yr, alt, az
def main() :
    """Compare design vs observed focal-plane positions for every hole of a
    plug map, writing one line per object to the output file.
    Python 2 only (print statements)."""
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--input', type=str, default='plPlugMapP-4392.par',
                        help='Input plugmap filename.')
    parser.add_argument('--output', type=str, default='myPlugMapP-4392.list',
                        help='Output filename.')
    parser.add_argument('--ha', type=float, default=0,
                        help='Design hour angle (degrees).')
    args = parser.parse_args()
    filename = args.input
    ofilename = args.output
    ha_obs = args.ha
    doc, objects = read_plugmap(filename)
    ra=objects["ra"]
    dec=objects["dec"]
    xfoc=objects["xfoc"]
    yfoc=objects["yfoc"]
    ha_design=doc["ha"]
    ra0=doc["raCen"]
    dec0=doc["decCen"]
    mjd=doc["mjdDesign"]
    print "design MJD=%d HA=%f ra=%f dec=%f"%(mjd,ha_design,ra0,dec0)
    # APO lat=32.7797556 in plate_refrac.pro
    latitude=32.7797556
    # optical distortion
    # from platedesign/trunk/pro/plate/get_platescale.pro
    platescale = 217.7358
    distortion = OpticalDistortion(platescale)
    # only reference for this wavelength I could find is in code platedesign/trunk/pro/plate/adr.pro
    refwave=5400.0
    # Per-class object selections used to assign wavelengths below.
    gal=np.where(objects["OBJECT"]=="GALAXY")[0]
    qso=np.where(objects["OBJECT"]=="QSO")[0]
    star=np.where(objects["OBJECT"]=="SPECTROPHOTO_STD")[0]
    na=np.where(objects["OBJECT"]=="NA")[0]
    nobj=xfoc.size
    wave_design=refwave*np.ones((nobj))
    wave_design[gal]=5400.
    wave_design[qso]=4000.
    wave_design[star]=5400.
    wave_obs=7450*np.ones((nobj))
    wave_obs[gal]=7450. # to study r1/r2
    wave_obs[qso]=7450.
    wave_obs[star]=7450.
    # for design
    alt0_design,az0_design = hadec2altaz(ha_design, dec0, latitude, refwave)
    print "Design ALT (ref wave)=",alt0_design
    print "Design AZ (ref wave)=",az0_design
    # rotation of plate to get vertical dec
    altfid,azfid = hadec2altaz(ha_design, dec0+1.5, latitude, refwave)
    xfid,yfid = altaz2xy(altfid,azfid,alt0_design,az0_design,platescale)
    rotation_angle = np.arctan2(xfid,yfid)
    crot_design = np.cos(rotation_angle)
    srot_design = np.sin(rotation_angle)
    # same for obs
    alt0_obs,az0_obs = hadec2altaz(ha_obs, dec0, latitude, refwave)
    print "Obs ALT (ref wave)=",alt0_obs
    print "Obs AZ (ref wave)=",az0_obs
    # rotation of plate to get vertical dec
    altfid,azfid = hadec2altaz(ha_obs, dec0+1.5, latitude, refwave)
    xfid,yfid = altaz2xy(altfid,azfid,alt0_obs,az0_obs,platescale)
    rotation_angle = np.arctan2(xfid,yfid)
    crot_obs = np.cos(rotation_angle)
    srot_obs = np.sin(rotation_angle)
    # compute, at design hour angle = ha_design
    xdesign=np.zeros((nobj))
    ydesign=np.zeros((nobj))
    alt_design=np.zeros((nobj))
    az_design=np.zeros((nobj))
    # compute, at observed hour angle = ha_obs
    xobs=np.zeros((nobj))
    yobs=np.zeros((nobj))
    alt_obs=np.zeros((nobj))
    az_obs=np.zeros((nobj))
    selection=range(nobj)
    for o in selection :
        x,y,alt,az = hadec2xy(ha_design-(ra[o]-ra0),dec[o],alt0_design,az0_design,crot_design,srot_design,latitude,platescale,distortion,wave_design[o])
        xdesign[o] = x
        ydesign[o] = y
        alt_design[o] = alt
        az_design[o] = az
        x,y,alt,az = hadec2xy(ha_obs-(ra[o]-ra0),dec[o],alt0_obs,az0_obs,crot_obs,srot_obs,latitude,platescale,distortion,wave_obs[o])
        xobs[o] = x
        yobs[o] = y
        alt_obs[o] = alt
        az_obs[o] = az
    file=open(ofilename,"w")
    file.write("#ra dec xfoc yfoc wavedesign xdesign ydesign altdesign azdesign waveobs xobs yobs altobs azobs hole obj\n")
    for o in selection :
        file.write("%f %f %f %f %f %f %f %f %f %f %f %f %f %f %s %s\n"%(ra[o],dec[o],xfoc[o],yfoc[o],wave_design[o],xdesign[o],ydesign[o],alt_design[o],az_design[o],wave_obs[o],xobs[o],yobs[o],alt_obs[o],az_obs[o],objects["HOLETYPE"][o],objects["OBJECT"][o]))
    file.close()
    print "wrote", ofilename

if __name__ == '__main__':
    main()
|
import time
import math
import os
import re
import string
from html import escape
import IPython
from IPython.core.magic import Magics, magics_class
from jinja2 import Template, StrictUndefined
from traitlets import Int, Unicode, Bool
DEFAULT_SCHEMA_TTL = -1
DEFAULT_CATALOGS = ''
VARIABLE_NOT_FOUND_MSG = '''
A Jinja template variable named {{{var_name}}} was located in your SQL statement.
However Jinja was unable to substitute it's value because the variable "{var_name}" was not found in your ipython kernel.
Option 1: If you intended to use a template variable make sure to assign a value to "{var_name}"
'''
HOW_TO_ESCAPE_MSG = '''
Option 2: If you intended to include "{{" in your statement then you'll need to escape this special Jinja variable delimiter.
To have Jinja ignore parts it would otherwise handle as variables or blocks. For example, if, with the default syntax, you want to use {{ as a raw string in a template and not start a variable, you have to use a trick.
The easiest way to output a literal variable delimiter "{{" is by using a variable expression:
{{ '{{' }}
For bigger sections, it makes sense to mark a block raw. For example, to include example Jinja syntax in a template, you can use this snippet:
%%trino --limit 3
{% raw %}
/*
This is a comment which happens to contain a jinja template
variable {{x}} that we want to keep as is.
*/
{% endraw %}
SELECT
*
FROM
{{ table_name }}
'''
RAISING_ERROR_MSG = "Raising an error to prevent statement from being executed incorrectly."
class ExplainUndefined(StrictUndefined):
    """StrictUndefined subclass that explains *why* rendering failed.

    When a template references a variable missing from the kernel
    namespace, print a human-readable explanation (including how to escape
    a literal '{{') before delegating to StrictUndefined.
    """
    __slots__ = ()

    def __str__(self) -> str:
        print(VARIABLE_NOT_FOUND_MSG.format(var_name=self._undefined_name))
        print(HOW_TO_ESCAPE_MSG)
        print(RAISING_ERROR_MSG)
        # BUG FIX: was `super().__str__(self)` -- the bound super method
        # must not be passed self again.
        return super().__str__()
@magics_class
class Base(Magics):
    """Shared base for the SQL cell magics: configuration traits plus
    helpers for Jinja templating, schema caching, and SQL display."""

    limit = Int(20, config=True, help='The maximum number of rows to display')
    cacheTTL = Int(DEFAULT_SCHEMA_TTL, config=True, help=f'Re-generate output schema file if older than time specified (defaults to {DEFAULT_SCHEMA_TTL} minutes)')
    # BUG FIX: user-facing help text typo "Retrive" -> "Retrieve".
    catalogs = Unicode(DEFAULT_CATALOGS, config=True, help=f'Retrieve schema from the specified list of catalogs (defaults to "{DEFAULT_CATALOGS}")')
    interactive = Bool(False, config=True, help='Display results in interactive grid')
    outputFile = Unicode('', config=True, help='Output schema to specified file')

    def __init__(self, shell=None, **kwargs):
        super().__init__(shell, **kwargs)
        # Snapshot of the kernel namespace used for Jinja rendering;
        # refreshed by set_user_ns().
        self.user_ns = {}

    @staticmethod
    def bind_variables(query, user_ns):
        """Render *query* as a Jinja template against *user_ns*.

        Unknown variables raise through ExplainUndefined with a helpful
        message instead of rendering as empty strings.
        """
        template = Template(query, undefined=ExplainUndefined)
        return template.render(user_ns)

    def get_catalog_array(self):
        """Return the configured catalogs as a list.

        Empty unless the trait contains a comma-separated list.
        """
        catalog_array = []
        if ',' in self.catalogs:
            catalog_array = self.catalogs.split(',')
        return catalog_array

    def get_sql_statement(self, cell, sql_argument, use_jinja):
        """Resolve the SQL text from the cell body or the magic-line args,
        optionally rendering Jinja variables. Returns '' when empty."""
        sql = cell
        if cell is None:
            sql = ' '.join(sql_argument)
        if not sql:
            print('No sql statement to execute')
        elif use_jinja:
            sql = self.bind_variables(sql, self.user_ns)
        return sql

    def set_user_ns(self, local_ns):
        """Merge the kernel globals with *local_ns* for template rendering."""
        if local_ns is None:
            local_ns = {}
        self.user_ns = self.shell.user_ns.copy()
        self.user_ns.update(local_ns)

    @staticmethod
    def should_update_schema(schema_file_name, refresh_threshold):
        """Return True when the schema file is missing or older than
        *refresh_threshold* minutes."""
        file_exists = os.path.isfile(schema_file_name)
        ttl_expired = False
        if file_exists:
            file_time = os.path.getmtime(schema_file_name)
            current_time = time.time()
            if current_time - file_time > (refresh_threshold * 60):
                print(f'TTL {refresh_threshold} minutes expired, re-generating schema file: {schema_file_name}')
                ttl_expired = True
        return (not file_exists) or ttl_expired

    def display_sql(self, sql):
        """Return an IPython Code display object for *sql* with
        JupyterLab-friendly syntax highlighting."""
        def _jupyterlab_repr_html_(self):
            from pygments import highlight
            from pygments.formatters.html import HtmlFormatter

            fmt = HtmlFormatter()
            style = "<style>{}\n{}</style>".format(
                fmt.get_style_defs(".output_html"), fmt.get_style_defs(".jp-RenderedHTML")
            )
            return style + highlight(self.data, self._get_lexer(), fmt)

        # Replace _repr_html_ with our own version that adds the
        # 'jp-RenderedHTML' class in addition to 'output_html'.
        IPython.display.Code._repr_html_ = _jupyterlab_repr_html_
        return IPython.display.Code(data=sql, language="mysql")
|
<filename>pythonscripts/comix.py
'''
This iteration of comix stripper uses the retrieve module which tends to crash
a lot less than urllib when grabbing jpeg images or just large files in
general. I started writing doc strings but haven't finished. Currently the
program has to create a directory called Comix in your home directory in order
to work and it only works on Mac, Linux, and other Unix based systems. Sorry
Windows. Additionally the program only works with the website mangahere.co
and is pretty dependent on their html schema. The base directory that the
program asks for i.e. the comic URL is the one that lists all the issues of a
particular comic.
'''
import os, requests, zipfile, shutil, time, thread, errno, signal
from functools import wraps
from lxml import html
# All downloads and metadata live under ~/Comix (created by intro()).
home = os.path.join(os.path.expanduser("~"), "Comix")
# NOTE(review): shadows the Python 3 builtin TimeoutError; fine under
# Python 2, which this script targets.
class TimeoutError(Exception):
    """Raised by the timeout decorator when a wrapped call exceeds its limit."""
    pass
def timeout(seconds=10, error_message=os.strerror(errno.ETIME)):
    """Decorator factory: abort the wrapped call with TimeoutError if it
    runs longer than *seconds*.

    Unix-only: relies on SIGALRM, so it must be used from the main thread.
    """
    def decorator(func):
        def _handle_timeout(signum, frame):
            raise TimeoutError(error_message)

        def wrapper(*args, **kwargs):
            # Save the previous handler so other SIGALRM users are restored
            # once we are done (the original clobbered it permanently).
            old_handler = signal.signal(signal.SIGALRM, _handle_timeout)
            signal.alarm(seconds)
            try:
                result = func(*args, **kwargs)
            finally:
                signal.alarm(0)
                signal.signal(signal.SIGALRM, old_handler)
            return result

        return wraps(func)(wrapper)

    return decorator
def tranBool(response):
    '''
    (str) -> bool

    Interpret a free-text answer as a boolean: a small set of common
    affirmative words (case-insensitive) maps to True, anything else False.
    '''
    affirmative = ('t', 'true', 'yes', 'y', '1', 'yeah', 'yup', 'fo sho')
    return response.lower() in affirmative
def pstrip(response):
    '''
    (str) -> str

    Remove a single trailing forward slash from a URL if it is there.
    BUG FIX: the original indexed response[-1] and raised IndexError on an
    empty string; empty input is now returned unchanged.
    '''
    if response.endswith('/'):
        return response[:-1]
    return response
def baseEXT(add):
    '''
    (str) -> (str, str)

    Split a comic URL into the site base (everything up to the last two
    path components) and the comic name (the final component).
    '''
    parts = add.split('/')
    base = '/'.join(parts[:-2])
    name = parts[-1]
    return base, name
def readMeta():
    '''
    (None) -> str

    Read the saved comic URL from the hidden metadata file in the base
    Comix directory.
    '''
    # Context manager replaces manual open/close, which leaked the handle
    # if readline() raised.
    with open(os.path.join(home, ".comixMeta.txt"), 'r') as f:
        return f.readline().strip()
def writeMeta(URL):
    '''
    (str) -> None

    Save the comic URL to the hidden metadata file in the base Comix
    directory.
    '''
    # Context manager replaces manual open/close; write() is the idiomatic
    # equivalent of the original writelines() on a single string.
    with open(os.path.join(home, ".comixMeta.txt"), 'w') as f:
        f.write(URL)
def input_thread(L):
    '''
    (list) -> None

    Helper function used to cease the program after the current comic is
    finished downloading.
    '''
    # Blocks until the user presses Enter, then appends a sentinel that the
    # download loop in makeAllCh polls to stop gracefully. Python 2 only
    # (raw_input).
    raw_input()
    L.append(None)
def write_img(url_, file_):
    '''
    (str, str) -> None

    given a url and file write the image from the URL to the file.
    '''
    with open(file_, 'wb') as handle:
        response = requests.get(url_, stream=True)
        # Retry until the server returns a success status.
        # NOTE(review): this loops forever on a permanently broken URL;
        # the @timeout guard on the caller (get_image) is the only bound.
        while not response.ok:
            print "Something fucked up retrying"
            response = requests.get(url_, stream=True)
        # Stream the body to disk in 1 KiB chunks.
        for block in response.iter_content(1024):
            if not block:
                break
            handle.write(block)
def intro():
    """Interactive startup: ask whether to download and which comic URL.

    Returns (download, add, base, comic, cNum, path); cNum is the zero-based
    starting chapter for a download session, else None.
    Python 2 only (print statements, raw_input).
    """
    if not os.path.isdir(home):
        os.mkdir(home)
    print 'Welcome to Comic Stripper!\n'
    print 'Is this a download session?\n'
    download = tranBool(raw_input('> '))
    cont = False
    # Offer to resume the last session recorded in the hidden metadata file.
    if os.path.exists(os.path.join(home, ".comixMeta.txt")):
        print 'The last comic url you used was %s,\n' % readMeta()
        print 'Would you like to continue that session?'
        cont = tranBool(raw_input('> '))
    if cont:
        add = readMeta()
    else:
        print 'What is the URL for the comics?\n'
        add = pstrip(raw_input('> '))
        writeMeta(add)
    base, comic = baseEXT(add)
    path = os.path.join(home, comic)
    if download:
        print 'Which number comic would you like to start with?'
        # Prompt is 1-based; internal chapter index is 0-based.
        cNum = int(raw_input('> ')) - 1
        return download, add, base, comic, cNum, path
    else:
        return download, add, base, comic, None, path
# make base directory
def makePath(comic):
path = os.path.join(home, comic)
try:
os.makedirs(path)
print 'Base Directory Created!'
except:
print 'Base Directory Already there!'
# get base html
def getHTML(url):
    """Fetch *url* and return the parsed lxml HTML tree."""
    response = requests.get(url)
    return html.fromstring(response.text)
# get each chapters html
def getCH(baseHTML, add, base):
    """Return the chapter URLs for this comic, oldest chapter first.

    Scans the index page anchors, keeps those whose href mentions the comic
    name, drops the trailing slash, and prefixes relative links with *base*.
    """
    comic = os.path.normpath(add).split('/')[-1]
    chapters = []
    for anchor in baseHTML.xpath('//a[@class="color_0077"]'):
        href = anchor.get('href')
        if comic in href:
            chapters.append(href[0:-1])
    absolute = [url if url[0] == 'h' else base + url for url in chapters]
    return absolute[::-1]
# build a directory with jpeg images
def makeChDir(chapURL, comic):
    """Download every page of one chapter into a new 'cNNN' directory.

    Skips chapters that were already zipped or already downloaded.
    Python 2 only (print statements).
    """
    # Marker preceding the page count inside the chapter's inline JS.
    keyPh = 'var total_pages = '
    ext = chapURL.split('/c')[-1].zfill(3)
    nDir = os.path.join(home, comic, "c"+ext)
    if os.path.exists(nDir + '.zip') or os.path.exists(nDir + '.cbz'):
        print nDir + " already zipped!!!"; return
    try:
        os.makedirs(nDir)
    except:
        print nDir + " directory is already there!!!"; return
    tTree = getHTML(chapURL)
    # Find the <script> block that declares the page count.
    js = [sc for sc in tTree.xpath('//script[@type="text/javascript"]/text()')
          if keyPh in sc][0]
    st = js.index(keyPh) + len(keyPh)
    num = int(js[st:].split(' ')[0])
    for i in range(1, num + 1):
        # Up to 10 attempts per page; get_image is additionally bounded by
        # its @timeout(20) guard.
        tries = 0
        success = False
        while tries < 10 and not success:
            try:
                get_image(i, chapURL, nDir)
                success = True
            except:
                tries += 1
    print nDir + " successfully created!!"
@timeout(20)
def get_image(i, chapURL, nDir):
    """Download page *i* of a chapter into *nDir* as a zero-padded .jpg.

    Aborts with TimeoutError after 20 seconds via the @timeout decorator.
    """
    page_url = ''.join([chapURL, '/', str(i), '.html'])
    tree = html.fromstring(requests.get(page_url).text)
    img_src = [img.get('src') for img in tree.xpath('//img')][0]
    target = os.path.join(nDir, str(i).rjust(3, '0') + '.jpg')
    write_img(img_src, target)
# make a function that builds multiple chapter directries
def makeAllCh(baseHTML, cNum, add, base, comic):
    """Download chapters from index cNum onward.

    A background thread (input_thread) waits for a keypress and appends a
    sentinel to L; the loop checks L before each chapter so the user can
    stop after the current one finishes. Python 2 only (thread module).
    """
    cURL = getCH(baseHTML, add, base)
    L = []
    thread.start_new_thread(input_thread, (L,))
    for u in cURL[cNum:]:
        time.sleep(.1)
        if L:
            break
        makeChDir(u, comic)
    return
# zipping function that zips a directory idk why this isnt a thing already
def zipdir(path, zip):
    """Recursively add every file under *path* to the open ZipFile *zip*.

    (zipfile has no built-in "archive a whole directory" helper, hence the
    manual os.walk.)
    """
    for root, _dirs, filenames in os.walk(path):
        for filename in filenames:
            zip.write(os.path.join(root, filename))
# zip all directories in a path
def zipAll(path):
    """Zip every visible subdirectory of *path* into '<dir>.zip', then
    remove the source directory."""
    dirs = [os.path.join(path, d) for d in os.listdir(path)
            if not d.startswith('.') and os.path.isdir(os.path.join(path, d))]
    for d in dirs:
        archive = zipfile.ZipFile(d + '.zip', 'w')
        try:
            zipdir(d, archive)
        finally:
            # BUG FIX: the ZipFile was never closed, leaving the archive's
            # central directory unwritten until interpreter-dependent GC.
            archive.close()
        shutil.rmtree(d)
    return
# convert zip files to cbz files
def conv2CBZ(path):
    """Rename every '*.zip' directly inside *path* to '*.cbz'."""
    for entry in os.listdir(path):
        if not entry.endswith('.zip'):
            continue
        full = os.path.join(path, entry)
        os.rename(full, full[:-3] + 'cbz')
    return
# The function that does it all
def doAll():
    """Top-level driver: prompt the user, optionally download chapters,
    then zip each chapter directory and convert it to .cbz.
    Python 2 only (print statement)."""
    download, add, base, comic, cNum, path = intro()
    makePath(comic)
    if download:
        baseHTML = getHTML(add)
        makeAllCh(baseHTML, cNum, add, base, comic)
    zipAll(path)
    conv2CBZ(path)
    print 'See yah!'
    return

# Runs immediately when the module is imported or executed.
doAll()
|
<reponame>Znerual/FastLogin
import logging
import urllib.request
import urllib.parse
import http.cookiejar
import json
# NOTE(review): "from requests import requests" only works if a local
# folder/package named "requests" ships a "requests" module inside it (the
# comment in logIn() mentions a "Libaray in folder requests"); with the
# standard requests distribution this line fails. Confirm which layout is
# intended before changing it to a plain "import requests".
from requests import requests
from configuration import Configuration
def logIn():
    """Log in to the TU Wien TISS/AuthServ portal.

    Reads credentials from Configuration, POSTs the login form inside a
    requests session (so cookies persist), then registers a hard-coded
    course via registerCourse(). Network side effects only; returns None.
    """
    # ---! Data !---
    logging.basicConfig(filename='log',level=logging.DEBUG)
    logging.debug("Assembling the data")
    url = 'https://iu.zid.tuwien.ac.at/AuthServ.portal'
    conf = Configuration()
    usn, pw = conf.getUsernamePassword('Anil')
    # Login form fields; app 77 selects the target application.
    values = {'name' : usn,
              'pw' : pw,
              'totp' : '',
              'app' : '77'}
    #We have to get the cooky there. In the browser, a link to the cooky is send in the header, but the link changes dynamicly
    #so we have to find our own way to get and save the cooky
    header = {
        "Host":"iu.zid.tuwien.ac.at",
        "User-Agent":"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0",
        "Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
        "Accept-Language":"en-US,en;q=0.5",
        "Accept-Encoding":"gzip, deflate, br",
        "Connection":"keep-alive",
        "Upgrade-Insecure-Requests":"1"
    }
    # ---! End Of Data !---
    data = urllib.parse.urlencode(values)
    data = data.encode('UTF-8') # data should be bytes
    logging.debug("parse Data " + str(values))
    # ---! urllib attempt, (including Cookiejar) !---
    # cj = http.cookiejar.CookieJar()
    # opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cj))
    #request = urllib.request.Request(url, data)
    #response = opener.open(request)
    # response.headers.add_header('Set-Cookie',
    # 'Life=ok; expires=Sat, 02-Apr-2017 18:23:03 GMT; path=/; domain=' + header['Host'] +'; HttpOnly')
    # the_page = response.read().decode('utf8', 'ignore') #converts the result to utf8 text
    # cj.extract_cookies(response, request)
    #for cookie in cj:
    #print(cookie)
    # ---! requests lib attempt (Libaray in folder requests) !---
    # see http://www.pythonforbeginners.com/requests/using-requests-in-python
    # and better: http://engineering.hackerearth.com/2014/08/21/python-requests-module/
    #with requests.Session() as s:
    # req = requests.Request('POST', url, data=data, headers=header)
    # prepped = s.prepare_request(req)
    # resp = s.send(prepped)
    # print(resp.text)
    #r = requests.post(url, data=json.dumps(data), headers=header)
    # Active implementation: a Session keeps the auth cookie across the
    # POST (login) and the follow-up GET / course registration.
    # NOTE(review): the custom `header` dict above is never sent -- confirm
    # whether the server accepts the default requests headers.
    with requests.Session() as session:
        logging.debug("Opened the request for the url: " + url + " with the data " + str(data))
        session.post(url, data=data)
        r = session.get("https://iu.zid.tuwien.ac.at/AuthServ.portal")
        print(r.text)
        registerCourse("j_id_43:0:j_id_8m", session)
def registerCourse(courseId, session):
    """POST the TISS exam-registration forms using an authenticated session.

    Submits the exam-date list form for *courseId*, then the confirmation
    form on the register page. Network side effects only; returns None.
    NOTE(review): the ViewState/ClientWindow fields shown in the trailing
    comment below are not sent -- confirm registration succeeds without
    them.
    """
    url = "https://tiss.tuwien.ac.at/education/course/examDateList.xhtml"
    values = {
        "examDateListForm" + courseId : "Anmelden",
        "examDateListForm_SUBMIT" : "1"
    }
    data = urllib.parse.urlencode(values)
    data = data.encode('UTF-8') # data should be bytes
    logging.debug("parse Data " + str(values))
    session.post(url, data=data)
    r = session.get(url)
    print(r.text)
    # Second step: confirm on the register page.
    url2 = "https://tiss.tuwien.ac.at/education/course/register.xhtml"
    values2 = {
        "regForm:j_id_2j" : "Anmelden",
        "regForm_SUBMIT": "1"
    }
    data2 = urllib.parse.urlencode(values2)
    data2 = data2.encode('UTF-8') # data should be bytes
    logging.debug("parse Data " + str(values2))
    session.post(url2, data=data2)
    r2 = session.get(url2)
    print(r2.text)
'''
regForm:j_id_2j:Anmelden
regForm_SUBMIT:1
javax.faces.ViewState:txSJFVo5dOyZn2VTDZIm84DdNWHA3EP29bLGxP+fcPhyZmO/
javax.faces.ClientWindow:7120
dspwid:7120
'''
def urlibTest():
    """Connectivity smoke test: fetch a docs.python.org page and print the
    raw response bytes."""
    target = 'https://docs.python.org/2/howto/urllib2.html'
    with urllib.request.urlopen(target) as response:
        payload = response.read()
        print(payload)

# Module-level side effect: performs the TISS login as soon as the file runs.
logIn()
from orchestra.models import Iteration
from orchestra.models import Task
from orchestra.models import TaskAssignment
from orchestra.utils.task_properties import current_assignment
from orchestra.utils.task_properties import get_iteration_history
def verify_iterations(task_id):
    """Run every iteration consistency check for the task with *task_id*.

    No-op for tasks without iterations; raises AssertionError on any
    violated invariant.
    """
    task = Task.objects.get(id=task_id)
    iterations = list(get_iteration_history(task).all())
    if not iterations:
        return
    _verify_iteration_topology(iterations)
    _verify_iteration_data(iterations)
    _verify_iteration_datetimes(iterations)
def _verify_iteration_topology(iterations):
    """Check that the ordered iterations walk up and down the assignment
    counters consistently with their review statuses, and that together
    they touch every assignment of the task."""
    # First iteration should belong to first assignment
    expected_counter = 0
    visited_counters = set()
    task = iterations[0].assignment.task
    for i, iteration in enumerate(iterations):
        assignment = iteration.assignment
        assignment_counter = assignment.assignment_counter
        assert assignment_counter == expected_counter
        visited_counters.add(assignment.assignment_counter)
        if i == len(iterations) - 1:
            _verify_final_iteration(iteration)
        else:
            # Only the last iteration (if any) should be processing
            assert iteration.status != Iteration.Status.PROCESSING
        # Status of current iteration determines the expected review level
        # of the next one's assignment: requesting review moves one level
        # up, providing review moves one level back down.
        if iteration.status == Iteration.Status.REQUESTED_REVIEW:
            expected_counter = assignment_counter + 1
        elif iteration.status == Iteration.Status.PROVIDED_REVIEW:
            expected_counter = assignment_counter - 1
    # Iterations should span all assignments
    assert visited_counters == set(range(task.assignments.count()))
def _verify_final_iteration(iteration):
    """Check invariants that only the task's last iteration must satisfy:
    it belongs to the current assignment, and the task/assignment statuses
    agree with the iteration status."""
    # Last iteration should belong to current assignment
    assignment = iteration.assignment
    assert assignment == current_assignment(assignment.task)

    # Map final iteration statuses onto the task statuses they allow
    task_statuses = {
        Iteration.Status.PROCESSING: [
            Task.Status.PROCESSING, Task.Status.REVIEWING,
            Task.Status.POST_REVIEW_PROCESSING
        ],
        Iteration.Status.REQUESTED_REVIEW: [
            Task.Status.PENDING_REVIEW, Task.Status.COMPLETE
        ],
        Iteration.Status.PROVIDED_REVIEW: [
            Task.Status.POST_REVIEW_PROCESSING
        ]
    }

    # A task awaiting processing should not have iterations.
    # BUG FIX: this was a bare comparison expression with no effect; it is
    # now actually asserted.
    assert assignment.task.status != Task.Status.AWAITING_PROCESSING

    # An aborted task could have any iteration configuration
    # (iterate values directly; the keys were unused).
    for statuses in task_statuses.values():
        statuses.append(Task.Status.ABORTED)

    if iteration.status == Iteration.Status.PROCESSING:
        expected_assignment_status = TaskAssignment.Status.PROCESSING
    else:
        expected_assignment_status = TaskAssignment.Status.SUBMITTED

    # Check that task and assignment statuses are correctly set
    assert assignment.status == expected_assignment_status
    assert assignment.task.status in task_statuses[iteration.status]
def _verify_iteration_data(iterations):
"""
Verifies correct data for certain iterations.
Since the data for other iterations won't be stored elsewhere, this
function should be run each time an iteration is added.
"""
for iteration in iterations:
if iteration.status == Iteration.Status.PROCESSING:
# Iterations should not have data until submitted
assert iteration.submitted_data == {}
# NOTE(jrbotros): Last iteration for an assignment will normally
# have its latest data, unless the task has been reverted
def _verify_iteration_datetimes(iterations):
    """
    Verifies correct start and end datetimes for ordered iterations.
    """
    for iteration in iterations:
        assignment = iteration.assignment
        siblings = assignment.iterations.order_by('start_datetime')
        if siblings.first() == iteration:
            # If iteration is first in assignment, expected start datetime
            # is when the assignment was picked up rather than the end of
            # the previous iteration
            expected_start_datetime = assignment.start_datetime
            assert iteration.start_datetime == expected_start_datetime
        # The expected start datetime for the next iteration should be the
        # end datetime of the current one, unless the next iteration is the
        # first in its assignment
        # NOTE(review): expected_start_datetime is reassigned here but never
        # compared against a non-first iteration's start_datetime, so that
        # check is effectively missing -- confirm whether it was intended.
        expected_start_datetime = iteration.end_datetime
        # If iteration is processing, it should not have an end datetime
        if iteration.status == Iteration.Status.PROCESSING:
            assert not iteration.end_datetime
|
#!/usr/bin/env python
from optparse import OptionParser
import os
import re
import sys
import logging
LOGGER = logging.getLogger(__name__)
def main(cmdline=None):
    """Validate each fastq file named on the command line.

    Returns 0 when every file passes, 1 otherwise (shell-style exit code).

    NOTE(review): args[1:] assumes the caller passes argv *including* the
    program name (main(sys.argv)); with cmdline=None optparse already
    strips argv[0], so the first filename would be skipped -- confirm the
    intended calling convention.
    """
    parser = make_parser()
    opts, args = parser.parse_args(cmdline)

    error_happened = False
    for filename in args[1:]:
        # Context manager replaces the manual open/close pair, which leaked
        # the handle if validate_fastq raised.
        with open(filename, 'r') as stream:
            if opts.fastq:
                errors = validate_fastq(stream,
                                        opts.format,
                                        opts.uniform_lengths,
                                        opts.max_errors)
                if errors > 0:
                    LOGGER.error("%s failed validation", filename)
                    error_happened = True

    if error_happened:
        return 1
    return 0
def make_parser():
    """Build the OptionParser for the fastq validator command line."""
    quality_encodings = ['phred33', 'phred64']
    parser = OptionParser()
    parser.add_option(
        "--fastq", action="store_true", default=False,
        help="verify arguments are valid fastq file")
    parser.add_option(
        "--uniform-lengths", action="store_true", default=False,
        help="require all reads to be of the same length")
    parser.add_option("--max-errors", type="int", default=None)
    parser.add_option(
        "--format", type="choice", choices=quality_encodings, default='phred64',
        help="choose quality encoding one of: %s" % (", ".join(quality_encodings)))
    return parser
def validate_fastq(stream, format='phred33', uniform_length=False, max_errors=None):
    """Validate that a fastq file isn't corrupted.

    stream         -- iterable of text lines (an open file object)
    format         -- quality encoding name, 'phred33' or 'phred64'
    uniform_length -- when True, require every record to share one global
                      sequence/quality length instead of per-record lengths
    max_errors     -- stop early once more than this many errors were seen

    Returns the number of errors found.
    Raises ValueError for an unrecognized *format* name.
    """
    # States of the four-line-record state machine (the unused FQ_NONE
    # constant was removed).
    FQ_H1 = 1
    FQ_SEQ = 2
    FQ_H2 = 3
    FQ_QUAL = 4
    # Raw strings throughout: the original non-raw literals relied on
    # invalid string escapes like "\s" and "\]" (DeprecationWarning, and a
    # future SyntaxError, in modern Python).
    h1_re = re.compile(r"^@[\s\w:-]*$")
    seq_re = re.compile(r"^[AGCT.N]+$", re.IGNORECASE)
    h2_re = re.compile(r"^\+[\s\w:-]*$")
    phred33 = re.compile(r"^[!\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJ]+$")
    phred64 = re.compile(r"^[@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefgh]+$")

    if format == 'phred33':
        quality_re = phred33
    elif format == 'phred64':
        quality_re = phred64
    else:
        raise ValueError("Unrecognized quality format name")

    state = FQ_H1
    length = None
    line_number = 1
    errors = 0
    for line in stream:
        line = line.rstrip()
        len_errors = 0
        if state == FQ_H1:
            # reset length at start of new record for non-uniform check
            if not uniform_length:
                length = None
            errors += validate_re(h1_re, line, line_number, "FAIL H1")
            state = FQ_SEQ
        elif state == FQ_SEQ:
            errors += validate_re(seq_re, line, line_number, "FAIL SEQ")
            length, len_errors = validate_length(line, length, line_number,
                                                 "FAIL SEQ LEN")
            errors += len_errors
            state = FQ_H2
        elif state == FQ_H2:
            errors += validate_re(h2_re, line, line_number, "FAIL H2")
            state = FQ_QUAL
        elif state == FQ_QUAL:
            errors += validate_re(quality_re, line, line_number, "FAIL QUAL")
            length, len_errors = validate_length(line, length, line_number,
                                                 "FAIL QUAL LEN")
            errors += len_errors
            state = FQ_H1
        else:
            raise RuntimeError("Invalid state: %d" % (state,))

        line_number += 1
        if max_errors is not None and errors > max_errors:
            break

    return errors


def validate_re(pattern, line, line_number, errmsg):
    """Return 1 (and log *errmsg*) when *line* does not match *pattern*,
    else 0."""
    if pattern.match(line) is None:
        LOGGER.error("%s [%d]: %s", errmsg, line_number, line)
        return 1
    else:
        return 0


def validate_length(line, line_length, line_number, errmsg):
    """Check len(line) against the expected *line_length*.

    When line_length is None it is initialised from this line. Returns the
    (possibly updated) expected length and an error count (0 or 1).
    """
    error_count = 0
    if line_length is None:
        line_length = len(line)
    elif len(line) != line_length:
        LOGGER.error("%s %d: %s", errmsg, line_number, line)
        error_count = 1
    return line_length, error_count
|
from maestro.core.metadata import VectorClock
from maestro.core.utils import make_hashable
from enum import Enum
from typing import List, Any, Union, Optional
import copy
class Comparator(Enum):
    """Represents a comparison operation that can be performed in a field."""

    EQUALS = "=="
    NOT_EQUALS = "!="
    LESS_THAN = "<"
    LESS_THAN_OR_EQUALS = "<="
    GREATER_THAN = ">"
    GREATER_THAN_OR_EQUALS = ">="
    IN = "in"


class Comparison:
    """Stores a comparison that can be done to a field, such as field1 > 2, field1 == 3, etc."""

    field_name: "str"
    comparator: "Comparator"
    value: "Any"

    def __init__(self, field_name: "str", comparator: "Comparator", value: "Any"):
        self.field_name = field_name
        self.comparator = comparator
        self.value = value

    def __str__(self):
        return f"{self.field_name} {self.comparator.value} {self.value}"

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self)

    def __hash__(self):
        # BUG FIX: field_name was omitted from the hash (and from __eq__
        # below), so comparisons on different fields hashed/compared equal.
        return hash(
            (self.__class__, self.field_name, self.comparator, make_hashable(self.value))
        )

    def __eq__(self, other):
        return (
            isinstance(other, Comparison)
            and self.field_name == other.field_name
            and self.comparator == other.comparator
            and self.value == other.value
        )
class Connector(Enum):
    """Represents the connection between filters when they are combined."""

    # Logical conjunction / disjunction of a Filter's children.
    AND = "AND"
    OR = "OR"
class Filter:
    """Represents a filtering operation. The combined filters are stored in a tree where the leaves are always Comparison objects.

    Attributes:
        children (List[Union[Filter, Comparison]]): These are the nested filters that were combined into this one. If this is a single
            non-combined filter, it will contain a single Comparison instance.
        connector (TYPE): The connection between this filter's children. If this is a single non-combined filter, its operator will be
            equal to Connector.AND.
    """

    connector: "Connector"
    children: "List[Union[Filter, Comparison]]"

    def __init__(
        self,
        children: "List[Union[Filter, Comparison]]",
        connector: "Connector" = Connector.AND,
    ):
        self.children = children
        self.connector = connector

    def add(self, child: "Filter"):
        """Add another child to this filter.

        Args:
            child (Filter): the filter being added.
        """
        # Duplicate children are silently ignored.
        if child in self.children:
            return
        # Flattening: a child that uses the same connector (or that wraps a
        # single node) merges its children directly into this node instead
        # of nesting one level deeper.
        if child.connector == self.connector or len(child) == 1:
            self.children.extend(child.children)
        else:
            self.children.append(child)

    def combine(self, other: "Filter", connector: "Connector") -> "Filter":
        """Combines two filters using the given connector.

        Args:
            other (Filter): The filter being combined into this one
            connector (Connector): The connector to be used.

        Returns:
            Filter: The combined filter

        Raises:
            TypeError: If the instance passed is not a Filter
        """
        if not isinstance(other, Filter):
            raise TypeError(other)
        # A childless filter is a no-op: return a deep copy of the other
        # side unchanged.
        if not other:
            return copy.deepcopy(self)
        elif not self:
            return copy.deepcopy(other)
        combined = Filter(connector=connector, children=[])
        combined.add(self)
        combined.add(other)
        return combined

    def __or__(self, other: "Filter") -> "Filter":
        # f1 | f2 -> OR-combined filter.
        return self.combine(other, Connector.OR)

    def __and__(self, other: "Filter") -> "Filter":
        # f1 & f2 -> AND-combined filter.
        return self.combine(other, Connector.AND)

    def __str__(self):
        return "(%s: %s)" % (
            self.connector.value,
            ", ".join(str(child) for child in self.children),
        )

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self)

    def __len__(self):
        """Return the number of children this node has."""
        return len(self.children)

    def __bool__(self):
        """Return whether or not this node has children."""
        return bool(self.children)

    def __contains__(self, other):
        """Return True if 'other' is a direct child of this instance."""
        return other in self.children

    def __eq__(self, other):
        # Value equality over connector and (ordered) children.
        return (
            isinstance(other, Filter)
            and self.connector == other.connector
            and self.children == other.children
        )

    def __hash__(self):
        # Children are made hashable recursively via make_hashable.
        return hash((self.__class__, self.connector, *make_hashable(self.children),))
class SortOrder:
    """Stores an ordering instruction for a particular field"""
    field_name: "str"
    descending: "bool"
    def __init__(self, field_name: "str", descending: "bool" = False):
        self.field_name = field_name
        self.descending = descending
    def __eq__(self, other):
        # BUG FIX: __hash__ was defined without __eq__, so equal-hashing
        # SortOrders still compared by identity and were never deduplicated
        # in sets/dicts; define equality consistently with the hash.
        return (
            isinstance(other, SortOrder)
            and self.field_name == other.field_name
            and self.descending == other.descending
        )
    def __hash__(self):
        return hash((self.field_name, self.descending))
    def __str__(self):
        return f"{self.field_name} -> {'ASC' if not self.descending else 'DESC'}"
    def __repr__(self):
        return self.__str__()
class Query:
    """Represents a query with an optional filter and an ordering.
    Attributes:
        filter (Filter): The filter that needs to be applied
        ordering (List[SortOrder]): The sort order that should be applied
    """
    filter: "Filter"
    ordering: "List[SortOrder]"
    entity_name: "str"
    limit: "Optional[Any]"
    offset: "Optional[Any]"
    def __init__(
        self,
        entity_name: "str",
        filter: "Filter",
        ordering: "List[SortOrder]",
        limit: "Optional[Any]",
        offset: "Optional[Any]",
    ):
        # NOTE(review): asserts are stripped under `python -O`; if these
        # invariants must hold in production, raising ValueError is safer.
        if limit is not None:
            assert ordering, "Can't define limit for unordered query!"
        if offset is not None:
            assert ordering, "Can't define offset for unordered query!"
        self.entity_name = entity_name
        self.filter = filter
        self.ordering = ordering
        self.limit = limit
        self.offset = offset
    def __repr__(self):
        return self.__str__()
    def __str__(self):
        return f"Query(entity_name='{self.entity_name}', filter={self.filter}, ordering={self.ordering}, limit={self.limit}, offset={self.offset})"
    def __hash__(self):
        # Relies on Filter and SortOrder being hashable; `ordering` is a list
        # and so is converted to a tuple first.
        return hash(
            (
                self.filter,
                tuple(self.ordering),
                self.entity_name,
                self.limit,
                self.offset,
            )
        )
    def get_id(self):
        """Returns a unique identifier for this query."""
        # NOTE(review): derived from __hash__, so collisions are possible in
        # principle; unique in practice, not by construction.
        return str(self.__hash__())
class TrackedQuery:
    """Pairs a Query with the VectorClock that was current when it was tracked."""
    query: "Query"
    vector_clock: "VectorClock"
    def __init__(self, query: "Query", vector_clock: "VectorClock"):
        self.query = query
        self.vector_clock = vector_clock
    def __repr__(self):
        return "TrackedQuery(query={}, vector_clock={})".format(self.query, self.vector_clock)
|
<gh_stars>0
import os
import numpy as np
import torch as t
from jukebox.hparams import Hyperparams
from jukebox.utils.torch_utils import empty_cache
from jukebox.utils.audio_utils import save_wav, load_audio
from jukebox.make_models import make_model
from jukebox.align import get_alignment
from jukebox.save_html import save_html
from jukebox.utils.sample_utils import split_batch, get_starts
from jukebox.utils.dist_utils import print_once
import fire
# Sample a partial window of length<n_ctx with tokens_to_sample new tokens on level=level
def sample_partial_window(zs, labels, sampling_kwargs, level, prior, tokens_to_sample, hps):
    """Sample a partial window (< n_ctx tokens) of `tokens_to_sample` new tokens at `level`."""
    n_ctx = prior.n_ctx
    already_sampled = zs[level].shape[1]
    if already_sampled < n_ctx - tokens_to_sample:
        # Old plus new tokens fit inside a single context window.
        sampling_kwargs['sample_tokens'] = already_sampled + tokens_to_sample
        start = 0
    else:
        # Slide the window so its tail produces exactly the new tokens.
        sampling_kwargs['sample_tokens'] = n_ctx
        start = already_sampled - n_ctx + tokens_to_sample
    return sample_single_window(zs, labels, sampling_kwargs, level, prior, start, hps)
# Sample a single window of length=n_ctx at position=start on level=level
def sample_single_window(zs, labels, sampling_kwargs, level, prior, start, hps):
    """Sample one window of up to n_ctx tokens at token offset `start` on `level`.
    Newly sampled tokens are appended to zs[level]; the (mutated) zs is returned.
    """
    n_samples = hps.n_samples
    n_ctx = prior.n_ctx
    end = start + n_ctx
    # get z already sampled at current level
    z = zs[level][:,start:end]
    if 'sample_tokens' in sampling_kwargs:
        # Support sampling a window shorter than n_ctx
        sample_tokens = sampling_kwargs['sample_tokens']
    else:
        sample_tokens = (end - start)
    conditioning_tokens, new_tokens = z.shape[1], sample_tokens - z.shape[1]
    print_once(f"Sampling {sample_tokens} tokens for [{start},{start+sample_tokens}]. Conditioning on {conditioning_tokens} tokens")
    if new_tokens <= 0:
        # Nothing new to sample
        return zs
    # get z_conds from level above
    z_conds = prior.get_z_conds(zs, start, end)
    # set y offset, sample_length and lyrics tokens
    y = prior.get_y(labels, start)
    empty_cache()
    # Temporarily pop max_batch_size and split the batch into chunks no larger
    # than it — presumably prior.sample() does not accept this kwarg; confirm.
    max_batch_size = sampling_kwargs['max_batch_size']
    del sampling_kwargs['max_batch_size']
    z_list = split_batch(z, n_samples, max_batch_size)
    z_conds_list = split_batch(z_conds, n_samples, max_batch_size)
    y_list = split_batch(y, n_samples, max_batch_size)
    z_samples = []
    for z_i, z_conds_i, y_i in zip(z_list, z_conds_list, y_list):
        z_samples_i = prior.sample(n_samples=z_i.shape[0], z=z_i, z_conds=z_conds_i, y=y_i, **sampling_kwargs)
        z_samples.append(z_samples_i)
    z = t.cat(z_samples, dim=0)
    # Restore the key so later windows see the same sampling_kwargs.
    sampling_kwargs['max_batch_size'] = max_batch_size
    # Update z with new sample
    z_new = z[:,-new_tokens:]
    zs[level] = t.cat([zs[level], z_new], dim=1)
    return zs
# Sample total_length tokens at level=level with hop_length=hop_length
def sample_level(zs, labels, sampling_kwargs, level, prior, total_length, hop_length, hps):
    """Sample `total_length` tokens at `level`, windowing with stride `hop_length`."""
    print_once(f"Sampling level {level}")
    if total_length < prior.n_ctx:
        # Shorter than one context window: one partial pass is enough.
        return sample_partial_window(zs, labels, sampling_kwargs, level, prior, total_length, hps)
    for window_start in get_starts(total_length, prior.n_ctx, hop_length):
        zs = sample_single_window(zs, labels, sampling_kwargs, level, prior, window_start, hps)
    return zs
# Sample multiple levels
def _sample(zs, labels, sampling_kwargs, priors, sample_levels, hps):
    """Sample the requested levels top-down, decoding/saving audio after each level.
    Returns the updated codes zs; also writes data.pth.tar, wav files and an
    HTML visualization page into {hps.name}/level_{level}.
    """
    alignments = None
    for level in reversed(sample_levels):
        prior = priors[level]
        # Only one prior lives on the GPU at a time to bound memory use.
        prior.cuda()
        empty_cache()
        # Set correct total_length, hop_length, labels and sampling_kwargs for level
        assert hps.sample_length % prior.raw_to_tokens == 0, f"Expected sample_length {hps.sample_length} to be multiple of {prior.raw_to_tokens}"
        total_length = hps.sample_length//prior.raw_to_tokens
        hop_length = int(hps.hop_fraction[level]*prior.n_ctx)
        zs = sample_level(zs, labels[level], sampling_kwargs[level], level, prior, total_length, hop_length, hps)
        prior.cpu()
        empty_cache()
        # Decode sample
        x = prior.decode(zs[level:], start_level=level, bs_chunks=zs[level].shape[0])
        logdir = f"{hps.name}/level_{level}"
        if not os.path.exists(logdir):
            os.makedirs(logdir)
        t.save(dict(zs=zs, labels=labels, sampling_kwargs=sampling_kwargs, x=x), f"{logdir}/data.pth.tar")
        save_wav(logdir, x, hps.sr)
        # Alignment is computed at most once, from the top-level prior, and
        # only when that prior conditions on lyric tokens (n_tokens > 0).
        if alignments is None and priors[-1] is not None and priors[-1].n_tokens > 0:
            alignments = get_alignment(x, zs, labels[-1], priors[-1], sampling_kwargs[-1]['fp16'], hps)
        save_html(logdir, x, zs, labels[-1], alignments, hps)
    return zs
# Generate ancestral samples given a list of artists and genres
def ancestral_sample(labels, sampling_kwargs, priors, hps):
    """Generate samples from scratch (no priming) across all prior levels."""
    n_levels = len(priors)
    # Every level starts with an empty (n_samples, 0) long tensor on the GPU.
    zs = [t.zeros(hps.n_samples, 0, dtype=t.long, device='cuda') for _ in range(n_levels)]
    return _sample(zs, labels, sampling_kwargs, priors, list(range(n_levels)), hps)
# Continue ancestral sampling from previously saved codes
def continue_sample(zs, labels, sampling_kwargs, priors, hps):
    """Continue ancestral sampling from previously saved codes `zs`."""
    every_level = list(range(len(priors)))
    return _sample(zs, labels, sampling_kwargs, priors, every_level, hps)
# Upsample given already generated upper-level codes
def upsample(zs, labels, sampling_kwargs, priors, hps):
    """Upsample already-generated codes through every level except the top one."""
    lower_levels = list(range(len(priors) - 1))
    return _sample(zs, labels, sampling_kwargs, priors, lower_levels, hps)
# Prompt the model with raw audio input (dimension: NTC) and generate continuations
def primed_sample(x, labels, sampling_kwargs, priors, hps):
    """Encode raw audio prompts `x` (NTC) into codes, then sample continuations at all levels."""
    all_levels = list(range(len(priors)))
    # Encode through the top-level prior to obtain codes for every level.
    zs = priors[-1].encode(x, start_level=0, end_level=len(priors), bs_chunks=x.shape[0])
    return _sample(zs, labels, sampling_kwargs, priors, all_levels, hps)
# Load `duration` seconds of the given audio files to use as prompts
def load_prompts(audio_files, duration, hps):
    """Load `duration` of audio from each file and tile to hps.n_samples prompts.
    Returns a (n_samples, T, C) float tensor on the GPU.
    """
    xs = []
    for audio_file in audio_files:
        x = load_audio(audio_file, sr=hps.sr, duration=duration, offset=0.0, mono=True)
        x = x.T # CT -> TC
        xs.append(x)
    # BUG FIX: with an empty file list the doubling loop below never
    # terminates; fail fast with a clear error instead of hanging.
    if not xs:
        raise ValueError("load_prompts requires at least one audio file")
    while len(xs) < hps.n_samples:
        xs.extend(xs)
    xs = xs[:hps.n_samples]
    x = t.stack([t.from_numpy(x) for x in xs])
    x = x.to('cuda', non_blocking=True)
    return x
# Load codes from previous sampling run
def load_codes(codes_file, duration, priors, hps):
    """Load codes (zs) saved by a previous run, optionally truncated to `duration` raw samples."""
    data = t.load(codes_file, map_location='cpu')
    zs = [z.cuda() for z in data['zs']]
    assert zs[-1].shape[0] == hps.n_samples, f"Expected bs = {hps.n_samples}, got {zs[-1].shape[0]}"
    del data
    if duration is not None:
        # Cut off codes to match duration
        top_raw_to_tokens = priors[-1].raw_to_tokens
        assert duration % top_raw_to_tokens == 0, f"Cut-off duration {duration} not an exact multiple of top_raw_to_tokens"
        assert duration//top_raw_to_tokens <= zs[-1].shape[1], f"Cut-off tokens {duration//priors[-1].raw_to_tokens} longer than tokens {zs[-1].shape[1]} in saved codes"
        # Each level has its own raw_to_tokens ratio, so truncate per level.
        zs = [z[:,:duration//prior.raw_to_tokens] for z, prior in zip(zs, priors)]
    return zs
# Generate and save samples, alignment, and webpage for visualization.
def save_samples(model, device, hps, sample_hps):
    """Generate and save samples, alignment, and webpage for visualization.
    Dispatches on sample_hps.mode: 'ancestral', 'continue', 'upsample' or 'primed'.
    """
    print(hps)
    from jukebox.lyricdict import poems, gpt_2_lyrics
    vqvae, priors = make_model(model, device, hps)
    # The second-to-top prior must see at least one full context window.
    assert hps.sample_length//priors[-2].raw_to_tokens >= priors[-2].n_ctx, f"Upsampling needs atleast one ctx in get_z_conds. Please choose a longer sample length"
    total_length = hps.total_sample_length_in_seconds * hps.sr
    offset = 0
    # Set artist/genre/lyrics for your samples here!
    # We used different label sets in our models, but you can write the human friendly names here and we'll map them under the hood for each model.
    # For the 5b/5b_lyrics model and the upsamplers, labeller will look up artist and genres in v2 set. (after lowercasing, removing non-alphanumerics and collapsing whitespaces to _).
    # For the 1b_lyrics top level, labeller will look up artist and genres in v3 set (after lowercasing).
    metas = [dict(artist = "<NAME>",
                genre = "Country",
                lyrics = poems['ozymandias'],
                total_length=total_length,
                offset=offset,
                ),
            dict(artist="<NAME>",
                genre="Blues Rock",
                lyrics=gpt_2_lyrics['hottub'],
                total_length=total_length,
                offset=offset,
                ),
            dict(artist="<NAME>",
                genre="Classic Pop",
                lyrics=gpt_2_lyrics['alone'],
                total_length=total_length,
                offset=offset,
                ),
            dict(artist="<NAME>",
                genre="Jazz",
                lyrics=gpt_2_lyrics['count'],
                total_length=total_length,
                offset=offset,
                ),
            dict(artist="<NAME>",
                genre="Pop",
                lyrics=gpt_2_lyrics['darkness'],
                total_length=total_length,
                offset=offset,
                ),
            ]
    # Tile the meta list so there is exactly one label dict per sample.
    while len(metas) < hps.n_samples:
        metas.extend(metas)
    metas = metas[:hps.n_samples]
    labels = [prior.labeller.get_batch_labels(metas, 'cuda') for prior in priors]
    for label in labels:
        assert label['y'].shape[0] == hps.n_samples
    lower_level_chunk_size = 32
    lower_level_max_batch_size = 16
    # Top-level settings differ by model — presumably the non-1b top levels
    # are larger and need smaller chunks/batches; confirm against the models.
    if model == '1b_lyrics':
        chunk_size = 32
        max_batch_size = 16
    else:
        chunk_size = 16
        max_batch_size = 3
    sampling_kwargs = [dict(temp=0.99, fp16=True, chunk_size=lower_level_chunk_size, max_batch_size=lower_level_max_batch_size),
                        dict(temp=0.99, fp16=True, chunk_size=lower_level_chunk_size, max_batch_size=lower_level_max_batch_size),
                        dict(temp=0.99, fp16=True, chunk_size=chunk_size, max_batch_size=max_batch_size)]
    if sample_hps.mode == 'ancestral':
        ancestral_sample(labels, sampling_kwargs, priors, hps)
    elif sample_hps.mode in ['continue', 'upsample']:
        assert sample_hps.codes_file is not None
        top_raw_to_tokens = priors[-1].raw_to_tokens
        if sample_hps.prompt_length_in_seconds is not None:
            # Round the prompt length down to a whole number of top-level tokens.
            duration = (int(sample_hps.prompt_length_in_seconds * hps.sr) // top_raw_to_tokens) * top_raw_to_tokens
        else:
            duration = None
        zs = load_codes(sample_hps.codes_file, duration, priors, hps)
        if sample_hps.mode == 'continue':
            continue_sample(zs, labels, sampling_kwargs, priors, hps)
        elif sample_hps.mode == 'upsample':
            upsample(zs, labels, sampling_kwargs, priors, hps)
    elif sample_hps.mode == 'primed':
        assert sample_hps.audio_file is not None
        assert sample_hps.prompt_length_in_seconds is not None
        audio_files = sample_hps.audio_file.split(',')
        top_raw_to_tokens = priors[-1].raw_to_tokens
        duration = (int(sample_hps.prompt_length_in_seconds * hps.sr) // top_raw_to_tokens) * top_raw_to_tokens
        x = load_prompts(audio_files, duration, hps)
        primed_sample(x, labels, sampling_kwargs, priors, hps)
    else:
        raise ValueError(f'Unknown sample mode {sample_hps.mode}.')
def run(model, mode='ancestral', codes_file=None, audio_file=None, prompt_length_in_seconds=None, port=29500, **kwargs):
    """CLI entry point: set up distributed state, build hyperparams and dispatch to save_samples.
    Any extra **kwargs become model/sampling Hyperparams.
    """
    from jukebox.utils.dist_utils import setup_dist_from_mpi
    rank, local_rank, device = setup_dist_from_mpi(port=port)
    hps = Hyperparams(**kwargs)
    # NOTE(review): Hyperparams receives a plain dict positionally here (not
    # **kwargs) — presumably it accepts a mapping; confirm in jukebox.hparams.
    sample_hps = Hyperparams(dict(mode=mode, codes_file=codes_file, audio_file=audio_file, prompt_length_in_seconds=prompt_length_in_seconds))
    with t.no_grad():
        save_samples(model, device, hps, sample_hps)
if __name__ == '__main__':
    # Expose run() as a command-line interface via Google Fire.
    fire.Fire(run)
|
#!/usr/bin/python
"""
Learning tool.
=======
License
=======
Copyright (c) 2015 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons
to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies
or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
# pylint: disable=C0325
import sys
import platform
import click
import random
import time
import json
from datetime import datetime
from concept import VERSION
from concept.generator.select import select
from concept.graph.gnuplot import plot, multiplot, script
def average(entry):
    """Return the average time in seconds per answered test in *entry*."""
    answered = entry['correct answers'] + entry['wrong answers']
    return entry['total time (s)'] / float(answered)
def dump_last_result(statistic):
    """
    Print result of last training to stdout.
    :param statistic: results of last training.
    """
    print("\nResults of last test run:")
    rows = [
        (" %5d answers were correct.", statistic['correct answers']),
        (" %5d answers were wrong.", statistic['wrong answers']),
        (" %5.2f seconds in total.", statistic['total time (s)']),
        (" %5.2f seconds per answer (average).", average(statistic)),
        (" %5.2f seconds was best time.", statistic['best time (s)']),
        (" %5.2f seconds was worst time.", statistic['worst time (s)']),
    ]
    for template, value in rows:
        print(template % value)
def dump_total_results(statistic_entries):
"""
Print summary for all done training to stdout.
:param statistic_entries: a list of dictionaries with test results.
"""
individual_tests = sum([entry['correct answers'] + entry['wrong answers']
for entry in statistic_entries])
average_per_test = sum([entry['total time (s)'] for entry in statistic_entries]) \
/ float(individual_tests)
average_per_run = sum([entry['total time (s)'] for entry in statistic_entries]) \
/ float(len(statistic_entries))
best_time = min([entry['best time (s)'] for entry in statistic_entries])
worst_time = max([entry['worst time (s)'] for entry in statistic_entries])
print("\nSummary for all done tests:")
print(" %5d total test runs" % len(statistic_entries))
print(" %5d individual tests" % individual_tests)
print(" %5.1f individual tests per run" % (individual_tests / float(len(statistic_entries))))
print(" %5.2f seconds per answer (average)" % average_per_test)
print(" %5.2f seconds per run (average)" % average_per_run)
print(" %5.2f seconds was best time." % best_time)
print(" %5.2f seconds was worst time." % worst_time)
def create_gnuplot_statistic(statistic_entries):
    """Creating a gnuplot script and generates the image for it."""
    # Group runs by their 'max entries' setting so each difficulty level
    # gets its own row of plots.
    grouped_by_number_of_entries = {}
    for statistic in statistic_entries:
        key = statistic['max entries']
        if key not in grouped_by_number_of_entries:
            grouped_by_number_of_entries[key] = [statistic]
        else:
            grouped_by_number_of_entries[key].append(statistic)
    all_plots = multiplot("learn.py statistics", title_font=("", 18), plots_per_row=2)
    pos = 0
    max_pos = len(grouped_by_number_of_entries) - 1
    for key, statistic in grouped_by_number_of_entries.items():
        # Left plot: average answer time per run.
        average_time_plot = plot()
        average_time_plot.set_ylabel("seconds")
        if pos == max_pos:
            # Only the bottom row carries the shared x-axis label.
            average_time_plot.set_xlabel("n'th test run")
        average_time_plot.set_xtics("1")
        average_time_plot.set_ytics("0.5")
        average_time_plot.set_line_style(1, "lc rgb \"#00ff00\" lw 2")
        average_time_plot.set_fill_style(1, "transparent solid 0.4 border")
        values = list(enumerate([average(entry) for entry in statistic], 1))
        average_time_plot.add_curve("average times (max entries=%d)" % key,
                                    values=values, mode=plot.FILLEDCURVES)
        all_plots.add_plot(average_time_plot)
        # Right plot: number of individual tests per run.
        number_of_tests_plot = plot()
        number_of_tests_plot.set_ylabel("# tests")
        if pos == max_pos:
            number_of_tests_plot.set_xlabel("n'th test run")
        number_of_tests_plot.set_xtics("1")
        number_of_tests_plot.set_ytics("1")
        number_of_tests_plot.set_line_style(1, "lc rgb \"#00ff00\" lw 2")
        number_of_tests_plot.set_fill_style(1, "transparent solid 0.4 border")
        values = list(enumerate([entry['correct answers'] + entry['wrong answers']
                                 for entry in statistic], 1))
        number_of_tests_plot.add_curve("# of tests (max entries=%d)" % key,
                                       values=values, mode=plot.FILLEDCURVES)
        all_plots.add_plot(number_of_tests_plot)
        pos += 1
    # 250 px of image height per plot row.
    calculated_height = len(grouped_by_number_of_entries) * 250
    script("learn.gp", all_plots, width=800, height=calculated_height).execute()
def save(statistic_entries):
    """
    Save all statistic entries to a JSON file.
    :param statistic_entries: a list of dictionaries with test results.
    """
    with open('learn.json', 'w') as handle:
        json.dump(statistic_entries, handle, indent=2)
def load():
    """
    Load all previous statistic results from a JSON file.
    :returns: list of previous statistic results or empty list if not found.
    """
    try:
        with open('learn.json', 'r') as stream:
            return json.load(stream)
    except IOError:
        # Missing/unreadable history file simply means "no previous results".
        return []
@click.command()
@click.option("--max-entries", default=5, help="number of entries to display")
@click.option("--max-tests", default=10, help="number of tests")
def main(max_entries, max_tests):
    """Learning tool."""
    print("learning tool (version %s)" % VERSION)
    print(" ... Python %s" % sys.version.replace("\n", ""))
    print(" ... Platform %s" % platform.platform())
    print("\nAll possible entries: %s\n" % select(1, max_entries, 1).build())
    previous_results = load()
    started = datetime.now()
    results = []
    test = 1
    while test <= max_tests:
        # Show all entries except one randomly removed; the user must name it.
        entries = select(1, max_entries, 1).shuffled()
        pos = random.randint(0, max_entries - 1)
        missing_entry = entries[pos]
        del entries[pos]
        start = time.time()
        answer = input("Which entry is missing: %s: " % entries)
        duration = time.time() - start
        # NOTE(review): int(answer) raises ValueError on non-numeric input and
        # aborts the whole run — confirm whether that is intended.
        if int(answer) == missing_entry:
            print(" ... correct (took %f seconds)" % duration)
            results.append((True, duration))
        else:
            print(" ... wrong, it was %s (took %f seconds)" % (missing_entry, duration))
            results.append((False, duration))
        test += 1
    total_time = sum([entry[1] for entry in results])
    best_time = min([entry[1] for entry in results])
    worst_time = max([entry[1] for entry in results])
    # Fold this run into a statistic record, persist history, print summaries.
    statistic = {'started': started.strftime("%Y-%m-%d %H:%M:%S"),
                 'max entries': max_entries,
                 'correct answers': sum([1 for entry in results if entry[0]]),
                 'wrong answers': sum([1 for entry in results if not entry[0]]),
                 'total time (s)': total_time,
                 'best time (s)': best_time,
                 'worst time (s)': worst_time}
    previous_results.append(statistic)
    save(previous_results)
    dump_last_result(statistic)
    dump_total_results(previous_results)
    create_gnuplot_statistic(previous_results)
if __name__ == "__main__":
    # click parses sys.argv when main() is invoked with no arguments.
    main()
|
import numpy as np
from math import pi
import matplotlib.pyplot as plt
sqrt_pi = (2 * pi) ** 0.5
class NBFunctions:
    """Gaussian helpers for naive Bayes: pdf evaluation and per-dimension ML fits."""
    @staticmethod
    def gaussian(x, mu, sigma):
        """Gaussian pdf N(mu, sigma^2) at x; `sigma` is the standard deviation."""
        return np.exp(-(x - mu) ** 2 / (2 * sigma ** 2)) / (sqrt_pi * sigma)
    @staticmethod
    def gaussian_maximum_likelihood(labelled_x, n_category, dim):
        """Return one callable per category evaluating the ML-fitted Gaussian
        pdf of feature dimension `dim` (labelled_x[c][dim] holds the values
        of that dimension for category c).
        """
        mu = [np.sum(
            labelled_x[c][dim]) / len(labelled_x[c][dim]) for c in range(n_category)]
        # BUG FIX: the quantity below is the ML *variance*, but gaussian()
        # expects the standard deviation — take the square root before use.
        var = [np.sum(
            (labelled_x[c][dim] - mu[c]) ** 2) / len(labelled_x[c][dim]) for c in range(n_category)]
        sigma = [v ** 0.5 for v in var]
        def func(_c):
            def sub(x):
                return NBFunctions.gaussian(x, mu[_c], sigma[_c])
            return sub
        return [func(_c=c) for c in range(n_category)]
class DataUtil:
    """Helper for loading comma-separated datasets into numpy arrays."""
    @staticmethod
    def get_dataset(name, path, n_train=None, tar_idx=None, shuffle=True):
        """Load samples from `path`; pop column `tar_idx` (default: last) as labels.
        Returns (x, y), or ((x_train, y_train), (x_test, y_test)) when n_train is set.
        Only acts on dataset names containing 'balloon' or 'mushroom'.
        """
        # BUG FIX: this method had neither a `self` parameter nor a
        # @staticmethod decorator, so it only worked when accessed through
        # the class; calling it on an instance raised TypeError.
        x = []
        with open(path, "r", encoding="utf8") as file:
            if "balloon" in name or 'mushroom' in name:
                for sample in file:
                    x.append(sample.strip().split(","))
        if shuffle:
            np.random.shuffle(x)
        tar_idx = -1 if tar_idx is None else tar_idx
        y = np.array([xx.pop(tar_idx) for xx in x])
        x = np.asarray(x)
        if n_train is None:
            return x, y
        return (x[:n_train], y[:n_train]), (x[n_train:], y[n_train:])
class NaiveBayes:
    """Base class for naive Bayes classifiers.
    Subclasses implement feed_data/_fit/_func; this class provides priors,
    prediction by maximum posterior, and accuracy evaluation.
    """
    def __init__(self, **kwargs):
        super(NaiveBayes, self).__init__(**kwargs)
        self._x = self._y = self._data = None
        self._n_possibilities = self._p_category = None
        self._labelled_x = self._label_zip = None
        self._cat_counter = self._con_counter = None
        self.label_dict = self._feat_dicts = None
    def __getitem__(self, item):
        # Read access to "private" attributes: nb["x"] -> nb._x.
        if isinstance(item, str):
            return getattr(self, '_' + item)
    def feed_data(self, x, y, sample_weight=None):
        """Hook: ingest training data (implemented by subclasses)."""
        pass
    def feed_sample_weight(self, sample_weight=None):
        """Hook: apply sample weights (implemented by subclasses)."""
        pass
    def get_prior_probability(self, lb=1):
        """Return Laplace-smoothed class priors; `lb` is the smoothing term."""
        return [(c_num + lb) / (len(self._y) + lb * len(self._cat_counter))
                for c_num in self._cat_counter]
    def fit(self, x=None, y=None, sample_weight=None, lb=1):
        """Optionally feed (x, y), then fit with smoothing term `lb`."""
        if x is not None and y is not None:
            self.feed_data(x, y, sample_weight)
        self._fit(lb)
    def _fit(self, lb):
        """Hook: estimate model parameters (implemented by subclasses)."""
        pass
    def _func(self, x, i):
        """Hook: posterior-proportional score of samples x for category i."""
        pass
    def predict(self, x, get_raw_result=False, **kwargs):
        """Predict labels for samples x (or the max raw scores when
        get_raw_result=True)."""
        if isinstance(x, np.ndarray):
            x = x.tolist()
        else:
            x = [xx[:] for xx in x]
        x = self._transfer_x(x)
        m_arg, m_probability = np.zeros(len(x), dtype=np.int8), np.zeros(len(x))
        for i in range(len(self._cat_counter)):
            p = self._func(x, i)
            mask = p > m_probability
            m_arg[mask], m_probability[mask] = i, p[mask]
        if not get_raw_result:
            # BUG FIX: this used the never-defined attribute
            # `num_to_label_dict`; subclasses store the index->label mapping
            # in `label_dict` (see GaussianNB.feed_data).
            return np.array([self.label_dict[arg] for arg in m_arg])
        return m_probability
    def evaluate(self, x, y):
        """Print prediction accuracy on (x, y)."""
        y_pred = self.predict(x)
        print('Acc={:12.6} %'.format(100 * np.sum(y_pred == y) / len(y)))
    def _transfer_x(self, x):
        """Hook: convert raw input rows for prediction (identity by default)."""
        return x
class GaussianNB(NaiveBayes):
    """Naive Bayes with per-dimension Gaussian likelihoods for continuous features."""
    def feed_data(self, x, y, sample_weight=None):
        """Cast features to float, encode labels, and cache per-category views."""
        if sample_weight is not None:
            sample_weight = np.asarray(sample_weight)
        x = np.array([list(map(lambda c: float(c), sample)) for sample in x])
        labels = list(set(y))
        label_dict = {label: i for i, label in enumerate(labels)}
        y = np.array([label_dict[yy] for yy in y])
        cat_counter = np.bincount(y)
        labels = [y == value for value in range(len(cat_counter))]
        labelled_x = [x[label].T for label in labels]
        self._x, self._y = x.T, y
        self._labelled_x, self._label_zip = labelled_x, labels
        self._cat_counter, self.label_dict = cat_counter, {i: l for l, i in label_dict.items()}
        self.feed_sample_weight(sample_weight)
    def feed_sample_weight(self, sample_weight=None):
        """Scale the cached per-category data by (normalized) sample weights."""
        if sample_weight is not None:
            local_weights = sample_weight * len(sample_weight)
            for i, label in enumerate(self._label_zip):
                self._labelled_x[i] *= local_weights[label]
    def _fit(self, lb):
        """Fit per-dimension Gaussian ML estimates; cache pdfs and priors."""
        n_category = len(self._cat_counter)
        p_category = self.get_prior_probability(lb)
        data = [
            NBFunctions.gaussian_maximum_likelihood(
                self._labelled_x, n_category, dim) for dim in range(len(self._x))]
        self._data = data
        # BUG FIX: the priors were previously captured only by the returned
        # closure, which fit() discards — cache them so _func() can use them.
        self._p_category = p_category
        def func(input_x, tar_category):
            rs = 1
            for d, xx in enumerate(input_x):
                rs *= data[d][tar_category](xx)
            return rs * p_category[tar_category]
        return func
    def _func(self, x, i):
        # BUG FIX: the base class left _func as a no-op, so predict()
        # compared None against probabilities and crashed. Score every
        # sample: product over dimensions of the fitted pdf, times the prior.
        x = np.asarray(x, dtype=np.float64)
        rs = np.ones(len(x))
        for d in range(x.shape[1]):
            rs *= self._data[d][i](x[:, d])
        return rs * self._p_category[i]
    def visualize(self, save=False):
        """Plot (or save as d<j>.png) the fitted Gaussians of each dimension."""
        colors = plt.cm.Paired([i / len(self.label_dict) for i in range(len(self.label_dict))])
        colors = {cat: color for cat, color in zip(self.label_dict.values(), colors)}
        for j in range(len(self._x)):
            tmp_data = self._x[j]
            x_min, x_max = np.min(tmp_data), np.max(tmp_data)
            gap = x_max - x_min
            tmp_x = np.linspace(x_min-0.1*gap, x_max+0.1*gap, 200)
            title = "$j = {}$".format(j + 1)
            plt.figure()
            plt.title(title)
            for c in range(len(self.label_dict)):
                plt.plot(tmp_x, [self._data[j][c](xx) for xx in tmp_x],
                         c=colors[self.label_dict[c]], label="class: {}".format(self.label_dict[c]))
            plt.xlim(x_min-0.2*gap, x_max+0.2*gap)
            plt.legend()
            if not save:
                plt.show()
            else:
                plt.savefig("d{}".format(j + 1))
def run_mushroom():
    """Train and evaluate GaussianNB on data/data.txt, timing fit and evaluation, then plot fits."""
    import time
    dateset = 'data.txt'
    print(
        "===============================\n"
        "{}\n"
        "-------------------------------\n".format(dateset), end='\t')
    # First 7000 rows train, the remainder validates; column 0 is the label.
    (_x, _y), (_x_val, _y_val) = DataUtil.get_dataset(dateset, 'data/{}'.format(dateset), tar_idx=0, n_train=7000)
    learning_time = time.time()
    nb = GaussianNB()
    nb.fit(_x, _y)
    learning_time = time.time() - learning_time
    estimation_time = time.time()
    nb.evaluate(_x, _y)
    nb.evaluate(_x_val, _y_val)
    estimation_time = time.time() - estimation_time
    print(
        "Model building : {:12.6} s\n"
        "Estimation : {:12.6} s\n"
        "Total : {:12.6} s".format(
            learning_time, estimation_time,
            learning_time + estimation_time
        )
    )
    # nb.show_timing_log()
    nb.visualize()
if __name__ == '__main__':
run_mushroom() |
<gh_stars>0
import logging
from sqlalchemy import create_engine
import pandas as pd
LOG = logging.getLogger(__name__)
class SeqlDB(object):
    """Thin SQLAlchemy wrapper around an Ethereum block-explorer database.
    On construction it creates the schema (tables and stored procedures)
    when the vendor is SQL Server and the Block table does not yet exist.
    """
    def __init__(self, conString, dbVendor = 'SqlServer'):
        # conString: SQLAlchemy connection string.
        # dbVendor: only 'SqlServer' triggers automatic schema setup.
        self.dbVendor = dbVendor
        self.conString = conString
        self.seqlEngine = self.createEngine()
        self.setUpIfNotExist()
    def createEngine(self):
        """Create the SQLAlchemy engine for the configured connection string."""
        engine = create_engine(str(self.conString))
        return engine
    def getConnection(self):
        """Open and return a fresh connection from the engine."""
        engine = self.seqlEngine
        LOG.info('Connecting to the database')
        connection = engine.connect()
        return connection
    def execute(self, sql):
        """Run `sql` inside a transaction.
        Returns a pandas DataFrame for row-returning statements, None
        otherwise; rolls back and re-raises on any error.
        """
        with self.getConnection() as db_connection:
            trans = db_connection.begin()
            try:
                result = db_connection.execute(sql)
                if result.returns_rows:
                    cNames = result.keys()
                    data = pd.DataFrame(result.fetchall(), columns=cNames)
                else:
                    data = None
                trans.commit()
            except Exception as ex:
                trans.rollback()
                raise ex
            return data
    def setUpIfNotExist(self):
        """Create tables and stored procedures if the Block table is missing."""
        if self.dbVendor == 'SqlServer':
            exists = self.execute('''
            SELECT *
            FROM INFORMATION_SCHEMA.TABLES
            WHERE TABLE_SCHEMA = 'dbo'
            AND TABLE_NAME = 'Block'
            ''')
            if len(exists) == 0:
                self.execute('''
                CREATE TABLE [dbo].[Block](
                    [number] [int] NOT NULL,
                    [transCount] [int] NULL,
                    [uniqueAccounts] [int] NULL,
                    [contractCount] [int] NULL,
                    [hash] varchar(66) NULL,
                    [parentHash] varchar(66) NULL,
                    [miner] varchar(66) NULL,
                    [nonce] varchar(18) NULL,
                    [timestamp] [datetime] NULL,
                    [difficulty] [bigint] NULL,
                    [totalDifficulty] [bigint] NULL,
                    [gasLimit] [bigint] NULL,
                    [gasUsed] [bigint] NULL,
                    [receiptsRoot] varchar(66) NULL,
                    [stateRoot] varchar(66) NULL,
                    [transactionsRoot] varchar(66) NULL,
                    [sha3Uncles] varchar(66) NULL,
                    [size] [bigint] NULL,
                    [alias] varchar(100),
                    [hasAccountBalanceInfo] int,
                    PRIMARY KEY CLUSTERED([number])
                );
                CREATE TABLE [dbo].[BlockTransaction](
                    [blockNumber] int not null,
                    [transactionIndex] int not null,
                    [hash] varchar(66),
                    [from] varchar(42),
                    [to] varchar(42),
                    [contractCreated] varchar(42),
                    [valEth] [float] NULL,
                    [valFinney] [float] NULL,
                    [valSzabo] [float] NULL,
                    [value] [float] NULL,
                    [gas] [bigint] NULL,
                    [gasPrice] [bigint] NULL,
                    [nonce] [bigint] NULL,
                    primary key([blockNumber], [transactionIndex])
                );
                CREATE TABLE [dbo].[Contract](
                    [blockNumber] int not null,
                    [transactionHash] varchar(66),
                    [contractAddress] varchar(42),
                    [creator] varchar(42),
                    [gasUsed] bigint,
                    [transactionIndex] int ,
                    [cumulativeGasUsed] bigint
                );
                create table AccountAlias(
                    [account] varchar(42),
                    [alias] varchar(100),
                    primary key (account)
                );
                CREATE TABLE [dbo].[AccountBalances](
                    [blockNumber] int not null,
                    [account] varchar(42),
                    [balEth] [float],
                    [balFinney] [float],
                    [balSzabo] [float],
                    [balance] [float],
                    primary key (blockNumber, account)
                );
                CREATE TABLE [dbo].[failures](
                    [failed] [bigint] NULL,
                    [message] [varchar](max) NULL
                );
                ''')
                self.execute('''
                create procedure spCleanUpByBlock(@blockNumber int) as
                begin
                    delete from [dbo].[AccountBalances] where blockNumber = @blockNumber
                    delete from [dbo].[Block] where number = @blockNumber
                    delete from [dbo].[Contract] where blockNumber = @blockNumber
                    delete from [dbo].[BlockTransaction] where blockNumber = @blockNumber
                    delete from failures where failed = @blockNumber
                end;
                ''')
                #Used on the ParseBlockFunction
                self.execute('''
                create procedure insertIfNotExistAccAlias(@account varchar(42), @alias varchar(100))
                as
                begin
                    IF NOT EXISTS(SELECT * FROM AccountAlias WHERE account = @account) begin
                        insert into AccountAlias values (@account, @alias)
                    end
                end;
                ''')
                #TODO: this second procedure could replace the first completely
                #used on the parseAccount procedure
                self.execute('''
                create procedure insertOrUpdateAccAlias(@account varchar(42), @alias varchar(100))
                as
                begin
                    IF NOT EXISTS(SELECT * FROM AccountAlias WHERE account = @account) begin
                        insert into AccountAlias values (@account, @alias)
                    end ELSE begin
                        update AccountAlias set alias = @alias where account = @account and (alias = 'other' or alias is null)
                    end
                end
                ''')
<reponame>cherepaha/PyDDM
import unittest
from unittest import TestCase, main
from string import ascii_letters
import numpy as np
from itertools import groupby
from math import fsum
import pandas
import copy
import scipy.stats
from numpy import asarray as aa
import ddm
def fails(f, exception=BaseException):
    """Assert that calling `f` raises `exception`.
    Raises ValueError if `f` returns normally; any exception not matching
    `exception` propagates unchanged.
    """
    try:
        f()
    except exception:
        # Expected failure occurred.
        pass
    else:
        # Idiom fix: try/else replaces the boolean flag and the
        # `failed == False` comparison; the unused `as e` binding is dropped.
        raise ValueError("Error, function did not fail")
class TestDependences(TestCase):
    def setUp(self):
        """Create fake models which act like models but are actually much simpler."""
        # Fake model which solves to be a uniform distribution
        class FakeUniformModel(ddm.Model):
            def solve(self, conditions={}, *args, **kwargs):
                # Mass split 40% correct / 40% error / 20% undecided,
                # spread uniformly over the respective domains.
                corr = self.t_domain()*0+.4/len(self.t_domain())
                err = self.t_domain()*0+.4/len(self.t_domain())
                undec = self.x_domain(conditions=conditions)*0+.2/len(self.x_domain(conditions=conditions))
                return ddm.Solution(corr, err, self, conditions, undec)
        # Route every solver entry point through the same fake solve().
        FakeUniformModel.solve_analytical = FakeUniformModel.solve
        FakeUniformModel.solve_numerical = FakeUniformModel.solve
        FakeUniformModel.solve_numerical_cn = FakeUniformModel.solve
        FakeUniformModel.solve_numerical_implicit = FakeUniformModel.solve
        FakeUniformModel.solve_numerical_explicit = FakeUniformModel.solve
        self.FakeUniformModel = FakeUniformModel
        # Fake model which solves to be a single point
        class FakePointModel(ddm.Model):
            def solve(self, conditions={}, *args, **kwargs):
                # All mass at the second time bin: 80% correct, 20% error.
                corr = self.t_domain()*0
                corr[1] = .8
                err = self.t_domain()*0
                err[1] = .2
                return ddm.Solution(corr, err, self, conditions)
        FakePointModel.solve_analytical = FakePointModel.solve
        FakePointModel.solve_numerical = FakePointModel.solve
        FakePointModel.solve_numerical_cn = FakePointModel.solve
        FakePointModel.solve_numerical_implicit = FakePointModel.solve
        FakePointModel.solve_numerical_explicit = FakePointModel.solve
        self.FakePointModel = FakePointModel
        # Fake model which has all trials undecided
        class FakeUndecidedModel(ddm.Model):
            def solve(self, conditions={}, *args, **kwargs):
                corr = self.t_domain()*0
                err = self.t_domain()*0
                undec = self.x_domain(conditions=conditions)*0+1/len(self.x_domain(conditions=conditions))
                return ddm.Solution(corr, err, self, conditions, undec)
        FakeUndecidedModel.solve_analytical = FakeUndecidedModel.solve
        FakeUndecidedModel.solve_numerical = FakeUndecidedModel.solve
        FakeUndecidedModel.solve_numerical_cn = FakeUndecidedModel.solve
        FakeUndecidedModel.solve_numerical_implicit = FakeUndecidedModel.solve
        FakeUndecidedModel.solve_numerical_explicit = FakeUndecidedModel.solve
        self.FakeUndecidedModel = FakeUndecidedModel
def test_Dependence_spec(self):
"""Ensure classes can inherit properly from Dependence"""
# Instantiating directly fails
fails(lambda : ddm.models.Dependence())
# Fails without all properties
class TestDepFail1(ddm.models.Dependence):
pass
fails(lambda : TestDepFail1())
class TestDepFail2(ddm.models.Dependence):
depname = "Depname"
fails(lambda : TestDepFail2())
class TestDepFail3(ddm.models.Dependence):
depname = "Depname"
name = "Name"
fails(lambda : TestDepFail3())
class TestDep(ddm.models.Dependence):
depname = "Depname"
name = "Name"
required_parameters = []
assert TestDep() is not None
def test_Dependence_derived(self):
"""Ensure derived classes handle parameters properly"""
class TestDep(ddm.models.Dependence):
depname = "Test dependence"
class TestDepComp(TestDep):
name = "Test component"
required_parameters = ["testparam1", "testparam2"]
default_parameters = {"testparam2" : 10}
# Not all params specified
fails(lambda : TestDepComp())
# Using default parameter
assert TestDepComp(testparam1=5) is not None
# Overriding the default parameter
tdc = TestDepComp(testparam1=3, testparam2=4)
assert tdc.testparam1 == 3
assert tdc.testparam2 == 4
assert tdc.required_conditions == []
# Ensure class static variable holds
tdc = TestDepComp(testparam1=7)
assert tdc.testparam1 == 7
assert tdc.testparam2 == 10
def test_DriftReduces(self):
"""DriftLinear reduces to DriftConstant when x and t are 0"""
drift_constant_instances = [e for e in ddm.models.DriftConstant._generate()]
for cinst in drift_constant_instances:
linst = ddm.models.DriftLinear(drift=cinst.get_drift(t=0), x=0, t=0)
for t in [0, .1, .5, 1, 2, 10]:
assert linst.get_drift(t=t, x=1) == cinst.get_drift(t=t, x=1)
def test_NoiseReduces(self):
"""NoiseLinear reduces to NoiseConstant when x and t are 0"""
noise_constant_instances = [e for e in ddm.models.NoiseConstant._generate()]
for cinst in noise_constant_instances:
linst = ddm.models.NoiseLinear(noise=cinst.get_noise(t=0), x=0, t=0)
for t in [0, .1, .5, 1, 2, 10]:
assert linst.get_noise(t=t, x=1) == cinst.get_noise(t=t, x=1)
def test_ICArbitrary(self):
"""Arbitrary starting conditions from a distribution"""
# Make sure we get out the same distribution we put in
m = ddm.Model()
unif = ddm.models.ICUniform()
unif_a = ddm.models.ICArbitrary(unif.get_IC(m.x_domain({})))
assert np.all(unif.get_IC(m.x_domain({})) == unif_a.get_IC(m.x_domain({})))
point = ddm.models.ICPointSourceCenter()
point_a = ddm.models.ICArbitrary(point.get_IC(m.x_domain({})))
assert np.all(point.get_IC(m.x_domain({})) == point_a.get_IC(m.x_domain({})))
# Make sure the distribution integrates to 1
fails(lambda : ddm.models.ICArbitrary(aa([.1, .1, 0, 0, 0])))
fails(lambda : ddm.models.ICArbitrary(aa([0, .6, .6, 0])))
assert ddm.models.ICArbitrary(aa([1]))
def test_ICRange(self):
"""Uniform distribution of starting conditions of arbitrary size centered at 0"""
# Make sure it is the same as uniform in the limiting case
icrange = ddm.models.ICRange(sz=1)
icunif = ddm.models.ICUniform()
params = dict(x=np.arange(-1, 1.0001, .01), dx=.01)
assert np.all(np.isclose(icunif.get_IC(**params), icrange.get_IC(**params)))
# Make sure it is the same as point source center when sz=0
icpsc = ddm.models.ICPointSourceCenter()
icrange = ddm.models.ICRange(sz=0)
assert np.all(np.isclose(icpsc.get_IC(**params), icrange.get_IC(**params)))
# For intermediate values, there should only be two values
# generated, and it should be symmetric
icrange = ddm.models.ICRange(sz=.444)
ic = icrange.get_IC(x=np.arange(-.48, .48001, .02), dx=.02)
assert np.all(np.isclose(ic, ic[::-1]))
assert len(set(ic)) == 2
def test_OverlayNone(self):
"""No overlay"""
s = ddm.Model().solve()
assert s == ddm.models.OverlayNone().apply(s)
s = self.FakeUniformModel().solve()
assert s == ddm.models.OverlayNone().apply(s)
s = self.FakePointModel().solve()
assert s == ddm.models.OverlayNone().apply(s)
def test_OverlayUniformMixture(self):
"""Uniform mixture model overlay: a uniform distribution plus the model's solved distribution"""
# Do nothing with 0 probability
s = ddm.Model(drift=ddm.models.DriftConstant(drift=1)).solve()
smix = ddm.models.OverlayUniformMixture(umixturecoef=0).apply(s)
assert s == smix
# With mixture coef 1, integrate to 1
s = ddm.Model(drift=ddm.models.DriftConstant(drift=2), noise=ddm.models.NoiseConstant(noise=3)).solve()
smix = ddm.models.OverlayUniformMixture(umixturecoef=1).apply(s)
assert np.isclose(np.sum(smix.corr) + np.sum(smix.err), 1)
# Should not change uniform distribution
s = self.FakeUniformModel(dt=.001).solve()
assert s == ddm.models.OverlayUniformMixture(umixturecoef=.2).apply(s)
# Don't change total probability
s = ddm.Model(drift=ddm.models.DriftConstant(drift=1)).solve()
smix = ddm.models.OverlayUniformMixture(umixturecoef=.2).apply(s)
assert np.isclose(np.sum(s.corr) + np.sum(s.err),
np.sum(smix.corr) + np.sum(smix.err))
def test_OverlayPoissonMixture(self):
"""Poisson mixture model overlay: an exponential distribution plus the model's solved distribution"""
# Do nothing with mixture coef 0
s = ddm.Model(drift=ddm.models.DriftConstant(drift=1)).solve()
smix = ddm.models.OverlayPoissonMixture(pmixturecoef=0, rate=1).apply(s)
assert s == smix
# With mixture coef 1, integrate to 1
s = ddm.Model(drift=ddm.models.DriftConstant(drift=2), noise=ddm.models.NoiseConstant(noise=3)).solve()
smix = ddm.models.OverlayPoissonMixture(pmixturecoef=1, rate=10).apply(s)
assert np.isclose(np.sum(smix.corr) + np.sum(smix.err), 1)
# Should be monotonic decreasing on uniform distribution
s = self.FakeUniformModel(dt=.001).solve()
smix = ddm.models.OverlayPoissonMixture(pmixturecoef=.2, rate=1).apply(s)
assert np.all([smix.corr[i-1]-smix.corr[i] > 0 for i in range(1, len(smix.corr))])
assert np.all([smix.err[i-1]-smix.err[i] > 0 for i in range(1, len(smix.err))])
# Don't change total probability
s = ddm.Model(ddm.models.DriftConstant(drift=1)).solve()
smix = ddm.models.OverlayPoissonMixture(pmixturecoef=.2, rate=7).apply(s)
assert np.isclose(np.sum(s.corr) + np.sum(s.err),
np.sum(smix.corr) + np.sum(smix.err))
def test_OverlayNonDecision(self):
"""Non-decision time shifts the histogram"""
# Should do nothing with no shift
s = ddm.Model().solve()
assert s == ddm.models.OverlayNonDecision(nondectime=0).apply(s)
# Shifts a single point distribution
s = self.FakePointModel(dt=.01).solve()
sshift = ddm.models.OverlayNonDecision(nondectime=.01).apply(s)
assert s.corr[1] == sshift.corr[2]
assert s.err[1] == sshift.err[2]
# Shift the other way
s = self.FakePointModel(dt=.01).solve()
sshift = ddm.models.OverlayNonDecision(nondectime=-.01).apply(s)
assert s.corr[1] == sshift.corr[0]
assert s.err[1] == sshift.err[0]
# Truncate when time bin doesn't align
s = self.FakePointModel(dt=.01).solve()
sshift = ddm.models.OverlayNonDecision(nondectime=.019).apply(s)
assert s.corr[1] == sshift.corr[2]
assert s.err[1] == sshift.err[2]
def test_OverlayNonDecisionUniform(self):
"""Uniform-distributed non-decision time shifts the histogram"""
# Should give the same results as OverlayNonDecision when halfwidth=0
s = ddm.Model().solve()
for nondectime in [0, -.1, .01, .0099, .011111, 1]:
ndunif = ddm.models.OverlayNonDecisionUniform(nondectime=nondectime, halfwidth=0).apply(s)
ndpoint = ddm.models.OverlayNonDecision(nondectime=nondectime).apply(s)
assert np.all(np.isclose(ndunif.corr, ndpoint.corr)), (nondectime, list(ndunif.corr), list(ndpoint.corr))
assert np.all(np.isclose(ndunif.err, ndpoint.err))
# Simple shift example
s = self.FakePointModel(dt=.01).solve()
sshift = ddm.models.OverlayNonDecisionUniform(nondectime=.02, halfwidth=.01).apply(s)
assert sshift.corr[2] == sshift.corr[3] == sshift.corr[4]
assert sshift.err[2] == sshift.err[3] == sshift.err[4]
assert sshift.corr[0] == sshift.corr[1] == sshift.corr[5] == 0
assert sshift.err[0] == sshift.err[1] == sshift.err[5] == 0
# Off-boundary and behind 0 example
s = self.FakePointModel(dt=.01).solve()
sshift = ddm.models.OverlayNonDecisionUniform(nondectime=.021111, halfwidth=.033333).apply(s)
assert sshift.corr[0] == sshift.corr[1]
assert sshift.err[0] == sshift.err[1]
assert len(set(sshift.corr)) == 2
assert len(set(sshift.err)) == 2
def test_OverlayNonDecisionGamma(self):
"""Gamma-distributed non-decision time shifts the histogram"""
# Should get back a gamma distribution from a delta spike
s = self.FakePointModel(dt=.01).solve()
sshift = ddm.models.OverlayNonDecisionGamma(nondectime=.01, shape=1.3, scale=.002).apply(s)
gamfn = scipy.stats.gamma(a=1.3, scale=.002).pdf(s.model.t_domain()[0:-2])
assert np.all(np.isclose(sshift.corr[2:], gamfn/np.sum(gamfn)*s.corr[1]))
assert np.all(np.isclose(sshift.err[2:], gamfn/np.sum(gamfn)*s.err[1]))
def test_OverlaySimplePause(self):
"""Pause at some point in the trial and then continue, leaving 0 probability in the gap"""
# Should do nothing with no shift
s = ddm.Model().solve()
assert s == ddm.models.OverlaySimplePause(pausestart=.4, pausestop=.4).apply(s)
# Shift should make a gap in the uniform model
s = self.FakeUniformModel().solve()
smix = ddm.models.OverlaySimplePause(pausestart=.3, pausestop=.6).apply(s)
assert len(set(smix.corr).union(set(smix.err))) == 2
assert len(list(groupby(smix.corr))) == 3 # Looks like ----____----------
# Should start with 0 and then go to constant with pausestart=.3
s = self.FakeUniformModel(dt=.01).solve()
smix = ddm.models.OverlaySimplePause(pausestart=0, pausestop=.05).apply(s)
assert len(set(smix.corr).union(set(smix.err))) == 2
assert len(list(groupby(smix.corr))) == 2 # Looks like ____----------
assert np.all(smix.corr[0:5] == 0) and smix.corr[6] != 0
# Truncate when time bin doesn't align
s = self.FakePointModel(dt=.01).solve()
sshift = ddm.models.OverlaySimplePause(pausestart=.01, pausestop=.029).apply(s)
assert s.corr[1] == sshift.corr[2]
assert s.err[1] == sshift.err[2]
def test_OverlayBlurredPause(self):
"""Like OverlaySimplePause but with a gamma distribution on delay times"""
# Don't change total probability when there are no undecided responses
s = ddm.Model(drift=ddm.models.DriftConstant(drift=1), T_dur=10).solve()
smix = ddm.models.OverlayBlurredPause(pausestart=.3, pausestop=.6, pauseblurwidth=.1).apply(s)
assert np.isclose(np.sum(s.corr) + np.sum(s.err),
np.sum(smix.corr) + np.sum(smix.err))
# Make sure responses before the pause aren't affected
s = self.FakePointModel(dt=.01).solve()
sshift = ddm.models.OverlayBlurredPause(pausestart=.02, pausestop=.03, pauseblurwidth=.002).apply(s)
assert s.corr[1] == sshift.corr[1] != 0
assert s.err[1] == sshift.err[1] != 0
# Make sure responses after look like a gamma distribution
s = self.FakePointModel(dt=.01).solve()
sshift = ddm.models.OverlayBlurredPause(pausestart=0, pausestop=.05, pauseblurwidth=.01).apply(s)
positive = (sshift.corr[2:] > sshift.err[1:-1]).astype(int) # Excluding first 0 point, should go from + to - slope only once
assert positive[0] == 1 and positive[-1] == 0 and len(set(positive)) == 2
def test_OverlayChain(self):
"""Combine multiple overlays in sequence"""
# Combine with OverlayNone()
s = self.FakePointModel(dt=.01).solve()
o = ddm.models.OverlayChain(overlays=[
ddm.models.OverlayNone(),
ddm.models.OverlayNonDecision(nondectime=.01),
ddm.models.OverlayNone()])
sshift = o.apply(s)
assert s.corr[1] == sshift.corr[2]
assert s.err[1] == sshift.err[2]
assert o.nondectime == .01
o.nondectime = .3
assert o.nondectime == .3
def test_LossSquaredError(self):
"""Squared error loss function"""
# Should be zero for empty sample when all undecided
m = self.FakeUndecidedModel()
s = ddm.Sample(aa([]), aa([]), undecided=1)
assert ddm.models.LossSquaredError(sample=s, dt=m.dt, T_dur=m.T_dur).loss(m) == 0
# Can also be determined precisely for the point model
m = self.FakePointModel()
sol = m.solve()
err = ddm.models.LossSquaredError(sample=s, dt=m.dt, T_dur=m.T_dur).loss(m)
assert np.isclose(err, np.sum(sol.corr)**2 + np.sum(sol.err)**2)
def test_LossLikelihood(self):
"""Likelihood loss function"""
# We can calculate likelihood for this simple case
m = self.FakePointModel(dt=.02)
sol = m.solve()
s = ddm.Sample(aa([.02]), aa([]))
expected = -np.log(np.sum(sol.corr)/m.dt)
assert np.isclose(expected, ddm.models.LossLikelihood(sample=s, dt=m.dt, T_dur=m.T_dur).loss(m))
# And for the uniform case we can assert equivalence
m = self.FakeUniformModel()
s1 = ddm.Sample(aa([.02, .05, .07, .12]), aa([.33, .21]))
s2 = ddm.Sample(aa([.13, .1, .02]), aa([.66, .15, .89]))
assert np.isclose(ddm.models.LossLikelihood(sample=s1, dt=m.dt, T_dur=m.T_dur).loss(m),
ddm.models.LossLikelihood(sample=s2, dt=m.dt, T_dur=m.T_dur).loss(m))
# TODO I think this reveals we should be doing
# (len(x_domain())-1) instead of len(x_domain()). Multiple of 2 somewhere.
# And it should not depend on dt since it is comparing to the pdf
# m1 = self.FakeUniformModel(dt=.02)
# m2 = self.FakeUniformModel(dt=.01)
# print(m1.solve().pdf_corr(), m2.solve().pdf_corr())
# s = ddm.Sample(aa([.14, .1, .01]), aa([.66, .16, .89]))
# assert np.isclose(ddm.models.LossLikelihood(sample=s, dt=m1.dt, T_dur=m1.T_dur).loss(m1),
# ddm.models.LossLikelihood(sample=s, dt=m2.dt, T_dur=m2.T_dur).loss(m2))
def test_BIC(self):
"""BIC loss function"""
# -2*Likelihood == BIC for a sample size of 1
m = self.FakePointModel(dt=.02)
sol = m.solve()
s = ddm.Sample(aa([.02]), aa([]))
expected = -np.log(np.sum(sol.corr)/m.dt)
assert np.isclose(ddm.models.LossBIC(sample=s, dt=m.dt, T_dur=m.T_dur, nparams=1, samplesize=1).loss(m),
2*ddm.models.LossLikelihood(sample=s, dt=m.dt, T_dur=m.T_dur).loss(m))
class TestSample(TestCase):
    """Tests for the Sample class: construction, combination, and statistics."""
    def setUp(self):
        # A palette of samples covering the corner cases: empty, with/without
        # conditions, with/without undecided trials, string-valued conditions,
        # and multiple conditions.
        self.samps = {
            # Empty sample
            "empty": ddm.Sample(aa([]), aa([]), 0),
            # Simple sample
            "simple": ddm.Sample(aa([1, 2]), aa([.5, .7]), 0),
            # Sample with conditions
            "conds": ddm.Sample(aa([1, 2, 3]), aa([]), 0,
                                cond1=(aa([1, 1, 2]), aa([]))),
            # Sample with conditions and explicitly showing undecided
            "condsexp": ddm.Sample(aa([1, 2, 3]), aa([]), 0,
                                   cond1=(aa([1, 1, 2]), aa([]), aa([]))),
            # Sample with undecided
            "undec": ddm.Sample(aa([1, 2]), aa([.5, .7]), 2),
            # Sample with undecided and conditions
            "undeccond": ddm.Sample(aa([1, 2, 3]), aa([]), 3,
                                    cond1=(aa([1, 1, 2]), aa([]), aa([2, 2, 1]))),
            # For the adding test
            "adda": ddm.Sample(aa([1]), aa([2, 4]), 3,
                               cond1=(aa(["a"]), aa(["a", "b"]), aa(["a", "b", "b"]))),
            "addb": ddm.Sample(aa([1.5, 2, 1]), aa([]), 1,
                               cond1=(aa(["b", "b", "c"]), aa([]), aa(["d"]))),
            # Two conditions
            "two": ddm.Sample(aa([1]), aa([2]), 1,
                              conda=(aa(["a"]), aa(["b"]), aa(["a"])),
                              condb=(aa([1]), aa([2]), aa([2]))),
        }
    def test_add(self):
        """Adding two samples together"""
        s1 = self.samps["adda"]
        s2 = self.samps["addb"]
        s = s1 + s2
        assert len(s) == 10
        assert s.condition_names() == ["cond1"]
        assert s.condition_values("cond1") == ["a", "b", "c", "d"]
        assert s.prob_undecided() == .4
        assert s.prob_correct() == .4
        assert s.prob_error() == .2
        # Try to add to the empty sample: empty acts as the identity element.
        assert self.samps["empty"] + self.samps["undec"] == self.samps["undec"]
        assert self.samps["empty"] + self.samps["simple"] == self.samps["simple"]
    def test_eqality(self):
        """Two samples are equal iff they are the same"""
        # NOTE(review): method name has a typo ("eqality"); left unchanged
        # since unittest still discovers it via the "test" prefix.
        # Equality and inequality with multiple conditions
        assert self.samps["adda"] != self.samps["addb"]
        assert self.samps["adda"] == self.samps["adda"]
    def test_condition_values(self):
        """Condition_values method"""
        assert self.samps["conds"].condition_values("cond1") == [1, 2]
        assert self.samps["condsexp"].condition_values("cond1") == [1, 2]
        assert self.samps["undeccond"].condition_values("cond1") == [1, 2]
        assert self.samps["adda"].condition_values("cond1") == ["a", "b"]
        assert self.samps["addb"].condition_values("cond1") == ["b", "c", "d"]
        assert self.samps["two"].condition_values("conda") == ["a", "b"]
        assert self.samps["two"].condition_values("condb") == [1, 2]
    def test_condition_combinations(self):
        """Condition combinations are a cartesian product of condition values"""
        # If we want nothing
        assert self.samps["conds"].condition_combinations([]) == [{}]
        # If nothing matches
        assert self.samps["conds"].condition_combinations(["xyz"]) == [{}]
        # If we want everything
        assert self.samps["conds"].condition_combinations(None) == [{"cond1": 1}, {"cond1": 2}]
        # Limit to one condition
        assert self.samps["conds"].condition_combinations(["cond1"]) == [{"cond1": 1}, {"cond1": 2}]
        # More conditions.  Order is not guaranteed, so check set equality
        # via mutual containment.
        conds_two = self.samps["two"].condition_combinations()
        exp_conds_two = [{"conda": "a", "condb": 1},
                         {"conda": "b", "condb": 2},
                         {"conda": "a", "condb": 2}]
        assert all(a in exp_conds_two for a in conds_two)
        assert all(a in conds_two for a in exp_conds_two)
    def test_pdfs(self):
        """Produce valid distributions which sum to one"""
        dt = .02
        for n,s in self.samps.items():
            if n == "empty": continue
            assert np.isclose(fsum([fsum(s.pdf_corr(T_dur=4, dt=dt))*dt, fsum(s.pdf_err(T_dur=4, dt=dt))*dt, s.prob_undecided()]), 1)
            assert np.isclose(fsum(s.pdf_corr(T_dur=4, dt=dt)*dt), s.prob_correct())
            assert np.isclose(fsum(s.pdf_err(T_dur=4, dt=dt)*dt), s.prob_error())
            assert s.mean_decision_time() > 0
            if s.prob_undecided() == 0:
                assert s.prob_correct() == s.prob_correct_forced()
                assert s.prob_error() == s.prob_error_forced()
            assert len(s.pdf_corr(T_dur=4, dt=dt)) == len(s.t_domain(T_dur=4, dt=dt))
    def test_iter(self):
        """The iterator .items() goes through correct or error trials and their conditions"""
        itr = self.samps["conds"].items(correct=True)
        assert next(itr) == (1, {"cond1": 1})
        assert next(itr) == (2, {"cond1": 1})
        assert next(itr) == (3, {"cond1": 2})
        fails(lambda : next(itr), StopIteration)
        itr = self.samps["two"].items(correct=False)
        assert next(itr) == (2, {"conda": "b", "condb": 2})
        # Create a list to make sure we don't iterate past the end
        list(self.samps["conds"].items(correct=True))
        list(self.samps["conds"].items(correct=False))
    def test_subset(self):
        """Filter a sample by some conditions"""
        # Basic access
        assert len(self.samps['conds'].subset(cond1=2)) == 1
        # The elements being accessed
        assert list(self.samps['conds'].subset(cond1=1).corr) == [1, 2]
        # An empty subset with two conditions
        assert len(self.samps['two'].subset(conda="b", condb=1)) == 0
        # A non-epty subset with two conditions
        assert len(self.samps['two'].subset(conda="a", condb=1)) == 1
        # Querying only one condition when more conditions exist
        assert len(self.samps['two'].subset(conda="a")) == 2
        # Query by list
        assert len(self.samps['two'].subset(conda=["a", "z"])) == 2
        # Query by function
        assert len(self.samps['two'].subset(conda=lambda x : True if x=="a" else False)) == 2
    def test_from_numpy_array(self):
        """Create a sample from a numpy array"""
        # Column 0 is the RT, column 1 is 1 for correct / 0 for error.
        simple_ndarray = np.asarray([[1, 1], [.5, 0], [.7, 0], [2, 1]])
        assert ddm.Sample.from_numpy_array(simple_ndarray, []) == self.samps['simple']
        conds_ndarray = np.asarray([[1, 1, 1], [2, 1, 1], [3, 1, 2]])
        assert ddm.Sample.from_numpy_array(conds_ndarray, ["cond1"]) == self.samps['conds']
        assert ddm.Sample.from_numpy_array(conds_ndarray, ["cond1"]) == self.samps['condsexp']
    def test_from_pandas(self):
        """Create a sample from a pandas dataframe"""
        simple_df = pandas.DataFrame({'corr': [1, 0, 0, 1], 'resptime': [1, .5, .7, 2]})
        print(simple_df)
        assert ddm.Sample.from_pandas_dataframe(simple_df, 'resptime', 'corr') == self.samps['simple']
        cond_df = pandas.DataFrame({'c': [1, 1, 1], 'rt': [1, 2, 3], 'cond1': [1, 1, 2]})
        assert ddm.Sample.from_pandas_dataframe(cond_df, 'rt', 'c') == self.samps['conds']
        assert ddm.Sample.from_pandas_dataframe(cond_df, correct_column_name='c', rt_column_name='rt') == self.samps['condsexp']
class TestSolution(TestCase):
    """Tests for Solution objects produced by the various solvers."""
    def setUp(self):
        """Build a representative set of solutions: analytical and numerical,
        with and without undecided probability, with and without conditions."""
        class DriftSimple(ddm.Drift):
            name = "Test drift"
            required_conditions = ['coher']
            required_parameters = []
            def get_drift(self, conditions, **kwargs):
                return conditions["coher"]
        class DriftSimpleStringArg(ddm.Drift):
            name = "Test drift"
            required_conditions = ['type']
            required_parameters = []
            def get_drift(self, conditions, **kwargs):
                if conditions['type'] == "a":
                    return .3
                else:
                    return .1
        # No undecided
        self.quick_ana = ddm.Model(T_dur=2, dt=.02).solve_analytical()
        # Includes undecided
        self.quick_cn = ddm.Model(T_dur=.5).solve_numerical_cn()
        # Includes undecided
        self.quick_imp = ddm.Model(T_dur=.5).solve_numerical_implicit()
        # No undecided, with parameters
        self.params_ana = ddm.Model(drift=DriftSimple(), T_dur=2.5, dt=.005).solve_analytical({"coher": .3})
        # Includes undecided, with parameters
        self.params_cn = ddm.Model(drift=DriftSimple(), T_dur=.5).solve_numerical_cn(conditions={"coher": .1})
        # Includes undecided, with parameters
        self.params_imp = ddm.Model(drift=DriftSimple(), T_dur=.5).solve_numerical_implicit(conditions={"coher": .1})
        # Dependence with a string argument
        self.params_strarg = ddm.Model(drift=DriftSimpleStringArg(), T_dur=.5).solve_analytical(conditions={"type": "a"})
        self.all_sols = [self.quick_ana, self.quick_cn, self.quick_imp, self.params_ana, self.params_cn, self.params_imp, self.params_strarg]
    def test_pdfs(self):
        """Make sure we produce valid distributions from solutions"""
        # For each test model
        for s in self.all_sols:
            dt = s.model.dt
            # Distribution sums to 1
            assert np.isclose(fsum([fsum(s.pdf_corr())*dt, fsum(s.pdf_err())*dt, s.prob_undecided()]), 1)
            # Correct and error probabilities are sensible
            assert np.isclose(fsum(s.pdf_corr()*dt), s.prob_correct())
            assert np.isclose(fsum(s.pdf_err()*dt), s.prob_error())
            assert s.mean_decision_time() > 0
            if s.prob_undecided() == 0:
                assert s.prob_correct() == s.prob_correct_forced()
                assert s.prob_error() == s.prob_error_forced()
            # Signed probabilities sum to 1
            if s.undec is not None:
                assert np.isclose(np.sum(s.prob_correct_sign()) + np.sum(s.prob_error_sign()), 1, rtol=.005)
                assert np.sum(s.prob_correct_sign()) + np.sum(s.prob_error_sign()) <= 1
            # Correct time domain
            assert len(s.pdf_corr()) == len(s.model.t_domain())
        # pdf_undec with pdf_corr and pdf_err sums to one if pdf_undec exists
        for s in [self.quick_cn, self.quick_imp, self.params_cn, self.params_imp]:
            # Bug fix: previously `dt` was reused from the last iteration of
            # the loop above, which is only correct by accident when all
            # models share the same dt.  Use this solution's own dt.
            dt = s.model.dt
            dx = s.model.dx
            if s.undec is not None:
                # Allow better tolerance since accuracy isn't perfect for undecided pdf
                assert np.isclose(fsum([fsum(s.pdf_corr())*dt, fsum(s.pdf_err())*dt, fsum(s.pdf_undec())*dx]), 1, atol=.001)
class TestTriDiagMatrix(TestCase):
    """Tests for the tri-diagonal matrix helper used by the numerical solvers."""
    def setUp(self):
        self.matrices = [ddm.tridiag.TriDiagMatrix.eye(1)*4.1, # For fully collapsing bounds
                         ddm.tridiag.TriDiagMatrix.eye(3),
                         ddm.tridiag.TriDiagMatrix(diag=np.asarray([1, 2, 3]),
                                                   up=np.asarray([5, 1]),
                                                   down=np.asarray([1, 2])),
                         ddm.tridiag.TriDiagMatrix(diag=np.asarray([1.1, 2.6, -3.1]),
                                                   up=np.asarray([50, 1.6]),
                                                   down=np.asarray([.1, 2.4]))]
        self.scalars = [5.4, 9, 0, 1, -6]
    def test_multiply(self):
        # Compare scalar and elementwise products against scipy sparse.
        for m in self.matrices:
            for s in self.scalars:
                assert np.all(((m * s).to_scipy_sparse() == m.to_scipy_sparse().dot(s)).todense())
                assert np.all(((m * s).to_scipy_sparse() == (m.to_scipy_sparse()*s)).todense())
            for m2 in self.matrices:
                if m.shape == m2.shape:
                    assert np.all(((m.dot(m2)) == m.to_scipy_sparse().dot(m2.to_scipy_sparse())).todense())
                    assert np.all((m * m2).to_scipy_sparse() == m.to_scipy_sparse().multiply(m2.to_scipy_sparse()).todense())
    def test_multiply_inplace(self):
        # Bug fix: this method was previously also named "test_add_inplace",
        # so it was shadowed by the later definition and never ran.
        ms = [copy.deepcopy(m) for m in self.matrices]
        for m,mo in zip(ms, self.matrices):
            m *= 1.4
            m *= mo
            assert m == (mo * 1.4) * mo
    def test_add(self):
        for m in self.matrices:
            #for s in self.scalars:
            #    np.sum((m + s).to_scipy_sparse() != m.to_scipy_sparse() + s)
            for m2 in self.matrices:
                if m.shape == m2.shape:
                    assert np.all(((m + m2).to_scipy_sparse() == (m.to_scipy_sparse() + m2.to_scipy_sparse())).todense())
    def test_add_r(self):
        for m in self.matrices:
            #for s in self.scalars:
            #    np.sum((s + m).to_scipy_sparse() != s + m.to_scipy_sparse())
            for m2 in self.matrices:
                if m.shape == m2.shape:
                    assert np.all(((m2 + m).to_scipy_sparse() == (m2.to_scipy_sparse() + m.to_scipy_sparse())).todense())
    def test_add_inplace(self):
        ms = [copy.deepcopy(m) for m in self.matrices]
        for m,mo in zip(ms, self.matrices):
            m += 1.4
            m += mo
            assert m == (mo + 1.4) + mo
    def test_subtract(self):
        for m in self.matrices:
            #for s in self.scalars:
            #    np.sum((m - s).to_scipy_sparse() != m.to_scipy_sparse() + -s)
            for m2 in self.matrices:
                if m.shape == m2.shape:
                    assert np.all(((m - m2).to_scipy_sparse() == (m.to_scipy_sparse() - m2.to_scipy_sparse())).todense())
    def test_subtract_r(self):
        for m in self.matrices:
            #for s in self.scalars:
            #    np.sum((s - m).to_scipy_sparse() != s - m.to_scipy_sparse())
            for m2 in self.matrices:
                if m.shape == m2.shape:
                    assert np.all(((m2 - m).to_scipy_sparse() == (m2.to_scipy_sparse() - m.to_scipy_sparse())).todense())
    def test_subtract_inplace(self):
        ms = [copy.deepcopy(m) for m in self.matrices]
        for m,mo in zip(ms, self.matrices):
            m -= 1.4
            m -= mo
            assert m == (mo - 1.4) - mo
class TestMisc(TestCase):
    """Miscellaneous regression tests that don't fit the other suites."""
    def test_analytic_lin_collapse(self):
        """Make sure linearly collapsing bounds stops at 0"""
        # The bound shrinks linearly and reaches zero at t=1, before T_dur.
        bound = ddm.models.bound.BoundCollapsingLinear(B=1, t=1)
        model = ddm.Model(bound=bound, T_dur=2)
        solution = model.solve()
        # The solution must still cover the full time domain.
        assert len(solution.pdf_corr()) == len(model.t_domain())
# TODO test if there is no overlay, then corr + err + undecided = 1
# TODO test bounds that don't depend on t but do depend on conditions, drift rates (mu values) defined like that, etc.
# TODO test solution.resample in integration testing
# TODO test loss parallelization?
|
from __future__ import with_statement
__version__ = '0.31'
__license__ = 'MIT'
import re
import os
import sys
import finalseg
import time
import tempfile
import marshal
from math import log
import random
import threading
from functools import wraps
import logging
# --- Module-level segmenter state -------------------------------------------
# Default dictionary filename, resolved relative to this module's directory.
DICTIONARY = "dict.txt"
# Guards lazy initialization and dictionary swaps across threads.
DICT_LOCK = threading.RLock()
trie = None # to be initialized
# word -> log(frequency/total) after initialize() normalizes it.
FREQ = {}
# Smallest log-frequency in FREQ; used as the unknown-word fallback.
min_freq = 0.0
total =0.0
# word -> part-of-speech tag for words added via user dictionaries.
user_word_tag_tab={}
# Set to True once the trie has been built (or loaded from cache).
initialized = False
log_console = logging.StreamHandler(sys.stderr)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.addHandler(log_console)
def setLogLevel(log_level):
    """Set the verbosity of the module-level logger.

    Parameters
    ----------
    log_level : int
        A logging level constant such as ``logging.DEBUG``.
    """
    # No `global` statement needed: we only call a method on the
    # module-level logger, we never rebind the name.
    logger.setLevel(log_level)
def gen_trie(f_name):
    """Build a character trie and frequency table from a dictionary file.

    The file must be UTF-8 text with one entry per line in the format
    "word frequency tag" (space-separated, exactly three fields).

    Parameters
    ----------
    f_name : str
        Path to the dictionary file.

    Returns
    -------
    tuple of (trie, lfreq, ltotal)
        trie   -- nested dict of characters; the '' key marks a word end
        lfreq  -- dict mapping word -> raw frequency (float)
        ltotal -- sum of all frequencies

    Raises
    ------
    ValueError
        If a line is malformed; the offending file/line is logged first.
    """
    lfreq = {}
    trie = {}
    ltotal = 0.0
    with open(f_name, 'rb') as f:
        lineno = 0
        for line in f.read().rstrip().decode('utf-8').split('\n'):
            lineno += 1
            try:
                word,freq,_ = line.split(' ')
                freq = float(freq)
                lfreq[word] = freq
                ltotal+=freq
                # Walk/extend the trie one character at a time.
                p = trie
                for c in word:
                    if c not in p:
                        p[c] ={}
                    p = p[c]
                p['']='' # ending flag
            except ValueError:
                # Bug fix: the Python-2-only `except ValueError, e` /
                # `raise ValueError, e` pair wrapped the exception and lost
                # the traceback; `as`-syntax plus a bare `raise` preserves
                # it and works on Python 2.6+ and 3.
                logger.debug('%s at line %s %s' % (f_name, lineno, line))
                raise
    return trie, lfreq,ltotal
def initialize(*args):
    """Load the dictionary, building (or loading from cache) the trie.

    Accepts an optional positional argument giving the dictionary path;
    otherwise the module-level DICTIONARY is used.  Thread-safe via
    DICT_LOCK; a no-op if already initialized.
    """
    global trie, FREQ, total, min_freq, initialized
    if len(args)==0:
        dictionary = DICTIONARY
    else:
        dictionary = args[0]
    with DICT_LOCK:
        if initialized:
            return
        # Drop any previous trie before rebuilding.
        if trie:
            del trie
            trie = None
        _curpath=os.path.normpath( os.path.join( os.getcwd(), os.path.dirname(__file__) ) )
        abs_path = os.path.join(_curpath,dictionary)
        logger.debug("Building Trie..., from %s" % abs_path)
        t1 = time.time()
        # The default dictionary shares one cache file; custom dictionaries
        # get a cache file keyed by the hash of their absolute path.
        if abs_path == os.path.join(_curpath,"dict.txt"): # default dictionary
            cache_file = os.path.join(tempfile.gettempdir(),"jieba.cache")
        else: # custom (user-supplied) dictionary
            cache_file = os.path.join(tempfile.gettempdir(),"jieba.user."+str(hash(abs_path))+".cache")
        load_from_cache_fail = True
        # Only trust the cache if it is newer than the dictionary file.
        if os.path.exists(cache_file) and os.path.getmtime(cache_file)>os.path.getmtime(abs_path):
            logger.debug("loading model from cache %s" % cache_file)
            try:
                trie,FREQ,total,min_freq = marshal.load(open(cache_file,'rb'))
                load_from_cache_fail = False
            except:
                # Corrupt/incompatible cache: fall through to a full rebuild.
                load_from_cache_fail = True
        if load_from_cache_fail:
            trie,FREQ,total = gen_trie(abs_path)
            FREQ = dict([(k,log(float(v)/total)) for k,v in FREQ.iteritems()]) #normalize
            min_freq = min(FREQ.itervalues())
            logger.debug("dumping model to file cache %s" % cache_file)
            try:
                # Write to a temp file then rename, so readers never see a
                # partially-written cache.
                tmp_suffix = "."+str(random.random())
                with open(cache_file+tmp_suffix,'wb') as temp_cache_file:
                    marshal.dump((trie,FREQ,total,min_freq),temp_cache_file)
                if os.name=='nt':
                    # os.rename cannot overwrite on Windows; shutil.move can.
                    import shutil
                    replace_file = shutil.move
                else:
                    replace_file = os.rename
                replace_file(cache_file+tmp_suffix,cache_file)
            except:
                # Cache write failure is non-fatal: the model is in memory.
                logger.error("dump cache file failed.")
                logger.exception("")
        initialized = True
        logger.debug("loading model cost %s seconds." % (time.time() - t1))
        logger.debug("Trie has been built succesfully.")
def require_initialized(fn):
    """Decorator: lazily load the default dictionary before running `fn`."""
    @wraps(fn)
    def wrapped(*args, **kwargs):
        global initialized
        if not initialized:
            initialize(DICTIONARY)
        return fn(*args, **kwargs)
    return wrapped
def __cut_all(sentence):
    """Full-mode segmentation: yield every dictionary word found in the DAG.

    Single-candidate positions already covered by a previous word are
    skipped (tracked via old_j) so characters are not re-emitted.
    """
    dag = get_DAG(sentence)
    old_j = -1
    for k,L in dag.iteritems():
        if len(L)==1 and k>old_j:
            # Only one way to segment here and it extends past old_j.
            yield sentence[k:L[0]+1]
            old_j = L[0]
        else:
            # Emit every multi-character word starting at k.
            for j in L:
                if j>k:
                    yield sentence[k:j+1]
                    old_j = j
def calc(sentence,DAG,idx,route):
    """Fill `route` with the best segmentation path by dynamic programming.

    Works backwards from the end of the sentence; route[i] is a tuple
    (best log-probability from i to the end, end index of the best word
    starting at i).  FREQ holds log-probabilities, so additions here are
    multiplications of probabilities.

    NOTE(review): the `idx` parameter is immediately shadowed by the loop
    variable and is effectively unused; kept for interface compatibility.
    """
    N = len(sentence)
    route[N] = (0.0,'')
    for idx in xrange(N-1,-1,-1):
        # min_freq is the fallback score for out-of-vocabulary fragments.
        candidates = [ ( FREQ.get(sentence[idx:x+1],min_freq) + route[x+1][0],x ) for x in DAG[idx] ]
        route[idx] = max(candidates)
@require_initialized
def get_DAG(sentence):
    """Build the word DAG of `sentence` from the dictionary trie.

    Returns a dict mapping start index i -> list of end indices j such
    that sentence[i:j+1] is a dictionary word.  Positions with no match
    map to [i] (the single character itself).
    """
    N = len(sentence)
    i,j=0,0
    p = trie  # current trie node while extending the word sentence[i:j+1]
    DAG = {}
    while i<N:
        c = sentence[j]
        if c in p:
            p = p[c]
            if '' in p:
                # '' marks a complete word ending at j.
                if i not in DAG:
                    DAG[i]=[]
                DAG[i].append(j)
            j+=1
            if j>=N:
                # Ran off the end; restart scan from the next start index.
                i+=1
                j=i
                p=trie
        else:
            # No continuation in the trie; restart from the next start index.
            p = trie
            i+=1
            j=i
    # Guarantee every position has at least the single-character edge.
    for i in xrange(len(sentence)):
        if i not in DAG:
            DAG[i] =[i]
    return DAG
def __cut_DAG_NO_HMM(sentence):
    """Segment using only the DAG best path (no HMM for unknown words).

    Consecutive single ASCII alphanumeric characters are buffered and
    emitted as one token; everything else is yielded word by word.
    """
    re_eng = re.compile(ur'[a-zA-Z0-9]',re.U)
    DAG = get_DAG(sentence)
    route ={}
    calc(sentence,DAG,0,route=route)
    x = 0
    N = len(sentence)
    buf = u''  # accumulates runs of single latin/digit characters
    while x<N:
        # route[x][1] is the end index of the best word starting at x.
        y = route[x][1]+1
        l_word = sentence[x:y]
        if re_eng.match(l_word) and len(l_word)==1:
            buf += l_word
            x =y
        else:
            # Flush any pending alphanumeric run before the next word.
            if len(buf)>0:
                yield buf
                buf = u''
            yield l_word
            x =y
    # Flush a trailing alphanumeric run.
    if len(buf)>0:
        yield buf
        buf = u''
def __cut_DAG(sentence):
    """Segment using the DAG best path, with HMM fallback for unknown runs.

    Single characters on the best path are buffered; when a multi-char
    word is reached, the buffered run is either emitted as-is (known word
    or single char) or handed to finalseg.cut (the HMM) to guess word
    boundaries inside the out-of-vocabulary run.
    """
    DAG = get_DAG(sentence)
    route ={}
    calc(sentence,DAG,0,route=route)
    x = 0
    buf =u''  # accumulates consecutive single characters from the best path
    N = len(sentence)
    while x<N:
        y = route[x][1]+1
        l_word = sentence[x:y]
        if y-x==1:
            buf+= l_word
        else:
            if len(buf)>0:
                if len(buf)==1:
                    yield buf
                    buf=u''
                else:
                    if (buf not in FREQ):
                        # Unknown multi-char run: let the HMM split it.
                        # (sic: "regognized" is a historical local-name typo)
                        regognized = finalseg.cut(buf)
                        for t in regognized:
                            yield t
                    else:
                        # Known word; emit its characters individually,
                        # matching the original segmentation behavior.
                        for elem in buf:
                            yield elem
                    buf=u''
            yield l_word
            x =y
    # Flush the trailing buffer with the same known/unknown logic.
    if len(buf)>0:
        if len(buf)==1:
            yield buf
        else:
            if (buf not in FREQ):
                regognized = finalseg.cut(buf)
                for t in regognized:
                    yield t
            else:
                for elem in buf:
                    yield elem
def cut(sentence,cut_all=False,HMM=True):
    """Segment `sentence` into words (generator of unicode strings).

    Parameters
    ----------
    sentence : str or unicode
        Text to segment; byte strings are decoded as UTF-8, falling back
        to GBK with errors ignored.
    cut_all : bool
        True selects "full mode" (all dictionary words), False the
        accurate best-path mode.
    HMM : bool
        Whether to use the HMM to discover out-of-vocabulary words
        (ignored when cut_all is True).
    """
    if not isinstance(sentence, unicode):
        try:
            sentence = sentence.decode('utf-8')
        except UnicodeDecodeError:
            sentence = sentence.decode('gbk','ignore')
    # re_han matches spans worth segmenting; re_skip classifies the rest.
    re_han, re_skip = re.compile(ur"([\u4E00-\u9FA5a-zA-Z0-9+#&\._]+)", re.U), re.compile(ur"(\r\n|\s)", re.U)
    if cut_all:
        re_han, re_skip = re.compile(ur"([\u4E00-\u9FA5]+)", re.U), re.compile(ur"[^a-zA-Z0-9+#\n]", re.U)
    blocks = re_han.split(sentence)
    # Pick the block segmenter once, up front.
    if HMM:
        cut_block = __cut_DAG
    else:
        cut_block = __cut_DAG_NO_HMM
    if cut_all:
        cut_block = __cut_all
    for blk in blocks:
        if len(blk)==0:
            continue
        if re_han.match(blk):
            for word in cut_block(blk):
                yield word
        else:
            # Non-segmentable block: pass whitespace through, and split
            # remaining characters one by one (except in full mode).
            tmp = re_skip.split(blk)
            for x in tmp:
                if re_skip.match(x):
                    yield x
                elif not cut_all:
                    for xx in x:
                        yield xx
                else:
                    yield x
def cut_for_search(sentence,HMM=True):
    """Segment `sentence` for search-engine indexing.

    For each word produced by cut(), first yields every in-dictionary
    2-gram (for words longer than 2 chars) and 3-gram (for words longer
    than 3 chars) it contains, then the word itself.
    """
    for word in cut(sentence, HMM=HMM):
        length = len(word)
        if length > 2:
            for start in xrange(length - 1):
                bigram = word[start:start + 2]
                if bigram in FREQ:
                    yield bigram
        if length > 3:
            for start in xrange(length - 2):
                trigram = word[start:start + 3]
                if trigram in FREQ:
                    yield trigram
        yield word
@require_initialized
def load_userdict(f):
    """Load a user dictionary, merging its words into the global model.

    Parameters
    ----------
    f : str, unicode, or file-like
        Path to (or open handle of) a UTF-8 file with lines of the form
        "word freq [tag]".
    """
    global trie,total,FREQ
    if isinstance(f, (str, unicode)):
        f = open(f, 'rb')
    content = f.read().decode('utf-8')
    line_no = 0
    for line in content.split("\n"):
        line_no+=1
        if line.rstrip()=='': continue
        tup =line.split(" ")
        word,freq = tup[0],tup[1]
        if line_no==1:
            word = word.replace(u'\ufeff',u"") # remove BOM flag if it exists
        # The third field, when present, is a part-of-speech tag.
        if len(tup)==3:
            add_word(word, freq, tup[2])
        else:
            add_word(word, freq)
def add_word(word, freq, tag=None):
    """Insert one word into the in-memory dictionary.

    Stores the word's log-probability (log(freq/total)) in FREQ, records an
    optional POS tag, and threads the word through the prefix trie, marking
    the leaf with an empty-string key.
    """
    global FREQ, trie, total, user_word_tag_tab
    FREQ[word] = log(float(freq) / total)
    if tag is not None:
        user_word_tag_tab[word] = tag.strip()
    node = trie
    for ch in word:
        node = node.setdefault(ch, {})
    node[''] = ''  # ending flag: a complete word terminates here
# Keep references to the sequential implementations so that
# enable_parallel()/disable_parallel() can swap cut/cut_for_search
# in and out without losing the originals.
__ref_cut = cut
__ref_cut_for_search = cut_for_search

def __lcut(sentence):
    # list-returning wrapper (accurate mode, HMM on) - usable with Pool.map
    return list(__ref_cut(sentence,False))

def __lcut_no_hmm(sentence):
    # list-returning wrapper (accurate mode, HMM off)
    return list(__ref_cut(sentence,False,False))

def __lcut_all(sentence):
    # list-returning wrapper (full mode)
    return list(__ref_cut(sentence,True))

def __lcut_for_search(sentence):
    # list-returning wrapper for cut_for_search
    return list(__ref_cut_for_search(sentence))
@require_initialized
def enable_parallel(processnum=None):
    """Switch cut()/cut_for_search() to multiprocessing implementations.

    Args:
        processnum: number of worker processes; defaults to cpu_count().

    Raises:
        Exception: on Windows (POSIX only) or on Python < 2.6.
    """
    global pool,cut,cut_for_search
    if os.name=='nt':
        raise Exception("jieba: parallel mode only supports posix system")
    if sys.version_info[0]==2 and sys.version_info[1]<6:
        raise Exception("jieba: the parallel feature needs Python version>2.5 ")
    from multiprocessing import Pool,cpu_count
    if processnum==None:
        processnum = cpu_count()
    pool = Pool(processnum)

    def pcut(sentence,cut_all=False,HMM=True):
        # split on newline runs so each chunk can be segmented independently
        parts = re.compile('([\r\n]+)').split(sentence)
        if cut_all:
            result = pool.map(__lcut_all,parts)
        else:
            if HMM:
                result = pool.map(__lcut,parts)
            else:
                result = pool.map(__lcut_no_hmm,parts)
        # flatten the per-chunk word lists back into one stream
        for r in result:
            for w in r:
                yield w

    def pcut_for_search(sentence):
        parts = re.compile('([\r\n]+)').split(sentence)
        result = pool.map(__lcut_for_search,parts)
        for r in result:
            for w in r:
                yield w

    # monkey-patch the module-level entry points
    cut = pcut
    cut_for_search = pcut_for_search
def disable_parallel():
    """Undo enable_parallel(): close the worker pool and restore the
    original sequential cut/cut_for_search implementations."""
    global pool,cut,cut_for_search
    if 'pool' in globals():
        pool.close()
    pool = None
    cut = __ref_cut
    cut_for_search = __ref_cut_for_search
def set_dictionary(dictionary_path):
    """Point jieba at a different main dictionary file.

    The dictionary is not reloaded immediately: clearing the initialized
    flag makes the next segmentation call re-initialize from the new path.

    Raises:
        Exception: if the resolved path does not exist.
    """
    global initialized, DICTIONARY
    with DICT_LOCK:
        # abspath == normpath(join(getcwd(), path)), as the original spelled out
        abs_path = os.path.abspath(dictionary_path)
        if not os.path.exists(abs_path):
            raise Exception("jieba: path does not exist:" + abs_path)
        DICTIONARY = abs_path
        initialized = False
def get_abs_path_dict():
    """Return the absolute path of the currently configured dictionary,
    resolved relative to this module's directory."""
    module_dir = os.path.abspath(os.path.dirname(__file__))
    return os.path.join(module_dir, DICTIONARY)
def tokenize(unicode_sentence,mode="default",HMM=True):
    """Segment *unicode_sentence* and yield (word, start, end) tuples.

    Args:
        unicode_sentence: text to tokenize; must already be unicode.
        mode: "default" yields each word once with its offsets; any other
            value ("search") additionally yields in-dictionary 2-grams and
            3-grams inside long words, with their own offsets.
        HMM: forwarded to cut().

    Raises:
        Exception: when the input is not unicode.
    """
    #mode ("default" or "search")
    if not isinstance(unicode_sentence, unicode):
        raise Exception("jieba: the input parameter should unicode.")
    start = 0
    if mode=='default':
        for w in cut(unicode_sentence,HMM=HMM):
            width = len(w)
            yield (w,start,start+width)
            start+=width
    else:
        for w in cut(unicode_sentence,HMM=HMM):
            width = len(w)
            if len(w)>2:
                for i in xrange(len(w)-1):
                    gram2 = w[i:i+2]
                    if gram2 in FREQ:
                        yield (gram2,start+i,start+i+2)
            if len(w)>3:
                for i in xrange(len(w)-2):
                    gram3 = w[i:i+3]
                    if gram3 in FREQ:
                        yield (gram3,start+i,start+i+3)
            yield (w,start,start+width)
            start+=width
|
#!/usr/bin/env python
# thermald reports on cpu temp, cpu usage, memory usage
# zmq message used = thermal
# it also:
# controls the fan of the EON
# turns charging on and off
# checks if we still have a good location
# checks if the selfdrive can be started based on :
# a health message is available (generated by boardd)
# car is started
# voltage > 13.5
# training is completed (param TrainingVersion = 0.1.0)
# terms have been accepted (param HasAcceptedTerms)
import os
import zmq
import psutil
from smbus2 import SMBus
from cereal import log
from selfdrive.version import training_version
from selfdrive.swaglog import cloudlog
import selfdrive.messaging as messaging
from selfdrive.services import service_list
from selfdrive.loggerd.config import ROOT
from common.params import Params
from common.realtime import sec_since_boot
from common.numpy_fast import clip
from common.filter_simple import FirstOrderFilter
# thermal severity levels (green/yellow/red/danger) from the cereal schema
ThermalStatus = log.ThermalData.ThermalStatus
CURRENT_TAU = 2.  # 2s time constant for the battery current filter
def read_tz(x):
    """Read thermal zone *x* and return its raw temperature reading.

    CPU temperature is stored in /sys/devices/virtual/thermal/thermal_zonex/temp;
    multi-CPU systems have multiple zone directories (RPi uses thermal_zone0).

    Returns:
        The zone value clamped to >= 0, or 0 when the zone does not exist.
    """
    try:
        # the with-block closes the file; the original also called f.close()
        # redundantly inside it
        with open("/sys/devices/virtual/thermal/thermal_zone%d/temp" % x) as f:
            return max(0, int(f.read()))
    except FileNotFoundError:
        return 0
def read_thermal():
    """Build a new 'thermal' message populated with raw thermal-zone
    readings and the current CPU utilisation percentage.

    NOTE(review): the zone indices (5, 7, 10, 12, 2, 16, 29) are
    board-specific -- confirm against the target hardware.
    """
    dat = messaging.new_message()
    dat.init('thermal')
    dat.thermal.cpu0 = read_tz(5)
    dat.thermal.cpu1 = read_tz(7)
    dat.thermal.cpu2 = read_tz(10)
    dat.thermal.cpu3 = read_tz(12)
    dat.thermal.mem = read_tz(2)
    dat.thermal.gpu = read_tz(16)
    dat.thermal.bat = read_tz(29)
    dat.thermal.cpu0Percent = psutil.cpu_percent()

    # for raspberry and single cpu hardware: zone 5 is absent, fall back to zone 0
    if dat.thermal.cpu0 == 0:
        dat.thermal.cpu0 = read_tz(0)
    return dat
LEON = False
def setup_eon_fan():
    """Initialise the EON fan controller over I2C.

    Probes the fan controller at bus 7 / address 0x21; if the writes fail
    with IOError the board is assumed to be a LEON variant and the global
    LEON flag is set. Sets the global I2C flag to indicate bus availability.
    """
    global LEON, I2C
    os.system("echo 2 > /sys/module/dwc3_msm/parameters/otg_switch")
    # open I2C bus to write some data on bus 7, address 0x21, offset 0x010, data=0xf
    try:
        bus = SMBus(7, force=True)
        I2C = True
        try:
            bus.write_byte_data(0x21, 0x10, 0xf)   # mask all interrupts
            bus.write_byte_data(0x21, 0x03, 0x1)   # set drive current and global interrupt disable
            bus.write_byte_data(0x21, 0x02, 0x2)   # needed?
            bus.write_byte_data(0x21, 0x04, 0x4)   # manual override source
        except IOError:
            print ("LEON detected")
            #os.system("echo 1 > /sys/devices/soc/6a00000.ssusb/power_supply/usb/usb_otg")
            LEON = True
        bus.close()
    except FileNotFoundError:
        # no I2C bus at all (e.g. running on a dev machine / RPi)
        print ("Warning: I2C bus not accessible")
        I2C = False
last_eon_fan_val = None
def set_eon_fan(val):
    """Write fan speed level *val* (0-3) to the fan controller over I2C.

    Skips the write when the value is unchanged or the I2C bus is absent.
    LEON boards use a different controller (0x3d, falling back to the
    tusb320 at 0x67); other boards use the 0x21 controller.
    """
    global LEON, last_eon_fan_val, I2C
    if (last_eon_fan_val is None or last_eon_fan_val != val) and I2C:
        bus = SMBus(7, force=True)
        if LEON:
            try:
                i = [0x1, 0x3 | 0, 0x3 | 0x08, 0x3 | 0x10][val]
                bus.write_i2c_block_data(0x3d, 0, [i])
            except IOError:
                # tusb320
                if val == 0:
                    bus.write_i2c_block_data(0x67, 0xa, [0])
                else:
                    bus.write_i2c_block_data(0x67, 0xa, [0x20])
                    bus.write_i2c_block_data(0x67, 0x8, [(val-1)<<6])
        else:
            # enter manual mode, set speed, then latch the override source
            bus.write_byte_data(0x21, 0x04, 0x2)
            bus.write_byte_data(0x21, 0x03, (val*2)+1)
            bus.write_byte_data(0x21, 0x04, 0x4)
        bus.close()
        last_eon_fan_val = val
# temp thresholds to control fan speed - high hysteresis (rising temperature)
_TEMP_THRS_H = [50., 65., 80., 10000]
# temp thresholds to control fan speed - low hysteresis (falling temperature)
_TEMP_THRS_L = [42.5, 57.5, 72.5, 10000]
# fan speed options (PWM values; handle_fan divides by 16384 to get level 0-3)
_FAN_SPEEDS = [0, 16384, 32768, 65535]
# max fan speed only allowed if battery is hot
_BAT_TEMP_THERSHOLD = 45.  # NOTE(review): name misspells "THRESHOLD"; kept as-is because it is referenced below
def handle_fan(max_cpu_temp, bat_temp, fan_speed):
    """Pick and apply a new fan speed from CPU temperature with hysteresis.

    Args:
        max_cpu_temp: hottest CPU temperature in degrees C.
        bat_temp: battery temperature in degrees C.
        fan_speed: current fan speed (one of _FAN_SPEEDS).

    Returns:
        The new fan speed; also applies it via set_eon_fan().
    """
    new_speed_h = next(speed for speed, temp_h in zip(_FAN_SPEEDS, _TEMP_THRS_H) if temp_h > max_cpu_temp)
    new_speed_l = next(speed for speed, temp_l in zip(_FAN_SPEEDS, _TEMP_THRS_L) if temp_l > max_cpu_temp)

    if new_speed_h > fan_speed:
        # update speed if using the high thresholds results in fan speed increment
        fan_speed = new_speed_h
    elif new_speed_l < fan_speed:
        # update speed if using the low thresholds results in fan speed decrement
        fan_speed = new_speed_l

    if bat_temp < _BAT_TEMP_THERSHOLD:
        # no max fan speed unless battery is hot
        fan_speed = min(fan_speed, _FAN_SPEEDS[-2])

    # Bug fix: use floor division. On Python 3 the original "/" yields a
    # float, which set_eon_fan() then uses as a list index (TypeError).
    set_eon_fan(fan_speed // 16384)

    return fan_speed
def check_car_battery_voltage(should_start, health, charging_disabled):
    """Toggle EON charging based on the car's 12V battery voltage.

    Charging is disabled while there are panda health packets, the battery
    voltage is below 11.5V and onroad is not starting; it is re-enabled once
    health data is gone or the voltage recovers above 11.8V.

    Returns:
        The updated charging_disabled flag.
    """
    voltage_recovered = health is None or health.health.voltage > 11800
    voltage_low = health is not None and health.health.voltage < 11500

    if charging_disabled:
        if voltage_recovered:
            charging_disabled = False
            os.system('echo "1" > /sys/class/power_supply/battery/charging_enabled')
    elif voltage_low and not should_start:
        charging_disabled = True
        os.system('echo "0" > /sys/class/power_supply/battery/charging_enabled')

    return charging_disabled
class LocationStarter(object):
    """Decides whether onroad should start or keep running based purely on
    GPS movement (used in passive mode when no ignition signal exists)."""

    def __init__(self):
        self.last_good_loc = 0

    def update(self, started_ts, location):
        """Return True if onroad should be (or remain) started.

        Args:
            started_ts: start timestamp in seconds, or falsy when stopped.
            location: a gpsLocation message, or None.
        """
        rt = sec_since_boot()

        bad_fix = location is None or location.accuracy > 50 or location.speed < 2
        if bad_fix:
            # bad location: don't start; if already started, stop only after
            # running for over a minute without a good fix for over a minute
            if not started_ts:
                return False
            if rt - self.last_good_loc > 60. and rt - started_ts > 60:
                cloudlog.event("location_stop",
                               ts=rt,
                               started_ts=started_ts,
                               last_good_loc=self.last_good_loc,
                               location=location.to_dict() if location else None)
                return False
            return True

        self.last_good_loc = rt
        if started_ts:
            return True
        cloudlog.event("location_start", location=location.to_dict() if location else None)
        # start once moving faster than 10 km/h (speed is m/s)
        return location.speed * 3.6 > 10
def thermald_thread():
    """Main loop: publish 'thermal' messages once per health packet.

    Monitors temperatures, battery state and free disk space, drives the
    EON fan, toggles charging, and decides whether the onroad processes
    should be started (see the module header for the start conditions).
    """
    setup_eon_fan()

    # prevent LEECO from undervoltage
    BATT_PERC_OFF = 10 if LEON else 3

    # now loop
    context = zmq.Context()
    thermal_sock = messaging.pub_sock(context, service_list['thermal'].port)
    health_sock = messaging.sub_sock(context, service_list['health'].port)
    location_sock = messaging.sub_sock(context, service_list['gpsLocation'].port)

    fan_speed = 0
    count = 0

    off_ts = None
    started_ts = None  # start time in seconds
    ignition_seen = False
    started_seen = False
    passive_starter = LocationStarter()
    thermal_status = ThermalStatus.green
    health_sock.RCVTIMEO = 1500  # ms; bounds the loop period when no health packets arrive
    current_filter = FirstOrderFilter(0., CURRENT_TAU, 1.)

    # Make sure charging is enabled
    charging_disabled = False
    os.system('echo "1" > /sys/class/power_supply/battery/charging_enabled')

    params = Params()

    while 1:
        health = messaging.recv_sock(health_sock, wait=True)
        location = messaging.recv_sock(location_sock)
        location = location.gpsLocation if location else None
        msg = read_thermal()

        # loggerd is gated based on free space
        # ROOT is set in loggerd/config.py
        statvfs = os.statvfs(ROOT)
        avail = (statvfs.f_bavail * 1.0)/statvfs.f_blocks

        # thermal message now also includes free space (as a 0..1 fraction)
        msg.thermal.freeSpace = avail

        # battery directory is not available on a RPi
        if os.path.exists("/sys/class/power_supply/battery"):
            with open("/sys/class/power_supply/battery/capacity") as f:
                msg.thermal.batteryPercent = int(f.read())
            with open("/sys/class/power_supply/battery/status") as f:
                msg.thermal.batteryStatus = f.read().strip()
            with open("/sys/class/power_supply/battery/current_now") as f:
                msg.thermal.batteryCurrent = int(f.read())
            with open("/sys/class/power_supply/battery/voltage_now") as f:
                msg.thermal.batteryVoltage = int(f.read())
            with open("/sys/class/power_supply/usb/present") as f:
                msg.thermal.usbOnline = bool(int(f.read()))
        else:
            # sensible defaults when there is no battery sysfs node
            msg.thermal.batteryPercent = 100
            msg.thermal.batteryStatus = ""
            msg.thermal.batteryCurrent = 0
            msg.thermal.batteryVoltage = 0
            msg.thermal.usbOnline = False

        # batteryCurrent is in microamps; filter works in amps
        current_filter.update(msg.thermal.batteryCurrent / 1e6)

        # TODO: add car battery voltage check
        max_cpu_temp = max(msg.thermal.cpu0, msg.thermal.cpu1,
                           msg.thermal.cpu2, msg.thermal.cpu3) / 10.0

        # check if cpu temp is not in milligrades
        if max_cpu_temp > 1000:
            max_cpu_temp = max_cpu_temp / 100.
        max_comp_temp = max(max_cpu_temp, msg.thermal.mem / 10., msg.thermal.gpu / 10.)
        bat_temp = msg.thermal.bat/1000.
        fan_speed = handle_fan(max_cpu_temp, bat_temp, fan_speed)
        msg.thermal.fanSpeed = fan_speed

        # thermal logic with hysterisis
        if max_cpu_temp > 107. or bat_temp >= 63.:
            # onroad not allowed
            thermal_status = ThermalStatus.danger
        elif max_comp_temp > 95. or bat_temp > 60.:
            # hysteresis between onroad not allowed and engage not allowed
            thermal_status = clip(thermal_status, ThermalStatus.red, ThermalStatus.danger)
        elif max_cpu_temp > 90.0:
            # hysteresis between engage not allowed and uploader not allowed
            thermal_status = clip(thermal_status, ThermalStatus.yellow, ThermalStatus.red)
        elif max_cpu_temp > 85.0:
            # uploader not allowed
            thermal_status = ThermalStatus.yellow
        elif max_cpu_temp > 75.0:
            # hysteresis between uploader not allowed and all good
            thermal_status = clip(thermal_status, ThermalStatus.green, ThermalStatus.yellow)
        else:
            # all good
            thermal_status = ThermalStatus.green

        # **** starting logic ****
        # a health message with started=true is required
        # or a voltage > 13.5 (meaning the engine is running)
        # and 2% free disk space
        # start constellation of processes when the car starts
        ignition = health is not None and health.health.started
        ignition_seen = ignition_seen or ignition

        # add voltage check for ignition (voltage in millivolts)
        if not ignition_seen and health is not None and health.health.voltage > 13500:
            ignition = True

        do_uninstall = params.get("DoUninstall") == "1"
        accepted_terms = params.get("HasAcceptedTerms").decode() == "1"
        completed_training = params.get("CompletedTrainingVersion").decode() == training_version

        should_start = ignition

        # have we seen a panda?
        passive = (params.get("Passive").decode() == "1")

        # start on gps movement if we haven't seen ignition and are in passive mode
        should_start = should_start or (not (ignition_seen and health)  # seen ignition and panda is connected
                                        and passive
                                        and passive_starter.update(started_ts, location))

        # with 2% left, we killall, otherwise the phone will take a long time to boot
        should_start = should_start and msg.thermal.freeSpace > 0.02

        # require usb power in passive mode
        should_start = should_start and (not passive or msg.thermal.usbOnline)

        # confirm we have completed training and aren't uninstalling
        should_start = should_start and accepted_terms and (passive or completed_training) and (not do_uninstall)

        print ("should start: ")
        print (should_start, accepted_terms, passive)

        # if any CPU gets above 107 or the battery gets above 63, kill all processes
        # controls will warn with CPU above 95 or battery above 60
        if thermal_status >= ThermalStatus.danger:
            # TODO: Add a better warning when this is happening
            should_start = False
            print ("Thermal status/ temperature too high" + str(max_cpu_temp))

        if should_start:
            off_ts = None
            if started_ts is None:
                params.car_start()
                started_ts = sec_since_boot()
                started_seen = True
        else:
            started_ts = None
            if off_ts is None:
                off_ts = sec_since_boot()

            # shutdown if the battery gets lower than 3%, it's discharging, we aren't running for
            # more than a minute but we were running
            if msg.thermal.batteryPercent < BATT_PERC_OFF and msg.thermal.batteryStatus == "Discharging" and \
               started_seen and (sec_since_boot() - off_ts) > 60:
                os.system('LD_LIBRARY_PATH="" svc power shutdown')

        charging_disabled = check_car_battery_voltage(should_start, health, charging_disabled)

        msg.thermal.chargingDisabled = charging_disabled
        msg.thermal.chargingError = current_filter.x > 1.0  # if current is > 1A out, then charger might be off
        msg.thermal.started = started_ts is not None
        msg.thermal.startedTs = int(1e9*(started_ts or 0))

        msg.thermal.thermalStatus = thermal_status
        thermal_sock.send(msg.to_bytes())
        #print (msg)

        # report to server once per minute
        if (count%60) == 0:
            cloudlog.event("STATUS_PACKET",
                           count=count,
                           health=(health.to_dict() if health else None),
                           location=(location.to_dict() if location else None),
                           thermal=msg.to_dict())

        count += 1
def main(gctx=None):
    """Process entry point; *gctx* is accepted for compatibility with the
    process manager but unused."""
    thermald_thread()

if __name__ == "__main__":
    main()
|
import math
import numpy as np
import logging
import cv2
import os
import shutil
import torch
import torch.nn as nn
import torch.nn.functional as F
class Logger(object):
    """Thin wrapper around logging.Logger that writes every record both to
    a log file and to the console with a shared format."""

    def __init__(self, log_file_name, logger_name, log_level=logging.DEBUG):
        # one named logger, shared format for both sinks
        self._logger = logging.getLogger(logger_name)
        self._logger.setLevel(log_level)
        formatter = logging.Formatter(
            '[%(asctime)s] - [%(filename)s file line:%(lineno)d] - %(levelname)s: %(message)s')
        # file sink first, then console sink (same order as before)
        for handler in (logging.FileHandler(log_file_name), logging.StreamHandler()):
            handler.setFormatter(formatter)
            self._logger.addHandler(handler)

    def get_log(self):
        """Return the configured logging.Logger instance."""
        return self._logger
def mkExpDir(args):
    """Prepare the experiment save directory and return a configured logger.

    Creates args.save_dir (plus 'model' and/or 'save_results' subfolders
    depending on the eval/test flags) and dumps all argument values to
    save_dir/args.txt.

    Raises:
        SystemExit: when args.save_dir exists and args.reset is not set.
    """
    if os.path.exists(args.save_dir):
        if not args.reset:
            raise SystemExit('Error: save_dir "' + args.save_dir + '" already exists! Please set --reset True to delete the folder.')
        shutil.rmtree(args.save_dir)

    os.makedirs(args.save_dir)
    # os.makedirs(os.path.join(args.save_dir, 'img'))
    if (not args.eval) and (not args.test):
        os.makedirs(os.path.join(args.save_dir, 'model'))
    if (args.eval and args.eval_save_results) or args.test:
        os.makedirs(os.path.join(args.save_dir, 'save_results'))

    # use a context manager so args.txt is flushed and closed (the original
    # left the file handle open)
    with open(os.path.join(args.save_dir, 'args.txt'), 'w') as args_file:
        for k, v in vars(args).items():
            args_file.write(k.rjust(30, ' ') + '\t' + str(v) + '\n')

    return Logger(log_file_name=os.path.join(args.save_dir, args.log_file_name),
                  logger_name=args.logger_name).get_log()
class MeanShift(nn.Conv2d):
    """Fixed 1x1 convolution that normalizes RGB inputs per channel:
    out_c = (in_c + sign * rgb_range * mean_c) / std_c.

    With the default sign=-1 it subtracts the dataset mean; sign=1 adds it
    back. The parameters are frozen (requires_grad = False).
    """

    def __init__(self, rgb_range, rgb_mean, rgb_std, sign=-1):
        super(MeanShift, self).__init__(3, 3, kernel_size=1)
        std = torch.Tensor(rgb_std)
        # identity kernel scaled by 1/std per output channel
        self.weight.data = torch.eye(3).view(3, 3, 1, 1) / std.view(3, 1, 1, 1)
        self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean) / std
        # freeze: this layer is a constant preprocessing step
        for param in (self.weight, self.bias):
            param.requires_grad = False
def calc_psnr(img1, img2):
    """PSNR between two images, computed on the luminance (Y) channel.

    Args:
        img1, img2: [h, w, c] arrays with values in [0, 255] (any numeric
            dtype; inputs are cast to float64 before subtraction).

    Returns:
        PSNR in dB; float('inf') for identical images.
    """
    # cast first: subtracting uint8 arrays directly would wrap around
    img1 = np.asarray(img1, dtype=np.float64)
    img2 = np.asarray(img2, dtype=np.float64)
    diff = (img1 - img2) / 255.0
    # ITU-R BT.601 8-bit RGB -> Y weights (scaled by 256)
    diff[:,:,0] = diff[:,:,0] * 65.738 / 256.0
    diff[:,:,1] = diff[:,:,1] * 129.057 / 256.0
    diff[:,:,2] = diff[:,:,2] * 25.064 / 256.0

    diff = np.sum(diff, axis=2)
    mse = np.mean(np.power(diff, 2))
    if mse == 0:
        # identical inputs: the original crashed on log10(0)
        return float('inf')
    return -10 * math.log10(mse)
def calc_ssim(img1, img2):
    """SSIM between two [h, w, c] images in range [0, 255], following
    MATLAB's reference implementation (11x11 Gaussian window, sigma 1.5,
    valid region only).
    """
    def ssim(img1, img2):
        # standard SSIM stabilisation constants for 8-bit dynamic range
        C1 = (0.01 * 255)**2
        C2 = (0.03 * 255)**2

        img1 = img1.astype(np.float64)
        img2 = img2.astype(np.float64)
        # 11x11 Gaussian window, sigma 1.5
        kernel = cv2.getGaussianKernel(11, 1.5)
        window = np.outer(kernel, kernel.transpose())

        # local means/variances/covariance; [5:-5] crops to the valid region
        mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5]  # valid
        mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
        mu1_sq = mu1**2
        mu2_sq = mu2**2
        mu1_mu2 = mu1 * mu2
        sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq
        sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq
        sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2

        ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
                                                                (sigma1_sq + sigma2_sq + C2))
        return ssim_map.mean()

    ### args:
        # img1: [h, w, c], range [0, 255]
        # img2: [h, w, c], range [0, 255]
        # the same outputs as MATLAB's
    border = 0
    # BT.601 luminance; np.dot over the channel axis yields a 2-D array,
    # so 3-channel inputs normally take the first (Y-channel) branch below
    img1_y = np.dot(img1, [65.738,129.057,25.064])/256.0+16.0
    img2_y = np.dot(img2, [65.738,129.057,25.064])/256.0+16.0
    if not img1.shape == img2.shape:
        raise ValueError('Input images must have the same dimensions.')
    h, w = img1.shape[:2]
    img1_y = img1_y[border:h-border, border:w-border]
    img2_y = img2_y[border:h-border, border:w-border]

    if img1_y.ndim == 2:
        return ssim(img1_y, img2_y)
    elif img1.ndim == 3:
        if img1.shape[2] == 3:
            ssims = []
            for i in range(3):
                # NOTE(review): passes the full 3-channel images on every
                # iteration, so the same value is appended three times; a
                # per-channel SSIM would use img1[..., i] -- confirm intended
                ssims.append(ssim(img1, img2))
            return np.array(ssims).mean()
        elif img1.shape[2] == 1:
            return ssim(np.squeeze(img1), np.squeeze(img2))
    else:
        raise ValueError('Wrong input image dimensions.')
def calc_psnr_and_ssim(sr, hr):
    """Compute PSNR and SSIM between a super-resolved and a ground-truth image.

    Args:
        sr, hr: 4-D pytorch tensors (N, C, H, W) with values in [-1, 1].

    Returns:
        (psnr, ssim) computed on rounded [0, 255] HWC arrays; when sizes
        differ, both images are cropped to the shared top-left region.
    """
    # map [-1, 1] -> [0, 255]
    sr = (sr + 1.) * 127.5
    hr = (hr + 1.) * 127.5

    if sr.size() != hr.size():
        crop_h = min(sr.size(2), hr.size(2))
        crop_w = min(sr.size(3), hr.size(3))
        sr = sr[:, :, :crop_h, :crop_w]
        hr = hr[:, :, :crop_h, :crop_w]

    def to_hwc(tensor):
        # (1, C, H, W) tensor -> rounded (H, W, C) numpy array
        return np.transpose(tensor.squeeze().round().cpu().numpy(), (1, 2, 0))

    img1 = to_hwc(sr)
    img2 = to_hwc(hr)
    return calc_psnr(img1, img2), calc_ssim(img1, img2)
from __future__ import print_function, absolute_import
import numpy as np
from six import iterkeys
from h5Nastran.defaults import Defaults
from h5Nastran.h5nastrannode import H5NastranNode
from .input_table import InputTable, TableDef
class Property(H5NastranNode):
    """Container node for every property bulk-data table under
    /NASTRAN/INPUT/PROPERTY; each attribute wraps one card type's table.
    Commented-out entries are card types not yet supported."""

    def __init__(self, h5n, input):
        self._h5n = h5n      # owning H5Nastran instance
        self._input = input  # parent input node (provides the base path)
        self.mfluid = MFLUID(self._h5n, self)
        self.nsm = NSM(self._h5n, self)
        self.nsm1 = NSM1(self._h5n, self)
        self.nsmadd = NSMADD(self._h5n, self)
        self.nsml = NSML(self._h5n, self)
        self.nsml1 = NSML1(self._h5n, self)
        self.paabsf = PAABSF(self._h5n, self)
        self.pacabs = PACABS(self._h5n, self)
        self.pacbar = PACBAR(self._h5n, self)
        self.pacinf = PACINF(self._h5n, self)
        self.paero1 = PAERO1(self._h5n, self)
        self.paero2 = PAERO2(self._h5n, self)
        self.paero3 = PAERO3(self._h5n, self)
        self.paero4 = PAERO4(self._h5n, self)
        self.paero5 = PAERO5(self._h5n, self)
        self.paxisym = PAXISYM(self._h5n, self)
        self.paxsymh = PAXSYMH(self._h5n, self)
        self.pbar = PBAR(self._h5n, self)
        self.pbarl = PBARL(self._h5n, self)
        self.pbarn1 = PBARN1(self._h5n, self)
        self.pbcomp = PBCOMP(self._h5n, self)
        self.pbeam = PBEAM(self._h5n, self)
        self.pbeam3 = PBEAM3(self._h5n, self)
        self.pbeaml = PBEAML(self._h5n, self)
        self.pbemn1 = PBEMN1(self._h5n, self)
        self.pbend = PBEND(self._h5n, self)
        # self.pbmsect = PBMSECT(self._h5n, self)
        # self.pbrsect = PBRSECT(self._h5n, self)
        self.pbush = PBUSH(self._h5n, self)
        self.pbush1d = PBUSH1D(self._h5n, self)
        # self.pbush2d = PBUSH2D(self._h5n, self)
        self.pbusht = PBUSHT(self._h5n, self)
        self.pcohe = PCOHE(self._h5n, self)
        self.pcomp = PCOMP(self._h5n, self)
        self.pcompf = PCOMPF(self._h5n, self)
        self.pcompg = PCOMPG(self._h5n, self)
        self.pcompls = PCOMPLS(self._h5n, self)
        self.pconeax = PCONEAX(self._h5n, self)
        self.pconv = PCONV(self._h5n, self)
        self.pconv1 = PCONV1(self._h5n, self)
        self.pconvm = PCONVM(self._h5n, self)
        self.pdamp = PDAMP(self._h5n, self)
        self.pdamp5 = PDAMP5(self._h5n, self)
        self.pdampt = PDAMPT(self._h5n, self)
        self.pelas = PELAS(self._h5n, self)
        self.pelast = PELAST(self._h5n, self)
        self.pfast = PFAST(self._h5n, self)
        self.pgap = PGAP(self._h5n, self)
        self.phbdy = PHBDY(self._h5n, self)
        self.plcomp = PLCOMP(self._h5n, self)
        self.plplane = PLPLANE(self._h5n, self)
        self.plsolid = PLSOLID(self._h5n, self)
        self.pmass = PMASS(self._h5n, self)
        self.prod = PROD(self._h5n, self)
        self.prodn1 = PRODN1(self._h5n, self)
        self.pseam = PSEAM(self._h5n, self)
        self.pshear = PSHEAR(self._h5n, self)
        self.pshearn = PSHEARN(self._h5n, self)
        self.pshell = PSHELL(self._h5n, self)
        self.pshln1 = PSHLN1(self._h5n, self)
        self.pshln2 = PSHLN2(self._h5n, self)
        self.psldn1 = PSLDN1(self._h5n, self)
        self.psolid = PSOLID(self._h5n, self)
        self.ptube = PTUBE(self._h5n, self)
        self.pvisc = PVISC(self._h5n, self)
        self.pweld = PWELD(self._h5n, self)
        self.snorm = SNORM(self._h5n, self)
        # self.vcct = VCCT(self._h5n, self)
        # self.viewex = VIEWEX(self._h5n, self)

    def path(self):
        """HDF5 path of this node: parent path + 'PROPERTY'."""
        return self._input.path() + ['PROPERTY']
########################################################################################################################

# The table classes below map one Nastran bulk-data card each to its HDF5
# input table; the layout is derived from the TableDef path (cards with a
# variable-length section use an .../IDENTITY path plus subtables).

class MFLUID(InputTable):
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/MFLUID')

########################################################################################################################

class NSM(InputTable):
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/NSM/IDENTITY')

########################################################################################################################

class NSM1(InputTable):
    """
    <group name="NSM1">
        <dataset name="IDENTITY">
            <field name="SID" type="integer"/>
            <field name="TYPE" type="character" size="8"/>
            <field name="PROP" type="character" size="8" description="Name of nonstructural mass entry: NSM1 or NSML1"/>
            <field name="VALUE" type="double"/>
            <field name="ALL" type="integer"/>
            <field name="LIST_POS" type="integer"/>
            <field name="LIST_LEN" type="integer"/>
            <field name="THRU_POS" type="integer"/>
            <field name="THRU_LEN" type="integer"/>
            <field name="THRUBY_POS" type="integer"/>
            <field name="THRUBY_LEN" type="integer"/>
            <field name="DOMAIN_ID" type="integer"/>
        </dataset>
        <dataset name="IDLIST">
            <field name="ID" type="integer"/>
        </dataset>
        <dataset name="THRU">
            <field name="ID1" type="integer"/>
            <field name="ID2" type="integer"/>
        </dataset>
        <dataset name="THRU_BY">
            <field name="ID1" type="integer"/>
            <field name="ID2" type="integer"/>
            <field name="N" type="integer"/>
        </dataset>
    </group>
    """
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/NSM1/IDENTITY',
                                rename={'IDLIST_POS': 'LIST_POS', 'IDLIST_LEN': 'LIST_LEN', 'THRU_BY_POS': 'THRUBY_POS',
                                        'THRU_BY_LEN': 'THRUBY_LEN'})

########################################################################################################################

class NSMADD(InputTable):
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/NSMADD/IDENTITY')

########################################################################################################################

class NSML(InputTable):
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/NSML/IDENTITY')

########################################################################################################################

class NSML1(InputTable):
    """
    <group name="NSML1">
        <dataset name="IDENTITY">
            <field name="SID" type="integer"/>
            <field name="TYPE" type="character" size="8"/>
            <field name="VALUE" type="double"/>
            <field name="ALL" type="integer"/>
            <field name="LIST_POS" type="integer"/>
            <field name="LIST_LEN" type="integer"/>
            <field name="THRU_POS" type="integer"/>
            <field name="THRU_LEN" type="integer"/>
            <field name="THRUBY_POS" type="integer"/>
            <field name="THRUBY_LEN" type="integer"/>
            <field name="DOMAIN_ID" type="integer"/>
        </dataset>
        <dataset name="IDLIST">
            <field name="ID" type="integer"/>
        </dataset>
        <dataset name="THRU">
            <field name="ID1" type="integer"/>
            <field name="ID2" type="integer"/>
        </dataset>
        <dataset name="THRU_BY">
            <field name="ID1" type="integer"/>
            <field name="ID2" type="integer"/>
            <field name="N" type="integer"/>
        </dataset>
    </group>
    """
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/NSML1/IDENTITY',
                                rename={'IDLIST_POS': 'LIST_POS', 'IDLIST_LEN': 'LIST_LEN', 'THRU_BY_POS': 'THRUBY_POS',
                                        'THRU_BY_LEN': 'THRUBY_LEN'}
                                )

########################################################################################################################

class PAABSF(InputTable):
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PAABSF')

########################################################################################################################

class PACABS(InputTable):
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PACABS')

########################################################################################################################

class PACBAR(InputTable):
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PACBAR')

########################################################################################################################

class PACINF(InputTable):
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PACINF')

########################################################################################################################
class PAERO1(InputTable):
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PAERO1')

    def from_bdf(self, cards):
        """Convert PAERO1 cards into the IDENTITY record array; missing
        associated-body ids (up to 6) are filled with the default int."""
        card_ids = sorted(cards.keys())
        data = np.empty(len(card_ids), dtype=self.table_def.dtype)

        pid = data['PID']
        body_cols = [data[name] for name in ('B1', 'B2', 'B3', 'B4', 'B5', 'B6')]

        for row, card_id in enumerate(card_ids):
            card = cards[card_id]
            pid[row] = card.pid
            body_ids = list(card.Bi)
            missing = 6 - len(body_ids)
            if missing > 0:
                body_ids += [None] * missing
            cleaned = [Defaults.default_int if b is None else b for b in body_ids]
            # unpack exactly six values (raises, as before, on malformed cards)
            v1, v2, v3, v4, v5, v6 = cleaned
            for col, value in zip(body_cols, (v1, v2, v3, v4, v5, v6)):
                col[row] = value

        return {'IDENTITY': data}
########################################################################################################################

class PAERO2(InputTable):
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PAERO2')

########################################################################################################################

class PAERO3(InputTable):
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PAERO3')

########################################################################################################################

class PAERO4(InputTable):
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PAERO4/IDENTITY')

########################################################################################################################

class PAERO5(InputTable):
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PAERO5/IDENTITY')

########################################################################################################################

class PAXISYM(InputTable):
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PAXISYM')

########################################################################################################################

class PAXSYMH(InputTable):
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PAXSYMH')

########################################################################################################################
class PBAR(InputTable):
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PBAR')

    def from_bdf(self, cards):
        """Convert PBAR cards into the IDENTITY record array; every table
        column is copied one-to-one from the corresponding card attribute
        (FE has no card field and stays at the blank default)."""
        card_ids = sorted(cards.keys())
        data = np.empty(len(card_ids), dtype=self.table_def.dtype)

        data['FE'] = Defaults.default_double  # blank field on the card

        # (table column, card attribute) copied verbatim per card
        field_map = (('PID', 'pid'), ('MID', 'mid'), ('A', 'A'),
                     ('I1', 'i1'), ('I2', 'i2'), ('J', 'j'), ('NSM', 'nsm'),
                     ('C1', 'c1'), ('C2', 'c2'), ('D1', 'd1'), ('D2', 'd2'),
                     ('E1', 'e1'), ('E2', 'e2'), ('F1', 'f1'), ('F2', 'f2'),
                     ('K1', 'k1'), ('K2', 'k2'), ('I12', 'i12'))

        for row, card_id in enumerate(card_ids):
            card = cards[card_id]
            for column, attr in field_map:
                data[column][row] = getattr(card, attr)

        return {'IDENTITY': data}
########################################################################################################################

# PBARL msc spec is missing NSM for some reason, so the table layout is
# declared by hand instead of being derived from an HDF5 path string.

class PBARL_INFO_SPEC(object):
    # variable-length DIM list for each PBARL (one VALUE row per dimension)
    name = 'INFO'
    path = '/NASTRAN/INPUT/PROPERTY/PBARL'
    dtype = [('VALUE', '<f8', (),)]
    is_subtable = True
    same_as = None
    subtables = []

class PBARL_SPEC(object):
    # identity row: one record per PBARL card; INFO_POS/INFO_LEN index into
    # the INFO subtable above
    name = 'IDENTITY'
    path = '/NASTRAN/INPUT/PROPERTY/PBARL'
    dtype = [('PID', '<i8', ()), ('MID', '<i8', ()), ('GROUP', 'S8', ()), ('TYPE', 'S8', ()), ('NSM', '<f8', ()),
             ('INFO_POS', '<i8', ()), ('INFO_LEN', '<i8', ()), ('DOMAIN_ID', '<i8', ())]
    is_subtable = False
    same_as = 'None'
    subtables = [PBARL_INFO_SPEC]
class PBARL(InputTable):
    table_def = TableDef.create(PBARL_SPEC)

    def from_bdf(self, cards):
        """Convert PBARL cards to the IDENTITY/INFO layout: each card's
        variable-length dimension list goes into INFO, with its position
        and length recorded on the identity row."""
        identity = {'PID': [], 'MID': [], 'GROUP': [], 'TYPE': [], 'NSM': [],
                    'INFO_POS': [], 'INFO_LEN': [], 'DOMAIN_ID': []}
        dims = []
        result = {'IDENTITY': identity,
                  'INFO': {'IDENTITY': {'VALUE': dims}},
                  '_subtables': ['INFO']}

        offset = 0
        for card_id in sorted(cards.keys()):
            card = cards[card_id]
            identity['PID'].append(card.pid)
            identity['MID'].append(card.mid)
            identity['GROUP'].append(card.group)
            identity['TYPE'].append(card.beam_type)
            identity['NSM'].append(card.nsm)
            identity['INFO_POS'].append(offset)
            dim_count = len(card.dim)
            identity['INFO_LEN'].append(dim_count)
            offset += dim_count
            dims.extend(card.dim)

        return result
########################################################################################################################

class PBARN1(InputTable):
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PBARN1')

########################################################################################################################

class PBCOMP(InputTable):
    """
    <group name="PBCOMP">
        <dataset name="IDENTITY">
            <field name="PID" type="integer"/>
            <field name="MID" type="integer"/>
            <field name="A" type="double"/>
            <field name="I1" type="double"/>
            <field name="I2" type="double"/>
            <field name="I12" type="double"/>
            <field name="J" type="double"/>
            <field name="NSM" type="double"/>
            <field name="K1" type="double"/>
            <field name="K2" type="double"/>
            <field name="M1" type="double"/>
            <field name="M2" type="double"/>
            <field name="N1" type="double"/>
            <field name="N2" type="double"/>
            <field name="NSECT" type="integer"/>
            <field name="POS" type="integer"/>
            <field name="LEN" type="integer"/>
            <field name="DOMAIN_ID" type="integer"/>
        </dataset>
        <dataset name="SECTION">
            <field name="Y" type="double"/>
            <field name="Z" type="double"/>
            <field name="C" type="double"/>
            <field name="MID" type="integer"/>
        </dataset>
    </group>
    """
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PBCOMP/IDENTITY',
                                rename={'SECTION_POS': 'POS', 'SECTION_LEN': 'LEN'})

########################################################################################################################
def _resize(arr, size):
arr = list(arr)
first = arr[0]
last = arr[-1]
del arr[0]
try:
del arr[-1]
except IndexError:
pass
size -= 2
arr_len = len(arr)
diff_len = size - arr_len
if diff_len == 0:
return arr
elif diff_len < 0:
raise Exception
mid_arr = arr + [None] * diff_len
return [first] + mid_arr + [last]
class PBEAM(InputTable):
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PBEAM')

    def from_bdf(self, cards):
        """Convert PBEAM cards to the 'IDENTITY' structured array.

        Station-wise data (SO/XXB/A/I1/...) is padded to the 11 output
        stations with :func:`_resize`; the end stations always land in
        slots 0 and 10.
        """
        # TODO: PBEAM - verify so is correct
        so_codes = {
            '': Defaults.default_double,
            None: Defaults.default_double,
            'NO': 0.,
            'YES': 1.,
            'YESA': 2.
        }
        # (dtype field, card attribute) pairs padded to 11 stations.
        station_fields = (
            ('XXB', 'xxb'), ('A', 'A'), ('I1', 'i1'), ('I2', 'i2'),
            ('I12', 'i12'), ('J', 'j'), ('NSM', 'nsm'),
            ('C1', 'c1'), ('C2', 'c2'), ('D1', 'd1'), ('D2', 'd2'),
            ('E1', 'e1'), ('E2', 'e2'), ('F1', 'f1'), ('F2', 'f2'),
        )
        # (dtype field, card attribute) pairs copied verbatim.
        scalar_fields = (
            ('PID', 'pid'), ('MID', 'mid'),
            ('K1', 'k1'), ('K2', 'k2'), ('S1', 's1'), ('S2', 's2'),
            ('NSIA', 'nsia'), ('NSIB', 'nsib'), ('CWA', 'cwa'), ('CWB', 'cwb'),
            ('M1A', 'm1a'), ('M2A', 'm2a'), ('M1B', 'm1b'), ('M2B', 'm2b'),
            ('N1A', 'n1a'), ('N2A', 'n2a'), ('N1B', 'n1b'), ('N2B', 'n2b'),
        )
        card_ids = sorted(cards.keys())
        data = np.empty(len(card_ids), dtype=self.table_def.dtype)
        data['CCF'][:] = Defaults.unknown_int
        data['CWELD'][:] = Defaults.unknown_int
        # TODO: The first and last stations (xxb = 0.0, 1.0) go in slots 0
        #       and 10; intermediate slots are 0.0 when undefined. The
        #       PBEAM/PBEAML data is sorted but only uses as many fields as
        #       needed — verify the _resize handling matches this.
        for i, card_id in enumerate(card_ids):
            card = cards[card_id]
            data['NSEGS'][i] = len(card.so)
            data['SO'][i] = _resize([so_codes[s] for s in card.so], 11)
            for field, attr in station_fields:
                data[field][i] = _resize(getattr(card, attr), 11)
            for field, attr in scalar_fields:
                data[field][i] = getattr(card, attr)
        return {'IDENTITY': data}
########################################################################################################################
class PBEAM3(InputTable):
    """PBEAM3 property table (/NASTRAN/INPUT/PROPERTY/PBEAM3)."""
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PBEAM3')
########################################################################################################################
class PBEAML(InputTable):
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PBEAML/IDENTITY',
                                subtables=[
                                    TableDef.create('/NASTRAN/INPUT/PROPERTY/PBEAML/SECTION',
                                                    subtables=[
                                                        TableDef.create('/NASTRAN/INPUT/PROPERTY/PBEAML/DIMS')
                                                    ]
                                                    )
                                ]
                                )

    def from_bdf(self, cards):
        """Convert PBEAML cards into nested IDENTITY/SECTION/DIMS tables.

        Each card contributes one IDENTITY row; its stations become SECTION
        rows located by SECTION_POS/SECTION_LEN, and each station's
        dimension list becomes DIMS rows located by DIMS_POS/DIMS_LEN.
        """
        card_ids = sorted(cards.keys())
        dims = {'IDENTITY': {'DIM': []}}
        section = {'IDENTITY': {'SO': [], 'RDIST': [], 'DIMS_POS': [], 'DIMS_LEN': [], 'NSM': []},
                   'DIMS': dims,
                   '_subtables': ['DIMS']}
        result = {'IDENTITY': {'PID': [], 'MID': [], 'GROUP': [], 'TYPE': [],
                               'SECTION_POS': [], 'SECTION_LEN': [], 'DOMAIN_ID': []},
                  'SECTION': section,
                  '_subtables': ['SECTION']}
        section = section['IDENTITY']
        identity = result['IDENTITY']
        dim = dims['IDENTITY']['DIM']
        so = section['SO']
        rdist = section['RDIST']
        dims_pos = section['DIMS_POS']
        dims_len = section['DIMS_LEN']
        nsm = section['NSM']
        pid = identity['PID']
        mid = identity['MID']
        group = identity['GROUP']
        type_ = identity['TYPE']
        section_pos = identity['SECTION_POS']
        section_len = identity['SECTION_LEN']
        # TODO: PBEAML - verify so is correct
        _so = {
            '': Defaults.default_double,
            None: Defaults.default_double,
            'NO': 0.,
            'YES': 1.,
            'YESA': 2.
        }
        _section_pos = 0
        _dims_pos = 0
        for card_id in card_ids:
            card = cards[card_id]
            pid.append(card.pid)
            mid.append(card.mid)
            group.append(card.group)
            type_.append(card.beam_type)
            _section_len = len(card.so)
            section_pos.append(_section_pos)
            section_len.append(_section_len)
            # BUGFIX: advance the running SECTION offset. Previously
            # _section_pos was never incremented, so every IDENTITY row
            # recorded SECTION_POS == 0 and pointed at the first card's
            # stations (compare the DIMS_POS bookkeeping below).
            _section_pos += _section_len
            so += [_so[_s] for _s in card.so]
            rdist += list(card.xxb)
            nsm += list(card.nsm)
            for _dim in card.dim:
                _dim_len = len(_dim)
                dims_pos.append(_dims_pos)
                _dims_pos += _dim_len
                dims_len.append(_dim_len)
                dim += list(_dim)
        return result
########################################################################################################################
class PBEMN1(InputTable):
    """PBEMN1 property table (/NASTRAN/INPUT/PROPERTY/PBEMN1)."""
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PBEMN1')
########################################################################################################################
class PBEND(InputTable):
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PBEND')

    def from_bdf(self, cards):
        """Convert PBEND cards to the 'IDENTITY' structured array."""
        # (dtype field, card attribute) pairs copied one-to-one.
        field_map = (
            ('PID', 'pid'), ('MID', 'mid'), ('A', 'A'), ('I1', 'i1'),
            ('I2', 'i2'), ('J', 'j'), ('FSI', 'fsi'), ('RM', 'rm'),
            ('T', 't'), ('P', 'p'), ('RB', 'rb'), ('THETAB', 'theta_b'),
            ('C1', 'c1'), ('C2', 'c2'), ('D1', 'd1'), ('D2', 'd2'),
            ('E1', 'e1'), ('E2', 'e2'), ('F1', 'f1'), ('F2', 'f2'),
            ('K1', 'k1'), ('K2', 'k2'), ('NSM', 'nsm'), ('RC', 'rc'),
            ('ZC', 'zc'), ('DELTAN', 'delta_n'),
        )
        card_ids = sorted(cards.keys())
        data = np.empty(len(card_ids), dtype=self.table_def.dtype)
        for i, card_id in enumerate(card_ids):
            card = cards[card_id]
            for field, attr in field_map:
                data[field][i] = getattr(card, attr)
        return {'IDENTITY': data}
########################################################################################################################
# TODO: PBMSECT is complex
# class PBMSECT(CardTable):
# table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PBMSECT/IDENTITY',
# subtables=[
# TableDef.create('/NASTRAN/INPUT/PROPERTY/PBMSECT/SECTION',
# subtables=[
# TableDef.create('/NASTRAN/INPUT/PROPERTY/PBMSECT/BRP')
# ])
# ])
########################################################################################################################
# TODO: PBRSECT is complex
# class PBRSECT(CardTable):
# table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PBRSECT/IDENTITY')
########################################################################################################################
class PBUSH(InputTable):
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PBUSH')

    def from_bdf(self, cards):
        """Convert PBUSH cards to the 'IDENTITY' structured array.

        Missing scalar attributes and empty K/B/GE lists fall back to the
        default double (lists are expanded to six entries).
        """
        default = Defaults.default_double

        def _six(card, attr):
            # An empty stiffness/damping/loss list expands to six defaults.
            values = list(getattr(card, attr))
            return values if values else [default] * 6

        card_ids = sorted(cards.keys())
        data = np.empty(len(card_ids), dtype=self.table_def.dtype)
        for i, card_id in enumerate(card_ids):
            card = cards[card_id]
            data['PID'][i] = card.pid
            data['K'][i] = _six(card, 'Ki')
            data['B'][i] = _six(card, 'Bi')
            data['GE'][i] = _six(card, 'GEi')
            # Older cards may lack these attributes entirely.
            for field, attr in (('SA', 'sa'), ('ST', 'st'), ('EA', 'ea'),
                                ('ET', 'et'), ('M', 'm')):
                data[field][i] = getattr(card, attr, default)
        return {'IDENTITY': data}
########################################################################################################################
# TODO: PBUSH1D verify correctness
class PBUSH1D(InputTable):
    """PBUSH1D rod-type spring-and-damper property table.

    ``from_bdf`` flattens the optional SHOCKA, SPRING, DAMPER and GENER
    sections of the card into one flat record per property; absent
    sections are written with a type code of 0 (or the default integer)
    and default-valued fields.
    """
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PBUSH1D')

    def from_bdf(self, cards):
        card_ids = sorted(cards.keys())
        data = np.empty(len(card_ids), dtype=self.table_def.dtype)
        pid = data['PID']
        k = data['K']
        c = data['C']
        m = data['M']
        alpha = data['ALPHA']
        sa = data['SA']
        ea = data['EA']
        typea = data['TYPEA']
        cvt = data['CVT']
        cvc = data['CVC']
        expvt = data['EXPVT']
        expvc = data['EXPVC']
        idtsu = data['IDTSU']
        idtcu = data['IDTCU']
        idtsud = data['IDTSUD']
        idcsud = data['IDCSUD']
        types = data['TYPES']
        idts = data['IDTS']
        idcs = data['IDCS']
        idtdu1 = data['IDTDU1']
        idcdu1 = data['IDCDU1']
        typed = data['TYPED']
        idtd1 = data['IDTD1']
        idtd2 = data['IDTD2']
        idtdv1 = data['IDTDV1']
        idcdv1 = data['IDCDV1']
        typeg = data['TYPEG']
        idtg = data['IDTG']
        idcg = data['IDCG']
        idtdu2 = data['IDTDU2']
        idcdu2 = data['IDCDU2']
        idtdv2 = data['IDTDV2']
        idcdv2 = data['IDCDV2']
        typef = data['TYPEF']
        idtf = data['IDTF']
        idcf = data['IDCF']
        ut = data['UT']
        uc = data['UC']
        default_double = Defaults.default_double
        default_int = Defaults.default_int
        i = -1
        for card_id in card_ids:
            i += 1
            card = cards[card_id]
            pid[i] = card.pid
            k[i] = card.k
            c[i] = card.c
            m[i] = card.m
            alpha[i] = default_double
            sa[i] = card.sa
            # NOTE(review): card attribute is 'se' -- confirm it maps to EA
            ea[i] = card.se
            # SHOCKA section: type code 1 = TABLE, 2 = EQUAT, 0 = absent
            shock_type = card.__dict__.get('shock_type', None)
            if shock_type is None:
                typea[i] = 0
                cvt[i] = default_double
                cvc[i] = default_double
                expvt[i] = default_double
                expvc[i] = default_double
                idtsu[i] = default_int
                idtcu[i] = default_int
                idtsud[i] = default_int
                idcsud[i] = default_int
            else:
                if shock_type == 'TABLE':
                    shock_type = 1
                elif shock_type == 'EQUAT':
                    shock_type = 2
                assert shock_type in (1, 2)
                typea[i] = shock_type
                cvt[i] = card.shock_cvt
                cvc[i] = card.shock_cvc
                expvt[i] = card.shock_exp_vt
                expvc[i] = card.shock_exp_vc
                if shock_type == 1:
                    idtsu[i] = card.shock_idts
                    idtcu[i] = default_int
                    idtsud[i] = default_int
                    idcsud[i] = default_int
                else:
                    # BUGFIX: was 'itdsu', an undefined name that raised a
                    # NameError for every EQUAT shock section.
                    # NOTE(review): confirm the card attribute names
                    # idets/idecs/idetsd/idecsd against the BDF card class.
                    idtsu[i] = card.idets
                    idtcu[i] = card.idecs
                    idtsud[i] = card.idetsd
                    idcsud[i] = card.idecsd
            # SPRING section: type code 1 = TABLE, 2 = EQUAT, 0 = absent
            spring_type = card.__dict__.get('spring_type', None)
            if spring_type is None:
                types[i] = 0
                idts[i] = default_int
                idcs[i] = default_int
                idtdu1[i] = default_int
                idcdu1[i] = default_int
            else:
                if spring_type == 'TABLE':
                    spring_type = 1
                elif spring_type == 'EQUAT':
                    spring_type = 2
                assert spring_type in (1, 2)
                types[i] = spring_type
                idts[i] = card.spring_idt
                idcs[i] = card.spring_idc
                idtdu1[i] = card.spring_idtdu
                idcdu1[i] = card.spring_idcdu
            # DAMPER section: type code 1 = TABLE, 2 = EQUAT
            damper_type = card.__dict__.get('damper_type', None)
            if damper_type is None:
                # NOTE(review): absent SHOCKA/SPRING/GENER store 0 here,
                # but absent DAMPER stores default_int -- confirm intended.
                typed[i] = default_int
                idtd1[i] = default_int
                idtd2[i] = default_int
                idtdv1[i] = default_int
                idcdv1[i] = default_int
            else:
                if damper_type == 'TABLE':
                    damper_type = 1
                elif damper_type == 'EQUAT':
                    damper_type = 2
                assert damper_type in (1, 2)
                typed[i] = damper_type
                idtd1[i] = card.damper_idt
                idtd2[i] = card.damper_idc
                idtdv1[i] = card.damper_idtdv
                idcdv1[i] = card.damper_idcdv
            # GENER section: always an equation (type code 2) when present
            gener_idt = card.__dict__.get('gener_idt', None)
            if gener_idt is None:
                typeg[i] = 0
                idtg[i] = default_int
                idcg[i] = default_int
                idtdu2[i] = default_int
                idcdu2[i] = default_int
                idtdv2[i] = default_int
                idcdv2[i] = default_int
            else:
                typeg[i] = 2
                idtg[i] = card.gener_idt
                idcg[i] = card.gener_idc
                idtdu2[i] = card.gener_idtdu
                idcdu2[i] = card.gener_idcdu
                idtdv2[i] = card.gener_idtdv
                idcdv2[i] = card.gener_idcdv
            # Fields with no BDF-side source are marked unknown.
            typef[i] = Defaults.unknown_int
            idtf[i] = Defaults.unknown_int
            idcf[i] = Defaults.unknown_int
            ut[i] = Defaults.unknown_double
            uc[i] = Defaults.unknown_double
        return {'IDENTITY': data}
########################################################################################################################
# TODO: PBUSH2D is complex
# class PBUSH2D(CardTable):
# table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PBUSH2D/IDENTITY')
########################################################################################################################
class PBUSHT(InputTable):
    """PBUSHT property table (/NASTRAN/INPUT/PROPERTY/PBUSHT)."""
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PBUSHT')
########################################################################################################################
class PCOHE(InputTable):
    """PCOHE property table (/NASTRAN/INPUT/PROPERTY/PCOHE)."""
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PCOHE')
########################################################################################################################
class PCOMP(InputTable):
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PCOMP/IDENTITY')

    def from_bdf(self, cards):
        """Convert PCOMP cards into IDENTITY rows plus a PLY subtable.

        Each card's plies are appended to the PLY table and located from
        the IDENTITY row via PLY_POS/PLY_LEN.
        """
        # failure-theory string -> stored integer code
        _ft = {
            None: Defaults.default_int,
            '': Defaults.default_int,
            'HILL': 1,
            'HOFF': 2,
            'TSAI': 3,
            'STRN': 4
        }
        # TODO: check that sout is correct
        _convert_sout = {'YES': 1, 'NO': 0}
        ply = {
            'IDENTITY': {'MID': [], 'T': [], 'THETA': [], 'SOUT': []}
        }
        data = {
            'IDENTITY': {'PID': [],
                         'NPLIES': [],
                         'Z0': [],
                         'NSM': [],
                         'SB': [],
                         'FT': [],
                         'TREF': [],
                         'GE': [],
                         'PLY_POS': [],
                         'PLY_LEN': []},
            'PLY': ply,
            '_subtables': ['PLY']
        }
        identity = data['IDENTITY']
        pid = identity['PID']
        nplies = identity['NPLIES']
        z0 = identity['Z0']
        nsm = identity['NSM']
        sb = identity['SB']
        ft = identity['FT']
        tref = identity['TREF']
        ge = identity['GE']
        ply_pos = identity['PLY_POS']
        ply_len = identity['PLY_LEN']
        ply = ply['IDENTITY']
        mid = ply['MID']
        t = ply['T']
        theta = ply['THETA']
        sout = ply['SOUT']
        # CONSISTENCY: every other from_bdf in this module iterates
        # sorted(cards.keys()); drop the py2-era iterkeys helper.
        card_ids = sorted(cards.keys())
        _ply_pos = 0
        for card_id in card_ids:
            card = cards[card_id]
            _plies = len(card.material_ids)
            pid.append(card.pid)
            nplies.append(_plies)
            # round to suppress float noise in z0 before it is written out
            z0.append(round(card.z0, 15))
            nsm.append(card.nsm)
            sb.append(card.sb)
            ft.append(_ft[card.ft])
            tref.append(card.tref)
            ge.append(card.ge)
            ply_pos.append(_ply_pos)
            ply_len.append(_plies)
            _ply_pos += _plies
            mid.extend(list(card.material_ids))
            t.extend(list(card.thicknesses))
            theta.extend(card.thetas)
            # unknown SOUT strings fall back to 0 (NO)
            sout.extend([_convert_sout.get(_, 0) for _ in card.souts])
        return data
########################################################################################################################
class PCOMPF(InputTable):
    """PCOMPF property table; maps between the IDLIST_* and LIST_* column names."""
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PCOMPF/IDENTITY',
                                rename={'IDLIST_POS': 'LIST_POS', 'IDLIST_LEN': 'LIST_LEN'})
########################################################################################################################
class PCOMPG(InputTable):
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PCOMPG/IDENTITY')

    def from_bdf(self, cards):
        """Convert PCOMPG cards into IDENTITY rows plus a PLY subtable.

        Ply rows are located from each IDENTITY row via PLY_POS/PLY_LEN.
        Columns that have no BDF-side source (MICRO and the micromechanics
        ply columns MIDMTX/VF/VV/CTEMP/MOIST/CRIT/NFTI/FTI) are filled
        with the unknown markers.
        """
        # failure-theory string -> stored integer code
        _ft = {
            None: Defaults.default_int,
            '': Defaults.default_int,
            'HILL': 1,
            'HOFF': 2,
            'TSAI': 3,
            'STRN': 4
        }
        # TODO: check that sout is correct
        _convert_sout = {'YES': 1, 'NO': 0}
        ply = {
            'IDENTITY': {'GPLYID': [], 'MID': [], 'THICK': [], 'THETA': [], 'SOUT': [], 'MIDMTX': [],
                         'VF': [], 'VV': [], 'CTEMP': [], 'MOIST': [], 'CRIT': [], 'NFTI': [], 'FTI': []}
        }
        result = {
            'IDENTITY': {'PID': [],
                         'NPLIES': [],
                         'Z0': [],
                         'NSM': [],
                         'SB': [],
                         'FT': [],
                         'TREF': [],
                         'GE': [],
                         'MICRO': [],
                         'PLY_POS': [],
                         'PLY_LEN': [],
                         'DOMAIN_ID': []
                         },
            'PLY': ply,
            '_subtables': ['PLY']
        }
        identity = result['IDENTITY']
        pid = identity['PID']
        nplies = identity['NPLIES']
        z0 = identity['Z0']
        nsm = identity['NSM']
        sb = identity['SB']
        ft = identity['FT']
        tref = identity['TREF']
        ge = identity['GE']
        micro = identity['MICRO']
        ply_pos = identity['PLY_POS']
        ply_len = identity['PLY_LEN']
        ply = ply['IDENTITY']
        gplyid = ply['GPLYID']
        mid = ply['MID']
        thick = ply['THICK']
        theta = ply['THETA']
        sout = ply['SOUT']
        midmtx = ply['MIDMTX']
        vf = ply['VF']
        vv = ply['VV']
        ctemp = ply['CTEMP']
        moist = ply['MOIST']
        crit = ply['CRIT']
        nfti = ply['NFTI']
        fti = ply['FTI']
        # CONSISTENCY: every other from_bdf in this module iterates
        # sorted(cards.keys()); drop the py2-era iterkeys helper.
        card_ids = sorted(cards.keys())
        _pos = 0
        for card_id in card_ids:
            card = cards[card_id]
            pid.append(card.pid)
            n = len(card.thicknesses)
            nplies.append(n)
            z0.append(card.z0)
            nsm.append(card.nsm)
            sb.append(card.sb)
            ft.append(_ft[card.ft])
            tref.append(card.tref)
            ge.append(card.ge)
            micro.append(Defaults.unknown_str)
            ply_pos.append(_pos)
            ply_len.append(n)
            _pos += n
            gplyid += list(card.global_ply_ids)
            mid += list(card.mids)
            thick += list(card.thicknesses)
            theta += list(card.thetas)
            # ROBUSTNESS: unknown SOUT strings fall back to 0 (NO) instead
            # of raising KeyError, matching the PCOMP writer above.
            sout += [_convert_sout.get(_, 0) for _ in card.souts]
            midmtx += [Defaults.unknown_int] * n
            vf += [Defaults.unknown_double] * n
            vv += [Defaults.unknown_double] * n
            ctemp += [Defaults.unknown_double] * n
            moist += [Defaults.unknown_double] * n
            crit += [Defaults.unknown_str] * n
            nfti += [Defaults.unknown_int] * n
            fti += [Defaults.unknown_str] * n
        return result
########################################################################################################################
class PCOMPLS(InputTable):
    """PCOMPLS property table (/NASTRAN/INPUT/PROPERTY/PCOMPLS/IDENTITY)."""
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PCOMPLS/IDENTITY')
########################################################################################################################
class PCONEAX(InputTable):
    """PCONEAX property table (/NASTRAN/INPUT/PROPERTY/PCONEAX)."""
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PCONEAX')
########################################################################################################################
class PCONV(InputTable):
    """PCONV property table (/NASTRAN/INPUT/PROPERTY/PCONV)."""
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PCONV')
########################################################################################################################
class PCONV1(InputTable):
    """PCONV1 property table (/NASTRAN/INPUT/PROPERTY/PCONV1)."""
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PCONV1')
########################################################################################################################
class PCONVM(InputTable):
    """PCONVM property table (/NASTRAN/INPUT/PROPERTY/PCONVM)."""
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PCONVM')
########################################################################################################################
class PDAMP(InputTable):
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PDAMP')

    def from_bdf(self, cards):
        """Convert PDAMP cards to the 'IDENTITY' structured array."""
        card_ids = sorted(cards.keys())
        data = np.empty(len(card_ids), dtype=self.table_def.dtype)
        for row, card_id in enumerate(card_ids):
            card = cards[card_id]
            data['PID'][row] = card.pid
            data['B'][row] = card.b
        return {'IDENTITY': data}
########################################################################################################################
class PDAMP5(InputTable):
    """PDAMP5 property table (/NASTRAN/INPUT/PROPERTY/PDAMP5)."""
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PDAMP5')
########################################################################################################################
class PDAMPT(InputTable):
    """PDAMPT property table (/NASTRAN/INPUT/PROPERTY/PDAMPT)."""
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PDAMPT')
########################################################################################################################
class PELAS(InputTable):
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PELAS')

    def from_bdf(self, cards):
        """Convert PELAS cards to the 'IDENTITY' structured array."""
        card_ids = sorted(cards.keys())
        data = np.empty(len(card_ids), dtype=self.table_def.dtype)
        for row, card_id in enumerate(card_ids):
            card = cards[card_id]
            data['PID'][row] = card.pid
            data['K'][row] = card.k
            data['GE'][row] = card.ge
            data['S'][row] = card.s
        return {'IDENTITY': data}
########################################################################################################################
class PELAST(InputTable):
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PELAST')

    def from_bdf(self, cards):
        """Convert PELAST cards to the 'IDENTITY' structured array."""
        card_ids = sorted(cards.keys())
        data = np.empty(len(card_ids), dtype=self.table_def.dtype)
        for row, card_id in enumerate(card_ids):
            card = cards[card_id]
            data['PID'][row] = card.pid
            data['TKID'][row] = card.tkid
            data['TGEID'][row] = card.tgeid
            data['TKNID'][row] = card.tknid
        return {'IDENTITY': data}
########################################################################################################################
class PFAST(InputTable):
    """PFAST property table (/NASTRAN/INPUT/PROPERTY/PFAST)."""
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PFAST')
########################################################################################################################
class PGAP(InputTable):
    """PGAP property table (/NASTRAN/INPUT/PROPERTY/PGAP)."""
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PGAP')
########################################################################################################################
class PHBDY(InputTable):
    """PHBDY property table (/NASTRAN/INPUT/PROPERTY/PHBDY)."""
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PHBDY')
########################################################################################################################
class PLCOMP(InputTable):
    """PLCOMP property table (/NASTRAN/INPUT/PROPERTY/PLCOMP/IDENTITY)."""
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PLCOMP/IDENTITY')
########################################################################################################################
class PLPLANE(InputTable):
    """PLPLANE property table (/NASTRAN/INPUT/PROPERTY/PLPLANE)."""
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PLPLANE')
########################################################################################################################
class PLSOLID(InputTable):
    """PLSOLID property table (/NASTRAN/INPUT/PROPERTY/PLSOLID)."""
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PLSOLID')
########################################################################################################################
class PMASS(InputTable):
    """PMASS property table (/NASTRAN/INPUT/PROPERTY/PMASS)."""
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PMASS')
########################################################################################################################
class PROD(InputTable):
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PROD')

    def from_bdf(self, cards):
        """Convert PROD cards to the 'IDENTITY' structured array."""
        # (dtype field, card attribute) pairs copied one-to-one.
        field_map = (('PID', 'pid'), ('MID', 'mid'), ('A', 'A'),
                     ('J', 'j'), ('C', 'c'), ('NSM', 'nsm'))
        card_ids = sorted(cards.keys())
        data = np.empty(len(card_ids), dtype=self.table_def.dtype)
        for row, card_id in enumerate(card_ids):
            card = cards[card_id]
            for field, attr in field_map:
                data[field][row] = getattr(card, attr)
        return {'IDENTITY': data}
########################################################################################################################
class PRODN1(InputTable):
    """PRODN1 property table (/NASTRAN/INPUT/PROPERTY/PRODN1)."""
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PRODN1')
########################################################################################################################
class PSEAM(InputTable):
    """PSEAM property table (/NASTRAN/INPUT/PROPERTY/PSEAM)."""
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PSEAM')
########################################################################################################################
class PSHEAR(InputTable):
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PSHEAR')

    def from_bdf(self, cards):
        """Convert PSHEAR cards to the 'IDENTITY' structured array."""
        # (dtype field, card attribute) pairs copied one-to-one.
        field_map = (('PID', 'pid'), ('MID', 'mid'), ('T', 't'),
                     ('NSM', 'nsm'), ('F1', 'f1'), ('F2', 'f2'))
        card_ids = sorted(cards.keys())
        data = np.empty(len(card_ids), dtype=self.table_def.dtype)
        for row, card_id in enumerate(card_ids):
            card = cards[card_id]
            for field, attr in field_map:
                data[field][row] = getattr(card, attr)
        return {'IDENTITY': data}
########################################################################################################################
class PSHEARN(InputTable):
    """PSHEARN property table (/NASTRAN/INPUT/PROPERTY/PSHEARN)."""
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PSHEARN')
########################################################################################################################
class PSHELL(InputTable):
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PSHELL')

    def from_bdf(self, cards):
        """Convert PSHELL cards to the 'IDENTITY' structured array.

        mid2/mid3/mid4 may be None on the card; they are stored as the
        default integer in that case.
        """
        no_mid = Defaults.default_int
        card_ids = sorted(cards.keys())
        data = np.empty(len(card_ids), dtype=self.table_def.dtype)
        for row, card_id in enumerate(card_ids):
            card = cards[card_id]
            data['PID'][row] = card.pid
            data['MID1'][row] = card.mid1
            data['T'][row] = card.t
            data['MID2'][row] = no_mid if card.mid2 is None else card.mid2
            data['BK'][row] = card.twelveIt3  # 12I/t^3 bending ratio (card attr twelveIt3)
            data['MID3'][row] = no_mid if card.mid3 is None else card.mid3
            data['TS'][row] = card.tst
            data['NSM'][row] = card.nsm
            data['Z1'][row] = card.z1
            data['Z2'][row] = card.z2
            data['MID4'][row] = no_mid if card.mid4 is None else card.mid4
        return {'IDENTITY': data}
########################################################################################################################
class PSHLN1(InputTable):
    """PSHLN1 property table (/NASTRAN/INPUT/PROPERTY/PSHLN1)."""
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PSHLN1')
########################################################################################################################
class PSHLN2(InputTable):
    """PSHLN2 property table (/NASTRAN/INPUT/PROPERTY/PSHLN2)."""
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PSHLN2')
########################################################################################################################
class PSLDN1(InputTable):
    """PSLDN1 property table (/NASTRAN/INPUT/PROPERTY/PSLDN1)."""
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PSLDN1')
########################################################################################################################
class PSOLID(InputTable):
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PSOLID')

    def from_bdf(self, cards):
        """Convert PSOLID cards to the 'IDENTITY' structured array.

        The integration scheme (IN), stress location (STRESS) and
        integration order (ISOP) accept either integer or string codes on
        the card; both spellings map to the same stored integer.
        """
        no_value = Defaults.default_int
        integ_codes = {
            0: 0, 1: 1, 2: 2, 3: 3,
            'BUBBLE': 0, 'GAUSS': 1, 'TWO': 2, 'THREE': 3,
            '': no_value, None: no_value,
        }
        stress_codes = {
            'GRID': no_value, 'GAUSS': 1, 1: 1,
            '': no_value, None: no_value,
        }
        isop_codes = {
            0: 0, 1: 1, 'REDUCED': 0, 'FULL': 1,
            '': no_value, None: no_value,
        }
        card_ids = sorted(cards.keys())
        data = np.empty(len(card_ids), dtype=self.table_def.dtype)
        for row, card_id in enumerate(card_ids):
            card = cards[card_id]
            data['PID'][row] = card.pid
            data['MID'][row] = card.mid
            data['CORDM'][row] = card.cordm
            data['IN'][row] = integ_codes[card.integ]
            data['STRESS'][row] = stress_codes[card.stress]
            data['ISOP'][row] = isop_codes[card.isop]
            data['FCTN'][row] = card.fctn
        return {'IDENTITY': data}
########################################################################################################################
# msc format missing OD2
class PTUBE_SPEC(object):
    """Hand-written table spec for PTUBE (the MSC format is missing OD2)."""
    name = 'PTUBE'
    path = '/NASTRAN/INPUT/PROPERTY'
    # numpy structured dtype for one PTUBE record
    dtype = [('PID', '<i8', ()), ('MID', '<i8', ()), ('OD', '<f8', ()), ('T', '<f8', ()), ('NSM', '<f8', ()),
             ('OD2', '<f8', ()), ('DOMAIN_ID', '<i8', ())]
    is_subtable = False
    same_as = 'None'
    subtables = []
class PTUBE(InputTable):
    table_def = TableDef.create(PTUBE_SPEC)

    def from_bdf(self, cards):
        """Convert PTUBE cards to the 'IDENTITY' structured array."""
        # (dtype field, card attribute) pairs copied one-to-one.
        field_map = (('PID', 'pid'), ('MID', 'mid'), ('OD', 'OD1'),
                     ('T', 't'), ('NSM', 'nsm'), ('OD2', 'OD2'))
        card_ids = sorted(cards.keys())
        data = np.empty(len(card_ids), dtype=self.table_def.dtype)
        for row, card_id in enumerate(card_ids):
            card = cards[card_id]
            for field, attr in field_map:
                data[field][row] = getattr(card, attr)
        return {'IDENTITY': data}
########################################################################################################################
class PVISC(InputTable):
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PVISC')

    def from_bdf(self, cards):
        """Convert PVISC cards to the 'IDENTITY' structured array."""
        card_ids = sorted(cards.keys())
        data = np.empty(len(card_ids), dtype=self.table_def.dtype)
        for row, card_id in enumerate(card_ids):
            card = cards[card_id]
            data['PID'][row] = card.pid
            data['CE'][row] = card.ce
            data['CR'][row] = card.cr
        return {'IDENTITY': data}
########################################################################################################################
class PWELD(InputTable):
    """PWELD property table (/NASTRAN/INPUT/PROPERTY/PWELD)."""
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/PWELD')
########################################################################################################################
class SNORM(InputTable):
    """SNORM table (/NASTRAN/INPUT/PROPERTY/SNORM)."""
    table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/SNORM')
########################################################################################################################
# TODO: VCCT - where and how is dataset SETID used?
# class VCCT(CardTable):
# """
# <group name="VCCT">
# <dataset name="GRID">
# <field name="GI" type="integer"/>
# </dataset>
# <dataset name="IDENTITY">
# <field name="ID" type="integer"/>
# <field name="IDCR" type="integer"/>
# <field name="ITYPE" type="integer"/>
# <field name="IGROW" type="integer"/>
# <field name="INCM" type="integer"/>
# <field name="METHOD" type="integer"/>
# <field name="TIME" type="double"/>
# <field name="IACT" type="integer"/>
# <field name="CGI" type="double"/>
# <field name="GC" type="double"/>
# <field name="GTH" type="double"/>
# <field name="C" type="double"/>
# <field name="M" type="double"/>
# <field name="GMIN" type="double"/>
# <field name="GC2" type="double"/>
# <field name="GC3" type="double"/>
# <field name="TABCGI" type="integer"/>
# <field name="TABGC" type="integer"/>
# <field name="TABGTH" type="integer"/>
# <field name="TABC" type="integer"/>
# <field name="TABM" type="integer"/>
# <field name="TABGMIN" type="integer"/>
# <field name="TABGC2" type="integer"/>
# <field name="TABGC3" type="integer"/>
# <field name="GRID_POS" type="integer"/>
# <field name="GRID_LEN" type="integer"/>
# <field name="THBY_POS" type="integer"/>
# <field name="THBY_LEN" type="integer"/>
# <field name="DOMAIN_ID" type="integer"/>
# </dataset>
# <dataset name="SETID">
# <field name="SET3ID" type="integer"/>
# </dataset>
# <dataset name="THBY">
# <field name="G1" type="integer"/>
# <field name="G2" type="integer"/>
# <field name="GINC" type="integer"/>
# </dataset>
# </group>
# """
# table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/VCCT/IDENTITY')
########################################################################################################################
# TODO: VIEWEX doesn't conform to the subtable pos/len scheme
# class VIEWEX(CardTable):
# table_def = TableDef.create('/NASTRAN/INPUT/PROPERTY/VIEWEX/IDENTITY')
########################################################################################################################
|
<filename>IoT/MHEALTH/models/CNN_shiftadd_se.py
from adder import adder
import torch
import torch.nn as nn
import torch.nn.functional as F
from se_shift import SEConv2d, SELinear
__all__ = ['CNN_shiftadd_se']
def conv_add(in_planes, out_planes, threshold, sign_threshold, distribution, kernel_size=(3, 3), stride=1, padding=0, quantize=False, weight_bits=8, sparsity=0):
    """SEConv2d "shift" convolution followed by a 1x1 Adder2D, as a Sequential.

    NOTE(review): the 1x1 adder reuses ``padding`` from the shift conv; a
    nonzero padding would therefore pad twice -- confirm this is intended.
    """
    shift = SEConv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, bias=False, threshold=threshold, sign_threshold=sign_threshold, distribution=distribution)
    add = adder.Adder2D(out_planes, out_planes, kernel_size=(1,1), stride=1, padding=padding, bias=False, quantize=quantize, weight_bits=weight_bits, sparsity=sparsity)
    return nn.Sequential(shift, add)
def last_fc(in_planes, out_planes, threshold, sign_threshold, distribution, kernel_size=(3, 3), stride=1, padding=0, quantize=False, weight_bits=8, sparsity=0):
    """Shift-only SEConv2d used as the final classifier layer (no adder stage)."""
    shift = SEConv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, bias=False, threshold=threshold, sign_threshold=sign_threshold, distribution=distribution)
    # add = adder.Adder2D(out_planes, out_planes, kernel_size=(1,1), stride=1, padding=padding, bias=False, quantize=quantize, weight_bits=weight_bits, sparsity=sparsity)
    return shift
class CNN(nn.Module):
    """Two-stage shift-add CNN classifier.

    Each conv stage is a SEConv2d shift convolution followed by a 1x1
    Adder2D (see ``conv_add``); the head is a shift-only 1x1 convolution
    (``last_fc``) followed by a BatchNorm2d over the class channels.
    """
    def __init__(self, num_classes, threshold, sign_threshold, distribution, quantize=False, weight_bits=8, sparsity=0):
        super(CNN, self).__init__()
        self.quantize = quantize
        self.weight_bits = weight_bits
        self.sparsity = sparsity
        self.threshold = threshold
        self.sign_threshold = sign_threshold
        self.distribution = distribution
        # conv1 takes a single input channel, so inputs are (N, 1, H, W).
        self.conv1 = conv_add(1, 5, threshold=self.threshold, sign_threshold=self.sign_threshold, distribution=self.distribution,
                              kernel_size=(5, 5), quantize=self.quantize, weight_bits=self.weight_bits, sparsity=self.sparsity)
        self.bn1 = nn.BatchNorm2d(5)
        self.conv2 = conv_add(5, 10, threshold=self.threshold, sign_threshold=self.sign_threshold, distribution=self.distribution,
                              kernel_size=(5, 5), quantize=self.quantize, weight_bits=self.weight_bits, sparsity=self.sparsity)
        self.bn2 = nn.BatchNorm2d(10)
        # self.conv3 = nn.Conv2d(36, 24, kernel_size=(12, 1))
        self.pool1 = nn.MaxPool2d((4,4))
        self.pool2 = nn.MaxPool2d((2,2))
        # 8120 is the flattened feature count after the conv/pool stages;
        # it is tied to the expected input spatial size -- TODO confirm if
        # the input resolution ever changes.
        self.fc1 = last_fc(8120, num_classes, threshold=self.threshold, sign_threshold=self.sign_threshold, distribution=self.distribution,
                           kernel_size=(1,1), quantize=self.quantize, weight_bits=self.weight_bits, sparsity=self.sparsity)
        # NOTE: despite the name, fc2 is a BatchNorm2d over the class logits.
        self.fc2 = nn.BatchNorm2d(num_classes)

    def forward(self, inputs):
        """Run the network; returns (N, num_classes) unnormalized scores."""
        x = self.pool1(F.relu(self.bn1(self.conv1(inputs))))
        x = self.pool2(F.relu(self.bn2(self.conv2(x))))
        # x = self.pool(F.relu(self.conv3(x)))
        # x = torch.flatten(x, start_dim=1)
        # Flatten, then restore singleton H/W dims so the 1x1 conv head can
        # act as a fully connected layer.
        x = x.view(x.size(0), -1)
        x = torch.unsqueeze(x, dim=2)
        x = torch.unsqueeze(x, dim=3)
        x = self.fc1(x)
        x = self.fc2(x)
        # return F.softmax(x)
        return x.view(x.size(0), -1)
def CNN_shiftadd_se(threshold, sign_threshold, distribution, num_classes=10, quantize=False, weight_bits=8, sparsity=0, quantize_v='sbm', **kwargs):
    """Factory for the shift-add CNN with squeeze-excitation shift layers.

    ``quantize_v`` and any extra keyword arguments are accepted for
    interface compatibility with sibling model factories and ignored.
    """
    model = CNN(
        num_classes,
        threshold=threshold,
        sign_threshold=sign_threshold,
        distribution=distribution,
        quantize=quantize,
        weight_bits=weight_bits,
        sparsity=sparsity,
    )
    return model
""" Unit tests for each geometry mesh transformation component."""
from __future__ import print_function, division
import numpy as np
import unittest
from openmdao.api import Problem, Group
from openmdao.utils.assert_utils import assert_rel_error, assert_check_partials
from openaerostruct.geometry.geometry_mesh_transformations import \
Taper, ScaleX, Sweep, ShearX, Stretch, ShearY, Dihedral, \
ShearZ, Rotate
from openaerostruct.geometry.utils import generate_mesh
# These have been chosen so that each dimension of the intermediate ndarrays is unique.
NY = 7
NX = 5
def get_mesh(symmetry):
    """Return a randomly perturbed CRM mesh for testing.

    Fix: removed the dead local ``surface`` dict (built but never used or
    returned) and the unused ``twist_cp`` binding.

    Parameters
    ----------
    symmetry : bool
        Whether the mesh is generated in symmetric (half-span) mode.

    Returns
    -------
    numpy.ndarray
        Mesh of shape (NX, ny, 3) with randomised coordinates.
    """
    # NOTE(review): the symmetric case requests *more* spanwise points
    # (2*NY - 1); presumably generate_mesh halves it internally -- confirm.
    ny = (2*NY - 1) if symmetry else NY

    # Options describing the mesh to generate.
    mesh_dict = {'num_y': ny,
                 'num_x': NX,
                 'wing_type': 'CRM',
                 'symmetry': symmetry,
                 'num_twist_cp': NY}

    # Generate the aerodynamic mesh; the twist control points are unused here.
    mesh, _ = generate_mesh(mesh_dict)

    # Random perturbations to the mesh so that we don't mask errors subtractively.
    mesh[:, :, 0] += 0.05*np.random.random(mesh[:, :, 2].shape)
    mesh[:, :, 1] += 0.05*np.random.random(mesh[:, :, 2].shape)
    mesh[:, :, 2] = np.random.random(mesh[:, :, 2].shape)

    return mesh
class Test(unittest.TestCase):
    """Verify the analytic partials of every mesh-transformation component.

    Refactor: the fourteen original tests each repeated the same
    build/run/check boilerplate; it now lives in ``_check_comp``.
    Components come in two flavours: those constructed directly from a
    mesh (Taper) and those constructed from a mesh *shape* with the mesh
    fed in through the ``comp.in_mesh`` input (all others).
    """

    def _check_comp(self, comp, in_mesh=None):
        """Run *comp* alone in a Problem and assert its partials are correct.

        Args:
            comp: Component instance under test.
            in_mesh: Mesh loaded into ``comp.in_mesh`` after setup, or None
                for components that received the mesh at construction time.
        """
        prob = Problem()
        prob.model.add_subsystem('comp', comp)
        prob.setup()
        if in_mesh is not None:
            prob['comp.in_mesh'] = in_mesh
        prob.run_model()

        check = prob.check_partials(compact_print=True, abs_err_tol=1e-5,
                                    rel_err_tol=1e-5)
        assert_check_partials(check, atol=1e-6, rtol=1e-6)

    def test_taper(self):
        mesh = get_mesh(False)
        self._check_comp(Taper(val=np.random.random(1), mesh=mesh,
                               symmetry=False))

    def test_taper_symmetry(self):
        mesh = get_mesh(True)
        self._check_comp(Taper(val=np.random.random(1), mesh=mesh,
                               symmetry=True))

    def test_scalex(self):
        mesh = get_mesh(False)
        self._check_comp(ScaleX(val=np.random.random(NY),
                                mesh_shape=mesh.shape), in_mesh=mesh)

    def test_scalex_symmetry(self):
        mesh = get_mesh(True)
        self._check_comp(ScaleX(val=np.random.random(NY),
                                mesh_shape=mesh.shape), in_mesh=mesh)

    def test_sweep(self):
        mesh = get_mesh(False)
        self._check_comp(Sweep(val=np.random.random(1),
                               mesh_shape=mesh.shape, symmetry=False),
                         in_mesh=mesh)

    def test_sweep_symmetry(self):
        mesh = get_mesh(True)
        self._check_comp(Sweep(val=np.random.random(1),
                               mesh_shape=mesh.shape, symmetry=True),
                         in_mesh=mesh)

    def test_shearx(self):
        mesh = get_mesh(False)
        self._check_comp(ShearX(val=np.random.random(NY),
                                mesh_shape=mesh.shape), in_mesh=mesh)

    def test_stretch(self):
        mesh = get_mesh(False)
        self._check_comp(Stretch(val=np.random.random(1),
                                 mesh_shape=mesh.shape, symmetry=False),
                         in_mesh=mesh)

    def test_stretch_symmetry(self):
        mesh = get_mesh(True)
        self._check_comp(Stretch(val=np.random.random(1),
                                 mesh_shape=mesh.shape, symmetry=True),
                         in_mesh=mesh)

    def test_sheary(self):
        mesh = get_mesh(False)
        self._check_comp(ShearY(val=np.random.random(NY),
                                mesh_shape=mesh.shape), in_mesh=mesh)

    def test_dihedral(self):
        mesh = get_mesh(False)
        # A larger angle range than the other scalar components.
        self._check_comp(Dihedral(val=15.0*np.random.random(1),
                                  mesh_shape=mesh.shape, symmetry=False),
                         in_mesh=mesh)

    def test_dihedral_symmetry(self):
        mesh = get_mesh(True)
        self._check_comp(Dihedral(val=np.random.random(1),
                                  mesh_shape=mesh.shape, symmetry=True),
                         in_mesh=mesh)

    def test_shearz(self):
        mesh = get_mesh(False)
        self._check_comp(ShearZ(val=np.random.random(NY),
                                mesh_shape=mesh.shape), in_mesh=mesh)

    def test_rotate(self):
        mesh = get_mesh(False)
        self._check_comp(Rotate(val=np.random.random(NY),
                                mesh_shape=mesh.shape, symmetry=False),
                         in_mesh=mesh)

    def test_rotate_symmetry(self):
        mesh = get_mesh(True)
        self._check_comp(Rotate(val=np.random.random(NY),
                                mesh_shape=mesh.shape, symmetry=True),
                         in_mesh=mesh)
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
|
import sys
sys.path.append(".")
import py
#from sympy import *
from sympy.numerics import *
from sympy.numerics.functions import *
import math
import cmath
from sympy.utilities.pytest import XFAIL
def test_sqrt():
    """sqrt: exact perfect squares, float agreement, and the complex branch."""
    # Perfect squares must come back exactly as integers.
    for i in range(1000):
        assert sqrt(Float(i**2)) == i
    # These should round identically
    for x in [0, 1e-7, 0.1, 0.5, 1, 2, 3, 4, 5, 0.333, 76.19]:
        assert sqrt(Float(x)) == float(x)**0.5
    # Negative and complex arguments follow cmath's principal branch.
    assert sqrt(-1) == 1j
    assert sqrt(-2).ae(cmath.sqrt(-2))
    assert sqrt(-3).ae(cmath.sqrt(-3))
    assert sqrt(-100).ae(cmath.sqrt(-100))
    assert sqrt(1j).ae(cmath.sqrt(1j))
    assert sqrt(-1j).ae(cmath.sqrt(-1j))
    assert sqrt(math.pi + math.e*1j).ae(cmath.sqrt(math.pi + math.e*1j))
    assert sqrt(math.pi - math.e*1j).ae(cmath.sqrt(math.pi - math.e*1j))
def test_hypot():
    """hypot: zero, single-sided, negative, and the 3-4-5 triangle."""
    assert hypot(0, 0) == 0
    assert hypot(0, 0.33) == Float(0.33)
    assert hypot(0.33, 0) == Float(0.33)
    # The sign of an argument must not affect the result.
    assert hypot(-0.33, 0) == Float(0.33)
    assert hypot(3, 4) == Float(5)
def test_exp():
    """exp at 0, extreme magnitudes, a log2 identity, and a complex point."""
    assert exp(0) == 1
    # Very large/small arguments exercise the extended exponent range.
    assert exp(10000).ae(Float('8.8068182256629215873e4342'))
    assert exp(-10000).ae(Float('1.1354838653147360985e-4343'))
    # exp(10 * log(2)) == 2**10
    assert exp(log2_float() * Float(10)).ae(1024)
    assert exp(2+2j).ae(cmath.exp(2+2j))
def test_log():
    """log for reals across magnitudes, explicit bases, and a complex point."""
    assert log(1) == 0
    for x in [0.5, 1.5, 2.0, 3.0, 100, 10**50, 1e-50]:
        assert log(x) == math.log(x)
        # log base x of x is always 1.
        assert log(x, x) == 1
    assert log(1024, 2) == 10
    # Huge integer argument with an explicit base.
    assert log(10**1234, 10) == 1234
    assert log(2+2j).ae(cmath.log(2+2j))
@XFAIL
def test_trig_basic():
    """cos/sin/tan agree with math/cmath over a grid of real and complex points.

    Fix: ``range(100) + range(-100, 0)`` was Python 2 syntax -- range
    objects cannot be concatenated in Python 3, so both ranges are
    materialised as lists before concatenation.
    """
    for x in list(range(100)) + list(range(-100, 0)):
        t = x / 4.1
        assert cos(Float(t)).ae(math.cos(t))
        assert sin(Float(t)).ae(math.sin(t))
        assert tan(Float(t)).ae(math.tan(t))
    assert sin(1+1j).ae(cmath.sin(1+1j))
    assert sin(-4-3.6j).ae(cmath.sin(-4-3.6j))
def test_trig_hard():
    """sin/cos at points that require careful argument reduction."""
    # Huge argument: correct answers need high-precision mod-2*pi reduction.
    assert sin(Float(10**50, 150)).ae(-0.7896724934293100827)
    assert cos(Float(10**50, 150)).ae(-0.6135286082336635622)
    # Tiny argument: sin(x) ~ x, cos(x) ~ 1 - x**2/2.
    assert sin(1e-6).ae(9.999999999998333e-007)
    assert cos(1e-6).ae(0.9999999999995)
def test_atan():
    """atan and four-quadrant atan2 agree with the math module.

    Fix: removed the redundant function-local ``import math`` -- the module
    is already imported at file scope.
    """
    assert atan(-2.3).ae(math.atan(-2.3))
    assert atan2(1,1).ae(math.atan2(1,1))
    assert atan2(1,-1).ae(math.atan2(1,-1))
    assert atan2(-1,-1).ae(math.atan2(-1,-1))
    assert atan2(-1,1).ae(math.atan2(-1,1))
    # x == 0 edge cases must return +/- pi/2.
    assert atan2(-1,0).ae(math.atan2(-1,0))
    assert atan2(1,0).ae(math.atan2(1,0))
|
<reponame>jmcb/jquizzyva
#!/usr/bin/env python
import functools
import re
# Matches one bracketed character class such as "[ABC]", "[^ABC]" or "[:V]";
# the captured group is the class content without the surrounding brackets.
# Fix: use a raw string -- "\[" in a non-raw string is an invalid escape
# sequence (SyntaxWarning on modern Python); the byte value is unchanged.
SET_FINDER = re.compile(r"\[(\^?:?[A-Z]+)\]")

# Upper bound on accepted word length.
# NOTE(review): AnagramPattern.__len__'s docstring says 15 but this is 16 -- verify.
MAX_WORD_LENGTH = 16
try:
from util._pattern import try_word, CAnagramPattern
except ImportError:
try_word = None
CAnagramPattern = None
class AnagramPattern (object):
    """
    A pattern consists primarily of a series of letters, placeholders,
    wildcards and sets of characters, which represents a variety of possible
    words by way of an expression. It also provides an interface for testing
    whether or not a letter matches the current pattern: if the letter matches,
    the pattern is modified and a true value is return. Thus, letter-by-letter
    testing of words can take place. For convenience, there exists an interface
    for testing a word, which clones the object and performs a letter-by-letter
    check of the word, returning a boolean value whenever one is available. The
    length of the incoming word is also compared against the "length" of the
    pattern (which is defined by a number of variables; see calc_length), and
    if it fails to meet these initial constraints, no checking of the word is
    needed and a False value is returned.

    For the most part, outside of the cloning of the object, the interface
    would appear to be very quick: the more 'different' a word is to our
    pattern, the sooner we are likely to encounter something that doesn't
    match, and thus the sooner we'll return a False value.
    """
    # Original pattern string (uppercased), e.g. "AB?[CD]*".
    pattern = None
    # Number of '?' placeholders in the pattern.
    blanks = 0
    # True when the pattern contains at least one '*' wildcard.
    wildcard = False
    # List of positive character sets, e.g. [AB] -> {'A', 'B'}.
    sets = None
    # List of negated character sets, e.g. [^AB].
    neg_sets = None
    # Required literal letters remaining in the pattern.
    letters = None
    # Cached C-accelerated pattern object (see as_cpattern).
    cpattern = None
    # "Actual" pattern length, as computed by calc_length.
    length = 0
    # Accumulates the blank/wildcard letters used by each successful try_word.
    blank_store = None

    def __init__ (self, subanagram=False):
        """
        Create a new pattern. It is possible to define a subanagram by passing
        True to the subanagram parameter, but for the most part it would be
        better to use the derivative class, SubPattern, instead.

        :param subanagram: Denote that this pattern's length constraints are
        flexible. Thus, if we reach the end of a pattern and we still have
        'letters left', and this value is True, we accept the word; if we
        reach the end and this value is False, we reject the word as not
        fully meeting the pattern criteria.
        """
        super(AnagramPattern, self).__init__()
        self.sets = []
        self.neg_sets = []
        self.letters = []
        self.blank_store = []
        self.subanagram = subanagram

    def __len__ (self):
        """
        Determine the "virtual" length of the pattern. If we contain wildcards,
        then the length of the is always 15 -- there is always the capacity to
        have more letters added when a wildcard is in play.
        """
        # NOTE(review): the docstring above says 15 but MAX_WORD_LENGTH is 16 -- verify.
        if self.wildcard:
            return MAX_WORD_LENGTH
        else:
            return self.length

    def calc_length (self):
        """
        Determine the "actual" length of the pattern, disregarding any
        wildcards that may be contained. This consists of the number of sets of
        letters, the number of "blank" placeholders, and the number of required
        letters still available.
        """
        return len(self.letters) + len(self.sets) + len(self.neg_sets) + self.blanks

    @classmethod
    def fromstring (cls, pattern, subanagram=False):
        """
        Create a new pattern based on a string.

        :param pattern: A pattern consists of any number of letters from A to
        Z, any number of placeholder ? symbols, any number of wildcard *
        symbols (though due to the nature of the pattern, only the first
        matters; multiple wildcards are redundant), and any number of sets,
        defined as a series of letters enclosed with brackets.
        :param subanagram: See the subanagram parameter of the Pattern __init__
        function.
        """
        pattern = pattern.upper()
        self = cls(subanagram=subanagram)
        self.pattern = pattern
        self.blanks = self.pattern.count("?")
        if self.pattern.count("*"):
            self.wildcard = True
        self.sets = []
        self.neg_sets = []
        # Strip placeholders/wildcards; the counts were recorded above.
        pattern = pattern.replace("?", "").replace("*", "")
        for cset in SET_FINDER.findall(pattern):
            # Remove the bracketed group so only bare letters remain at the end.
            pattern = pattern.replace("[%s]" % cset, "")
            nset = None
            neg = False
            # A leading '^' marks a negated set.
            if "^" in cset:
                neg = True
                cset = cset[1:]
            if ":" in cset:
                # Named shorthand classes; cset[1] is the letter after ':'.
                if cset[1] == "C": # Consonants
                    nset = set("BCDFGHJKLMNPQRSTVWXYZ")
                elif cset[1] == "V": # Vowels
                    nset = set("AEIOU")
                elif cset[1] == "H": # Heavies
                    nset = set("JKZQX")
                elif cset[1] == "M": # Mediums
                    nset = set("HFVWY")
                elif cset[1] == "L": # Lights
                    nset = set("PCMB")
                elif cset[1] == "T" or cset[1] == "P": # Twos and "Pips"
                    nset = set("AEIOUDGLNRST")
                # NOTE(review): an unrecognised ':X' shorthand leaves nset as
                # None, which is then appended below -- confirm intended.
            else:
                nset = set(cset)
            if neg:
                self.neg_sets.append(nset)
            else:
                self.sets.append(nset)
        self.letters = list(pattern)
        self.length = self.calc_length()
        return self

    def as_cpattern (self):
        """Return (and cache) the C-accelerated equivalent of this pattern,
        or None when the optional C extension is unavailable."""
        if self.cpattern is not None:
            return self.cpattern
        if CAnagramPattern is None:
            return None
        self.cpattern = CAnagramPattern(self.blanks, self.length, len(self.letters), [ord(l) for l in self.letters], len(self.sets), [[ord(l) for l in s] for s in self.sets], len(self.neg_sets), [[ord(l) for l in s] for s in self.neg_sets], self.subanagram, self.wildcard)
        return self.cpattern

    def try_word (self, word):
        """
        Statefully determine if a word matches the current pattern; this method
        clones the current pattern object and performs a letter-by-letter
        comparison.

        :param word: The word to be checked against the current pattern.
        """
        # NOTE(review): the C fast path returns a bare bool, while the Python
        # path below returns (bool, used_blanks) -- callers must cope with both.
        if try_word is not None:
            return bool(try_word(self.as_cpattern(), word))
        # Work on copies so this pattern object itself is not consumed.
        blanks = self.blanks
        letters = self.letters[:]
        sets = self.sets[:]
        nsets = self.neg_sets[:]
        length = self.length
        subanagram = self.subanagram
        wildcard = self.wildcard
        used_blanks = []
        wordlen = len(word)
        # Cheap length pre-checks before any per-letter work.
        if not wildcard and wordlen > length:
            return False, []
        if wordlen < length and not subanagram:
            return False, []
        for letter in word:
            # 1) Consume a required literal letter if possible.
            if letter in letters:
                del letters[letters.index(letter)]
                continue
            # 2) Negated sets: membership rejects outright; otherwise the
            # letter may consume one negated-set slot.
            got_nset = None
            for nind, nset in enumerate(nsets):
                if letter in nset:
                    return False, []
                else:
                    got_nset = nind
                    continue
            if got_nset is not None:
                del nsets[got_nset]
                used_blanks.append(letter)
                continue
            # 3) Positive sets: consume one set that contains the letter.
            got_set = None
            for cind, cset in enumerate(sets):
                if letter in cset:
                    got_set = cind
                else:
                    continue
            if got_set is not None:
                del sets[got_set]
                used_blanks.append(letter)
                continue
            # 4) Consume a '?' blank, then 5) fall back to the wildcard.
            if blanks > 0:
                blanks -= 1
                used_blanks.append(letter)
                continue
            if wildcard:
                used_blanks.append(letter)
                continue
            return False, []
        # NOTE(review): 'and' here means the word is only rejected when
        # letters AND sets AND blanks are all left over; rejecting when ANY
        # remain ('or') looks like the intent -- confirm before changing.
        if letters and sets and blanks and not subanagram:
            return False, []
        self.blank_store.append(used_blanks)
        return True, used_blanks

    def bounds (self):
        """
        Represent the bounds of this parameter as an SQLite statement.
        """
        if self.subanagram:
            return "words.length<=%s" % len(self)
        if self.length != len(self):
            return "words.length BETWEEN %s AND %s" % (self.length, len(self))
        else:
            return "words.length=%s" % len(self)

    def __repr__ (self):
        """Debug representation showing the parsed pattern components."""
        return "<%s '%s' wildcard=%s blanks=%s sets=%s letters=%s>" % (self.__class__.__name__, self.pattern, self.wildcard, self.blanks, self.sets, self.letters)
class SubAnagramPattern (AnagramPattern):
    """Convenience subclass of AnagramPattern with subanagram mode forced on.

    Construction and usage are identical to AnagramPattern; whatever the
    caller passes for ``subanagram`` is ignored and True is used instead.
    """
    subanagram = True

    def __init__ (self, subanagram=True):
        """Initialise with subanagram forced to True.

        :param subanagram: Ignored; present only so the constructor
        signature matches AnagramPattern's.
        """
        super(SubAnagramPattern, self).__init__(subanagram=True)
class Pattern (AnagramPattern):
    """
    This is a simple parser that converts a pattern string into a regular expression.
    """
    # Cached compiled regular expression; built lazily by as_regexp.
    _regexp = None

    def __init__ (self, subanagram=False):
        # Positional patterns are never subanagrams, whatever the caller says.
        super(Pattern, self).__init__(subanagram=False)

    def as_regexp (self):
        """Compile (once) and return the regex equivalent of this pattern:
        '?' becomes '(.)', '*' becomes '(.+)', and bracketed sets become
        capturing character classes; the expression is anchored at the end."""
        if self._regexp is None:
            expr = self.pattern.replace("?", "(.)").replace("*", "(.+)")
            if not expr.endswith("$"):
                expr = expr + "$"
            expr = expr.replace("[", "([").replace("]", "])")
            self._regexp = re.compile(expr)
        return self._regexp
|
from django.urls import path, include
from django.urls.conf import re_path
from .views import *
from rest_framework.routers import DefaultRouter
#router = DefaultRouter()
#router.register('ejes', EjeViewSet, basename='ejes')
#router.register('instituciones', InstitucionViewSet, basename='instituciones')
#router.register('fuenteInformaciones', FuenteInformacionViewSet, basename='fuenteInformaciones')
#router.register('departamentos', DepartamentoViewSet, basename='departamentos')
#router.register('factorDesagregaciones', FactorDesagregacionViewSet, basename='factorDesagregaciones')
#router.register('resultados', ResultadoViewSet, basename='resultados')
# URL namespace for this app.
app_name = 'visor'

urlpatterns = [
    # Indicator file download
    path("archivo/indicador/<int:indicador>", ArchivoIndicadorView.as_view(), name="archivo"),
    # Document downloads
    path("descarga/", IndicadorDescargaView.as_view(), name="indicador-descarga"),
    path("descarga/<int:pk>", DocumentoDescargaView.as_view(), name="documento-descarga"),
    path("descarga/documento/<int:documento>", DocumentoView.as_view(), name="documento"),
    # API root
    path("api/", api_root, name="api_root"),
    # Eje (axis) API
    path("eje/lista/", EjeList.as_view(), name="eje-lista"),
    path("eje/detalle/<int:pk>", EjeDetail.as_view(), name="eje-detalle"),
    # Resultado (result) API
    path("resultado/lista/", ResultadoList.as_view(), name="resultado-lista"),
    path("resultado/detalle/<int:pk>", ResultadoDetail.as_view(), name="resultado-detalle"),
    # Institucion (institution) API
    path("institucion/lista/", InstitucionList.as_view(), name="institucion-lista"),
    path("institucion/detalle/<int:pk>", InstitucionDetail.as_view(), name="institucion-detalle"),
    # Departamento (department) API
    path("departamento/lista/", DepartamentoList.as_view(), name="departamento-lista"),
    path("departamento/detalle/<int:pk>", DepartamentoDetail.as_view(), name="departamento-detalle"),
    # FactorDesagregacion (disaggregation factor) API
    path("factorDesagregacion/lista/", FactorDesagregacionList.as_view(), name="factorDesagregacion-lista"),
    path("factorDesagregacion/detalle/<int:pk>", FactorDesagregacionDetail.as_view(), name="factorDesagregacion-detalle"),
    # FuenteInformacion (information source) API
    path("fuenteInformacion/lista/", FuenteInformacionList.as_view(), name="fuenteInformacion-lista"),
    path("fuenteInformacion/detalle/<int:pk>", FuenteInformacionDetail.as_view(), name="fuenteInformacion-detalle"),
    # Municipio (municipality) API
    path("municipio/lista/", MunicipioList.as_view(), name="municipio-lista"),
    path("municipio/detalle/<int:pk>", MunicipioDetail.as_view(), name="municipio-detalle"),
    # Area API
    path("area/lista/", AreaList.as_view(), name="area-lista"),
    path("area/detalle/<int:pk>", AreaDetail.as_view(), name="area-detalle"),
    # ValorFactor (factor value) API
    path("valorFactor/lista/", ValorFactorList.as_view(), name="valorFactor-lista"),
    path("valorFactor/lista/<int:categoria>", ValorFactorList.as_view(), name="valorFactor-lista-filtrada"),
    path("valorFactor/detalle/<int:pk>", ValorFactorDetail.as_view(), name="valorFactor-detalle"),
    # UnidadMedida (unit of measure) API
    path("unidadMedida/lista/", UnidadMedidaList.as_view(), name="unidadMedida-lista"),
    path("unidadMedida/detalle/<int:pk>", UnidadMedidaDetail.as_view(), name="unidadMedida-detalle"),
    # Variable API
    path("variable/lista/", VariableList.as_view(), name="variable-lista"),
    path("variable/detalle/<int:pk>", VariableDetail.as_view(), name="variable-detalle"),
    # Indicador (indicator) API
    path("indicador/lista/", IndicadorList.as_view(), name="indicador-lista"),
    path("indicador/detalle/<int:pk>", IndicadorDetail.as_view(), name="indicador-detalle"),
    # MedicionIndicador (indicator measurement) API
    path("medicion/lista/", MedicionIndicadorList.as_view(), name="medicion-lista"),
    path("medicion/lista/<int:indicador>/", MedicionIndicadorList.as_view(), name="medicion-lista-indicador"),
    path("medicion/detalle/<int:pk>", MedicionIndicadorDetail.as_view(), name="medicion-detalle"),
    # Chart API
    path("grafica/<int:indicador>", GraficaV.as_view(), name="grafica"),
    # Indicator chart
    path("grafica/", IndicadorG.as_view(), name="indicador-grafica"),
    # Indicator select widget
    path("indicadores/select", IndicadorSelect.as_view(), name="indicador-select"),
    # Result recommendation
    path("resultado/recomendacion/<int:pk>", ResultadoRecomendacion.as_view(), name="resultado-recomendacion"),
]# + router.urls
|
"""Selecting simulations from a scan based on parameters.
"""
import os
import pickle
from copy import deepcopy
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, Union
import numpy
from tqdm import tqdm
from mlxtk.cwd import WorkingDir
from mlxtk.log import get_logger
from mlxtk.parameters import Parameters, get_variables
from mlxtk.util import make_path, map_parallel_progress
LOGGER = get_logger(__name__)
class ParameterSelection:
    """A selection of (index, Parameters) pairs taken from a parameter scan.

    Provides filtering, grouping, and path-resolution helpers over the
    parameter sets of a scan rooted at ``path`` (``path`` may be None for a
    detached selection).

    Fix: ``copy()`` previously had a misplaced closing parenthesis, so
    ``self.path`` and the index list were passed as extra arguments to
    :func:`deepcopy` instead of to the :class:`ParameterSelection`
    constructor -- the copy lost its path and original indices (and crashed
    when a path was set). Annotations referencing ``Parameters`` are written
    as forward references so the class does not depend on import order.
    """

    def __init__(
        self,
        parameters: Iterable["Parameters"],
        path: Union[str, Path] = None,
        indices: Iterable[int] = None,
    ):
        # Pair each parameter set with its scan index; enumerate from 0 when
        # no explicit indices are supplied.
        if indices is None:
            self.parameters = [(i, p) for i, p in enumerate(parameters)]
        else:
            self.parameters = list(zip(indices, parameters))

        self.path = None if path is None else make_path(path).resolve()

    def get_variable_names(self) -> List[str]:
        """Names of the parameters that actually vary across the selection."""
        return get_variables([p[1] for p in self.parameters])[0]

    def copy(self) -> "ParameterSelection":
        """Return a deep copy preserving the path and the original indices."""
        return ParameterSelection(
            deepcopy([p[1].copy() for p in self.parameters]),
            self.path,
            [p[0] for p in self.parameters],
        )

    def partition_single(self, parameter_name: str):
        """Split the selection by the values of one parameter, removing that
        parameter from each partition's parameter sets."""
        parameter_values = self.get_values(parameter_name)
        # value -> [indices, parameter sets]
        partitions = {
            value: [[], []] for value in parameter_values
        }  # type: Dict[Any, List[list]]
        for index, parameter in self.parameters:
            partitions[parameter[parameter_name]][0].append(index)
            partitions[parameter[parameter_name]][1].append(
                parameter.copy().remove_parameter(parameter_name)
            )

        return {
            value: ParameterSelection(
                partitions[value][1], self.path, partitions[value][0]
            )
            for value in parameter_values
        }

    def partition(self, parameter_names: Union[str, List[str]]):
        """Partition by one parameter; multi-parameter partitioning is TODO."""
        if isinstance(parameter_names, str):
            return self.partition_single(parameter_names)

        raise NotImplementedError(
            "Only partitioning according to a single parameter is implemented yet."
        )

    def fix_parameter(self, name: str, value: Any) -> "ParameterSelection":
        """Select by the value of a single parameter.

        Args:
            name: Name of the parameter to fix.
            value: Desired value for the parameter.

        Returns:
            A new ParameterSelection containing only matching parameter sets.
        """
        entries = []
        indices = []
        for index, entry in self.parameters:
            if entry[name] == value:
                entries.append(entry)
                indices.append(index)
        return ParameterSelection(entries, self.path, indices)

    def group_by(self, name: str):
        """Map each distinct value of *name* to the matching sub-selection."""
        return {
            value: self.fix_parameter(name, value) for value in self.get_values(name)
        }

    def select_parameter(self, name: str, values: Iterable[Any]):
        """Select by multiple values of a single parameter.

        Args:
            name: Name of the parameter to filter on.
            values: Accepted values for the parameter.

        Returns:
            A new ParameterSelection containing only matching parameter sets.
        """
        return ParameterSelection(
            [entry[1] for entry in self.parameters if entry[1][name] in values],
            self.path,
            [entry[0] for entry in self.parameters if entry[1][name] in values],
        )

    def select_parameters(self, names: Iterable[str], values: Iterable[Iterable[Any]]):
        """Select by multiple values of multiple parameters (applied in turn).

        Args:
            names: Names of the parameters to filter on.
            values: For each name, the accepted values.

        Returns:
            A new ParameterSelection containing only matching parameter sets.
        """
        selection = self
        for name, vals in zip(names, values):
            selection = selection.select_parameter(name, vals)
        return selection

    def get_values(self, name: str) -> List[Any]:
        """Get all unique values for a parameter.

        Args:
            name: Name of the parameter.

        Returns:
            All unique values of the given parameter (as a list).
        """
        return list({entry[1][name] for entry in self.parameters})

    def get_path(self, parameters: "Parameters") -> Path:
        """Return the simulation path matching *parameters*.

        Raises:
            RuntimeError: No entry shares the common parameters.
        """
        for entry, path in zip(self.parameters, self.get_paths()):
            if parameters.has_same_common_parameters(entry[1]):
                return path

        raise RuntimeError("cannot find path for parameters: " + str(parameters))

    def get_paths(self) -> List[Path]:
        """Compute the paths for all included parameter sets.

        Raises:
            ValueError: No path is provided for this ParameterSelection.

        Returns:
            Paths of all included parameter sets.
        """
        if not self.path:
            raise ValueError("No path is specified for ParameterSelection")

        return [self.path / "by_index" / str(i) for i, _ in self.parameters]

    def generate_paths(self, subpath: Union[str, Path]) -> List[Path]:
        """Append *subpath* to every simulation path of the selection."""
        return [Path(path) / subpath for path in self.get_paths()]

    def get_parameters(self) -> List["Parameters"]:
        """Get all included parameter sets.

        Returns:
            A list of all included parameter sets.
        """
        return [parameter for _, parameter in self.parameters]

    def get_variable_values(self) -> Tuple[List[str], Dict[str, numpy.array]]:
        """Return the varying parameter names and, per name, an array of the
        value that each parameter set takes."""
        variables = self.get_variable_names()
        values = {var: [] for var in variables}
        for _, parameters in self.parameters:
            for var in variables:
                values[var].append(parameters[var])
        for var in variables:
            values[var] = numpy.array(values[var])
        return variables, values

    def foreach(
        self, func: Callable[[int, str, "Parameters"], Any], parallel=True
    ) -> List[Any]:
        """Call a function for each included parameter set.

        Args:
            func: Function to call for each parameter set. It takes the index
                of the parameter set as the first argument, the path as a
                second argument and the parameter set as the third argument.
            parallel: Dispatch the calls through a parallel map when True.

        Returns:
            The provided function may return values. This function returns a
            list of all return values created by calling the function for each
            parameter set.
        """

        def helper(item):
            # Unpack [index, path, parameters] for the parallel map.
            return func(item[0], item[1], item[2])

        work = [
            [entry[0], path, entry[1]]
            for entry, path in zip(self.parameters, self.get_paths())
        ]

        if parallel:
            return map_parallel_progress(helper, work)

        return [
            func(entry[0], path, entry[1])
            for entry, path in tqdm(list(zip(self.parameters, self.get_paths())))
        ]

    def plot_foreach(
        self, name: str, func: Callable[[int, str, "Parameters"], None]
    ) -> Optional[List[Any]]:
        """Run *func* for each parameter set from inside ``plots/<name>``,
        creating the directory if needed."""
        if not self.path:
            raise RuntimeError("No path set for parameter selection")

        plot_dir = self.path / "plots" / name
        if not plot_dir.exists():
            os.makedirs(plot_dir)

        with WorkingDir(plot_dir):
            return self.foreach(func)

    def __str__(self):
        return "\n".join(f"{i}: {p}" for i, p in self.parameters)
def load_scan(path: Union[str, Path]) -> ParameterSelection:
    """Load all parameter sets of a parameter scan.

    Args:
        path: Path to the parameter scan containing the file ``scan.pickle``.

    Returns:
        A ParameterSelection over every parameter set in the scan, rooted
        at ``path``.
    """
    path = make_path(path)
    # NOTE(review): scan.pickle is assumed to be trusted, locally generated
    # data -- pickle.load must never be pointed at untrusted files.
    with open(path / "scan.pickle", "rb") as fptr:
        obj = pickle.load(fptr)
        return ParameterSelection((parameter for parameter in obj), path)
def group_scans_by(
    selections: "List[ParameterSelection]", parameter_name: str
) -> "Dict[Any, List[ParameterSelection]]":
    """Group scans by the single value each scan has for *parameter_name*.

    Each selection must contain exactly one distinct value for the
    parameter; a RuntimeError is raised otherwise.

    Fix: the result was previously keyed by the parameter *name* and filled
    with the parameter *values*, contradicting the declared
    ``Dict[Any, List[ParameterSelection]]`` return type; it now maps each
    parameter value to the list of selections carrying that value.

    Args:
        selections: Parameter selections, one per scan.
        parameter_name: Name of the grouping parameter.

    Returns:
        Mapping from parameter value to the selections with that value.
    """
    result = {}
    for selection in selections:
        scan_values = selection.get_values(parameter_name)
        # Enforce one value per scan before grouping.
        if len(scan_values) != 1:
            raise RuntimeError(
                'exactly one parameter for "{}" required per scan'.format(
                    parameter_name
                )
            )
        result.setdefault(scan_values.pop(), []).append(selection)
    return result
|
<filename>page_rep.py<gh_stars>0
import tkinter as tk
import numpy as np
def main():
    """Open the launcher window: one button that starts the
    page-replacement input dialog."""
    root = tk.Tk()
    root.title("OS Simulator")
    root.minsize(400, 300)
    launcher = tk.Button(
        master=root,
        text="Page Replacement Algorithm",
        command=getinputPagerep,
        background="cyan",
    )
    launcher.pack()
    root.mainloop()
def getinputPagerep():
    """Ask for the frame count and sequence length, then open the
    sequence-entry window via get_input_Pagerep(m, n)."""
    input_win = tk.Tk()
    input_win.title("Page Replacement Algorithm Inputs")
    # Row 0: number of frames; row 1: length of the reference sequence.
    frame_0_0 = tk.Frame(master=input_win, borderwidth=1)
    frame_0_0.grid(row=0, column=0)
    m_frame_lbl = tk.Label(master=frame_0_0, text="Number of frames",background="yellow")
    m_frame_lbl.pack()
    frame_1_0 = tk.Frame(master=input_win, borderwidth=1)
    frame_1_0.grid(row=1, column=0)
    n_seq_lbl = tk.Label(master=frame_1_0, text="Size of the sequence",background="yellow")
    n_seq_lbl.pack()
    frame_0_1 = tk.Frame(master=input_win, borderwidth=1)
    frame_0_1.grid(row=0, column=1)
    m_frame_entry = tk.Entry(master=frame_0_1)
    m_frame_entry.pack()
    frame_1_1 = tk.Frame(master=input_win, borderwidth=1)
    frame_1_1.grid(row=1, column=1)
    n_seq_entry = tk.Entry(master=frame_1_1)
    n_seq_entry.pack()

    def submit():
        # Read both entries, close this window, and open the next one.
        # NOTE(review): non-numeric input raises ValueError here -- no
        # validation is performed.
        m = int(m_frame_entry.get())
        n = int(n_seq_entry.get())
        input_win.destroy()
        get_input_Pagerep(m,n)

    frame_2_1 = tk.Frame(master=input_win, borderwidth=1)
    frame_2_1.grid(row=2, column=1)
    submit_btn = tk.Button(master=frame_2_1, text="Submit", command=submit,background="green",foreground="white",highlightcolor="blue")
    submit_btn.pack()
    input_win.mainloop()
def __fifolruoptimal(seq,n,m):
    """Simulate FIFO, LRU, and Optimal page replacement on *seq* with *m*
    frames (sequence length *n*), printing each step to stdout and showing
    the final frame contents and fault counts in a Tk window.

    NOTE(review): the trailing commas after print(...) calls are Python-2
    remnants; in Python 3 they create throwaway one-element tuples and are
    harmless but should eventually be removed. The builtins ``min`` and
    ``max`` are shadowed by locals inside the LRU/Optimal sections.
    """
    # Per-algorithm final frame contents and fault counts, in order
    # [FIFO, LRU, OPTIMAL].
    lst=[]
    pagef=[]
    print("1.FIFO")
    # f: index of the frame to be replaced next (circular pointer).
    f = -1
    page_faults = 0
    page = []
    # -1 marks an empty frame.
    for i in range(m):
        page.append(-1)
    for i in range(n):
        # flag == 1 when seq[i] is already resident (no fault).
        flag = 0
        for j in range(m):
            if(page[j] == seq[i]):
                flag = 1
                break
        if flag == 0:
            # Replace the oldest frame (round-robin pointer).
            f=(f+1)%m
            page[f] = seq[i]
            page_faults+=1
            print("\n%d ->" % (seq[i])),
            for j in range(m):
                if page[j] != -1:
                    print (page[j]),
                else:
                    print ("-"),
        else:
            print ("\n%d -> No Page Fault" % (seq[i])),
    print ("\n Total page faults : %d.\n\n" % (page_faults))
    lst.append(page)
    pagef.append(page_faults)
    print("2.LRU")
    # x: index of the frame chosen for replacement.
    x = 0
    page_faults = 0
    page = []
    for i in range(m):
        page.append(-1)
    for i in range(n):
        flag = 0
        for j in range(m):
            if(page[j] == seq[i]):
                flag = 1
                break
        if flag == 0:
            if page[x] != -1:
                # Find the resident page whose most recent use is farthest
                # in the past (smallest last-use index).
                min = 999
                for k in range(m):
                    flag = 0
                    j = i
                    # Scan backwards for the last use of page[k].
                    while j>=0:
                        j-=1
                        if(page[k] == seq[j]):
                            flag = 1
                            break
                    if (flag == 1 and min > j):
                        min = j
                        x = k
            page[x] = seq[i]
            # NOTE(review): advancing x after an LRU replacement looks like a
            # FIFO remnant -- verify against the intended LRU behaviour.
            x=(x+1)%m
            page_faults+=1
            print ("\n%d ->" % (seq[i])),
            for j in range(m):
                if page[j] != -1:
                    print (page[j]),
                else:
                    print ("-"),
        else:
            print ("\n%d -> No Page Fault" % (seq[i])),
    print ("\n Total page faults : %d.\n\n" % (page_faults))
    lst.append(page)
    pagef.append(page_faults)
    print("3.OPTIMAL")
    x = 0
    page_faults = 0
    page = []
    for i in range(m):
        page.append(-1)
    for i in range(n):
        flag = 0
        for j in range(m):
            if(page[j] == seq[i]):
                flag = 1
                break
        if flag == 0:
            if page[x] != -1:
                # Evict the resident page whose next use is farthest in the
                # future.  NOTE(review): pages never used again keep flag == 0
                # and are not considered as victims -- confirm intended.
                max = -1
                for k in range(m):
                    flag = 0
                    j = i
                    # Scan forwards for the next use of page[k].
                    while j<n-1:
                        j+=1
                        if(page[k] == seq[j]):
                            flag = 1
                            break
                    if (flag == 1 and max < j):
                        max = j
                        x = k
            page[x] = seq[i]
            x=(x+1)%m
            page_faults+=1
            print ("\n%d ->" % (seq[i])),
            for j in range(m):
                if page[j] != -1:
                    print (page[j]),
                else:
                    print ("-"),
        else:
            print ("\n%d -> No Page Fault" % (seq[i])),
    print ("\n Total page faults : %d." % (page_faults))
    lst.append(page)
    pagef.append(page_faults)
    # Summary string shown in the result window (backslash continues the
    # string literal across the line break without inserting characters).
    text="\nFinal frame contents:\n1.FIFO "+str(lst[0])+"\n2.LRU "+str(lst[1])+"\n3.OPTIMAL "+str(lst[2])+"\nTotal page faults:\n\
1.FIFO :"+str(pagef[0])+"\n2.LRU :"+str(pagef[1])+"\n3.OPTIMAL :"+str(pagef[2])+"\n"
    __fifolruoptimal_win = tk.Tk()
    __fifolruoptimal_win.title("Page Replacement Algorithm Output")
    __fifolruoptimal_win.minsize(400, 300)
    frame = tk.Frame(master=__fifolruoptimal_win)
    seq_lbl = tk.Label(master=frame, text=text)
    seq_lbl.pack()
    frame.pack()
    __fifolruoptimal_win.mainloop()
def get_input_Pagerep(m, n):
    """Collect a page-reference sequence from the user via a Tk form.

    Builds a window with ``n`` entry boxes; on Submit the values are parsed
    (anything that is not an integer falls back to 0) and handed to
    __fifolruoptimal together with the frame count ``m``.
    """
    window = tk.Tk()
    window.title("Page Replacment Algorithm Input")
    entries = []
    header = tk.Frame(window, borderwidth=1)
    header.grid(row=0, column=0)
    tk.Label(master=header,
             text="Sequence:\n(Input only positive values)",
             background="yellow").pack()
    # One entry box per element of the reference sequence.
    for row in range(n):
        cell = tk.Frame(window, borderwidth=1)
        cell.grid(row=row + 1, column=2)
        box = tk.Entry(master=cell)
        entries.append(box)
        box.pack()

    def on_submit():
        # Parse every entry; non-integers count as 0.
        values = []
        for box in entries:
            raw = box.get()
            try:
                parsed = int(raw)
            except ValueError:
                parsed = 0
            values.append(parsed)
        seq = np.array(values).reshape(n)
        window.destroy()
        __fifolruoptimal(seq, n, m)

    submitspace = tk.Frame(master=window, borderwidth=1)
    submitspace.grid(row=m + 3, column=2 * n)
    quitspace = tk.Frame(master=window, borderwidth=1)
    quitspace.grid(row=m + 3, column=3)
    tk.Button(master=quitspace, text="Quit", command=window.destroy,
              background="red", foreground="white").pack()
    tk.Button(master=submitspace, text="Submit", command=on_submit,
              background="green", foreground="white").pack()
    window.mainloop()
# Script entry point: launch the page-replacement demo UI.
if __name__ == "__main__":
    main()
|
'''
Created on Mar 29, 2017
@author: gcampagn
'''
import os
import sys
import numpy as np
import tensorflow as tf
import matplotlib
matplotlib.use('GTK3Cairo')
import matplotlib.pyplot as plt
from util.seq2seq import Seq2SeqEvaluator
from util.loader import unknown_tokens, load_data
from util.general_utils import get_minibatches
from model import initialize
def show_pca(X, sentences):
    """Scatter-plot the 2-D PCA projection and label each point with its sentence."""
    plt.figure()
    plt.plot(X[:, 0], X[:, 1], 'x')
    for point, label in zip(X, sentences):
        # Offset the text slightly so it does not sit on top of the marker.
        plt.text(point[0] + 0.01, point[1] - 0.01, label,
                 horizontalalignment='left', verticalalignment='top')
    plt.show()
def reconstruct_sentences(inputs, end_of_string, reverse):
    """Decode token-id sequences back into (truncated) display strings.

    Args:
        inputs: iterable of token-id sequences.
        end_of_string: token id marking end-of-sentence; the first occurrence
            and everything after it are dropped (sequences without it are
            kept whole).
        reverse: mapping from token id to token string.

    Returns:
        List of space-joined sentences; any sentence longer than 50
        characters is truncated and suffixed with '...'.
    """
    sentences = [None] * len(inputs)
    # `token_ids` was previously named `input`, shadowing the builtin.
    for i, token_ids in enumerate(inputs):
        token_ids = list(token_ids)
        try:
            token_ids = token_ids[:token_ids.index(end_of_string)]
        except ValueError:
            pass  # no EOS marker: keep the full sequence
        sentences[i] = ' '.join(reverse[t] for t in token_ids)
        if len(sentences[i]) > 50:
            sentences[i] = sentences[i][:50] + '...'
    return sentences
def run():
    """Fit a 2-component PCA on the encoder's final states and plot it.

    Restores a trained model, runs the train set through the encoder to
    compute the PCA basis (via tf.svd), then projects the PCA set and shows
    the labelled scatter plot.

    NOTE(review): this file uses Python 2 print statements and TF1 Sessions;
    it must run under Python 2 with TensorFlow 1.x.
    """
    if len(sys.argv) < 6:
        print "** Usage: python " + sys.argv[0] + " <<Benchmark: tt/geo>> <<Model: bagofwords/seq2seq>> <<Input Vocab>> <<Word Embeddings>> <<Model Directory>> <<Train Set>> <<PCA Set>>"
        sys.exit(1)

    np.random.seed(42)

    benchmark = sys.argv[1]
    config, words, reverse, model = initialize(benchmark=benchmark, model_type=sys.argv[2], input_words=sys.argv[3], embedding_file=sys.argv[4]);
    model_dir = sys.argv[5]

    train_data = load_data(sys.argv[6], words, config.grammar.dictionary,
                           reverse, config.grammar.tokens,
                           config.max_length)
    pca_data = load_data(sys.argv[7], words, config.grammar.dictionary,
                         reverse, config.grammar.tokens,
                         config.max_length)
    config.apply_cmdline(sys.argv[8:])

    print "unknown", unknown_tokens

    # Tell TensorFlow that the model will be built into the default Graph.
    # (not required but good practice)
    with tf.Graph().as_default():
        # Build the model and add the variable initializer Op
        model.capture_final_encoder_state = True
        model.build()
        loader = tf.train.Saver()

        # Create a session for running Ops in the Graph
        with tf.Session() as sess:
            loader.restore(sess, os.path.join(model_dir, 'best'))

            inputs, input_lengths, _, _ = train_data

            # For LSTM cells concatenate cell state and hidden state; for
            # other cell types the final state tensor is used directly.
            final_encoder_state = None
            final_encoder_size = None
            if config.rnn_cell_type == 'lstm':
                final_encoder_state = tf.concat([model.final_encoder_state[-1].c, model.final_encoder_state[-1].h], 1)
                final_encoder_size = 2 * config.hidden_size
            else:
                final_encoder_state = model.final_encoder_state[-1]
                final_encoder_size = config.hidden_size

            final_states_arrays = []
            # capture all the final encoder states
            for input_batch, input_length_batch in get_minibatches([inputs, input_lengths],
                                                                   config.batch_size):
                feed_dict = model.create_feed_dict(input_batch, input_length_batch)
                state_array = sess.run(final_encoder_state, feed_dict=feed_dict)
                #print state_array.shape
                final_states_arrays.append(state_array)

            X = np.concatenate(final_states_arrays, axis=0)
            assert X.shape == (len(inputs), final_encoder_size)
            X = tf.constant(X)

            # PCA: center the data and take the top-2 right singular vectors.
            mean = tf.reduce_mean(X, axis=0)
            centered_X = X - mean
            S, U, V = tf.svd(centered_X)

            # take only the top 2 components
            V = V[:2]

            V_array, mean_array = sess.run([V, mean])

            inputs, input_lengths, _, _ = pca_data

            # Project the PCA set onto the basis fitted on the train set.
            X = final_encoder_state
            centered_X = X - tf.constant(mean_array)
            transformed_X = tf.matmul(centered_X, tf.constant(V_array.T))

            feed_dict = model.create_feed_dict(inputs, input_lengths)
            X_pca = sess.run(transformed_X, feed_dict=feed_dict)

            sentences = reconstruct_sentences(inputs, words['<<EOS>>'], reverse)
            show_pca(X_pca, sentences)
if __name__ == '__main__':
run() |
<reponame>ywen666/code-transformer<filename>scripts/deduplicate-java-pretrain.py
"""
To generate the huge java-pretrain dataset, we first manually merge java-small, java-medium and java-large into the
same folder. It can then happen that in the new larger training set there are Java methods from the same projects or
even the same methods as in the test partition for java-small for example. This would skew the final results and
violate the underlying design principle behind the code2seq datasets to split the partitions by project.
Hence, in this script we search for similar pairs of java classes between the train partition of java-pretrain and
the valid/test partition of java-small/java-medium.
Similarity is defined as follows:
1) Only pairs of Java class with the same file name are considered, otherwise comparing every possible pair kills us.
Usually, if two Java classes are the same or have only been altered slightly, their file names will be the same.
2) Similarity of a candidate pair is computed using difflib's SequenceMatcher
3) If similarity exceeds a certain threshold (0.7 in our experiments) the corresponding file is marked to be deleted
from the train partition of java-pretrain
This script does not delete files, it only computes the list of files to be deleted and stores it in
`{CODE2SEQ_RAW_DATA_PATH}/java-pretrain/files_to_delete_{dataset}_{partition}.p`
This pickled file can then be loaded in the deduplication notebook where further project-level deletion is done to
minimize the risk of having Java methods from the respective code2seq valid/test partitions in the new java-pretrain
train partition.
"""
import argparse
import re
from collections import defaultdict
from difflib import SequenceMatcher
from pathlib import Path
from joblib import parallel_backend, Parallel, delayed
from tqdm import tqdm
from code_transformer.preprocessing.datamanager.base import DataManager
from code_transformer.utils.io import save_pickled
from code_transformer.env import CODE2SEQ_RAW_DATA_PATH
# Similarity threshold that defines when to delete a file in java-pretrain.
# NOTE(review): the constant name has a typo (SIMILARTY); kept as-is because
# it is referenced below.
FILE_SIMILARTY = 0.7
NUM_PROCESSES = 12  # joblib worker processes
BATCH_SIZE = 5  # (search_file, candidates) pairs per parallel task

# This module runs argument parsing at import time; it is a CLI script.
parser = argparse.ArgumentParser()
parser.add_argument("dataset", choices=["java-small", "java-medium"])
parser.add_argument("partition", choices=["validation", "test"])
args = parser.parse_args()

data_path_code2seq = CODE2SEQ_RAW_DATA_PATH
dataset = args.dataset
partition = args.partition
# Partition to deduplicate against (e.g. java-small/test).
projects_folder = Path(f"{data_path_code2seq}/{dataset}/{partition}")

# The train partition of java-pretrain is the side files get deleted from.
reference_dataset = 'java-pretrain'
reference_partition = 'training'
reference_projects_folder = Path(f"{data_path_code2seq}/{reference_dataset}/{reference_partition}")
# NOTE(review): `reference_projects` appears unused in this file.
reference_projects = {p for p in reference_projects_folder.iterdir()}
def get_files_recursive(folder):
    """Depth-first list of all regular files under *folder*.

    A non-directory argument is returned as a single-element list.
    """
    if not folder.is_dir():
        return [folder]
    collected = []
    for child in folder.iterdir():
        collected.extend(get_files_recursive(child) if child.is_dir() else [child])
    return collected
def remove_comments(string):
    """Strip // and /* */ comments from Java source, leaving string literals intact."""
    # Group 1 captures quoted strings (double or single) so they survive;
    # group 2 captures //single-line and /* multi-line */ comments.
    pattern = r"(\".*?\"|\'.*?\')|(/\*.*?\*/|//[^\r\n]*$)"
    regex = re.compile(pattern, re.MULTILINE | re.DOTALL)

    def _replacer(match):
        # A hit on group 2 is a genuine (non-quoted) comment: drop it.
        # Otherwise the match was a quoted string: keep it verbatim.
        return "" if match.group(2) is not None else match.group(1)

    return regex.sub(_replacer, string)
def read_file(f):
    """Return the text content of path *f*.

    Tries UTF-8 (the pathlib default) first and falls back to cp1252, which
    covers Windows-encoded files in the corpus.
    """
    try:
        return f.read_text()
    except UnicodeDecodeError:
        # Not valid UTF-8; retry with the legacy Windows codec.
        return f.read_text(encoding='cp1252')
def similar(a, b):
    """Ratio in [0, 1] between text *a* and the comment-stripped content of file *b*."""
    candidate_text = remove_comments(read_file(b))
    return SequenceMatcher(None, a, candidate_text).ratio()
def find_files_to_delete(batch):
    """Return candidates whose similarity to their search file exceeds FILE_SIMILARTY.

    *batch* is an iterable of (search_file, candidate_files) pairs; files
    that cannot be decoded are skipped silently.
    """
    marked = []
    for search_file, candidates in batch:
        try:
            reference_text = remove_comments(search_file.read_text())
        except UnicodeDecodeError:
            continue  # unreadable search file: nothing to compare against
        for candidate in candidates:
            try:
                if similar(reference_text, candidate) > FILE_SIMILARTY:
                    marked.append(candidate)
            except UnicodeDecodeError:
                pass  # unreadable candidate: skip it
    return marked
if __name__ == '__main__':
    print("Indexing files...")
    # Map file stem -> all files with that stem in the java-pretrain train
    # partition; only same-named files are ever compared (see module docstring).
    file_lookup = defaultdict(list)
    for i, p in enumerate(reference_projects_folder.iterdir()):
        for f in get_files_recursive(p):
            file_lookup[f.stem].append(f)

    results = dict()
    files_to_delete = []
    with parallel_backend("loky") as parallel_config:
        execute_parallel = Parallel(NUM_PROCESSES, verbose=0)
        print(len(list(projects_folder.iterdir())))
        for i, project in enumerate(tqdm(list(projects_folder.iterdir()))):
            search_files = get_files_recursive(project)
            num_files = len(search_files)
            print(project.stem, num_files)
            # Pair every search file with its same-named candidates and batch
            # the pairs for the worker processes.
            batch_generator = DataManager.to_batches(((search_file, file_lookup[search_file.stem]) for search_file in
                                                      search_files),
                                                     BATCH_SIZE)
            result = execute_parallel(
                delayed(find_files_to_delete)(batch) for batch in batch_generator)
            for res in result:
                files_to_delete.extend(res)

    # Persist the deletion list; actual deletion happens in a notebook.
    save_pickled(set(files_to_delete), f"{data_path_code2seq}/java-pretrain/files_to_delete_{dataset}_{partition}")
|
import os
import sys
import random
from psychopy import clock, core, event, logging, visual
from datetime import datetime
script_dir = os.path.dirname(os.path.abspath(__file__))
class Hampton2006(object):
    '''
    >>> env = Hampton2006(deterministic_reward=True, deterministic_reversal=True)
    >>> env.reset()
    >>> env.state = 0
    >>> env.seed(42)
    >>> env.step(0)
    (1, 0.25, False, {})
    >>> env.step(1)
    (0, -0.25, False, {})
    >>> [env.step(0) for _ in range(3)]
    [(1, 0.25, False, {}), (1, 0.25, False, {}), (1, 0.25, False, {})]
    >>> env.state
    0
    >>> env.step(0)
    (1, 0.25, False, {})
    >>> env.state
    1
    '''

    def __init__(self, logging=None, deterministic_reward=False, deterministic_reversal=False):
        # Two probe actions; binary success/failure observations; the latent
        # state records which action is currently the rewarded one.
        self.action_space = range(2)
        # Observations: 0: Failure, 1: Success
        self.observation_space = range(2)
        # States: 0: Rewarded action is 0, 1: Rewarded action is 1
        self.state_space = range(2)
        self.rnd = random.Random()
        self.logging = logging
        self.deterministic_reward = deterministic_reward
        self.deterministic_reversal = deterministic_reversal
        self.num_reversals = 0

    def log(self, msg):
        # No-op unless a logging module was injected at construction time.
        if self.logging is None:
            return
        self.logging.log(level=self.logging.EXP, msg='Hampton2006: ' + msg)

    def seed(self, seed):
        """Seed the environment's private RNG."""
        self.rnd.seed(seed)

    def reset(self):
        """Start a fresh episode with a randomly drawn rewarded action."""
        self.state = self.rnd.choice(self.state_space)
        self.correct_count = 0
        self.num_reversals = 0
        return None

    def step(self, action):
        """Take *action*; return (observation, reward, done, info)."""
        assert action in self.action_space
        # One stimulus was designated the correct stimulus: choosing it leads
        # to a monetary reward (winning 25 cents) on 70% of occasions and a
        # monetary loss (losing 25 cents) 30% of the time. The incorrect
        # stimulus pays off only 40% of the time.
        chose_correct = (action == self.state)
        if chose_correct:
            self.correct_count += 1
            prob_reward = 0.7
        else:
            self.correct_count = 0
            prob_reward = 0.4
        if self.deterministic_reward:
            rewarded = chose_correct
        else:
            rewarded = self.rnd.random() < prob_reward
        observation = 1 if rewarded else 0
        reward = +0.25 if rewarded else -0.25
        self.log('Action={} State={} Rewarded={} CorrectCount={}'.format(
            action, self.state, rewarded, self.correct_count))
        # After four consecutive correct choices, the contingencies reverse
        # with probability 0.25 on each successive trial (always, when
        # deterministic_reversal is set).
        if self.correct_count >= 4 and (
                self.deterministic_reversal or
                self.rnd.random() < 0.25):
            self.state = 1 - self.state
            self.correct_count = 0
            self.num_reversals += 1
            self.log('Reversal')
        return observation, reward, False, {}
# NOTE(review): '__main__xxx' never equals __name__, so this self-test block
# is effectively disabled — presumably on purpose; rename to '__main__' to
# re-enable `python thisfile.py test`. The rest of the script below runs
# unconditionally at import/execution time.
if __name__ == '__main__xxx':
    args = sys.argv[1:]
    if args[0] == 'test':
        import doctest
        doctest.testmod()
        print('Tests completed.')
        sys.exit(0)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Define useful functions.
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
def getKeysOrQuit():
    """Poll the task keys; abort the whole task if escape was pressed."""
    pressed = event.getKeys(keyList=keyList)
    if 'escape' in pressed:
        QuitTask()
    return pressed
def QuitTask():
    """Close PsychoPy and associated windows."""
    # Restore the cursor before tearing the window down, then end the session.
    W.mouseVisible = True
    W.close()
    core.quit()
def CheckForEscape():
    """Abort the task when the 'escape' key has been pressed."""
    if event.getKeys(keyList=['escape']):
        QuitTask()
    # Drop any other buffered key events.
    event.clearEvents()
def FixationBlock(sec):
    """Show the fixation cross for *sec* seconds while polling for escape."""
    # Draw/log fixation cross.
    fix.draw()
    W.logOnFlip(level=logging.EXP, msg='Fixation cross')
    W.flip()
    # Busy-wait for the requested duration, watching the keyboard.
    countdown = clock.CountdownTimer(sec)
    while countdown.getTime() > 0:
        CheckForEscape()
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Define experiment.
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#

# Define valid key presses.
keyList = ['escape', '1', '2']

# Define practice variable.
is_practice = '--practice' in sys.argv

# Define RNG used for stim ordering (left/right placement and durations).
stim_order_rnd = random.Random()
seed = 54
stim_order_rnd.seed(seed)

# Initialize task
env = Hampton2006(
    logging=logging,
    deterministic_reward='--deterministic_reward' in sys.argv,
    deterministic_reversal='--deterministic_reversal' in sys.argv,
)
# NOTE(review): reset() draws the starting state before the RNG is seeded
# below, so the initial state is not reproducible across runs — confirm
# this is intended.
env.reset()

# Initialize task seed
env_seed = 43
env.seed(env_seed)

# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Preparations.
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#

# Request subject ID.
msg = 'Initializing Hampton2006 task.\n\nPlease enter subject ID.\n'
subject_id = input(msg)

# Open window.
W = visual.Window(
    size=(1920, 1080),
    fullscr=True,
    units='norm', color=[-1, -1, -1], autoLog=False)
W.mouseVisible = False

# Color and layout constants (PsychoPy 'norm' units).
white = [1, 1, 1]
black = [-1, -1, -1]
blue = '#023EFF'  # [-1, -1, 1]
yellow = '#FFC400'  # [1, 1, -1]
green = '#1AC938'
red = '#E8000B'
leftpos = (-0.4, 0)
rightpos = (+0.4, 0)
# Height is scaled by the aspect ratio so the rectangles render square.
rect_width, rect_height = 0.25, 0.25 * W.size[0] / W.size[1]
# Initialize fixation cross (used in FixationBlock).
fix = visual.GratingStim(
    W, mask='cross', pos=(0, 0), sf=0,
    size=(0.1, 0.1 * W.size[0] / W.size[1]),
    color=white)

# Initialize stimuli: two colored rectangles, one per action.
stim0 = visual.Rect(
    win=W,
    units='norm',
    width=rect_width,
    height=rect_height,
    pos=leftpos,
    fillColor=blue,
    lineColor=black,
    lineWidth=30,
)
stim1 = visual.Rect(
    win=W,
    units='norm',
    width=rect_width,
    height=rect_height,
    pos=rightpos,
    fillColor=yellow,
    lineColor=black,
    lineWidth=30,
)

# Initialize text: check mark / cross shown as reward feedback.
feedback_positive = visual.TextStim(
    W, "\u2713", color=green, height=1,
    pos=(0, 0))
feedback_negative = visual.TextStim(
    W, "\u00D7", color=red, height=1,
    pos=(0, 0))

# Running total of winnings, displayed near the top of the screen.
cumulative_rew = 0.0
reward_text = visual.TextStim(
    W, text='', autoLog=False,
    pos=(0, 0.8))

def update_reward_text():
    # Refresh the on-screen dollar amount from the global total.
    reward_text.setText('${:.02f}'.format(cumulative_rew))

update_reward_text()

# Initialize logging.
globalClock = core.Clock()
logging.setDefaultClock(globalClock)
logging.LogFile('Hampton2006-{}-{}.log'.format(datetime.now().isoformat(), subject_id), level=logging.EXP, filemode='w')

# Logging various details about experiment
logging.log(level=logging.EXP, msg='CLI args {}, ordering seed {}, task seed {}'.format(
    sys.argv, seed, env_seed,
))

# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Wait for scanner.
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#

# Before advancing to task, wait for scanner to
# send TTL pulse. To abort task, hit 'escape' key.
waiting = visual.TextStim(W, text='Waiting for scanner...', autoLog=False)
waiting.draw()
W.flip()
keyPress, = event.waitKeys(keyList=['equal', 'escape'])
if keyPress == 'escape':
    QuitTask()

# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Task.
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#

# Run the task. To abort task, hit 'escape' key.
FixationBlock(2)
def render(msg, leftmost, rewarded=None, action_selected=None):
    """Compose and flip one frame of the trial display.

    leftmost: which stimulus (0/1) sits on the left, or None to hide both.
    rewarded: None -> fixation cross; True/False -> check/cross feedback.
    action_selected: stimulus to outline in white, or None for no highlight.
    """
    # Place the two rectangles according to which one is leftmost.
    if leftmost == 0:
        stim0.pos, stim1.pos = leftpos, rightpos
    elif leftmost == 1:
        stim1.pos, stim0.pos = leftpos, rightpos
    # Outline the chosen stimulus in white, the other in black.
    if action_selected == 0:
        stim0.lineColor, stim1.lineColor = white, black
    elif action_selected == 1:
        stim1.lineColor, stim0.lineColor = white, black
    else:
        stim0.lineColor = black
        stim1.lineColor = black
    # Screen center: fixation cross before feedback, check/cross afterwards.
    if rewarded is None:
        fix.draw()
    else:
        reward_text.draw()
        if rewarded:
            feedback_positive.draw()
        else:
            feedback_negative.draw()
    if leftmost is not None:
        stim0.draw()
        stim1.draw()
    W.logOnFlip(level=logging.EXP, msg=msg)
    W.flip()
# Map (key pressed, which stimulus is leftmost) -> chosen stimulus id.
key_and_leftmost_to_action = {
    ('1', 0): 0,
    ('2', 0): 1,
    ('1', 1): 1,
    ('2', 1): 0,
}
actions = [0, 1]
num_trials = 100
# Run task.
for _ in range(num_trials):
    '''
    Each trial takes 5 seconds on average.
    - Display stimuli for [1, 2, or 3] seconds.
    - Highlight selected stimuli.
    - Display feedback for [2, 3, or 4] seconds.
    For 100 trials, we expect 500 seconds.
    With a TR of 1, this corresponds to 500 volumes.
    '''
    leftmost = stim_order_rnd.choice(actions)
    # Average of 2.0s
    stimulus_duration = stim_order_rnd.choice([1, 2, 3])
    # Average of 3.0s
    feedback_duration = stim_order_rnd.choice([2, 3, 4])
    # Render stimus
    render('stim presentation for duration {:.03f}. feedback duration {:.03f}'.format(
        stimulus_duration, feedback_duration), leftmost)
    keyPress = None
    action = None
    # First response within the stimulus window selects the action.
    timer = clock.CountdownTimer(stimulus_duration)
    while timer.getTime() > 0:
        keyPress = getKeysOrQuit()
        if '1' in keyPress or '2' in keyPress:
            action = key_and_leftmost_to_action[keyPress[0], leftmost]
            render('stim select', leftmost, action_selected=action)
            break
    # Keep polling (for escape) until the stimulus window elapses.
    while timer.getTime() > 0:
        keyPress = getKeysOrQuit()
    # Render feedback
    if action is None:
        render('no response', leftmost=None)
    else:
        observation, reward, _, _ = env.step(action)
        cumulative_rew += reward
        update_reward_text()
        render('feedback', leftmost, action_selected=action, rewarded=observation == 1)
    # Hold the feedback (or blank) screen for the feedback duration.
    timer = clock.CountdownTimer(feedback_duration)
    while timer.getTime() > 0:
        keyPress = getKeysOrQuit()
    '''
    # Render fixation for a blank screen
    render('only fixation', leftmost=None)
    timer = clock.CountdownTimer(1.0)
    while timer.getTime() > 0:
        keyPress = getKeysOrQuit()
    '''
    # End condition for practice rounds.
    if is_practice and env.num_reversals >= 3:
        logging.log(level=logging.EXP, msg='Ending practice due to {} reversals'.format(env.num_reversals))
        break

# Quit.
logging.log(level=logging.EXP, msg='Done. Final reward ${:0.2f}'.format(cumulative_rew))
QuitTask()
|
# LCD Configuration
import Adafruit_CharLCD as LCD

# BCM pin assignments for the HD44780-style character LCD.
lcd_rs = 27
lcd_en = 22
lcd_d4 = 25
lcd_d5 = 24
lcd_d6 = 23
lcd_d7 = 18
lcd_backlight = 4
# 16x2 character display.
lcd_columns = 16
lcd_rows = 2
lcd = LCD.Adafruit_CharLCD(lcd_rs, lcd_en, lcd_d4, lcd_d5, lcd_d6, lcd_d7, lcd_columns, lcd_rows, lcd_backlight)
#Importing Libraries
# Importing Libraries
import ftplib
import json
import os
import sys
import time
from datetime import datetime
from ftplib import FTP
# Config
DownloadsDir = '/enter/download/directory/here'  # destination for fetched archives
ConfigFile = '/enter/cache/directory/here/last.json'  # JSON list of already-synced files
ftpserver = 'example.com'
username = 'supercow'
password = '<PASSWORD>'
VERBOSE = True  # Set to False if you don't want anything to be printed
# Small functions to clean the code
def debug(txt):
    """Print *txt* to the console when VERBOSE is enabled.

    BUG FIX: the original assigned a flag `debug = True` and then defined a
    function of the same name, shadowing the flag; `if debug == True` then
    compared the function object to True and was always False, so nothing
    was ever printed. The flag is renamed VERBOSE to break the collision.
    """
    if VERBOSE:
        print(txt)
def disp(txt, delay=None):
    """Show *txt* on the LCD, optionally holding it for *delay* seconds."""
    lcd.clear()
    lcd.message(txt)
    if delay is not None:
        time.sleep(delay)
def status(message):
    """Idle-screen display: current date/time on line 1, *message* on line 2."""
    lcd.clear()
    stamp = datetime.now().strftime('%b %d %H:%M')
    lcd.message(stamp + "\n" + message)
# Connect and authenticate; a failed login is shown on the LCD and aborts.
ftp = FTP(ftpserver)  # Declaring ftp server ip
try:
    ftp.login(username, password)
    debug("Login Successful")
    disp("Connected:\n" + ftpserver, 1)
except ftplib.all_errors:
    # BUG FIX: only `from ftplib import FTP` was imported, so the name
    # `ftplib` was unbound and this handler raised NameError instead of
    # catching the login failure; `import ftplib` is now at the top of
    # the file.
    debug("Login Failed, exiting...")
    disp("Failed:\n" + ftpserver, 2)
    status("Err:Login Failed")
    sys.exit()
# Load the list of files synced previously, then work out what is missing.
with open(ConfigFile, 'r') as f:  # context manager closes even if json fails
    localfiles = json.load(f)
debug("Imported.")

# Only .zip archives on the server are considered for syncing.
files = [name for name in ftp.nlst() if '.zip' in name]
# Difference between remote files and the locally-synced list.
difference = list(set(files) - set(localfiles))
if len(difference) == 0:  # nothing new to sync
    disp("All files are\nup to date!", 3)
    status(" All Synced")
    debug("All files up to date, exiting...")
    sys.exit()
# Starting to download missing files
downloaded = []
filesLeft = len(difference)
connFailCount = 0
for i in range(len(difference)):
    try:
        debug(str(i) + "/" + str(filesLeft) + " Left..\n" + difference[i])
        disp("Download Started\n" + difference[i], 0.1)
        # BUG FIX: join with os.path.join — plain string concatenation glued
        # the file name onto the directory name when DownloadsDir has no
        # trailing slash. The with-block also closes the file handle, which
        # the old `open(...).write` callback never did.
        target = os.path.join(DownloadsDir, difference[i])
        with open(target, 'wb') as out:
            ftp.retrbinary('RETR ' + difference[i], out.write)
        disp("File Downloaded\n" + difference[i], 1)
        debug("File downloaded: " + difference[i])
        downloaded.append(difference[i])
    except Exception:  # was a bare except; keep broad but not BaseException
        disp("Download failed\n" + difference[i], 1)
        debug("Download failed : " + difference[i])
        # Give up after four consecutive-run failures (likely connection loss).
        if connFailCount >= 3:
            break
        else:
            connFailCount += 1
failed = list(set(difference) - set(downloaded))
# BUG FIX: persist the union of previously-synced and newly-downloaded
# files. Dumping only `downloaded` forgot everything synced in earlier
# runs, so those files would be re-downloaded on every subsequent sync.
with open(ConfigFile, 'w') as f:
    json.dump(localfiles + downloaded, f)
# Report the outcome on the LCD and console.
if len(failed) == 0:
    disp("All files are\nup to date!", 3)
    debug("All files are synchronized")
    status(" All Synced")
elif connFailCount >= 3:
    status("Connection Error")
else:
    disp(str(len(failed)) + " failed to\ndownload.", 3)
    debug(str(len(failed)) + " files are failed to download")
    status(str(len(failed)) + " Down. Failed")
|
import itertools
import gflags
import logging
import os
from ct.client import log_client
from ct.client import state
from ct.client import temp_db
from ct.crypto import error
from ct.crypto import merkle
from ct.proto import client_pb2
FLAGS = gflags.FLAGS

# Cap on how many fetched log entries are written to the database at once.
gflags.DEFINE_integer("entry_write_batch_size", 1000, "Maximum number of "
                      "entries to batch into one database write")
class Monitor(object):
    """Monitors a single CT log.

    Fetches and verifies STHs, checks consistency proofs, and downloads new
    entries while maintaining a persisted, verified compact Merkle tree.
    """

    def __init__(self, client, verifier, hasher, db, temp_db, state_keeper):
        self.__client = client
        self.__verifier = verifier
        self.__hasher = hasher
        self.__db = db
        # NOTE(review): temp_db is accepted but never stored or used here.
        self.__state_keeper = state_keeper

        # TODO(ekasper): once consistency checks are in place, also load/store
        # Merkle tree info.
        # Depends on: Merkle trees implemented in Python.
        self.__state = client_pb2.MonitorState()

        try:
            self.__state = self.__state_keeper.read(client_pb2.MonitorState)
        except state.FileNotFoundError:
            # TODO(ekasper): initialize state file with a setup script, so we
            # can raise with certainty when it's not found.
            logging.warning("Monitor state file not found, assuming first "
                            "run.")
        else:
            if not self.__state.HasField("verified_sth"):
                logging.warning("No verified monitor state, assuming first run.")

        # load compact merkle tree state from the monitor state
        self.__verified_tree = merkle.CompactMerkleTree(hasher)
        self.__verified_tree.load(self.__state.verified_tree)

    def __repr__(self):
        # BUG FIX: this (and __str__) referenced self.__state_file, an
        # attribute that is never set anywhere, so repr()/str() raised
        # AttributeError; __state_keeper is the attribute actually stored
        # in __init__.
        return "%r(%r, %r, %r, %r)" % (self.__class__.__name__, self.__client,
                                       self.__verifier, self.__db,
                                       self.__state_keeper)

    def __str__(self):
        return "%s(%s, %s, %s, %s)" % (self.__class__.__name__, self.__client,
                                       self.__verifier, self.__db,
                                       self.__state_keeper)

    def __update_state(self, new_state):
        """Update state and write to disk."""
        # save compact merkle tree state into the monitor state
        self.__verified_tree.save(new_state.verified_tree)
        self.__state_keeper.write(new_state)
        self.__state = new_state
        logging.info("New state is %s" % new_state)

    @property
    def servername(self):
        return self.__client.servername

    @property
    def data_timestamp(self):
        """Timestamp of the latest verified data, in milliseconds since epoch.
        """
        return self.__state.verified_sth.timestamp

    def _set_pending_sth(self, new_sth):
        """Set pending_sth from new_sth, or just verified_sth if not bigger."""
        if new_sth.tree_size < self.__state.verified_sth.tree_size:
            raise ValueError("pending size must be >= verified size")
        if new_sth.timestamp <= self.__state.verified_sth.timestamp:
            raise ValueError("pending time must be > verified time")
        new_state = client_pb2.MonitorState()
        new_state.CopyFrom(self.__state)
        # Same-size-but-newer STHs go straight to verified; larger ones are
        # pending until their entries have been fetched and checked.
        if new_sth.tree_size > self.__state.verified_sth.tree_size:
            new_state.pending_sth.CopyFrom(new_sth)
        else:
            new_state.verified_sth.CopyFrom(new_sth)
        self.__update_state(new_state)

    def _set_verified_tree(self, new_tree):
        """Set verified_tree and maybe move pending_sth to verified_sth."""
        self.__verified_tree = new_tree
        old_state = self.__state
        new_state = client_pb2.MonitorState()
        new_state.CopyFrom(self.__state)
        assert old_state.pending_sth.tree_size >= new_tree.tree_size
        if old_state.pending_sth.tree_size == new_tree.tree_size:
            # all pending entries retrieved
            # already did consistency checks so this should always be true
            assert (old_state.pending_sth.sha256_root_hash ==
                    self.__verified_tree.root_hash())
            new_state.verified_sth.CopyFrom(old_state.pending_sth)
            new_state.ClearField("pending_sth")
        self.__update_state(new_state)

    def _verify_consistency(self, old_sth, new_sth):
        """Fetch and verify a consistency proof between two STHs; raise on failure."""
        try:
            proof = self.__client.get_sth_consistency(
                old_sth.tree_size, new_sth.tree_size)
            # NOTE(review): str.encode("base64") is Python 2 only.
            logging.debug("got proof for (%s, %s): %s",
                          old_sth.tree_size, new_sth.tree_size,
                          map(lambda b: b[:8].encode("base64")[:-2] + "...", proof))
            self.__verifier.verify_sth_consistency(old_sth, new_sth, proof)
        except error.VerifyError as e:
            # catches both ConsistencyError and ProofError. when alerts are
            # implemented, only the former should trigger an immediate alert;
            # the latter may have innocent causes (e.g. data corruption,
            # software bug) so we could give it a chance to recover before
            # alerting.
            logging.error("Could not verify STH consistency: %s vs %s!!!\n%s" %
                          (old_sth, new_sth, e))
            raise

    def _update_sth(self):
        """Get a new candidate STH. If update succeeds, stores the new STH as
        pending. Does nothing if there is already a pending
        STH.

        Returns: True if the update succeeded."""
        if self.__state.HasField("pending_sth"):
            return True
        logging.info("Fetching new STH")
        try:
            sth_response = self.__client.get_sth()
            logging.info("Got new STH: %s" % sth_response)
        except (log_client.HTTPError, log_client.InvalidResponseError) as e:
            logging.error("get-sth from %s failed: %s" % (self.servername, e))
            return False

        # If we got the same response as last time, do nothing.
        # If we got an older response than last time, return False.
        # (It is not necessarily an inconsistency - the log could be out of
        # sync - but we should not rewind to older data.)
        #
        # The client should always return an STH but best eliminate the
        # None == None case explicitly by only shortcutting the verification
        # if we already have a verified STH.
        if self.__state.HasField("verified_sth"):
            if sth_response == self.__state.verified_sth:
                logging.info("Ignoring already-verified STH: %s" %
                             sth_response)
                return True
            elif (sth_response.timestamp <
                  self.__state.verified_sth.timestamp):
                logging.error("Rejecting received STH: timestamp is older "
                              "than current verified STH: %s vs %s " %
                              (sth_response, self.__state.verified_sth))
                return False

        try:
            # Given that we now only store verified STHs, the audit info here
            # is not all that useful.
            # TODO(ekasper): we should be tracking consistency instead.
            self.__verifier.verify_sth(sth_response)
            audited_sth = client_pb2.AuditedSth()
            audited_sth.sth.CopyFrom(sth_response)
            audited_sth.audit.status = client_pb2.VERIFIED
            self.__db.store_sth(self.servername, audited_sth)
        except (error.EncodingError, error.VerifyError) as e:
            logging.error("Invalid STH: %s" % sth_response)
            return False

        # Verify consistency to catch the log trying to trick us
        # into rewinding the tree.
        try:
            self._verify_consistency(self.__state.verified_sth, sth_response)
        except error.VerifyError:
            return False

        # We now have a valid STH that is newer than our current STH: we should
        # be holding on to it until we have downloaded and verified data under
        # its signature.
        logging.info("STH verified, updating state.")
        self._set_pending_sth(sth_response)
        return True

    def _compute_projected_sth(self, extra_leaves):
        """Compute a partial projected STH.

        Useful for when an intermediate STH is not directly available from the
        server, but you still want to do something with the root hash.

        Args:
            extra_leaves: Extra leaves present in the tree for the new STH, in
                the same order as in that tree.

        Returns:
            (partial_sth, new_tree)
            partial_sth: A partial STH with timestamp 0 and empty signature.
            new_tree: New CompactMerkleTree with the extra_leaves integrated.
        """
        partial_sth = client_pb2.SthResponse()
        old_size = self.__verified_tree.tree_size
        partial_sth.tree_size = old_size + len(extra_leaves)
        # we only want to check the hash, so just use a dummy timestamp
        # that looks valid so the temporal verifier doesn't complain
        partial_sth.timestamp = 0
        extra_raw_leaves = [leaf.leaf_input for leaf in extra_leaves]
        new_tree = self.__verified_tree.extended(extra_raw_leaves)
        partial_sth.sha256_root_hash = new_tree.root_hash()
        return partial_sth, new_tree

    @staticmethod
    def __estimate_time(num_new_entries):
        """Human-readable guess of how long fetching this many entries takes."""
        if num_new_entries < 1000:
            return "a moment"
        elif num_new_entries < 1000000:
            return "a while"
        else:
            return "all night"

    def __fetch_entries(self, start, end):
        """Fetch, verify, and commit entries [start, end]; return success."""
        num_new_entries = end - start + 1
        logging.info("Fetching %d new entries: this will take %s..." %
                     (num_new_entries,
                      self.__estimate_time(num_new_entries)))
        new_entries = self.__client.get_entries(start, end)
        next_sequence_number = start
        # Loop until we a) have all entries b) error out or c) exhaust the
        # generator.
        while next_sequence_number < end + 1:
            try:
                entry_batch = list(itertools.islice(
                    new_entries, FLAGS.entry_write_batch_size))
            except (log_client.HTTPError,
                    log_client.InvalidResponseError) as e:
                logging.error("get-entries from %s failed: %s" %
                              (self.servername, e))
                return False
            if not entry_batch:
                # Generator exhausted prematurely.
                logging.error("Failed to fetch all entries: expected tree size "
                              "%d vs retrieved tree size %d" %
                              (end + 1, next_sequence_number))
                return False
            logging.info("Fetched %d entries" % len(entry_batch))

            # check that the batch is consistent with the eventual pending_sth
            try:
                # calculate the hash for the latest fetched certs
                partial_sth, new_tree = self._compute_projected_sth(entry_batch)
                self._verify_consistency(partial_sth, self.__state.pending_sth)
            except error.VerifyError:
                return False
            logging.info("Verified %d entries" % len(entry_batch))
            # Commit progress so a crash does not lose verified batches.
            self._set_verified_tree(new_tree)
            # TODO(ekasper): parse temporary data into permanent storage.
            next_sequence_number += len(entry_batch)
        return True

    def _update_entries(self):
        """Retrieve new entries according to the pending STH.

        Returns: True if the update succeeded.
        """
        if not self.__state.HasField("pending_sth"):
            return True
        # Default is 0, which is what we want.
        wanted_entries = self.__state.pending_sth.tree_size
        last_verified_size = self.__verified_tree.tree_size
        if (wanted_entries > last_verified_size and not
                self.__fetch_entries(last_verified_size, wanted_entries - 1)):
            return False
        return True

    def update(self):
        """Update log view. Returns True if the update succeeded, False if any
        error occurred."""
        logging.info("Starting update for %s" % self.servername)
        if not self._update_sth() or not self._update_entries():
            logging.error("Update failed")
            return False
        return True
|
<reponame>J-Owens/soccerdata<gh_stars>0
"""Scraper for api.clubelo.com."""
import re
from datetime import datetime, timedelta
from pathlib import Path
from typing import Optional, Union
import pandas as pd
from unidecode import unidecode
from ._common import BaseReader, standardize_colnames
from ._config import DATA_DIR, NOCACHE, NOSTORE, TEAMNAME_REPLACEMENTS
# Local cache location and the ClubElo CSV endpoint.
CLUB_ELO_DATADIR = DATA_DIR / "ClubElo"
CLUB_ELO_API = "http://api.clubelo.com"
class ClubElo(BaseReader):
    """Provides pd.DataFrames from CSV API at http://api.clubelo.com.

    Data will be downloaded as necessary and cached locally in
    ``~/soccerdata/data/ClubElo``.

    Since the source does not provide league names, this class will not filter
    by league. League names will be inserted from the other sources where
    available. Leagues that are only covered by clubelo.com will have NaN
    values.

    Parameters
    ----------
    no_cache : bool
        If True, will not use cached data.
    no_store : bool
        If True, will not store downloaded data.
    data_dir : Path
        Path to directory where data will be cached.
    """

    def __init__(
        self, no_cache: bool = NOCACHE, no_store: bool = NOSTORE, data_dir: Path = CLUB_ELO_DATADIR
    ):
        """Initialize a new ClubElo reader."""
        super().__init__(no_cache=no_cache, no_store=no_store, data_dir=data_dir)

    def read_by_date(self, date: Optional[Union[str, datetime]] = None) -> pd.DataFrame:
        """Retrieve ELO scores for all teams at specified date.

        Elo scores are available as early as 1939. Values before 1960 should
        be considered provisional.

        Parameters
        ----------
        date : datetime object or string like 'YYYY-MM-DD'
            Date for which to retrieve ELO scores. If no date is specified,
            get today's scores.

        Returns
        -------
        pd.DataFrame
            ELO scores indexed by team name.
        """
        if not date:
            date = datetime.today()
        elif isinstance(date, str):
            date = datetime.strptime(date, "%Y-%m-%d")
        else:
            pass  # Assume datetime object
        datestring = date.strftime("%Y-%m-%d")
        filepath = self.data_dir / f"{datestring}.csv"
        url = f"{CLUB_ELO_API}/{datestring}"
        data = self._download_and_save(url, filepath)
        df = (
            pd.read_csv(
                data, parse_dates=["From", "To"], infer_datetime_format=True, dayfirst=False
            )
            .pipe(standardize_colnames)
            .rename(columns={"club": "team"})
            .replace({"team": TEAMNAME_REPLACEMENTS})
            # clubelo.com serializes missing values as the literal string "None".
            .replace("None", float("nan"))
            .assign(rank=lambda x: x["rank"].astype("float"))
            # Synthesize a league id like "ENG_1" from country code and level.
            .assign(league=lambda x: x["country"] + "_" + x["level"].astype(str))
            .pipe(self._translate_league)
            .reset_index(drop=True)
            .set_index("team")
        )
        return df

    def read_team_history(
        self, team: str, max_age: Union[int, timedelta] = 1
    ) -> Optional[pd.DataFrame]:
        """Retrieve full ELO history for one club.

        For the exact spelling of a club's name, check the result
        of :func:`~soccerdata.ClubElo.read_by_date` or
        `clubelo.com <http://clubelo.com/Ranking>`__. You can also use
        alternative team names specified in `teamname_replacements.json`.
        Values before 1960 should be considered provisional.

        Parameters
        ----------
        team : str
            The club's name
        max_age : int for age in days, or timedelta object
            The max. age of locally cached file before re-download.

        Raises
        ------
        TypeError
            If max_age is not an integer or timedelta object.
        ValueError
            If no ratings for the given team are available.

        Returns
        -------
        pd.DataFrame
            The team's ELO history indexed by the 'from' date.
        """
        # Candidate spellings: every alias mapping to `team`, plus the name itself.
        teams_to_check = [k for k, v in TEAMNAME_REPLACEMENTS.items() if v == team]
        teams_to_check.append(team)
        # The API expects ASCII names without spaces or apostrophes.
        for i, _ in enumerate(teams_to_check):
            teams_to_check[i] = unidecode(teams_to_check[i])
            teams_to_check[i] = re.sub(r"[\s']", "", teams_to_check[i])
        for _team in teams_to_check:
            filepath = self.data_dir / f"{_team}.csv"
            url = f"{CLUB_ELO_API}/{_team}"
            data = self._download_and_save(url, filepath, max_age)
            df = (
                pd.read_csv(
                    data,
                    parse_dates=["From", "To"],
                    infer_datetime_format=True,
                    dayfirst=False,
                )
                .pipe(standardize_colnames)
                .rename(columns={"club": "team"})
                .replace("None", float("nan"))
                .assign(rank=lambda x: x["rank"].astype("float"))
                .set_index("from")
                .sort_index()
            )
            if len(df) > 0:
                # clubelo.com returns a CSV with just a header for nonexistent club
                df.replace({"team": TEAMNAME_REPLACEMENTS}, inplace=True)
                return df
        raise ValueError(f"No data found for team {team}")
|
import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras.preprocessing import image
def cod(y_true, y_pred):
    """Ratio of residual to total sum of squares (SSR / SST).

    NOTE(review): despite the name hinting at "coefficient of determination",
    this returns SSR/SST rather than 1 - SSR/SST -- usable as a quantity to
    minimize, but confirm the intended sign convention with its callers.
    """
    residual_ss = K.sum(K.square(y_true - y_pred))
    total_ss = K.sum(K.square(y_true - K.mean(y_true)))
    return residual_ss / total_ss
def pred4_newloss(y_true, y_pred):
    """Mean absolute error shifted down by a constant offset of 5."""
    abs_err = K.abs(y_pred - y_true)
    return K.mean(abs_err - K.constant(5), axis=-1)
def pred1_newloss(y_true, y_pred):
    """Mean absolute error shifted down by a constant offset of 6."""
    abs_err = K.abs(y_pred - y_true)
    return K.mean(abs_err - K.constant(6), axis=-1)
def wpred4_newloss(y_true, y_pred):
    """Mean absolute error shifted down by a constant offset of 4.5."""
    abs_err = K.abs(y_pred - y_true)
    return K.mean(abs_err - K.constant(4.5), axis=-1)
def wpred1_newloss(y_true, y_pred):
    """Mean absolute error shifted down by a constant offset of 5.5."""
    abs_err = K.abs(y_pred - y_true)
    return K.mean(abs_err - K.constant(5.5), axis=-1)
def augment_data(images):
    """Randomly flip/rotate/shear/shift/zoom each image of a batch.

    Note: `images` is modified in place and also returned.  The sequence of
    np.random.random() draws is identical for every image, so results are
    reproducible under a fixed numpy seed.
    """
    batch_size = images.shape[0]
    for idx in range(batch_size):
        # Horizontal flip with probability 0.5.
        if np.random.random() > 0.5:
            images[idx] = images[idx][:, ::-1]
        # Each of the remaining augmentations fires with probability 0.25.
        if np.random.random() > 0.75:
            images[idx] = image.random_rotation(images[idx], 20, row_axis=0, col_axis=1, channel_axis=2)
        if np.random.random() > 0.75:
            images[idx] = image.random_shear(images[idx], 0.2, row_axis=0, col_axis=1, channel_axis=2)
        if np.random.random() > 0.75:
            images[idx] = image.random_shift(images[idx], 0.2, 0.2, row_axis=0, col_axis=1, channel_axis=2)
        if np.random.random() > 0.75:
            images[idx] = image.random_zoom(images[idx], [0.8, 1.2], row_axis=0, col_axis=1, channel_axis=2)
    return images
def data_generator_reg(X, Y, Y20, Y4, batch_size):
    """Infinite batch generator yielding (augmented images, [Y, Y4, Y, Y]).

    All arrays are reshuffled in unison at the start of every epoch; the last
    partial batch of an epoch is flushed before reshuffling.

    NOTE(review): Y20 is shuffled but never used in the yielded targets, and
    three of the four target lists are copies of Y -- confirm this matches the
    model's four output heads.
    """
    while True:
        # New epoch: shuffle all arrays with the same permutation.
        idxs = np.random.permutation(len(X))
        X = X[idxs]
        Y = Y[idxs]
        Y20 = Y20[idxs]
        Y4 = Y4[idxs]
        p, q, q1, q2, q3 = [], [], [], [], []
        for i in range(len(X)):
            p.append(X[i])
            q.append(Y[i])
            q1.append(Y4[i])
            q2.append(Y[i])
            q3.append(Y[i])
            if len(p) == batch_size:
                yield augment_data(np.array(p)), [np.array(q), np.array(q1), np.array(q2), np.array(q3)]
                p, q, q1, q2, q3 = [], [], [], [], []
        if p:
            # Flush the final partial batch of the epoch.
            yield augment_data(np.array(p)), [np.array(q), np.array(q1), np.array(q2), np.array(q3)]
            p, q, q1, q2, q3 = [], [], [], [], []
def ker_init(shape, dtype=None):
    """Kernel initializer returning a fixed, hard-coded (3, 3, 3, 3) variable.

    NOTE(review): the requested `shape` and `dtype` arguments are ignored --
    confirm every layer using this initializer expects exactly this kernel.

    Only the [in_channel 0, out_channel 0] entry of each spatial tap is
    non-zero: the eight spatial neighbours are weighted 1 around a -7 centre
    (a Laplacian-like high-pass filter on the first channel).
    """
    kernel = tf.Variable(
        [[[[1., 0., 0.],
           [0., 0., 0.],
           [0., 0., 0.]],
          [[1., 0., 0.],
           [0., 0., 0.],
           [0., 0., 0.]],
          [[1., 0., 0.],
           [0., 0., 0.],
           [0., 0., 0.]]],
         [[[1., 0., 0.],
           [0., 0., 0.],
           [0., 0., 0.]],
          # Centre tap of the filter.
          [[-7., 0., 0.],
           [0., 0., 0.],
           [0., 0., 0.]],
          [[1., 0., 0.],
           [0., 0., 0.],
           [0., 0., 0.]]],
         [[[1., 0., 0.],
           [0., 0., 0.],
           [0., 0., 0.]],
          [[1., 0., 0.],
           [0., 0., 0.],
           [0., 0., 0.]],
          [[1., 0., 0.],
           [0., 0., 0.],
           [0., 0., 0.]]]])
    return kernel
def read_data(data_name):
    """Parse one serialized tf.train.Example into (inputs, labels) dicts.

    Args:
        data_name: scalar string tensor holding one serialized Example record
            (despite the name, this is the record itself, not a file name).

    Returns:
        (Input, label): Input maps 'input_1' to the decoded 3-channel JPEG
        tensor; label maps the four head names to float32 targets derived
        from the 'age*' features.
    """
    image_feature_description = {
        'image': tf.io.FixedLenFeature([], tf.string),
        'age': tf.io.FixedLenFeature([], tf.int64),
        'age20': tf.io.FixedLenFeature([], tf.int64),
        'age4': tf.io.FixedLenFeature([], tf.int64),
        'age1': tf.io.FixedLenFeature([], tf.int64)
    }
    image_features = tf.io.parse_single_example(data_name, image_feature_description)
    # This local shadows the module-level keras `image` import inside this function.
    image = tf.image.decode_jpeg(image_features['image'], channels=3)
    Input = {}
    Input['input_1'] = image
    label = {}
    label["pred_a"] = tf.cast(image_features['age'], tf.float32)
    # NOTE(review): 'pre_4' is age20 + age4 while the other heads use the raw
    # 'age' feature ('age1' is parsed but unused) -- confirm this composite
    # target against the TFRecord writer.
    label['pre_4'] = tf.cast(image_features['age20'] + image_features['age4'], tf.float32)
    label['pre_1'] = tf.cast(image_features['age'], tf.float32)
    label['pre_cod'] = tf.cast(image_features['age'], tf.float32)
    return Input, label
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from typing import List, Dict, Callable
import re
from datetime import datetime
from recognizers_text.utilities import RegExpUtility
from ..utilities import DateUtils
from ..base_holiday import BaseHolidayParserConfiguration
from ...resources.portuguese_date_time import PortugueseDateTime
class PortugueseHolidayParserConfiguration(BaseHolidayParserConfiguration):
    """Portuguese configuration for the base holiday parser.

    Supplies the Portuguese holiday regexes, the holiday-name lookup table,
    the sanitized-token -> date-function dictionary, and the year-shift logic
    for "next"/"previous"/"this" year phrases.
    """

    @property
    def holiday_names(self) -> Dict[str, List[str]]:
        return self._holiday_names

    @property
    def holiday_regex_list(self) -> List[str]:
        return self._holiday_regexes

    @property
    def holiday_func_dictionary(self) -> Dict[str, Callable[[int], datetime]]:
        # Maps sanitized holiday tokens to a function of the year.
        return self._holiday_func_dictionary

    def sanitize_holiday_token(self, holiday: str) -> str:
        # Strip spaces and apostrophes so tokens match the dictionary keys
        # built in _init_holiday_funcs.
        return re.sub('[ \']', '', holiday)

    def __init__(self, config):
        super().__init__()
        self._holiday_regexes = [
            RegExpUtility.get_safe_reg_exp(PortugueseDateTime.HolidayRegex1),
            RegExpUtility.get_safe_reg_exp(PortugueseDateTime.HolidayRegex2),
            RegExpUtility.get_safe_reg_exp(PortugueseDateTime.HolidayRegex3)
        ]
        self._holiday_names = PortugueseDateTime.HolidayNames
        self._variable_holidays_timex_dictionary = PortugueseDateTime.VariableHolidaysTimexDictionary
        self.next_prefix_regex = RegExpUtility.get_safe_reg_exp(
            PortugueseDateTime.NextPrefixRegex)
        self.previous_prefix_regex = RegExpUtility.get_safe_reg_exp(
            PortugueseDateTime.PreviousPrefixRegex)
        self.this_prefix_regex = RegExpUtility.get_safe_reg_exp(
            PortugueseDateTime.ThisPrefixRegex)

    def _init_holiday_funcs(self) -> Dict[str, Callable[[int], datetime]]:
        """Extend the base mapping with Portuguese holiday tokens."""
        # Keys are sanitized Portuguese holiday names (see sanitize_holiday_token).
        local = dict([
            ("pai", PortugueseHolidayParserConfiguration.fathers_day),
            ("mae", PortugueseHolidayParserConfiguration.mothers_day),
            ("acaodegracas", PortugueseHolidayParserConfiguration.thanksgiving_day),
            ("trabalho", PortugueseHolidayParserConfiguration.international_workers_day),
            ("pascoa", PortugueseHolidayParserConfiguration.easter_day),
            ("natal", PortugueseHolidayParserConfiguration.christmas_day),
            ("vesperadenatal", PortugueseHolidayParserConfiguration.christmas_eve),
            ("anonovo", PortugueseHolidayParserConfiguration.new_year),
            # NOTE(review): "versperadeanonovo" looks like a misspelling of
            # "vesperadeanonovo"; confirm against the tokens produced by the
            # PortugueseDateTime resource regexes before changing.
            ("versperadeanonovo", PortugueseHolidayParserConfiguration.new_year_eve),
            ("yuandan", PortugueseHolidayParserConfiguration.new_year),
            ("professor", PortugueseHolidayParserConfiguration.teacher_day),
            # NOTE(review): "todosossantos" (All Saints) maps to Oct 31
            # (halloween_day), not Nov 1 (all_hallow_day) -- verify intent.
            ("todosossantos", PortugueseHolidayParserConfiguration.halloween_day),
            ("crianca", PortugueseHolidayParserConfiguration.children_day),
            ("mulher", PortugueseHolidayParserConfiguration.female_day)
        ])
        return {**super()._init_holiday_funcs(), **local}

    @staticmethod
    def mao_birthday(year: int) -> datetime:
        return datetime(year, 12, 26)

    @staticmethod
    def new_year(year: int) -> datetime:
        return datetime(year, 1, 1)

    @staticmethod
    def teacher_day(year: int) -> datetime:
        return datetime(year, 9, 10)

    @staticmethod
    def singles_day(year: int) -> datetime:
        return datetime(year, 11, 11)

    @staticmethod
    def halloween_day(year: int) -> datetime:
        return datetime(year, 10, 31)

    @staticmethod
    def youth_day(year: int) -> datetime:
        return datetime(year, 5, 4)

    @staticmethod
    def children_day(year: int) -> datetime:
        return datetime(year, 6, 1)

    @staticmethod
    def female_day(year: int) -> datetime:
        return datetime(year, 3, 8)

    @staticmethod
    def tree_plant_day(year: int) -> datetime:
        return datetime(year, 3, 12)

    @staticmethod
    def girls_day(year: int) -> datetime:
        return datetime(year, 3, 7)

    @staticmethod
    def white_lover_day(year: int) -> datetime:
        return datetime(year, 3, 14)

    @staticmethod
    def valentines_day(year: int) -> datetime:
        return datetime(year, 2, 14)

    @staticmethod
    def christmas_day(year: int) -> datetime:
        return datetime(year, 12, 25)

    @staticmethod
    def inauguration_day(year: int) -> datetime:
        return datetime(year, 1, 20)

    @staticmethod
    def groundhog_day(year: int) -> datetime:
        return datetime(year, 2, 2)

    @staticmethod
    def st_patrick_day(year: int) -> datetime:
        return datetime(year, 3, 17)

    @staticmethod
    def fool_day(year: int) -> datetime:
        return datetime(year, 4, 1)

    @staticmethod
    def st_george_day(year: int) -> datetime:
        return datetime(year, 4, 23)

    @staticmethod
    def may_day(year: int) -> datetime:
        return datetime(year, 5, 1)

    @staticmethod
    def cinco_de_mayo_day(year: int) -> datetime:
        return datetime(year, 5, 5)

    @staticmethod
    def baptiste_day(year: int) -> datetime:
        return datetime(year, 6, 24)

    @staticmethod
    def usa_independence_day(year: int) -> datetime:
        return datetime(year, 7, 4)

    @staticmethod
    def bastille_day(year: int) -> datetime:
        return datetime(year, 7, 14)

    @staticmethod
    def all_hallow_day(year: int) -> datetime:
        return datetime(year, 11, 1)

    @staticmethod
    def all_souls_day(year: int) -> datetime:
        return datetime(year, 11, 2)

    @staticmethod
    def guy_fawkes_day(year: int) -> datetime:
        return datetime(year, 11, 5)

    @staticmethod
    def veterans_day(year: int) -> datetime:
        return datetime(year, 11, 11)

    @staticmethod
    def christmas_eve(year: int) -> datetime:
        return datetime(year, 12, 24)

    @staticmethod
    def new_year_eve(year: int) -> datetime:
        return datetime(year, 12, 31)

    @staticmethod
    def easter_day(year: int) -> datetime:
        # Easter is a movable feast; min_value acts as a "not computed" sentinel.
        return DateUtils.min_value

    @staticmethod
    def juneteenth(year: int) -> datetime:
        return datetime(year, 6, 19)

    def get_swift_year(self, text: str) -> int:
        """Return 1/-1/0 for next/previous/this-year phrases, -10 when absent."""
        trimmed_text = text.strip().lower()
        swift = -10  # sentinel: no year-shifting prefix found
        if self.next_prefix_regex.search(trimmed_text):
            swift = 1
        if self.previous_prefix_regex.search(trimmed_text):
            swift = -1
        # Checked last, so a "this" prefix wins if several prefixes match.
        if self.this_prefix_regex.search(trimmed_text):
            swift = 0
        return swift
|
<gh_stars>0
"""
This module provides a ASE calculator class [#ase1]_ for SchNetPack models, as
well as a general Interface to all ASE calculation methods, such as geometry
optimisation, normal mode computation and molecular dynamics simulations.
References
----------
.. [#ase1] Larsen, Mortensen, Blomqvist, Castelli, Christensen, Dułak, Friis,
Groves, Hammer, Hargus: The atomic simulation environment -- a Python
library for working with atoms.
Journal of Physics: Condensed Matter, 9, 27. 2017.
"""
import os
import ase
from ase import units
from ase.calculators.calculator import Calculator, all_changes
from ase.io import read, write
from ase.io.trajectory import Trajectory
from ase.md import VelocityVerlet, Langevin, MDLogger
from ase.md.velocitydistribution import (
MaxwellBoltzmannDistribution,
Stationary,
ZeroRotation,
)
from ase.optimize import QuasiNewton
from ase.vibrations import Vibrations
import torch
import schnetpack
import logging
import schnetpack.task
from schnetpack import properties
from schnetpack.data.loader import _atoms_collate_fn
from schnetpack.transform import CastTo32, CastTo64
from schnetpack.units import convert_units
from typing import Optional, List, Union
from ase import Atoms
log = logging.getLogger(__name__)
__all__ = ["SpkCalculator", "AseInterface", "AtomsConverter"]
class AtomsConverterError(Exception):
    """Raised when ASE atoms cannot be converted to a model input batch."""
class AtomsConverter:
    """
    Convert ASE atoms to SchNetPack input batch format for model prediction.

    Args:
        neighbor_list: transform computing neighbor information for the model.
        device: device the finished input batch is moved to ('cpu', 'cuda', ...).
        dtype: floating point precision of the inputs; only torch.float32 and
            torch.float64 are supported.

    Raises:
        AtomsConverterError: if `dtype` is neither float32 nor float64.
    """

    def __init__(
        self,
        neighbor_list: schnetpack.transform.Transform,
        device: Union[str, torch.device] = "cpu",
        dtype: torch.dtype = torch.float32,
    ):
        self.neighbor_list = neighbor_list
        self.device = device
        self.dtype = dtype
        # get transforms and initialize neighbor list
        self.transforms: List[schnetpack.transform.Transform] = [neighbor_list]
        # Set numerical precision
        if dtype == torch.float32:
            self.transforms.append(CastTo32())
        elif dtype == torch.float64:
            self.transforms.append(CastTo64())
        else:
            raise AtomsConverterError(f"Unrecognized precision {dtype}")

    def __call__(self, atoms: Atoms):
        """
        Args:
            atoms (ase.Atoms): ASE atoms object of the molecule.

        Returns:
            dict[str, torch.Tensor]: input batch for model.
        """
        inputs = {
            properties.n_atoms: torch.tensor([atoms.get_global_number_of_atoms()]),
            properties.Z: torch.from_numpy(atoms.get_atomic_numbers()),
            properties.R: torch.from_numpy(atoms.get_positions()),
            properties.cell: torch.from_numpy(atoms.get_cell().array),
            properties.pbc: torch.from_numpy(atoms.get_pbc()),
        }
        # Apply the neighbor list and precision casts.
        for transform in self.transforms:
            inputs = transform(inputs)
        # Collate the single molecule into a batch of size one.
        inputs = _atoms_collate_fn([inputs])
        # Move input batch to device
        inputs = {p: inputs[p].to(self.device) for p in inputs}
        return inputs
class SpkCalculator(Calculator):
    """
    ASE calculator for schnetpack machine learning models.

    Args:
        model: Trained model for calculations
        converter: AtomsConverter used to build model input batches
        energy: name of energy property in provided model.
        forces: name of forces in provided model.
        stress: name of stress property in provided model.
        energy_units: energy units used by model
        forces_units: force units used by model
        stress_units: stress units used by model
        **kwargs: Additional arguments for basic ase calculator class
    """

    # ASE-side property names handled by this calculator.
    energy = "energy"
    forces = "forces"
    stress = "stress"
    implemented_properties = [energy, forces, stress]

    def __init__(
        self,
        model: schnetpack.model.AtomisticModel,
        converter: AtomsConverter,
        energy: str = "energy",
        forces: str = "forces",
        stress: str = "stress",
        energy_units: Union[str, float] = "kcal/mol",
        forces_units: Union[str, float] = "kcal/mol/Angstrom",
        stress_units: Union[str, float] = "kcal/mol/Angstrom/Angstrom/Angstrom",
        **kwargs,
    ):
        Calculator.__init__(self, **kwargs)
        self.converter = converter
        self.model = model
        # Move the model to the converter's device/precision so inputs match.
        self.model.to(device=self.converter.device, dtype=self.converter.dtype)
        # TODO: activate computation of stress in model if requested
        # Mapping between ASE names and model outputs
        self.property_map = {
            self.energy: energy,
            self.forces: forces,
            self.stress: stress,
        }
        # Unit conversion to default ASE units
        self.property_units = {
            self.energy: convert_units(energy_units, "eV"),
            self.forces: convert_units(forces_units, "eV/Angstrom"),
            # NOTE(review): "eV/Ang/Ang/Ang" vs "eV/Angstrom" above -- confirm
            # convert_units accepts both spellings.
            self.stress: convert_units(stress_units, "eV/Ang/Ang/Ang"),
        }

    def calculate(
        self,
        atoms: ase.Atoms = None,
        properties: List[str] = ["energy"],
        system_changes: List[str] = all_changes,
    ):
        """
        Args:
            atoms (ase.Atoms): ASE atoms object.
            properties (list of str): do not use this, no functionality
            system_changes (list of str): List of changes for ASE.

        Raises:
            AtomsConverterError: if a requested property is not produced by
                the model.
        """
        # First call original calculator to set atoms attribute
        # (see https://wiki.fysik.dtu.dk/ase/_modules/ase/calculators/calculator.html#Calculator)
        if self.calculation_required(atoms, properties):
            Calculator.calculate(self, atoms)
            # Convert to schnetpack input format
            model_inputs = self.converter(atoms)
            model_results = self.model(model_inputs)
            results = {}
            # TODO: use index information to slice everything properly
            for prop in properties:
                model_prop = self.property_map[prop]
                if model_prop in model_results:
                    if prop == self.energy:
                        # ase calculator should return scalar energy
                        results[prop] = (
                            model_results[model_prop].cpu().data.numpy()[0]
                            * self.property_units[prop]
                        )
                    else:
                        results[prop] = (
                            model_results[model_prop].cpu().data.numpy()
                            * self.property_units[prop]
                        )
                else:
                    # BUG FIX: the concatenated message previously read
                    # "...check the modelproperties!" (missing space).
                    raise AtomsConverterError(
                        "'{:s}' is not a property of your model. Please "
                        "check the model "
                        "properties!".format(model_prop)
                    )
            self.results = results
class AseInterface:
    """
    Interface for ASE calculations (optimization and molecular dynamics)
    """

    def __init__(
        self,
        molecule_path: str,
        working_dir: str,
        model: schnetpack.model.AtomisticModel,
        converter: AtomsConverter,
        energy: str = "energy",
        forces: str = "forces",
        stress: str = "stress",
        energy_units: Union[str, float] = "kcal/mol",
        forces_units: Union[str, float] = "kcal/mol/Angstrom",
        stress_units: Union[str, float] = "kcal/mol/Angstrom/Angstrom/Angstrom",
    ):
        """
        Args:
            molecule_path: Path to initial geometry
            working_dir: Path to directory where files should be stored
            model: Trained model
            converter: AtomsConverter used to build model input batches
            energy: name of energy property in provided model.
            forces: name of forces in provided model.
            stress: name of stress property in provided model.
            energy_units: energy units used by model
            forces_units: force units used by model
            stress_units: stress units used by model
        """
        # Setup directory
        self.working_dir = working_dir
        if not os.path.exists(self.working_dir):
            os.makedirs(self.working_dir)
        # Load the molecule
        self.molecule = read(molecule_path)
        # Set up calculator
        calculator = SpkCalculator(
            model,
            converter=converter,
            energy=energy,
            forces=forces,
            stress=stress,
            energy_units=energy_units,
            forces_units=forces_units,
            stress_units=stress_units,
        )
        # NOTE(review): Atoms.set_calculator is deprecated in newer ASE in
        # favour of `self.molecule.calc = calculator`; kept for compatibility.
        self.molecule.set_calculator(calculator)
        self.dynamics = None

    def save_molecule(self, name: str, file_format: str = "xyz", append: bool = False):
        """
        Save the current molecular geometry.

        Args:
            name: Name of save-file.
            file_format: Format to store geometry (default xyz).
            append: If set to true, geometry is added to end of file (default False).
        """
        molecule_path = os.path.join(
            self.working_dir, "{:s}.{:s}".format(name, file_format)
        )
        write(molecule_path, self.molecule, format=file_format, append=append)

    def calculate_single_point(self):
        """
        Perform a single point computation of the energies and forces and
        store them to the working directory. The format used is the extended
        xyz format. This functionality is mainly intended to be used for
        interfaces.
        """
        energy = self.molecule.get_potential_energy()
        forces = self.molecule.get_forces()
        # Stash results on the Atoms object so they end up in the output file.
        self.molecule.energy = energy
        self.molecule.forces = forces
        self.save_molecule("single_point", file_format="xyz")

    def init_md(
        self,
        name: str,
        time_step: float = 0.5,
        temp_init: float = 300,
        temp_bath: Optional[float] = None,
        reset: bool = False,
        interval: int = 1,
    ):
        """
        Initialize an ase molecular dynamics trajectory. The logfile needs to
        be specified, so that old trajectories are not overwritten. This
        functionality can be used to subsequently carry out equilibration and
        production.

        Args:
            name: Basic name of logfile and trajectory
            time_step: Time step in fs (default=0.5)
            temp_init: Initial temperature of the system in K (default is 300)
            temp_bath: Carry out Langevin NVT dynamics at the specified
                temperature. If set to None, NVE dynamics are performed
                instead (default=None)
            reset: Whether dynamics should be restarted with new initial
                conditions (default=False)
            interval: Data is stored every interval steps (default=1)
        """
        # If a previous dynamics run has been performed, don't reinitialize
        # velocities unless explicitly requested via reset=True
        if self.dynamics is None or reset:
            self._init_velocities(temp_init=temp_init)
        # Set up dynamics: NVE (Velocity Verlet) or Langevin NVT thermostat.
        if temp_bath is None:
            self.dynamics = VelocityVerlet(self.molecule, time_step * units.fs)
        else:
            self.dynamics = Langevin(
                self.molecule,
                time_step * units.fs,
                temp_bath * units.kB,
                1.0 / (100.0 * units.fs),
            )
        # Create monitors for logfile and a trajectory file
        logfile = os.path.join(self.working_dir, "{:s}.log".format(name))
        trajfile = os.path.join(self.working_dir, "{:s}.traj".format(name))
        logger = MDLogger(
            self.dynamics,
            self.molecule,
            logfile,
            stress=False,
            peratom=False,
            header=True,
            mode="a",
        )
        trajectory = Trajectory(trajfile, "w", self.molecule)
        # Attach monitors to trajectory
        self.dynamics.attach(logger, interval=interval)
        self.dynamics.attach(trajectory.write, interval=interval)

    def _init_velocities(
        self,
        temp_init: float = 300,
        remove_translation: bool = True,
        remove_rotation: bool = True,
    ):
        """
        Initialize velocities for molecular dynamics

        Args:
            temp_init: Initial temperature in Kelvin (default 300)
            remove_translation: Remove translation components of velocity (default True)
            remove_rotation: Remove rotation components of velocity (default True)
        """
        MaxwellBoltzmannDistribution(self.molecule, temp_init * units.kB)
        if remove_translation:
            Stationary(self.molecule)
        if remove_rotation:
            ZeroRotation(self.molecule)

    def run_md(self, steps: int):
        """
        Perform a molecular dynamics simulation using the settings specified
        upon initializing the class.

        Args:
            steps: Number of simulation steps performed

        Raises:
            AttributeError: if init_md has not been called first.
        """
        if not self.dynamics:
            # BUG FIX: the message previously pointed at a nonexistent
            # 'setup_md' function; the initializer is init_md.
            raise AttributeError(
                "Dynamics need to be initialized using the 'init_md' function"
            )
        self.dynamics.run(steps)

    def optimize(self, fmax: float = 1.0e-2, steps: int = 1000):
        """
        Optimize a molecular geometry using the Quasi Newton optimizer in ase
        (BFGS + line search)

        Args:
            fmax: Maximum residual force change (default 1.e-2)
            steps: Maximum number of steps (default 1000)
        """
        name = "optimization"
        optimize_file = os.path.join(self.working_dir, name)
        optimizer = QuasiNewton(
            self.molecule,
            trajectory="{:s}.traj".format(optimize_file),
            restart="{:s}.pkl".format(optimize_file),
        )
        optimizer.run(fmax, steps)
        # Save final geometry in xyz format
        self.save_molecule(name)

    def compute_normal_modes(self, write_jmol: bool = True):
        """
        Use ase calculator to compute numerical frequencies for the molecule

        Args:
            write_jmol: Write frequencies to input file for visualization in jmol (default=True)
        """
        freq_file = os.path.join(self.working_dir, "normal_modes")
        # Compute frequencies
        frequencies = Vibrations(self.molecule, name=freq_file)
        frequencies.run()
        # Print a summary
        frequencies.summary()
        # Write jmol file if requested
        if write_jmol:
            frequencies.write_jmol()
|
import struct
import time
from enum import Enum
from app.connector import BasicConnector
from app.exceptions import InvalidCommandError, InvalidChecksumError, DeviceAuthError, InvalidResponseError, \
ReadFailedError, AdbCommandFailureException, InterleavedDataError
# Maximum amount of data in an ADB packet.
MAX_ADB_DATA = 4096
# ADB protocol version.
VERSION = 0x01000000
TIMEOUT_CODE = -7
class Auth(Enum):
    """AUTH message sub-type identifiers used during the ADB handshake."""
    TOKEN = 1  # device -> host: random token the host must sign
    SIGNATURE = 2  # host -> device: token signed with a private key
    RSA_PUBLIC_KEY = 3  # host -> device: public key offered for confirmation
class DataCommand(Enum):
    """Wire-level ADB command identifiers (4 ASCII bytes each)."""
    SYNC = b'SYNC'
    CNXN = b'CNXN'
    AUTH = b'AUTH'
    OPEN = b'OPEN'
    OKAY = b'OKAY'
    CLOSE = b'CLSE'
    WRITE = b'WRTE'
    FAIL = b'FAIL'

    @classmethod
    def get_all_command(cls):
        """Return every command's wire value, in declaration order.

        Iterating the Enum yields members in definition order, which matches
        the fragile ``_value2member_map_`` introspection this replaced.
        """
        return [member.value for member in cls]
class AndroidDebugBridgeProtocol:
    """Stream-level ADB protocol over an established connection.

    Wraps one (local_id, remote_id) ADB stream and implements the
    WRITE/OKAY/CLSE handshakes on top of AndroidDebugBridgeMessage.
    """

    def __init__(self, connector, local_id, remote_id, timeout_ms):
        self.connector = connector
        self.local_id = local_id
        self.remote_id = remote_id
        self.timeout_ms = timeout_ms

    def __send(self, command, arg0, arg1, data=b''):
        # Pack and transmit a single ADB message on this stream.
        message = AndroidDebugBridgeMessage(command, arg0, arg1, data)
        message.send_packed_message(self.connector, self.timeout_ms)

    def write(self, data):
        """Send `data` on the stream and wait for the peer's OKAY ack.

        Returns:
            The number of bytes written.

        Raises:
            AdbCommandFailureException: on a FAIL response.
            InvalidCommandError: on any other unexpected response.
        """
        self.__send(DataCommand.WRITE.value, arg0=self.local_id, arg1=self.remote_id, data=data)
        cmd, okay_data = self.read_until_cmd_is(DataCommand.OKAY)
        if cmd != DataCommand.OKAY.value:
            if cmd == DataCommand.FAIL.value:
                raise AdbCommandFailureException(
                    'Command failed.', okay_data)
            raise InvalidCommandError(
                'Expected an OKAY in response to a WRITE, got %s (%s)',
                cmd, okay_data)
        return len(data)

    def read_until_conn_close(self):
        """Yield WRITE payloads until the peer closes the stream."""
        while True:
            cmd, data = self.read_until_cmd_is(DataCommand.CLOSE, DataCommand.WRITE)
            if cmd == DataCommand.CLOSE.value:
                # Acknowledge the close and stop iterating.
                self.__send(DataCommand.CLOSE.value, arg0=self.local_id, arg1=self.remote_id)
                break
            if cmd != DataCommand.WRITE.value:
                if cmd == DataCommand.FAIL.value:
                    raise AdbCommandFailureException(
                        'Command failed.', data)
                raise InvalidCommandError('Expected a WRITE or a CLOSE, got %s (%s)',
                                          cmd, data)
            yield data

    def send_ok(self):
        # Ack the most recent WRITE from the peer.
        self.__send(DataCommand.OKAY.value, arg0=self.local_id, arg1=self.remote_id)

    def read_until_cmd_is(self, *command: DataCommand):
        """Read messages until one of `command` arrives; return (cmd, data).

        WRITE packets are acknowledged automatically via send_ok().
        """
        cmd, remote_id, local_id, data = AndroidDebugBridgeMessage.read_from_connector(
            self.connector, [cmd.value for cmd in command], self.timeout_ms)
        # An id of 0 is treated as "unspecified" and not validated.
        if local_id != 0 and self.local_id != local_id:
            raise InterleavedDataError("We don't support multiple streams...")
        if remote_id != 0 and self.remote_id != remote_id:
            raise InvalidResponseError(
                'Incorrect remote id, expected %s got %s' % (
                    self.remote_id, remote_id))
        # Ack write packets.
        if cmd == DataCommand.WRITE.value:
            self.send_ok()
        return cmd, data

    def close(self):
        """Close the stream and wait for the peer's CLSE acknowledgement."""
        self.__send(DataCommand.CLOSE.value, arg0=self.local_id, arg1=self.remote_id)
        cmd, data = self.read_until_cmd_is(DataCommand.CLOSE)
        if cmd != DataCommand.CLOSE.value:
            if cmd == DataCommand.FAIL.value:
                raise AdbCommandFailureException('Command failed.', data)
            raise InvalidCommandError(f'Expected a {DataCommand.CLOSE} response, got %s (%s)',
                                      cmd, data)
def make_command():
    """Build bidirectional maps between ADB command ids and wire integers.

    Each 4-byte command id is encoded as its little-endian 32-bit integer.

    Returns:
        (id_to_wire, wire_to_id) dictionaries.
    """
    id_to_wire = {}
    for cmd_id in DataCommand.get_all_command():
        # Equivalent to summing byte << (8 * index) over the command bytes.
        id_to_wire[cmd_id] = int.from_bytes(cmd_id, 'little')
    wire_to_id = {wire: cmd_id for cmd_id, wire in id_to_wire.items()}
    return id_to_wire, wire_to_id
class AndroidDebugBridgeMessage:
    """A single ADB transport message: 24-byte little-endian header + payload.

    Header fields: command, arg0, arg1, payload length, payload checksum and
    magic (command XOR 0xFFFFFFFF).
    """

    _PACK_FORMAT_ = b'<6I'
    # command-bytes -> wire integer, and the reverse map, shared by all instances.
    commands, constants = make_command()

    def __init__(self, command=None, arg0=None, arg1=None, data=b''):
        self.command = self.commands[command]
        self.magic = self.command ^ 0xFFFFFFFF
        self.arg0 = arg0
        self.arg1 = arg1
        self.data = data

    @staticmethod
    def calc_check_sum(data):
        # The ADB checksum is simply all byte values summed, truncated to 32 bits.
        if isinstance(data, (bytes, bytearray)):
            total = sum(data)
        else:
            # Unicode string: sum the code points instead.
            total = sum(map(ord, data))
        return total & 0xFFFFFFFF

    @property
    def checksum(self):
        # Checksum of the current payload, recomputed on access.
        return self.calc_check_sum(self.data)

    def pack(self):
        """Serialize the 24-byte message header."""
        return struct.pack(self._PACK_FORMAT_, self.command, self.arg0, self.arg1,
                           len(self.data), self.checksum, self.magic)

    @classmethod
    def unpack(cls, message):
        """Parse a 24-byte header into (cmd, arg0, arg1, data_length, data_checksum)."""
        try:
            cmd, arg0, arg1, data_length, data_checksum, unused_magic = struct.unpack(
                cls._PACK_FORMAT_, message)
        except struct.error as e:
            raise ValueError('Unable to unpack ADB command.', cls._PACK_FORMAT_, message, e)
        return cmd, arg0, arg1, data_length, data_checksum

    def send_packed_message(self, conn: BasicConnector, timeout=None):
        # Header first, then the payload in a second write.
        conn.write(self.pack(), timeout)
        conn.write(self.data, timeout)

    @classmethod
    def read_from_connector(cls, conn: BasicConnector, expected_cmds, timeout_ms=None, total_timeout_ms=None):
        """Read messages until one of `expected_cmds` arrives.

        Returns:
            (command_bytes, arg0, arg1, data); the payload checksum is verified.

        Raises:
            InvalidCommandError: on an unknown command or overall timeout.
            InvalidChecksumError: if the payload checksum does not match.
        """
        total_timeout_ms = conn.timeout_second(total_timeout_ms)
        start = time.time()
        while True:
            msg = conn.read(24, timeout_ms)
            cmd, arg0, arg1, data_length, data_checksum = cls.unpack(msg)
            command = cls.constants.get(cmd)
            if not command:
                raise InvalidCommandError(
                    'Unknown command: %x' % cmd, cmd, (arg0, arg1))
            if command in expected_cmds:
                break
            # NOTE(review): total_timeout_ms is converted via timeout_second()
            # yet compared against seconds here -- confirm BasicConnector units.
            if time.time() - start > total_timeout_ms:
                raise InvalidCommandError(
                    'Never got one of the expected responses (%s)' % expected_cmds,
                    cmd, (timeout_ms, total_timeout_ms))
        if data_length > 0:
            data = bytearray()
            # Short reads are possible; keep reading until the payload is complete.
            while data_length > 0:
                temp = conn.read(data_length, timeout_ms)
                if len(temp) != data_length:
                    print(
                        "Data_length {} does not match actual number of bytes read: {}".format(data_length, len(temp)))
                data += temp
                data_length -= len(temp)
            actual_checksum = cls.calc_check_sum(data)
            if actual_checksum != data_checksum:
                raise InvalidChecksumError(
                    'Received checksum %s != %s', (actual_checksum, data_checksum))
        else:
            data = b''
        return command, arg0, arg1, bytes(data)

    @classmethod
    def Connect(cls, conn, banner=b'notadb', rsa_keys=None, auth_timeout_ms=100):
        """Perform the CNXN/AUTH handshake; return the device banner.

        Raises:
            DeviceAuthError: when authentication is required but no usable
                key is available, or the key must be confirmed on the device.
            InvalidResponseError: on an unexpected AUTH sub-type.
        """
        msg = cls(
            command=DataCommand.CNXN.value, arg0=VERSION, arg1=MAX_ADB_DATA,
            data=b'host::%s\0' % banner)
        msg.send_packed_message(conn)
        cmd, arg0, arg1, banner = cls.read_from_connector(conn, [b'CNXN', b'AUTH'])
        if cmd == DataCommand.AUTH.value:
            if not rsa_keys:
                raise DeviceAuthError(
                    'Device authentication required, no keys available.')
            # Try each key by signing the device's token.
            for rsa_key in rsa_keys:
                if arg0 != Auth.TOKEN.value:
                    raise InvalidResponseError(
                        'Unknown AUTH response: %s %s %s' % (arg0, arg1, banner))
                signed_token = rsa_key.Sign(banner)
                msg = cls(
                    command=DataCommand.AUTH.value, arg0=Auth.SIGNATURE.value, arg1=0, data=signed_token)
                msg.send_packed_message(conn)
                cmd, arg0, unused_arg1, banner = cls.read_from_connector(conn, [DataCommand.CNXN.value,
                                                                                DataCommand.AUTH.value])
                if cmd == DataCommand.CNXN.value:
                    return banner
            # No key was accepted: offer our public key for manual confirmation.
            # BUG FIX: was `Auth.RSA_PUBLIC_KEY.valu` (AttributeError at runtime).
            msg = cls(
                command=DataCommand.AUTH.value, arg0=Auth.RSA_PUBLIC_KEY.value, arg1=0,
                data=rsa_keys[0].GetPublicKey() + b'\0')
            msg.send_packed_message(conn)
            try:
                cmd, arg0, unused_arg1, banner = cls.read_from_connector(
                    conn, [DataCommand.CNXN.value], timeout_ms=auth_timeout_ms)
            except ReadFailedError as e:
                if e.usb_error.value == TIMEOUT_CODE:  # Timeout
                    raise DeviceAuthError(
                        'Accept auth key on device, then retry.')
                raise
            return banner
        return banner

    @classmethod
    def open_connection(cls, conn, destination, timeout_ms=None):
        """Open an ADB stream to `destination`.

        Returns:
            An AndroidDebugBridgeProtocol instance, or None if the peer
            answered the OPEN with two CLSE messages.
        """
        local_id = 1
        msg = cls(
            command=DataCommand.OPEN.value, arg0=local_id, arg1=0,
            data=destination + b'\0')
        msg.send_packed_message(conn, timeout_ms)
        cmd, remote_id, their_local_id, _ = cls.read_from_connector(conn,
                                                                    [DataCommand.CLOSE.value, DataCommand.OKAY.value],
                                                                    timeout_ms=timeout_ms)
        if local_id != their_local_id:
            raise InvalidResponseError(
                'Expected the local_id to be {}, got {}'.format(local_id, their_local_id))
        if cmd == DataCommand.CLOSE.value:
            # A CLSE may arrive before the OKAY; read one more message before
            # concluding the open was rejected.
            cmd, remote_id, their_local_id, _ = cls.read_from_connector(conn,
                                                                        [DataCommand.CLOSE.value,
                                                                         DataCommand.OKAY.value],
                                                                        timeout_ms=timeout_ms)
            if cmd == DataCommand.CLOSE.value:
                return None
        if cmd != DataCommand.OKAY.value:
            raise InvalidCommandError('Expected a ready response, got {}'.format(cmd),
                                      cmd, (remote_id, their_local_id))
        return AndroidDebugBridgeProtocol(conn, local_id, remote_id, timeout_ms)

    @classmethod
    def streaming_command(cls, usb, service, command='', timeout_ms=None):
        """Open `service:command` and yield decoded output chunks until close."""
        if not isinstance(command, bytes):
            command = command.encode('utf8')
        connection = cls.open_connection(
            usb, destination=b'%s:%s' % (service, command),
            timeout_ms=timeout_ms)
        # BUG FIX: was `connection.ReadUntilClose()`, a method that does not
        # exist; the protocol object exposes `read_until_conn_close()`.
        for data in connection.read_until_conn_close():
            yield data.decode('utf8')
if __name__ == '__main__':
    # Smoke test: dump every command code known to DataCommand.
    print(DataCommand.get_all_command())
|
<reponame>theshaodi/algorithm004-05
#@author:leacoder
#@des: 递归 电话号码的字母组合
"""
递归处理digits字符串中的每个字符
digits的每个字符又有多种情况(多个字母需要处理)
递归终止条件就是 digits字符串中的每个字符 已处理结束
"""
class Solution:
    """Letter combinations of a phone number (LeetCode 17) — recursive."""

    # Digit -> letters mapping of a standard phone keypad.
    phone = {'2': ['a', 'b', 'c'],
             '3': ['d', 'e', 'f'],
             '4': ['g', 'h', 'i'],
             '5': ['j', 'k', 'l'],
             '6': ['m', 'n', 'o'],
             '7': ['p', 'q', 'r', 's'],
             '8': ['t', 'u', 'v'],
             '9': ['w', 'x', 'y', 'z']}

    # String annotation so the class defines cleanly without a
    # `from typing import List` import (the file never imports it).
    def letterCombinations(self, digits: str) -> 'List[str]':
        """Return every letter combination *digits* could represent.

        Empty input yields an empty list.  Results accumulate in
        ``self.output``, which is reset per call; the previous shared
        mutable class attribute `output` has been removed so instances
        (and the class itself) no longer share accumulator state.
        """
        self.output = []
        if digits:
            # Start with an empty prefix and the whole digit string left.
            self.recursive("", digits)
        return self.output

    def recursive(self, combination, next_digits):
        """Depth-first expansion.

        `combination` is the prefix built so far; `next_digits` the
        digits still to expand.  When no digits remain the finished
        combination is recorded.
        """
        if len(next_digits) == 0:
            self.output.append(combination)
            return
        for letter in self.phone[next_digits[0]]:
            self.recursive(combination + letter, next_digits[1:])
#@author:leacoder
#@des: 迭代 电话号码的字母组合
"""
迭代digits字符串中每个字符num进行处理
遍历 phone[num] 中每个字母并加入现有结果中得到新的结果
digits字符串中num字符处理结束(phone[num] 中每个字母处理完成)更新现有结果
"""
class Solution:
    """Letter combinations of a phone number (LeetCode 17) — iterative."""

    phone = {'2': ['a', 'b', 'c'],
             '3': ['d', 'e', 'f'],
             '4': ['g', 'h', 'i'],
             '5': ['j', 'k', 'l'],
             '6': ['m', 'n', 'o'],
             '7': ['p', 'q', 'r', 's'],
             '8': ['t', 'u', 'v'],
             '9': ['w', 'x', 'y', 'z']}

    def letterCombinations(self, digits: str) -> List[str]:
        """Breadth-first build-up: after processing k digits, `output`
        holds every combination of the first k digits' letters."""
        if not digits:
            return []
        output = [""]
        for digit in digits:
            # The new letter is appended on the right of each existing
            # prefix; the letter loop stays outermost to preserve the
            # original result ordering.
            output = [prefix + letter
                      for letter in self.phone[digit]
                      for prefix in output]
        return output
"""
把代码块
for num in digits: # 对digits字符串中每个字符进行处理
tmplist=[] # 中间变量
for letter in self.phone[num]: # 遍历 phone[num] 每个字母进行处理
for tmp in output: # 遍历output向输出中添加新增的字母
# 注意 tmp + letter,tmp在前 因为新增的字母要加在后面
tmplist.append(tmp + letter)
output = tmplist # digits字符串中num字符处理结束(phone[num] 中每个字母处理完成)更新现有结果
可以精简为:
for num in digits:
output = [tmp + letter for letter in self.phone[num] for tmp in output]
""" |
"""Steganography, by <NAME>
2020-10-07 v1.0
"""
import collections, logging, math
from random import randint
from PIL import Image # pip install Pillow
def bits2bytes(bits):
    """Pack an iterator of bits (MSB first) into byte values.

    A trailing incomplete byte is silently discarded.
    """
    while True:
        value = 0
        for _ in range(8):
            try:
                bit = next(bits)
            except StopIteration:
                return
            value = (value << 1) | bit
        yield value
def bytes2bits(byts):
    """Unpack each byte into its 8 bits, most significant bit first."""
    for value in byts:
        for shift in range(7, -1, -1):
            yield (value >> shift) & 1
def get_lowest_bits(img):
    """Yield the least significant bit of every band value, pixel by pixel."""
    num_bands = len(img.getbands())
    for pixel in img.getdata():
        for band in range(num_bands):
            yield pixel[band] & 1
def zeroes():
    """Infinite stream of 0 bits (filler for unused image capacity)."""
    zero_bit = 0
    while True:
        yield zero_bit
def random_bits():
    """Infinite stream of pseudo-random 0/1 bits (not crypto quality)."""
    while True:
        bit = randint(0, 1)
        yield bit
def rand(seed=123456789):
    "https://stackoverflow.com/a/3062783/819417"
    # One step of a glibc-style linear congruential generator; the caller
    # must feed the returned value back in as the next seed to continue
    # the sequence.
    multiplier = 1103515245
    increment = 12345
    modulus = 2 ** 31
    return (multiplier * seed + increment) % modulus
def set_lowest_bits(img, bits=None, filler=None):
    """Overwrite the least significant bit of every band value in *img*.

    Args:
        img: PIL-style image (uses ``.size``, ``.getbands()``, ``.load()``);
            modified in place and also returned.
        bits: sliceable sequence of 0/1 values to embed, row-major,
            band by band.
        filler: optional zero-arg callable returning a (typically infinite)
            bit iterator, used to keep overwriting once `bits` runs out.

    Returns:
        The same image object, for chaining.
    """
    if bits is None:  # fixed: was `bits == None`
        bits = []
    logging.debug(f"Setting bits: {bits[:32]}...")
    bits = iter(bits)
    w, h = img.size
    band_range = range(len(img.getbands()))
    pixels = img.load()
    exhausted = False
    for y in range(h):
        for x in range(w):
            pixel = list(pixels[x, y])
            for i in band_range:
                try:
                    bit = next(bits)
                except StopIteration:
                    if filler:
                        # Continue with filler bits so the remaining low
                        # bits don't leak the cover's original content.
                        bits = filler()
                    else:
                        exhausted = True
                    break
                if bit:
                    pixel[i] |= 1
                else:
                    pixel[i] &= ~1
            if y < 1 and x < 32:
                # Only trace the first few pixels of the first row.
                logging.debug(f"Setting {pixels[x, y]} to {pixel}...")
            pixels[x, y] = tuple(pixel)
            if exhausted:
                return img
    return img
def hide(data, cover=None, filler=None):
    """Hide *data* in the low bits of *cover* (generated when omitted).

    The payload is prefixed by a length header of
    ``ceil(log2(capacity_in_bytes))`` bits so reveal() knows how many
    bytes to read back.

    Args:
        data: bytes to embed.
        cover: optional PIL image to embed into; when falsy, a white RGB
            image with a 4:3 aspect ratio, just big enough, is generated.
        filler: optional zero-arg callable yielding filler bits for the
            unused capacity (forwarded to set_lowest_bits).

    Returns:
        The cover image with the payload embedded in its lowest bits.

    Raises:
        ValueError: when the payload does not fit the cover or the header.
    """
    data_length = len(data)
    cover_mode = "RGB"
    if not cover:
        logging.info("Generating 4:3 image to hold data...")
        w = 0
        h = 0
        while True:
            w += 4
            h += 3
            max_bytes = w * h * len(cover_mode) // 8
            # NOTE(review): the header cost (bits) is added to a byte
            # count here; that only overestimates the space needed, so
            # the generated cover is never too small.
            if max_bytes > data_length + math.ceil(math.log2(max_bytes)):
                break
        logging.info(f"{w}x{h}")
        cover = Image.new(cover_mode, (w, h), color=(255, 255, 255, 0))
    max_bits = cover.size[0] * cover.size[1] * len(cover.mode)
    header_size = math.ceil(math.log2(max_bits // 8))
    max_bits -= header_size
    max_bytes = max_bits // 8
    logging.info(
        f"Cover has {max_bits:,} bits / {max_bytes:,} bytes available. ({header_size}-bit header)"
    )
    logging.info(
        f"Message has {data_length*8:,} bits / {data_length:,} bytes: {data[:32]}... {[c for c in data[:32]]}"
    )
    if data_length * 8 > max_bits:
        raise ValueError(
            f"Message too long for cover. {data_length*8:,} > {max_bits:,} bits."
        )
    # An n-bit header encodes lengths 0 .. 2**n - 1 only, so a length equal
    # to 2**n must be rejected as well; the previous `>` check let it
    # through and format() then produced an oversized, corrupting header.
    if data_length >= (2 ** header_size):
        raise ValueError(
            f"Message too long for header. {data_length:,} >= {2**header_size:,} bytes."
        )
    bit_stream = bytes2bits(data)
    bits = list(bit_stream)
    logging.debug(f"{len(bits)} data bits: {bits[:100]}...")
    length_header = [int(b) for b in format(data_length, f"0{header_size}b")]
    logging.debug(
        f"Add {header_size}-bit header to specify length of data. {length_header}"
    )
    bits = length_header + bits
    cover = set_lowest_bits(cover, bits, filler)
    logging.info("Data hidden.")
    return cover
def reveal(cover):
    """Extract and return the bytes hidden in *cover*'s lowest bits.

    Mirrors hide(): reads a ``ceil(log2(capacity_in_bytes))``-bit length
    header first, then that many payload bytes.
    """
    capacity_bits = cover.size[0] * cover.size[1] * len(cover.mode)
    header_size = math.ceil(math.log2(capacity_bits // 8))
    logging.info("Recovering bits.")
    bits = list(get_lowest_bits(cover))
    logging.debug(f"{len(bits):,} recovered bits: {bits[:32]}...{bits[-32:]}")
    header_bits = bits[:header_size]
    header_string = "".join(str(b) for b in header_bits)
    logging.debug(
        f"{header_size}-bit header: {header_string} ({int(header_string, 2):,})"
    )
    data_length = int(header_string, 2)
    logging.info(f"Data length: {data_length:,}")
    payload = bits[header_size : header_size + data_length * 8]
    data = list(bits2bytes(iter(payload)))
    logging.debug(
        f"{len(data):,} recovered bytes: {data[:32]}... {bytes(data[:32])}..."
    )
    return bytes(data)
if __name__ == "__main__":
    import io, os, sys

    args = sys.argv[1:]
    try:
        # Logging verbosity flags.
        if "--debug" in args or "-d" in args:
            logging.basicConfig(level=logging.DEBUG)
        elif "--verbose" in args or "-v" in args:
            logging.basicConfig(level=logging.INFO)
        else:
            logging.basicConfig(level=logging.WARNING)
        if "--help" in args or "-h" in args:
            raise IndexError  # reuse the usage/exit path below
        # Option parsing; a flag given without its value argument raises
        # IndexError and falls through to the usage message.
        data_file = None if not "-i" in args else args[args.index("-i") + 1]
        filler = (
            zeroes
            if "--filler=zeroes" in args
            else random_bits
            if "--filler=random" in args
            else None
        )
        cover_file = None if not "-c" in args else args[args.index("-c") + 1]
        output_file = None if not "-o" in args else args[args.index("-o") + 1]
    except IndexError:
        print(
            "Usage: [-i input] [--filler=zeroes|random] [-c cover] [-o output] [--reveal -r] [--verbose | -v] [--debug | -d] [--help | -h]",
            file=sys.stderr,
        )
        sys.exit(1)
    ### undocumented test parameters
    if "--test" in args:
        logging.basicConfig(level=logging.INFO)
        if 0:  # disabled: one-off generation of the "Red pill" test image
            from PIL import ImageDraw, ImageFont

            base = Image.new("RGBA", (320, 240), color="black")
            overlay = Image.new("RGBA", base.size, (255, 255, 255, 0))
            text = "Red pill"
            font = ImageFont.truetype("arial.ttf", 50)
            draw = ImageDraw.Draw(overlay)
            text_w, text_h = draw.textsize(text, font=font)
            draw.text(
                (base.size[0] / 2 - text_w / 2, base.size[1] / 2 - text_h / 2),
                "Red pill",
                font=font,
                fill=(255, 0, 0, 200),
            )
            out = Image.alpha_composite(base, overlay)
            out.save("test/redpill.webp", lossless=True)
            sys.exit(0)
        # Round-trip smoke test: hide, save, reload, reveal.
        cover = hide(
            b"The Matrix has you.",
            Image.new("RGBA", (40, 20), color="white"),
        )
        cover.save(
            "temp.webp", lossless=True
        )  # FIXME: Pillow WebP RGBA bug. Works fine with RGB or PNG.
        cover = Image.open("temp.webp")
        print(reveal(cover))
        sys.exit(0)
    if "--pytest" in args:
        import pytest

        sys.exit(pytest.main(sys.argv[2:]))
    ###
    if "--reveal" in args or "-r" in args:
        # Extraction mode: read the stego image from a file or stdin.
        if data_file:
            secret = reveal(Image.open(data_file))
        else:
            fp = io.BytesIO()
            fp.write(sys.stdin.buffer.read())
            secret = reveal(Image.open(fp))
        if output_file:
            open(output_file, "wb").write(secret)
        else:
            os.write(1, secret)  # raw payload bytes to stdout
    else:
        # Embedding mode: read the payload from a file or stdin.
        try:
            if data_file:
                secret = open(data_file, "rb").read()
            else:
                secret = sys.stdin.buffer.read()
            image = hide(
                secret, None if not cover_file else Image.open(cover_file), filler
            )
        except ValueError as e:
            print(
                e,
                file=sys.stderr,
            )
            sys.exit(2)
        if output_file:
            image.save(output_file, lossless=True)
        else:
            # Write data to stdout for redirection.
            out = io.BytesIO()
            image.save(out, format="WEBP", lossless=True)
            out.seek(0)
            os.write(1, out.read())
|
import logging
import pytest
from requests import Request
from starlette.applications import Starlette
from starlette.endpoints import HTTPEndpoint
from starlette.middleware import Middleware
from starlette.responses import PlainTextResponse
from starlette.testclient import TestClient
import layab.starlette
@pytest.fixture
def client():
    """Starlette test client with LoggingMiddleware installed.

    Routes: /logging and /logging_failure (GET/POST/PUT/DELETE each), plus
    /skipped, which is excluded from logging via skip_paths.
    """
    app = Starlette(
        middleware=[
            Middleware(layab.starlette.LoggingMiddleware, skip_paths=["/skipped"])
        ]
    )

    @app.route("/logging")
    class Logging(HTTPEndpoint):
        # Every verb succeeds with an empty body.
        def get(self, request: Request):
            return PlainTextResponse("")

        def post(self, request: Request):
            return PlainTextResponse("")

        def put(self, request: Request):
            return PlainTextResponse("")

        def delete(self, request: Request):
            return PlainTextResponse("")

    @app.route("/logging_failure")
    class LoggingFailure(HTTPEndpoint):
        # Every verb raises so the error-logging path is exercised.
        def get(self, request: Request):
            raise Exception("Error message")

        def post(self, request: Request):
            raise Exception("Error message")

        def put(self, request: Request):
            raise Exception("Error message")

        def delete(self, request: Request):
            raise Exception("Error message")

    @app.route("/skipped")
    class Skipped(HTTPEndpoint):
        # Same as /logging, but the middleware must not log these.
        def get(self, request: Request):
            return PlainTextResponse("")

        def post(self, request: Request):
            return PlainTextResponse("")

        def put(self, request: Request):
            return PlainTextResponse("")

        def delete(self, request: Request):
            return PlainTextResponse("")

    # Server exceptions surface as 500 responses instead of being re-raised.
    return TestClient(app, raise_server_exceptions=False)
@pytest.fixture
def mock_uuid(monkeypatch):
    """Pin layab.starlette's uuid module so request ids are deterministic."""

    class _FixedUUID:
        @staticmethod
        def uuid4():
            return "1-2-3-4-5"

    monkeypatch.setattr(layab.starlette, "uuid", _FixedUUID)
def _expected_log(method, path, status, with_body_header):
    """Build the expected middleware log record for one request phase.

    Args:
        method: HTTP verb as logged (upper case).
        path: requested path.
        status: "start", "success" or "error".
        with_body_header: True for verbs where the test client sends a
            content-length header (POST/PUT/DELETE, not GET).
    """
    record = {
        "request_headers.accept": "*/*",
        "request_headers.accept-encoding": "gzip, deflate",
        "request_headers.connection": "keep-alive",
        "request_headers.host": "testserver",
        "request_headers.user-agent": "testclient",
        "request_id": "1-2-3-4-5",
        "request_method": method,
        "request_status": status,
        "request_url.path": path,
    }
    if with_body_header:
        record["request_headers.content-length"] = "0"
    if status == "success":
        record["request_status_code"] = 200
    elif status == "error":
        record.update(
            {
                "error.class": "Exception",
                "error.msg": "Error message",
                "request.data": b"",
            }
        )
    return record


def _check_logged_request(client, caplog, method):
    """Issue a successful /logging request and assert start/success records."""
    caplog.set_level(logging.INFO)
    response = getattr(client, method.lower())("/logging")
    assert response.status_code == 200
    assert response.text == ""
    assert len(caplog.messages) == 2
    has_body = method != "GET"
    assert eval(caplog.messages[0]) == _expected_log(
        method, "/logging", "start", has_body
    )
    end_message = eval(caplog.messages[1])
    # Processing time is non-deterministic: pop() both asserts its
    # presence and excludes it from the comparison.
    end_message.pop("request_processing_time")
    assert end_message == _expected_log(method, "/logging", "success", has_body)


def _check_logged_failure(client, caplog, method):
    """Issue a failing /logging_failure request and assert start/error records."""
    caplog.set_level(logging.INFO)
    response = getattr(client, method.lower())("/logging_failure")
    assert response.status_code == 500
    assert response.text == "Internal Server Error"
    assert len(caplog.messages) == 2
    has_body = method != "GET"
    assert eval(caplog.messages[0]) == _expected_log(
        method, "/logging_failure", "start", has_body
    )
    end_message = eval(caplog.messages[1])
    # Traceback text is environment-dependent: assert presence only.
    end_message.pop("error.traceback")
    assert end_message == _expected_log(method, "/logging_failure", "error", has_body)


def test_log_get_request_details(client, caplog, mock_uuid):
    _check_logged_request(client, caplog, "GET")


def test_log_delete_request_details(client, caplog, mock_uuid):
    _check_logged_request(client, caplog, "DELETE")


def test_log_post_request_details(client, caplog, mock_uuid):
    _check_logged_request(client, caplog, "POST")


def test_log_put_request_details(client, caplog, mock_uuid):
    _check_logged_request(client, caplog, "PUT")


def test_log_get_request_details_on_failure(client, caplog, mock_uuid):
    _check_logged_failure(client, caplog, "GET")


def test_log_delete_request_details_on_failure(client, caplog, mock_uuid):
    _check_logged_failure(client, caplog, "DELETE")


def test_log_post_request_details_on_failure(client, caplog, mock_uuid):
    _check_logged_failure(client, caplog, "POST")


def test_log_put_request_details_on_failure(client, caplog, mock_uuid):
    _check_logged_failure(client, caplog, "PUT")
def _check_request_skipped(client, caplog, method):
    """Request /skipped and assert the middleware logged nothing."""
    caplog.set_level(logging.INFO)
    response = getattr(client, method)("/skipped")
    assert response.status_code == 200
    assert response.text == ""
    assert len(caplog.messages) == 0


def test_skip_log_get_request(client, caplog):
    _check_request_skipped(client, caplog, "get")


def test_skip_log_delete_request(client, caplog):
    _check_request_skipped(client, caplog, "delete")


def test_skip_log_post_request(client, caplog):
    _check_request_skipped(client, caplog, "post")


def test_skip_log_put_request(client, caplog):
    _check_request_skipped(client, caplog, "put")
|
#!/usr/bin/env python
"""Writes records to a configurable number of WARC files."""
import os
import re
import uuid
import Queue
import shutil
import socket
import logging
from datetime import datetime
from hanzo.warctools import WarcRecord
# Module-level logging setup and package version (Python 2 module).
LOGGING_FORMAT="[%(asctime)s] %(levelname)s: %(message)s"
logging.basicConfig(format=LOGGING_FORMAT, level=logging.DEBUG)
logger = logging.getLogger("warcwriterpool")

__version__ = "0.1.2"
def warc_datetime_str(date):
    """Amendedment to warctools' function to fix non-ISO8601 output.

    Drops fractional seconds and any "+HH:MM" offset from isoformat()
    and appends a literal "Z".
    """
    iso = date.isoformat()
    iso = iso.partition(".")[0]  # strip microseconds, if present
    iso = iso.partition("+")[0]  # strip a positive UTC offset, if present
    return iso + "Z"
class WarcWriterPool:
    """Round-robin pool of open WARC files with size-based rotation.

    Python 2 code (uses the Queue module and dict.iteritems).  Open files
    carry an ".open" suffix that is stripped when they are closed/rotated.
    """

    def __init__(self, pool_size=1, gzip=True, prefix="BL", output_dir=".", max_size=1073741824, description=None, write_warcinfo=True):
        # max_size is in bytes (default 1 GiB); files at/past it rotate out.
        self.gzip = gzip
        self.prefix = prefix
        self.output_dir = output_dir
        self.max_size = max_size
        self.pool = Queue.Queue()  # names of WARCs free for writing
        self.warcs = {}  # open file path -> file handle
        self.total = 0  # running counter used to make file names unique
        self.hostname = socket.gethostname()
        self.ip = socket.gethostbyname(socket.gethostname())
        self.description = description
        self.write_warcinfo = write_warcinfo
        self.software = "%s/%s" % (__name__, __version__)
        if gzip:
            self.suffix = ".gz"
        else:
            self.suffix = ""
        logger.debug("Pooling %i WARCs." % pool_size)
        self.add_warcs(pool_size)

    def __enter__(self):
        logger.debug("Entering context.")
        return self

    def write_warcinfo_record(self, warc):
        """Writes the initial warcinfo record."""
        headers = [
            (WarcRecord.TYPE, WarcRecord.WARCINFO),
            (WarcRecord.DATE, warc_datetime_str(datetime.now())),
            (WarcRecord.ID, "<urn:uuid:%s>" % uuid.uuid1()),
        ]
        data = "software=%s\nhostname=%s\nip=%s" % (self.software, self.hostname, self.ip)
        if self.description is not None:
            data += "\ndescription=%s" % self.description
        record = WarcRecord(headers=headers, content=("application/warc-fields", data))
        record.write_to(warc, gzip=self.gzip)
        warc.flush()

    def add_warcs(self, number):
        """Adds a new WARC and rebuilds the Queue."""
        for n in range(number):
            # Timestamp + running total keep names unique; ".open" marks
            # files still being written.
            name = "%s/%s-%s-%s.warc%s.open" % (self.output_dir, self.prefix, datetime.now().strftime("%Y%m%d%H%M%S%f"), self.total, self.suffix)
            self.total += 1
            fh = open(name, "wb")
            if self.write_warcinfo:
                self.write_warcinfo_record(fh)
            self.warcs[name] = fh
            logger.debug("Added %s" % name)
        # Clear the queue under its lock, then refill it outside the lock
        # (Queue.put acquires the same mutex internally).
        with self.pool.mutex:
            self.pool.queue.clear()
        x = [self.pool.put(warc) for warc in self.warcs.keys()]

    def warc_reached_max_size(self, path):
        """Checks whether a given WARC has reached the maximum filesize."""
        stat = os.stat(path)
        if stat.st_size >= self.max_size:
            logger.info("Size limit exceeded for %s" % path)
            self.warcs[path].close()
            # Drop the ".open" marker and replace the file in the pool.
            shutil.move(path, re.sub("\.open$", "", path))
            del self.warcs[path]
            self.add_warcs(1)
            logger.debug("Checked size: %s" % str(self.warcs.keys()))
            return True
        logger.debug("Checked size: %s" % str(self.warcs.keys()))
        return False

    def write_record(self, headers, mime, data):
        """Writes a WARC record.

        Arguments:
        headers -- Array of WARC headers.
        mime -- MIME type of the data.
        data -- the data block.

        """
        record = WarcRecord(headers=headers, content=(mime, data))
        logger.debug("Getting WARC: %s" % str(self.warcs.keys()))
        # Blocks until a WARC is free; a file is written by one caller at
        # a time because it is only returned to the pool afterwards.
        name = self.pool.get()
        logger.debug("Writing to: %s" % name)
        fh = self.warcs[name]
        record.write_to(fh, gzip=self.gzip)
        fh.flush()
        if not self.warc_reached_max_size(name):
            logger.debug("%s undersized; adding back to the pool." % name)
            self.pool.put(name)

    def cleanup(self):
        """Closes any open file handles."""
        for name, fh in self.warcs.iteritems():
            if not fh.closed:
                fh.close()
            if name.endswith(".open"):
                shutil.move(name, re.sub("\.open$", "", name))

    def __exit__(self, exc_type, exc_value, traceback):
        logger.debug("Exiting context.")
        self.cleanup()
|
<gh_stars>0
import re
class Token:
    """One lexed token.

    Consumes its own text from the module-global `code` string and records
    the current global `line`; the global `col` is advanced as a side
    effect of construction.
    """

    def __init__(self, type, match):
        global code
        global col
        # `match` is either the literal matched string or an re.Match.
        if isinstance(match, str):
            end_pos = len(match)
        else:
            end_pos = match.span()[1]
        self.raw_data = code[:end_pos]
        # Identifiers that are reserved words are re-tagged as keywords.
        if self.raw_data in keywords:
            type = 'keyword'
        self.type = type
        self.line = line  # NOTE(review): reads global `line` (not declared global here)
        col += end_pos
        code = code[end_pos:]  # consume the matched text from the input

    def __str__(self):
        return 'token : {} : {}'.format(self.type, self.raw_data)

    __repr__ = __str__
def view_tokens(tokens):
    """Print every token, one per line (debug helper)."""
    for token in tokens:
        print(token)
def macro(code):
    """Expand backtick-delimited preprocessor directives in `code`.

    Supported directives: include, define, undefine, print, putstr, and
    if / end if.  Definitions accumulate in the module-global `defined`
    dict.  Text inside string literals is skipped.

    NOTE(review): this file's original indentation was lost; the nesting
    below is reconstructed and marked where ambiguous.
    """
    global defined
    pl = 0  # scan position within `code`
    while pl < len(code):
        if code[pl] == '`':
            original_pl = pl  # position of the opening backtick
            pl += 1
            mat = ''  # directive text between the backticks
            while pl < len(code) and code[pl] != '`':
                code = code  # no-op, kept verbatim from the original
                mat += code[pl]
                pl += 1
            if mat.startswith('include'):
                # `include <path>` splices in the (macro-expanded) file.
                fopen = open(mat[len('include')+1:])
                new_code = fopen.read()
                fopen.close()
                new_code = macro(new_code)
                code = code[:original_pl]+new_code+code[pl+1:]
            elif mat.startswith('define'):
                to_define = mat.split()[1]
                set_to = mat[8+len(to_define):]
                if set_to != '':
                    defined[to_define] = set_to
                else:
                    defined[to_define] = 'Empty Definition'
                code = code[:original_pl]+code[pl+1:]  # drop the directive
            elif mat.startswith('undefine'):
                to_define = mat.split()[1]
                set_to = mat[7+len(to_define):]  # NOTE(review): unused
                if to_define in defined:
                    del defined[to_define]
                code = code[:original_pl]+code[pl+1:]
            elif mat.startswith('print'):
                # `print *` dumps all definitions, `print NAME` just one.
                print_out = mat.split()[1]
                if print_out == '*':
                    for i in defined:
                        print(i, ':', defined[i])
                elif print_out in defined:
                    print(print_out, ':', defined[print_out])
                else:
                    print(print_out, ': Not defined')
                code = code[:original_pl]+code[pl+1:]
            elif mat.startswith('putstr'):
                print(mat[7:])
                code = code[:original_pl]+code[pl+1:]
            elif mat.startswith('if'):
                perams = mat.split()[1:]
                invert = perams[0] == 'not'
                if invert:
                    perams = perams[1:]
                if perams[0] == 'defined':
                    cond = perams[1] in defined
                elif perams[0] == 'equal':
                    if perams[2] != 'expr':
                        if perams[1] in defined:
                            var_pre = defined[perams[1]]
                        else:
                            var_pre = None
                        if perams[2] in defined:
                            var_post = defined[perams[2]]
                        else:
                            var_post = None
                        cond = var_pre == var_post
                    else:
                        exit()  # NOTE(review): 'equal ... expr' unimplemented
                if invert:
                    cond = not cond
                code = code[:original_pl]+code[pl+1:]  # remove the `if` text
                # Pull everything up to the matching `end if` into `interm`.
                depth = 1
                interm = ''
                while depth > 0:
                    if code[original_pl:].startswith('`if'):
                        depth += 1
                    elif code[original_pl:].startswith('`end if'):
                        depth -= 1
                    interm += code[original_pl+1]
                    code = code[:original_pl]+code[original_pl+1:]
                interm = interm[:-2]
                if cond:
                    interm = macro(interm)  # keep and expand the guarded text
                else:
                    interm = ''
                pre = code[:original_pl]
                post = code[original_pl:]
                code = pre+interm+post[7:]  # skip the "end if`" remainder
            # Rescan from where the directive began.
            # NOTE(review): placement reconstructed; it may originally have
            # belonged only to the `if` branch above.
            pl = original_pl
        elif code[pl] in '\"\'':
            # Skip string literals so their contents are never expanded.
            cpl = code[pl]
            pl += 1
            while pl < len(code) and code[pl] != cpl:
                pl += 1
        pl += 1
    return code
def tokenize(icode):
    """Tokenize source text `icode` after macro expansion.

    Returns a list of Token objects.  Uses module globals shared with
    Token: `code` (unconsumed text), `line`/`col` (position), `keywords`,
    and `defined` (macro definitions, substituted into identifiers).

    NOTE(review): original indentation was lost; nesting reconstructed
    and marked where ambiguous.
    """
    global code
    global line
    global col
    global keywords
    global defined
    defined = {}
    keywords = [
        'int',
        'char',
        'bool',
        'if',
        'while',
        'for',
        'loop',
        'else',
        'elif'
    ]
    code = macro(icode)
    match_regexes = {
        'name': r'[a-zA-Z_]+[a-zA-Z]*',
        'int': r'[0-9]+',
        'float': r'([0-9]+\.[0-9]*)|([0-9]*\.[0-9]+)',
        'semicolon': r';',
    }
    compiled_regexes = {}
    for i in match_regexes:
        compiled_regexes[i] = re.compile(match_regexes[i])
    # Sorted longest-first so e.g. '==' wins over '='.
    operators = ['+', '-', '*', '/', '^', '%', '&', '&&', '||', '<', '>',
                 '<=', '>=', '+=', '-=', '*=', '/=', '=', '==', '!=', '**']
    operators.sort(key=lambda op: 100-len(op))
    brace_types = '{}()[],'
    brace_names = {
        '(': 'l paren',
        ')': 'r paren',
        '{': 'l curly',
        '}': 'r curly',
        '[': 'l list',
        ']': 'r list',
        ',': 'comma'
    }
    return_tokens = []
    line = 1
    col = 1
    while 1:
        # end_flag stays True only while no rule has consumed input.
        end_flag = True
        # Skip whitespace, tracking line/column counters.
        while len(code) > 0 and code[0] in '\n\t ':
            if code[0] == '\n':
                line += 1
                col = 0
            code = code[1:]
            col += 1
        matches = {}
        if len(code) == 0:
            return return_tokens
        if code[0] in '\"\'':
            # String literal: scan to the next quote.
            # NOTE(review): opener/closer are not required to match.
            pl = 1
            while len(code) > pl and code[pl] not in '\"\'':
                pl += 1
            new_token = Token('str', code[:pl+1])
            return_tokens.append(new_token)
            end_flag = True
            continue
        if end_flag and code[0] in brace_types:
            return_tokens.append(Token(brace_names[code[0]], code[0]))
            end_flag = True
            continue
        if end_flag:
            for i in operators:
                if code[:len(i)] == i:
                    return_tokens.append(Token('operator', i))
                    end_flag = False
                    # NOTE(review): this continue targets the for-loop, so
                    # further operators may match the remaining text in
                    # the same pass.
                    continue
        if end_flag:
            for i in compiled_regexes:
                matches[i] = compiled_regexes[i].match(code)
        if end_flag:
            for type in matches:
                if matches[type] is not None:
                    if type != 'name':
                        return_tokens.append(Token(type, matches[type]))
                    else:
                        # Macro-defined identifiers are substituted back
                        # into the source rather than emitted as tokens.
                        span = matches['name'].span()[1]
                        data = code[:span]
                        if data in defined:
                            code = defined[data]+code[span:]
                        else:
                            return_tokens.append(Token(type, matches[type]))
                    end_flag = False
                    # NOTE(review): continues the for-loop; match objects
                    # computed against the pre-consumption text may still
                    # fire afterwards.
                    continue
        if end_flag:
            break
    # Nothing matched the remaining input: unrecoverable lexing failure.
    print("lexer error")
    exit()
|
<gh_stars>0
# coding: utf-8
"""
:mod:`boardgamegeek.guild` - Guild information
==============================================
.. module:: boardgamegeek.guild
:platform: Unix, Windows
:synopsis: classes for storing guild information
.. moduleauthor:: <NAME> <<EMAIL>>
"""
from copy import copy
from .things import Thing
class Guild(Thing):
    """
    Class containing guild information

    Wraps a data dict (via Thing) and keeps the member list as a set.
    """

    def _format(self, log):
        # Write a human-readable dump of all guild fields to `log`.
        log.info("id : {}".format(self.id))
        log.info("name : {}".format(self.name))
        log.info("category : {}".format(self.category))
        log.info("manager : {}".format(self.manager))
        log.info("website : {}".format(self.website))
        log.info("description: {}".format(self.description))
        log.info("country : {}".format(self.country))
        log.info("state : {}".format(self.state))
        log.info("city : {}".format(self.city))
        log.info("address : {}".format(self.address))
        log.info("postal code: {}".format(self.postalcode))
        if self.members:
            log.info("{} members".format(len(self.members)))
            for i in self.members:
                log.info(" - {}".format(i))

    def __init__(self, data):
        # Copy first so popping "members" does not mutate the caller's dict.
        kw = copy(data)
        if "members" in kw:
            self._members = set(kw.pop("members"))
        else:
            self._members = set()
        super(Guild, self).__init__(kw)

    @property
    def country(self):
        """
        :return: country
        :rtype: str
        :return: ``None`` if n/a
        """
        return self._data.get("country")

    @property
    def city(self):
        """
        :return: city
        :rtype: str
        :return: ``None`` if n/a
        """
        return self._data.get("city")

    @property
    def address(self):
        """
        :return: address (both fields concatenated)
        :rtype: str
        :return: ``None`` if n/a
        """
        address = ""
        if self._data.get("addr1"):
            address += self._data.get("addr1")
        if self._data.get("addr2"):
            if len(address):
                address += " "  # delimit the two address fields by a space
            address += self._data.get("addr2")
        return address if len(address) else None

    @property
    def addr1(self):
        """
        :return: first field of the address
        :rtype: str
        :return: ``None`` if n/a
        """
        return self._data.get("addr1")

    @property
    def addr2(self):
        """
        :return: second field of the address
        :rtype: str
        :return: ``None`` if n/a
        """
        return self._data.get("addr2")

    @property
    def postalcode(self):
        """
        :return: postal code
        :rtype: integer
        :return: ``None`` if n/a
        """
        return self._data.get("postalcode")

    @property
    def state(self):
        """
        :return: state or province
        :rtype: str
        :return: ``None`` if n/a
        """
        return self._data.get("stateorprovince")

    @property
    def category(self):
        """
        :return: category
        :rtype: str
        :return: ``None`` if n/a
        """
        return self._data.get("category")

    @property
    def members(self):
        """
        :return: members of the guild
        :rtype: set of str
        """
        return self._members

    @property
    def members_count(self):
        """
        :return: number of members, as reported by the server
        :rtype: int
        """
        return self._data.get("member_count", 0)

    @property
    def description(self):
        """
        :return: description
        :rtype: str
        :return: ``None`` if n/a
        """
        return self._data.get("description")

    @property
    def manager(self):
        """
        :return: manager
        :rtype: str
        :return: ``None`` if n/a
        """
        return self._data.get("manager")

    @property
    def website(self):
        """
        :return: website address
        :rtype: str
        :return: ``None`` if n/a
        """
        return self._data.get("website")

    def add_member(self, member):
        # Duplicate adds are harmless: membership is a set.
        self._members.add(member)

    def __len__(self):
        # A guild's length is its (locally known) member count.
        return len(self._members)

    def __repr__(self):
        return "Guild (id: {})".format(self.id)

    def __iter__(self):
        for member in self._members:
            yield member
|
<reponame>vhn0912/python-snippets
# Demo script: inspecting pathlib.Path components (name, stem, suffix,
# parent(s)), resolving relative paths, and deriving new paths with
# with_name()/with_suffix().  The `# ...` comments show expected output
# (PosixPath results; on Windows the class would be WindowsPath).
import util_make_files
# Creates the temp/ fixture tree used by the examples below.
util_make_files.pathlib_basic()
import pathlib
# --- Path basics: str conversion, name, stem ---
p_file = pathlib.Path('temp/file.txt')
print(p_file)
# temp/file.txt
print(type(p_file))
# <class 'pathlib.PosixPath'>
print(str(p_file))
# temp/file.txt
print(type(str(p_file)))
# <class 'str'>
print(p_file.name)
# file.txt
print(type(p_file.name))
# <class 'str'>
print(p_file.stem)
# file
print(type(p_file.stem))
# <class 'str'>
# A trailing slash is dropped; directories behave like files here.
p_dir = pathlib.Path('temp/dir/')
print(p_dir)
# temp/dir
print(type(p_dir))
# <class 'pathlib.PosixPath'>
print(p_dir.name)
# dir
print(p_dir.stem)
# dir
# --- suffix: includes the leading dot; empty for directories ---
print(p_file.suffix)
# .txt
print(type(p_file.suffix))
# <class 'str'>
print(p_dir.suffix)
#
print(p_file.suffix.lstrip('.'))
# txt
print(p_file.suffix[1:])
# txt
print(p_dir.suffix.lstrip('.'))
#
print(p_dir.suffix[1:])
#
# --- parent and parents: ancestors from nearest to outermost ---
p_sub = pathlib.Path('temp/dir/sub_dir/file2.txt')
print(p_sub)
# temp/dir/sub_dir/file2.txt
print(p_sub.parent)
# temp/dir/sub_dir
print(type(p_sub.parent))
# <class 'pathlib.PosixPath'>
print(p_sub.parents[0])
# temp/dir/sub_dir
print(p_sub.parents[1])
# temp/dir
print(p_sub.parents[2])
# temp
print(p_sub.parents[3])
# .
# print(p_sub.parents[4])
# IndexError: 4
# resolve() makes the path absolute, so more ancestors are available.
p_abs = p_sub.resolve()
print(p_abs)
# /Users/mbp/Documents/my-project/python-snippets/notebook/temp/dir/sub_dir/file2.txt
print(p_abs.parents[4])
# /Users/mbp/Documents/my-project/python-snippets
# print(p_abs.parents[10])
# IndexError: 10
# --- ".." segments are kept literally until resolve() is called ---
p_file = pathlib.Path('temp/file.txt')
print(p_file)
# temp/file.txt
p_file_rel = pathlib.Path('temp/dir/sub_dir/../../file.txt')
print(p_file_rel)
# temp/dir/sub_dir/../../file.txt
print(p_file.samefile(p_file_rel))
# True
print(p_file.parents[0])
# temp
print(p_file.parents[1])
# .
print(p_file_rel.parents[0])
# temp/dir/sub_dir/../..
print(p_file_rel.parents[1])
# temp/dir/sub_dir/..
print(p_file_rel.parents[2])
# temp/dir/sub_dir
print(p_file_rel.parents[3])
# temp/dir
print(p_file_rel.resolve())
# /Users/mbp/Documents/my-project/python-snippets/notebook/temp/file.txt
print(p_file_rel.resolve().relative_to(p_file_rel.cwd()))
# temp/file.txt
# --- Deriving new paths: with_name() and with_suffix() ---
print(p_file.with_name('file_new.txt'))
# temp/file_new.txt
print(type(p_file.with_name('file_new.txt')))
# <class 'pathlib.PosixPath'>
print(p_dir.with_name('dir_new'))
# temp/dir_new
print(p_dir.with_name('file_new.txt'))
# temp/file_new.txt
p_file.with_name('file_new.txt').touch()
print(p_file.with_name('file_new.txt').exists())
# True
print(p_file.with_suffix('.text'))
# temp/file.text
print(type(p_file.with_suffix('.text')))
# <class 'pathlib.PosixPath'>
# print(p_file.with_suffix('text'))
# ValueError: Invalid suffix 'text'
# Clean up the fixture tree created at the top of the script.
import shutil
shutil.rmtree('temp')
# ----------------------------------------------------------------------------
""" Lexemes base definitions for the lexemes module. """
from enum import Enum
# Definitions
# ============================================================================
# Characters
# ----------------------------------------------------------------------------
def char_range(first, last):
    """ Set of characters first..last (inclusive at both ends).

    :param first: single-character string, start of the range.
    :param last: single-character string, end of the range.
    :return: set of all characters with code points in [first, last].
    """
    # Set comprehension instead of set(generator): same result, idiomatic
    # (flake8-comprehensions C401).
    return {chr(c) for c in range(ord(first), ord(last) + 1)}
EOF = chr(26)  # ASCII SUB, used as the end-of-input sentinel.
EOL = "\n"
SPACE = set("\n\r\v\f\t ")  # Whitespace characters.
ESCAPED1 = set("ntvbrfa")  # Escapes naming control characters (\n, \t, ...).
ESCAPED2 = set("\\?'\"()[]{}")  # Escapes standing for the character itself.
ESCAPED = ESCAPED1 | ESCAPED2
OCTAL = char_range("0", "7")
DIGIT = OCTAL | {"8", "9"}
XDIGIT2 = char_range("A", "F")
XDIGIT3 = char_range("a", "f")
XDIGIT = DIGIT | XDIGIT2 | XDIGIT3  # Hexadecimal digits.
IDENTFST1 = char_range("a", "z")
IDENTFST2 = char_range("A", "Z")
IDENTFST3 = {"_"}
IDENTFST = IDENTFST1 | IDENTFST2 | IDENTFST3  # Identifier-start characters.
IDENTRST3 = {"'", "$"}
IDENTRST = IDENTFST | DIGIT | IDENTRST3  # Identifier-continuation characters.
SYMBOLIC = set("%&+-./:=@~`^|*!?<>#")  # Characters of symbolic identifiers.
X = set("xX")  # Hex-literal marker after a leading "0" (see prefix table).
P = set("pP")  # Presumably hex-float exponent markers — not used in this view.
E = set("eE")  # Decimal-float exponent markers (see "0"+E prefix below).
SIGN = set("-+")
FL = set("fFlL")  # Presumably float-literal suffixes — not used in this view.
LU = set("LlUu")  # Presumably int-literal suffixes — not used in this view.
EXTCODE_TAG = set("#$^")
OTHERS = set("()[]{},;")
# Non‑finals
# ----------------------------------------------------------------------------
class NonFin(Enum):
    """ Non-final lexical products.

    Each non-final is later mapped to a final product via NONFINS_TRANSL.
    Member values deliberately mirror the member names.
    """
    ABSPROP = "ABSPROP"
    ABST0YPE = "ABST0YPE"
    ABSTYPE = "ABSTYPE"
    ABSVIEW = "ABSVIEW"
    ABSVIEWT0YPE = "ABSVIEWT0YPE"
    ABSVIEWTYPE = "ABSVIEWTYPE"
    CASE = "CASE"
    CASE_neg = "CASE_neg"
    CASE_pos = "CASE_pos"
    CASTFN = "CASTFN"
    COMMENT_block_c = "COMMENT_block_c"
    COMMENT_block_ml = "COMMENT_block_ml"
    DATAPROP = "DATAPROP"
    DATATYPE = "DATATYPE"
    DATAVIEW = "DATAVIEW"
    DATAVTYPE = "DATAVTYPE"
    DLRDELAY = "DLRDELAY"
    DLREFFMASK_ALL = "DLREFFMASK_ALL"
    DLREFFMASK_EXN = "DLREFFMASK_EXN"
    DLREFFMASK_NTM = "DLREFFMASK_NTM"
    DLREFFMASK_REF = "DLREFFMASK_REF"
    DLREFFMASK_WRT = "DLREFFMASK_WRT"
    DLRLDELAY = "DLRLDELAY"
    DLRLST = "DLRLST"
    DLRLST_T = "DLRLST_T"
    DLRLST_VT = "DLRLST_VT"
    DLRREC = "DLRREC"
    DLRREC_T = "DLRREC_T"
    DLRREC_VT = "DLRREC_VT"
    DLRTUP = "DLRTUP"
    DLRTUP_T = "DLRTUP_T"
    DLRTUP_VT = "DLRTUP_VT"
    DLRVCOPYENV_V = "DLRVCOPYENV_V"
    DLRVCOPYENV_VT = "DLRVCOPYENV_VT"
    FIX = "FIX"
    FIXAT = "FIXAT"
    FN = "FN"
    FNX = "FNX"
    FUN = "FUN"
    IMPLEMENT = "IMPLEMENT"
    IMPLMNT = "IMPLMNT"
    INFIX = "INFIX"
    INFIXL = "INFIXL"
    INFIXR = "INFIXR"
    LAM = "LAM"
    LAMAT = "LAMAT"
    LLAM = "LLAM"
    LLAMAT = "LLAMAT"
    MACDEF = "MACDEF"
    MACRODEF = "MACRODEF"
    POSTFIX = "POSTFIX"
    PRAXI = "PRAXI"
    PREFIX = "PREFIX"
    PRFN = "PRFN"
    PRFUN = "PRFUN"
    PRIMPLMNT = "PRIMPLMNT"
    PROP = "PROP"
    PROPDEF = "PROPDEF"
    PROP_neg = "PROP_neg"
    PROP_pos = "PROP_pos"
    PRVAL = "PRVAL"
    PRVAR = "PRVAR"
    T0YPE = "T0YPE"
    T0YPE_neg = "T0YPE_neg"
    T0YPE_pos = "T0YPE_pos"
    TYPE = "TYPE"
    TYPEDEF = "TYPEDEF"
    TYPE_neg = "TYPE_neg"
    TYPE_pos = "TYPE_pos"
    VAL = "VAL"
    VAL_neg = "VAL_neg"
    VAL_pos = "VAL_pos"
    VAR = "VAR"
    VIEW = "VIEW"
    VIEWDEF = "VIEWDEF"
    VIEW_neg = "VIEW_neg"
    VIEW_pos = "VIEW_pos"
    VIEWT0YPE = "VIEWT0YPE"
    VIEWT0YPE_neg = "VIEWT0YPE_neg"
    VIEWT0YPE_pos = "VIEWT0YPE_pos"
    VIEWTYPE = "VIEWTYPE"
    VIEWTYPEDEF = "VIEWTYPEDEF"
    VIEWTYPE_neg = "VIEWTYPE_neg"
    VIEWTYPE_pos = "VIEWTYPE_pos"
    WITHPROP = "WITHPROP"
    WITHTYPE = "WITHTYPE"
    WITHVIEW = "WITHVIEW"
    WITHVIEWTYPE = "WITHVIEWTYPE"
# Finals
# ----------------------------------------------------------------------------
class Fin(Enum):
    """ Final lexical products (token kinds emitted by the lexer).

    Member values deliberately mirror the member names.  Members with an
    "_OR_IDENT"/"_OR_SIDENT"/"_OR_DIDENT" suffix are lexemes that can be
    either a keyword/operator or an identifier depending on context.
    """
    T_ABSTYPE = "T_ABSTYPE"
    T_ADDRAT = "T_ADDRAT"
    T_ADDR_OR_IDENT = "T_ADDR_OR_IDENT"  # Renamed
    T_AND = "T_AND"
    T_ASSUME = "T_ASSUME"
    T_AS = "T_AS"
    T_ATLBRACE = "T_ATLBRACE"
    T_ATLBRACKET = "T_ATLBRACKET"
    T_ATLPAREN = "T_ATLPAREN"
    T_AT_OR_SIDENT = "T_AT_OR_SIDENT"  # Renamed
    T_BACKSLASH_OR_IDENT = "T_BACKSLASH_OR_IDENT"  # Renamed
    T_BANG_OR_IDENT = "T_BANG_OR_IDENT"  # Renamed
    T_BAR = "T_BAR"
    T_BEGIN = "T_BEGIN"
    T_BQUOTELPAREN = "T_BQUOTELPAREN"
    T_BQUOTE = "T_BQUOTE"
    T_CASE = "T_CASE"
    T_CHAR = "T_CHAR"
    T_CLASSDEC = "T_CLASSDEC"
    T_COLONLT = "T_COLONLT"
    T_COLON = "T_COLON"
    T_COMMALPAREN = "T_COMMALPAREN"
    T_COMMA = "T_COMMA"
    T_COMMENT_block = "T_COMMENT_block"
    T_COMMENT_line = "T_COMMENT_line"
    T_COMMENT_rest = "T_COMMENT_rest"
    T_DATASORT = "T_DATASORT"
    T_DATATYPE = "T_DATATYPE"
    T_DLRARRPSZ = "T_DLRARRPSZ"
    T_DLRBREAK = "T_DLRBREAK"
    T_DLRCONTINUE = "T_DLRCONTINUE"
    T_DLRD2CTYPE = "T_DLRD2CTYPE"
    T_DLRDELAY = "T_DLRDELAY"
    T_DLREFFMASK_ARG = "T_DLREFFMASK_ARG"
    T_DLREFFMASK = "T_DLREFFMASK"
    T_DLREXTERN = "T_DLREXTERN"
    T_DLREXTFCALL = "T_DLREXTFCALL"
    T_DLREXTKIND = "T_DLREXTKIND"
    T_DLREXTMCALL = "T_DLREXTMCALL"
    T_DLREXTVAL = "T_DLREXTVAL"
    T_DLREXTYPE_STRUCT = "T_DLREXTYPE_STRUCT"
    T_DLREXTYPE = "T_DLREXTYPE"
    T_DLRLITERAL = "T_DLRLITERAL"
    T_DLRLST = "T_DLRLST"
    T_DLRMYFILENAME = "T_DLRMYFILENAME"
    T_DLRMYFUNCTION = "T_DLRMYFUNCTION"
    T_DLRMYLOCATION = "T_DLRMYLOCATION"
    T_DLRRAISE = "T_DLRRAISE"
    T_DLRREC = "T_DLRREC"
    T_DLRSHOWTYPE = "T_DLRSHOWTYPE"
    T_DLRSOLASSERT = "T_DLRSOLASSERT"
    T_DLRSOLVERIFY = "T_DLRSOLVERIFY"
    T_DLRTEMPENVER = "T_DLRTEMPENVER"
    T_DLRTUP = "T_DLRTUP"
    T_DLRTYREP = "T_DLRTYREP"
    T_DLRVARARG = "T_DLRVARARG"
    T_DLRVCOPYENV = "T_DLRVCOPYENV"
    T_DOLLAR = "T_DOLLAR"
    T_DO = "T_DO"
    T_DOTDOTDOT = "T_DOTDOTDOT"
    T_DOTDOT = "T_DOTDOT"
    T_DOTINT = "T_DOTINT"
    T_DOTLTGTDOT = "T_DOTLTGTDOT"
    T_DOTLT = "T_DOTLT"
    T_DOT = "T_DOT"
    T_ELSE = "T_ELSE"
    T_END = "T_END"
    T_EOF = "T_EOF"
    T_EQGTGT = "T_EQGTGT"
    T_EQGT = "T_EQGT"
    T_EQLTGT = "T_EQLTGT"
    T_EQLT = "T_EQLT"
    T_EQSLASHEQGTGT = "T_EQSLASHEQGTGT"
    T_EQSLASHEQGT = "T_EQSLASHEQGT"
    T_EQ_OR_DIDENT = "T_EQ_OR_DIDENT"  # Renamed
    T_ERR = "T_ERR"
    T_EXCEPTION = "T_EXCEPTION"
    T_EXTCODE = "T_EXTCODE"
    T_EXTERN = "T_EXTERN"
    T_EXTVAR = "T_EXTVAR"
    T_EXTYPE = "T_EXTYPE"
    T_FIXITY = "T_FIXITY"
    T_FIX = "T_FIX"
    T_FLOAT = "T_FLOAT"
    T_FOLDAT = "T_FOLDAT"
    T_FOLD_OR_IDENT = "T_FOLD_OR_IDENT"  # Renamed
    T_FORSTAR = "T_FORSTAR"
    T_FOR = "T_FOR"
    T_FREEAT = "T_FREEAT"
    T_FREE_OR_IDENT = "T_FREE_OR_IDENT"  # Renamed
    T_FUN = "T_FUN"
    T_GTDOT = "T_GTDOT"
    T_GTLT_OR_DIDENT = "T_GTLT_OR_DIDENT"  # Renamed
    T_GT_OR_IDENT = "T_GT_OR_IDENT"  # Renamed
    T_HASHLBRACKET = "T_HASHLBRACKET"
    T_HASH = "T_HASH"
    T_IDENT_alp = "T_IDENT_alp"
    T_IDENT_arr = "T_IDENT_arr"
    T_IDENT_dlr = "T_IDENT_dlr"
    T_IDENT_ext = "T_IDENT_ext"
    T_IDENT_srp = "T_IDENT_srp"
    T_IDENT_sym = "T_IDENT_sym"
    T_IDENT_tmp = "T_IDENT_tmp"
    T_IFCASE = "T_IFCASE"
    T_IF = "T_IF"
    T_IMPLEMENT = "T_IMPLEMENT"
    T_IMPORT = "T_IMPORT"
    T_INT = "T_INT"
    T_IN = "T_IN"
    T_INTZERO = "T_INTZERO"
    T_LAM = "T_LAM"
    T_LBRACE = "T_LBRACE"
    T_LBRACKET = "T_LBRACKET"
    T_LET = "T_LET"
    T_LOCAL = "T_LOCAL"
    T_LPAREN = "T_LPAREN"
    T_LT_OR_IDENT = "T_LT_OR_IDENT"  # Renamed
    T_MACDEF = "T_MACDEF"
    T_MINUSGT_OR_SIDENT = "T_MINUSGT_OR_SIDENT"  # Renamed
    T_MINUSLTGT = "T_MINUSLTGT"
    T_MINUSLT = "T_MINUSLT"
    T_NONFIX = "T_NONFIX"
    T_OF = "T_OF"
    T_OP = "T_OP"
    T_OVERLOAD = "T_OVERLOAD"
    T_PERCENTLPAREN = "T_PERCENTLPAREN"
    T_PERCENT_OR_IDENT = "T_PERCENT_OR_IDENT"  # Renamed
    T_QMARK_OR_IDENT = "T_QMARK_OR_IDENT"  # Renamed
    T_QUOTELBRACE = "T_QUOTELBRACE"
    T_QUOTELBRACKET = "T_QUOTELBRACKET"
    T_QUOTELPAREN = "T_QUOTELPAREN"
    T_RBRACE = "T_RBRACE"
    T_RBRACKET = "T_RBRACKET"
    T_REASSUME = "T_REASSUME"
    T_REC = "T_REC"
    T_RPAREN = "T_RPAREN"
    T_SCASE = "T_SCASE"
    T_SEMICOLON = "T_SEMICOLON"
    T_SIF = "T_SIF"
    T_SORTDEF = "T_SORTDEF"
    T_SPACE = "T_SPACE"
    T_SRPASSERT = "T_SRPASSERT"
    T_SRPCODEGEN2 = "T_SRPCODEGEN2"
    T_SRPDEFINE = "T_SRPDEFINE"
    T_SRPDYNLOAD = "T_SRPDYNLOAD"
    T_SRPELIFDEF = "T_SRPELIFDEF"
    T_SRPELIFNDEF = "T_SRPELIFNDEF"
    T_SRPELIF = "T_SRPELIF"
    T_SRPELSE = "T_SRPELSE"
    T_SRPENDIF = "T_SRPENDIF"
    T_SRPERROR = "T_SRPERROR"
    T_SRPIFDEF = "T_SRPIFDEF"
    T_SRPIFNDEF = "T_SRPIFNDEF"
    T_SRPIF = "T_SRPIF"
    T_SRPINCLUDE = "T_SRPINCLUDE"
    T_SRPPRAGMA = "T_SRPPRAGMA"
    T_SRPPRERR = "T_SRPPRERR"
    T_SRPPRINT = "T_SRPPRINT"
    T_SRPREQUIRE = "T_SRPREQUIRE"
    T_SRPSTALOAD = "T_SRPSTALOAD"
    T_SRPTHEN = "T_SRPTHEN"
    T_SRPUNDEF = "T_SRPUNDEF"
    T_STACST = "T_STACST"
    T_STADEF = "T_STADEF"
    T_STATIC = "T_STATIC"
    T_STRING = "T_STRING"
    T_SYMELIM = "T_SYMELIM"
    T_SYMINTR = "T_SYMINTR"
    T_THEN = "T_THEN"
    T_TILDE_OR_IDENT = "T_TILDE_OR_IDENT"  # Renamed
    T_TKINDEF = "T_TKINDEF"
    T_TRY = "T_TRY"
    T_TYPEDEF = "T_TYPEDEF"
    T_TYPE = "T_TYPE"
    T_TYPE_OR_IDENT = "T_TYPE_OR_IDENT"  # Added
    T_VAL = "T_VAL"
    T_VAR = "T_VAR"
    T_VIEWAT = "T_VIEWAT"
    T_WHEN = "T_WHEN"
    T_WHERE = "T_WHERE"
    T_WHILESTAR = "T_WHILESTAR"
    T_WHILE = "T_WHILE"
    T_WITH = "T_WITH"
    T_WITHTYPE = "T_WITHTYPE"
# Translation of non‑finals to finals
# ----------------------------------------------------------------------------
# Maps every NonFin member to the Fin product it ultimately yields; many
# non-finals collapse onto the same final (e.g. all abstract-type forms
# become T_ABSTYPE).
NONFINS_TRANSL = {
    NonFin.ABSPROP: Fin.T_ABSTYPE,
    NonFin.ABST0YPE: Fin.T_ABSTYPE,
    NonFin.ABSTYPE: Fin.T_ABSTYPE,
    NonFin.ABSVIEWT0YPE: Fin.T_ABSTYPE,
    NonFin.ABSVIEW: Fin.T_ABSTYPE,
    NonFin.ABSVIEWTYPE: Fin.T_ABSTYPE,
    NonFin.CASE_neg: Fin.T_CASE,
    NonFin.CASE_pos: Fin.T_CASE,
    NonFin.CASE: Fin.T_CASE,
    NonFin.CASTFN: Fin.T_FUN,
    NonFin.COMMENT_block_c: Fin.T_COMMENT_block,
    NonFin.COMMENT_block_ml: Fin.T_COMMENT_block,
    NonFin.DATAPROP: Fin.T_DATATYPE,
    NonFin.DATATYPE: Fin.T_DATATYPE,
    NonFin.DATAVIEW: Fin.T_DATATYPE,
    NonFin.DATAVTYPE: Fin.T_DATATYPE,
    NonFin.DLRDELAY: Fin.T_DLRDELAY,
    NonFin.DLREFFMASK_ALL: Fin.T_DLREFFMASK_ARG,
    NonFin.DLREFFMASK_EXN: Fin.T_DLREFFMASK_ARG,
    NonFin.DLREFFMASK_NTM: Fin.T_DLREFFMASK_ARG,
    NonFin.DLREFFMASK_REF: Fin.T_DLREFFMASK_ARG,
    NonFin.DLREFFMASK_WRT: Fin.T_DLREFFMASK_ARG,
    NonFin.DLRLDELAY: Fin.T_DLRDELAY,
    NonFin.DLRLST: Fin.T_DLRLST,
    NonFin.DLRLST_T: Fin.T_DLRLST,
    NonFin.DLRLST_VT: Fin.T_DLRLST,
    NonFin.DLRREC: Fin.T_DLRREC,
    NonFin.DLRREC_T: Fin.T_DLRREC,
    NonFin.DLRREC_VT: Fin.T_DLRREC,
    NonFin.DLRTUP: Fin.T_DLRTUP,
    NonFin.DLRTUP_T: Fin.T_DLRTUP,
    NonFin.DLRTUP_VT: Fin.T_DLRTUP,
    NonFin.DLRVCOPYENV_V: Fin.T_DLRVCOPYENV,
    NonFin.DLRVCOPYENV_VT: Fin.T_DLRVCOPYENV,
    NonFin.FIXAT: Fin.T_FIX,
    NonFin.FIX: Fin.T_FIX,
    NonFin.FN: Fin.T_FUN,
    NonFin.FNX: Fin.T_FUN,
    NonFin.FUN: Fin.T_FUN,
    NonFin.IMPLEMENT: Fin.T_IMPLEMENT,
    NonFin.IMPLMNT: Fin.T_IMPLEMENT,
    NonFin.INFIXL: Fin.T_FIXITY,
    NonFin.INFIXR: Fin.T_FIXITY,
    NonFin.INFIX: Fin.T_FIXITY,
    NonFin.LAMAT: Fin.T_LAM,
    NonFin.LAM: Fin.T_LAM,
    NonFin.LLAMAT: Fin.T_LAM,
    NonFin.LLAM: Fin.T_LAM,
    NonFin.MACDEF: Fin.T_MACDEF,
    NonFin.MACRODEF: Fin.T_MACDEF,
    NonFin.POSTFIX: Fin.T_FIXITY,
    NonFin.PRAXI: Fin.T_FUN,
    NonFin.PREFIX: Fin.T_FIXITY,
    NonFin.PRFN: Fin.T_FUN,
    NonFin.PRFUN: Fin.T_FUN,
    NonFin.PRIMPLMNT: Fin.T_IMPLEMENT,
    NonFin.PROPDEF: Fin.T_TYPEDEF,
    NonFin.PROP_neg: Fin.T_TYPE,
    NonFin.PROP_pos: Fin.T_TYPE,
    NonFin.PROP: Fin.T_TYPE_OR_IDENT,
    NonFin.PRVAL: Fin.T_VAL,
    NonFin.PRVAR: Fin.T_VAR,
    NonFin.T0YPE_neg: Fin.T_TYPE,
    NonFin.T0YPE_pos: Fin.T_TYPE,
    NonFin.T0YPE: Fin.T_TYPE,
    NonFin.TYPEDEF: Fin.T_TYPEDEF,
    NonFin.TYPE_neg: Fin.T_TYPE,
    NonFin.TYPE_pos: Fin.T_TYPE,
    NonFin.TYPE: Fin.T_TYPE_OR_IDENT,
    NonFin.VAL_neg: Fin.T_VAL,
    NonFin.VAL_pos: Fin.T_VAL,
    NonFin.VAL: Fin.T_VAL,
    NonFin.VAR: Fin.T_VAR,
    NonFin.VIEWDEF: Fin.T_TYPEDEF,
    NonFin.VIEW_neg: Fin.T_TYPE,
    NonFin.VIEW_pos: Fin.T_TYPE,
    NonFin.VIEWT0YPE_neg: Fin.T_TYPE,
    NonFin.VIEWT0YPE_pos: Fin.T_TYPE,
    NonFin.VIEWT0YPE: Fin.T_TYPE,
    NonFin.VIEW: Fin.T_TYPE_OR_IDENT,
    NonFin.VIEWTYPEDEF: Fin.T_TYPEDEF,
    NonFin.VIEWTYPE_neg: Fin.T_TYPE,
    NonFin.VIEWTYPE_pos: Fin.T_TYPE,
    NonFin.VIEWTYPE: Fin.T_TYPE_OR_IDENT,
    NonFin.WITHPROP: Fin.T_WITHTYPE,
    NonFin.WITHTYPE: Fin.T_WITHTYPE,
    NonFin.WITHVIEW: Fin.T_WITHTYPE,
    NonFin.WITHVIEWTYPE: Fin.T_WITHTYPE}
# Import-time sanity checks: total mapping NonFin -> Fin.
# NOTE(review): these asserts are stripped under `python -O` — confirm
# that is acceptable, or convert to explicit raises.
assert all(isinstance(x, NonFin) for x in NONFINS_TRANSL)
assert all(isinstance(x, Fin) for x in NONFINS_TRANSL.values())
assert all(x in NONFINS_TRANSL for x in NonFin)
# Translation of some idents to products
# ----------------------------------------------------------------------------
# Maps identifier/operator spellings to their lexical product (either a
# final Fin or a NonFin that NONFINS_TRANSL later resolves).
IDENTS_TRANSL = {
    # Finals
    "and": Fin.T_AND,
    "as": Fin.T_AS,
    "assume": Fin.T_ASSUME,
    "absimpl": Fin.T_ASSUME,
    "@": Fin.T_AT_OR_SIDENT,
    "!": Fin.T_BANG_OR_IDENT,
    "|": Fin.T_BAR,
    "begin": Fin.T_BEGIN,
    "`": Fin.T_BQUOTE,
    "classdec": Fin.T_CLASSDEC,
    ":": Fin.T_COLON,
    "datasort": Fin.T_DATASORT,
    "$arrpsz": Fin.T_DLRARRPSZ,
    "$arrptrsize": Fin.T_DLRARRPSZ,
    "$break": Fin.T_DLRBREAK,
    "$continue": Fin.T_DLRCONTINUE,
    "$d2ctype": Fin.T_DLRD2CTYPE,
    "$effmask": Fin.T_DLREFFMASK,
    "$extern": Fin.T_DLREXTERN,
    "$extfcall": Fin.T_DLREXTFCALL,
    "$extkind": Fin.T_DLREXTKIND,
    "$extmcall": Fin.T_DLREXTMCALL,
    "$extval": Fin.T_DLREXTVAL,
    "$extype": Fin.T_DLREXTYPE,
    "$extype_struct": Fin.T_DLREXTYPE_STRUCT,
    "$literal": Fin.T_DLRLITERAL,
    "$myfilename": Fin.T_DLRMYFILENAME,
    "$myfunction": Fin.T_DLRMYFUNCTION,
    "$mylocation": Fin.T_DLRMYLOCATION,
    "$raise": Fin.T_DLRRAISE,
    "$showtype": Fin.T_DLRSHOWTYPE,
    "$solver_assert": Fin.T_DLRSOLASSERT,
    "$solver_verify": Fin.T_DLRSOLVERIFY,
    "$tempenver": Fin.T_DLRTEMPENVER,
    "$tyrep": Fin.T_DLRTYREP,
    "$vararg": Fin.T_DLRVARARG,
    "do": Fin.T_DO,
    "$": Fin.T_DOLLAR,
    ".": Fin.T_DOT,
    "..": Fin.T_DOTDOT,
    "...": Fin.T_DOTDOTDOT,
    ".<>.": Fin.T_DOTLTGTDOT,
    ".<": Fin.T_DOTLT,
    "else": Fin.T_ELSE,
    "end": Fin.T_END,
    "=": Fin.T_EQ_OR_DIDENT,
    "=>": Fin.T_EQGT,
    "=>>": Fin.T_EQGTGT,
    "=<": Fin.T_EQLT,
    "=<>": Fin.T_EQLTGT,
    "=/=>": Fin.T_EQSLASHEQGT,
    "=/=>>": Fin.T_EQSLASHEQGTGT,
    "exception": Fin.T_EXCEPTION,
    "extern": Fin.T_EXTERN,
    "extvar": Fin.T_EXTVAR,
    "extype": Fin.T_EXTYPE,
    ">.": Fin.T_GTDOT,
    ">": Fin.T_GT_OR_IDENT,
    "><": Fin.T_GTLT_OR_DIDENT,
    "#": Fin.T_HASH,
    "ifcase": Fin.T_IFCASE,
    "if": Fin.T_IF,
    "import": Fin.T_IMPORT,
    "in": Fin.T_IN,
    "let": Fin.T_LET,
    "local": Fin.T_LOCAL,
    "<": Fin.T_LT_OR_IDENT,
    "->": Fin.T_MINUSGT_OR_SIDENT,
    "-<": Fin.T_MINUSLT,
    "-<>": Fin.T_MINUSLTGT,
    "nonfix": Fin.T_NONFIX,
    "of": Fin.T_OF,
    "op": Fin.T_OP,
    "overload": Fin.T_OVERLOAD,
    "%": Fin.T_PERCENT_OR_IDENT,
    "?": Fin.T_QMARK_OR_IDENT,
    "reassume": Fin.T_REASSUME,
    "absreimpl": Fin.T_REASSUME,
    "rec": Fin.T_REC,
    "scase": Fin.T_SCASE,
    "sif": Fin.T_SIF,
    "sortdef": Fin.T_SORTDEF,
    "#assert": Fin.T_SRPASSERT,
    "#codegen2": Fin.T_SRPCODEGEN2,
    "#define": Fin.T_SRPDEFINE,
    "dynload": Fin.T_SRPDYNLOAD,
    "#dynload": Fin.T_SRPDYNLOAD,
    "#elifdef": Fin.T_SRPELIFDEF,
    "#elif": Fin.T_SRPELIF,
    "#elifndef": Fin.T_SRPELIFNDEF,
    "#else": Fin.T_SRPELSE,
    "#endif": Fin.T_SRPENDIF,
    "#error": Fin.T_SRPERROR,
    "#ifdef": Fin.T_SRPIFDEF,
    "#if": Fin.T_SRPIF,
    "#ifndef": Fin.T_SRPIFNDEF,
    "#include": Fin.T_SRPINCLUDE,
    "#pragma": Fin.T_SRPPRAGMA,
    "#prerr": Fin.T_SRPPRERR,
    "#print": Fin.T_SRPPRINT,
    "#require": Fin.T_SRPREQUIRE,
    "staload": Fin.T_SRPSTALOAD,
    "#staload": Fin.T_SRPSTALOAD,
    "#then": Fin.T_SRPTHEN,
    "#undef": Fin.T_SRPUNDEF,
    "sta": Fin.T_STACST,
    "stacst": Fin.T_STACST,
    "stadef": Fin.T_STADEF,
    "static": Fin.T_STATIC,
    "symelim": Fin.T_SYMELIM,
    "symintr": Fin.T_SYMINTR,
    "then": Fin.T_THEN,
    "~": Fin.T_TILDE_OR_IDENT,
    "tkindef": Fin.T_TKINDEF,
    "try": Fin.T_TRY,
    "when": Fin.T_WHEN,
    "where": Fin.T_WHERE,
    "with": Fin.T_WITH,
    # Non-finals
    "absprop": NonFin.ABSPROP,
    "abst0ype": NonFin.ABST0YPE,
    "abstflat": NonFin.ABST0YPE,
    "abstbox": NonFin.ABSTYPE,
    "abstype": NonFin.ABSTYPE,
    "absview": NonFin.ABSVIEW,
    "absviewt0ype": NonFin.ABSVIEWT0YPE,
    "absvt0ype": NonFin.ABSVIEWT0YPE,
    "absvtflat": NonFin.ABSVIEWT0YPE,
    "absviewtype": NonFin.ABSVIEWTYPE,
    "absvtbox": NonFin.ABSVIEWTYPE,
    "absvtype": NonFin.ABSVIEWTYPE,
    "castfn": NonFin.CASTFN,
    "dataprop": NonFin.DATAPROP,
    "datatype": NonFin.DATATYPE,
    "dataview": NonFin.DATAVIEW,
    "dataviewtype": NonFin.DATAVTYPE,
    "datavtype": NonFin.DATAVTYPE,
    "$delay": NonFin.DLRDELAY,
    "$effmask_all": NonFin.DLREFFMASK_ALL,
    "$effmask_exn": NonFin.DLREFFMASK_EXN,
    "$effmask_ntm": NonFin.DLREFFMASK_NTM,
    "$effmask_ref": NonFin.DLREFFMASK_REF,
    "$effmask_wrt": NonFin.DLREFFMASK_WRT,
    "$ldelay": NonFin.DLRLDELAY,
    "$list": NonFin.DLRLST,
    "$lst": NonFin.DLRLST,
    "$list_t": NonFin.DLRLST_T,
    "$lst_t": NonFin.DLRLST_T,
    "$list_vt": NonFin.DLRLST_VT,
    "$lst_vt": NonFin.DLRLST_VT,
    "$rec": NonFin.DLRREC,
    "$record": NonFin.DLRREC,
    "$record_t": NonFin.DLRREC_T,
    "$rec_t": NonFin.DLRREC_T,
    "$record_vt": NonFin.DLRREC_VT,
    "$rec_vt": NonFin.DLRREC_VT,
    "$tuple_t": NonFin.DLRTUP_T,
    "$tup_t": NonFin.DLRTUP_T,
    "$tup": NonFin.DLRTUP,
    "$tuple": NonFin.DLRTUP,
    "$tuple_vt": NonFin.DLRTUP_VT,
    "$tup_vt": NonFin.DLRTUP_VT,
    "$vcopyenv_vt": NonFin.DLRVCOPYENV_VT,
    "$vcopyenv_v": NonFin.DLRVCOPYENV_V,
    "fn": NonFin.FN,
    "fnx": NonFin.FNX,
    "fun": NonFin.FUN,
    "implement": NonFin.IMPLEMENT,
    "implmnt": NonFin.IMPLMNT,
    "infix": NonFin.INFIX,
    "infixl": NonFin.INFIXL,
    "infixr": NonFin.INFIXR,
    "macdef": NonFin.MACDEF,
    "macrodef": NonFin.MACRODEF,
    "postfix": NonFin.POSTFIX,
    "praxi": NonFin.PRAXI,
    "prefix": NonFin.PREFIX,
    "prfn": NonFin.PRFN,
    "prfun": NonFin.PRFUN,
    "primplement": NonFin.PRIMPLMNT,
    "primplmnt": NonFin.PRIMPLMNT,
    "propdef": NonFin.PROPDEF,
    "prval": NonFin.PRVAL,
    "prvar": NonFin.PRVAR,
    "typedef": NonFin.TYPEDEF,
    "var": NonFin.VAR,
    "viewdef": NonFin.VIEWDEF,
    "viewtypedef": NonFin.VIEWTYPEDEF,
    "vtypedef": NonFin.VIEWTYPEDEF,
    "withprop": NonFin.WITHPROP,
    "withtype": NonFin.WITHTYPE,
    "withviewtype": NonFin.WITHVIEWTYPE,
    "withvtype": NonFin.WITHVIEWTYPE,
    "withview": NonFin.WITHVIEW,
    # Added
    "case": NonFin.CASE,
    "prop": NonFin.PROP,
    "type": NonFin.TYPE,
    "t0ype": NonFin.T0YPE,
    "vtype": NonFin.VIEWTYPE,
    "vt0ype": NonFin.VIEWT0YPE,
    "view": NonFin.VIEW,
    "viewtype": NonFin.VIEWTYPE,
    "viewt0ype": NonFin.VIEWT0YPE,
    "val": NonFin.VAL,
    "for": Fin.T_FOR,
    "while": Fin.T_WHILE,
    "addr": Fin.T_ADDR_OR_IDENT,
    "fold": Fin.T_FOLD_OR_IDENT,
    "free": Fin.T_FREE_OR_IDENT,
    "lam": NonFin.LAM,
    "llam": NonFin.LLAM,
    "fix": NonFin.FIX}
# Import-time sanity checks.
# NOTE(review): stripped under `python -O` — confirm that is acceptable.
assert all(isinstance(x, str) for x in IDENTS_TRANSL)
assert all(isinstance(x, (Fin, NonFin)) for x in IDENTS_TRANSL.values())
def ident_translation(ident, default, in_feffs):
    """ Ident possibly translated after IDENTS_TRANSL.

    :param ident: lexeme text.
    :param default: product to return when no translation applies.
    :param in_feffs: True while lexing inside function-effect annotations,
        where only a single translation (">" closing the annotation) applies.
    :return: a Fin/NonFin product, or *default*.
    """
    if in_feffs:
        # Only apply this single translation when in function effects.
        if default == Fin.T_IDENT_sym and ident == ">":
            return Fin.T_GT_OR_IDENT
        return default
    # dict.get replaces the `x[k] if k in x else d` double lookup.
    return IDENTS_TRANSL.get(ident, default)
# Prefixes
# ----------------------------------------------------------------------------
# ### Types
class TreeNode:
    """ Prefix table as a tree: one node per consumed character. """
    __slots__ = ["next", "product"]

    def __init__(self):
        # Outgoing transitions: char -> TreeNode.
        self.next = {}
        # Fin, NonFin, Start, or None when no prefix ends at this node.
        self.product = None
def add_to_tree(tree, prefix, product):
    """ Add prefix as a branch of tree, creating nodes on demand. """
    node = tree
    for char in prefix:
        try:
            node = node.next[char]
        except KeyError:
            child = TreeNode()
            node.next[char] = child
            node = child
    # Unique definitions: no ambigous prefixes.
    assert node.product is None
    node.product = product
def tree_step(node, c):
    """ Step in tree by c from node (transition on c).

    :return: the child TreeNode reached on c, or None when there is no
        transition.
    """
    # dict.get avoids the membership test followed by a second lookup.
    return node.next.get(c)
# ### Further processing interpretation
class Start(Enum):
    """ Interpretations of some prefixes.

    A Start product does not end a lexeme by itself; it tells the lexer
    which longer construct to continue scanning (string, comment, number,
    identifier, ...).  Member values mirror the member names.
    """
    CHAR_start = "CHAR_start"
    COMMENT_block_c_start = "COMMENT_block_c_start"
    COMMENT_block_ml_start = "COMMENT_block_ml_start"
    COMMENT_line_start = "COMMENT_line_start"
    COMMENT_rest_start = "COMMENT_rest_start"
    DOTINT_start = "DOTINT_start"
    FLOAT_dec_start = "FLOAT_dec_start"
    IDENT_dlr_start = "IDENT_dlr_start"
    IDENT_srp_start = "IDENT_srp_start"
    IDENT_sym_start = "IDENT_sym_start"
    IDENT_xx_start = "IDENT_xx_start"
    INT_oct_start = "INT_oct_start"
    QMARKGT_start = "QMARKGT_start"
    STRING_start = "STRING_start"
    XX_dec_start = "XX_dec_start"
    XX_hex_start = "XX_hex_start"
# ### Prefixes lookup table as a tree
# Root of the prefix tree; populated at import time by the add_prefix
# calls below.
TREE = TreeNode()
def add_prefix(prefix, product):
    """ Register prefix -> product in the module-level TREE. """
    # Reject products that are neither finals/starts nor translatable
    # non-finals.
    is_known = isinstance(product, (Fin, Start)) or product in NONFINS_TRANSL
    assert is_known
    add_to_tree(TREE, prefix, product)
def add_prefixes2(char1, char2_set, product):
    """ Register every two-char prefix char1 + c, for c in char2_set. """
    for second in char2_set:
        add_prefix(char1 + second, product)
def add_prefixes1(char1_set, product):
    """ Register every single-char prefix from char1_set. """
    for first in char1_set:
        add_prefix(first, product)
# Some prefixes are a product on their own, some prefix starts a product.
add_prefix(EOF, Fin.T_EOF)
# Fin, sorted by "XX"
add_prefix("0", Fin.T_INTZERO)
add_prefix("addr@", Fin.T_ADDRAT)
add_prefix("fold@", Fin.T_FOLDAT)
add_prefix("for*", Fin.T_FORSTAR)
add_prefix("free@", Fin.T_FREEAT)
add_prefix("view@", Fin.T_VIEWAT)
add_prefix("while*", Fin.T_WHILESTAR)
add_prefix("@{", Fin.T_ATLBRACE)
add_prefix("@[", Fin.T_ATLBRACKET)
add_prefix("@(", Fin.T_ATLPAREN)
add_prefix("\\", Fin.T_BACKSLASH_OR_IDENT)
add_prefix("`(", Fin.T_BQUOTELPAREN)
add_prefix(":<", Fin.T_COLONLT)
add_prefix(",", Fin.T_COMMA)
add_prefix(",(", Fin.T_COMMALPAREN)
add_prefix("#[", Fin.T_HASHLBRACKET)
add_prefix("$", Fin.T_IDENT_sym)
add_prefix("{", Fin.T_LBRACE)
add_prefix("[", Fin.T_LBRACKET)
add_prefix("(", Fin.T_LPAREN)
add_prefix("%(", Fin.T_PERCENTLPAREN)
add_prefix("'{", Fin.T_QUOTELBRACE)
add_prefix("'[", Fin.T_QUOTELBRACKET)
add_prefix("'(", Fin.T_QUOTELPAREN)
add_prefix("}", Fin.T_RBRACE)
add_prefix("]", Fin.T_RBRACKET)
add_prefix(")", Fin.T_RPAREN)
add_prefix(";", Fin.T_SEMICOLON)
# NonFin, Sorted by "XX"
add_prefix("abst@ype", NonFin.ABST0YPE)
add_prefix("absviewt@ype", NonFin.ABSVIEWT0YPE)
add_prefix("absvt@ype", NonFin.ABSVIEWT0YPE)
add_prefix("case-", NonFin.CASE_neg)
add_prefix("case+", NonFin.CASE_pos)
add_prefix("fix@", NonFin.FIXAT)
add_prefix("lam@", NonFin.LAMAT)
add_prefix("llam@", NonFin.LLAMAT)
add_prefix("prop-", NonFin.PROP_neg)
add_prefix("prop+", NonFin.PROP_pos)
add_prefix("t0ype-", NonFin.T0YPE_neg)
add_prefix("t0ype+", NonFin.T0YPE_pos)
add_prefix("t@ype", NonFin.T0YPE)
add_prefix("t@ype-", NonFin.T0YPE_neg)
add_prefix("t@ype+", NonFin.T0YPE_pos)
add_prefix("type-", NonFin.TYPE_neg)
add_prefix("type+", NonFin.TYPE_pos)
add_prefix("val-", NonFin.VAL_neg)
add_prefix("val+", NonFin.VAL_pos)
add_prefix("view-", NonFin.VIEW_neg)
add_prefix("view+", NonFin.VIEW_pos)
add_prefix("viewt0ype-", NonFin.VIEWT0YPE_neg)
add_prefix("viewt0ype+", NonFin.VIEWT0YPE_pos)
add_prefix("viewt@ype", NonFin.VIEWT0YPE)
add_prefix("viewt@ype-", NonFin.VIEWT0YPE_neg)
add_prefix("viewt@ype+", NonFin.VIEWT0YPE_pos)
add_prefix("viewtype-", NonFin.VIEWTYPE_neg)
add_prefix("viewtype+", NonFin.VIEWTYPE_pos)
add_prefix("vt0ype-", NonFin.VIEWT0YPE_neg)
add_prefix("vt0ype+", NonFin.VIEWT0YPE_pos)
add_prefix("vt@ype", NonFin.VIEWT0YPE)
add_prefix("vt@ype-", NonFin.VIEWT0YPE_neg)
# NOTE(review): the next three lines break the "sorted by XX" ordering
# claimed above ("vtype-" between the "vt@ype" variants); harmless, but
# worth re-sorting for readability.
add_prefix("vtype-", NonFin.VIEWTYPE_neg)
add_prefix("vt@ype+", NonFin.VIEWT0YPE_pos)
add_prefix("vtype+", NonFin.VIEWTYPE_pos)
# Start, Sorted by Start.XX
add_prefix("'", Start.CHAR_start)
add_prefix("/*", Start.COMMENT_block_c_start)
add_prefix("(*", Start.COMMENT_block_ml_start)
add_prefix("//", Start.COMMENT_line_start)
add_prefix("////", Start.COMMENT_rest_start)
add_prefixes2(".", DIGIT, Start.DOTINT_start)
add_prefix("0.", Start.FLOAT_dec_start)
add_prefixes2("0", E, Start.FLOAT_dec_start)
add_prefixes2("$", IDENTFST, Start.IDENT_dlr_start)
add_prefixes2("#", IDENTFST, Start.IDENT_srp_start)
add_prefixes2("$", SYMBOLIC, Start.IDENT_sym_start)
add_prefixes1(SYMBOLIC, Start.IDENT_sym_start)
add_prefixes1(IDENTFST, Start.IDENT_xx_start)
add_prefixes2("0", OCTAL, Start.INT_oct_start)
add_prefix('?>', Start.QMARKGT_start)
add_prefix('"', Start.STRING_start)
add_prefixes1(DIGIT - {"0"}, Start.XX_dec_start)  # "0" is another prefix.
add_prefixes2("0", X, Start.XX_hex_start)
# Additions
# ============================================================================
# Final products that represent comments.
COMMENTS = {Fin.T_COMMENT_block, Fin.T_COMMENT_line, Fin.T_COMMENT_rest}
# Presumably products the consumer treats as lexical errors — confirm.
ERRORS = {Fin.T_IDENT_srp}
# Alphanumeric identifiers that keep a trailing "!" as part of the lexeme.
IDENT_EXTS = {
    "car!",
    "cdr!",
    "fprint!",
    "fprintln!",
    "gprint!",
    "gprintln!",
    "iscons!",
    "islist!",
    "isnil!",
    "prerr!",
    "prerrln!",
    "print!",
    "println!",
    "tupz!"}
# ----------------------------------------------------------------------------
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for v1 of the Timesketch API."""
from __future__ import print_function
import json
import os
import re
import mock
from flask_restful import fields
from timesketch.lib.definitions import HTTP_STATUS_CODE_CREATED
from timesketch.lib.definitions import HTTP_STATUS_CODE_OK
from timesketch.lib.definitions import HTTP_STATUS_CODE_BAD_REQUEST
from timesketch.lib.testlib import BaseTest
from timesketch.lib.testlib import MockDataStore
from timesketch.api.v1.resources import ResourceMixin
class TypescriptDefinitionTest(BaseTest):
    """Ensure the frontend's Typescript model definitions stay in sync
    with the backend's flask_restful API schema."""

    def test_definition_is_in_sync_with_api_schema(self):
        """Parse Typescript definition for API types and compare it to
        api schema defined in ResourceMixin.
        """
        # pylint: disable=redefined-outer-name,missing-docstring
        def parse_definition():
            # Parse ui/api/models.ts with regexes into
            # {interface: {field: type}}; array fields become
            # {'[i]': element_type}.
            this_dir = os.path.dirname(__file__)
            definition_path = os.path.join(
                this_dir, '..', '..', 'ui', 'api', 'models.ts')
            with open(definition_path) as definition_file:
                definition = definition_file.read()
            interfaces = r'interface\s+([a-zA-Z_]+)\s*\{([^\}]*)\}'
            fields = r'^\s*([a-zA-Z_]+)\s*\:\s*(.*?)\s*((?:\[\])?)\s*$'
            parsed_definition = {}
            for interface in re.finditer(interfaces, definition):
                interface_name, interface_body = interface.groups()
                parsed_interface = {}
                for field in re.finditer(fields, interface_body, re.MULTILINE):
                    field_name, field_type, is_array = field.groups()
                    if is_array:
                        parsed_interface[field_name] = {'[i]': field_type}
                    else:
                        parsed_interface[field_name] = field_type
                parsed_definition[interface_name] = parsed_interface
            return parsed_definition

        def resolve_references(definition):
            # Replace interface-name references with the referenced dict.
            def resolve(x):
                if isinstance(x, str) and x in definition:
                    return definition[x]
                if isinstance(x, dict):
                    return {k: resolve(v) for k, v in x.items()}
                return x
            # 10 passes bounds the nesting depth handled — assumed deep
            # enough for the current models; cycles would stop expanding.
            for _ in range(10):
                definition = {k: resolve(v) for k, v in definition.items()}
            return definition

        def parse_api_schema():
            # Collect every *_fields attribute of ResourceMixin into the
            # same nested-dict shape as parse_definition's output.
            def format_fields(fields):
                return {k: format_field(v) for k, v in fields.items()}
            def format_field(field):
                # pylint: disable=unidiomatic-typecheck
                typemap = {
                    fields.Integer: 'fields.Integer',
                    fields.String: 'fields.String',
                    fields.Boolean: 'fields.Boolean',
                    fields.DateTime: 'fields.DateTime',
                }
                if isinstance(field, type):
                    return typemap[field]
                if type(field) in typemap:
                    return typemap[type(field)]
                elif isinstance(field, fields.Nested):
                    return format_fields(field.nested)
                elif isinstance(field, fields.List):
                    return {'[i]': format_field(field.container)}
                else:
                    assert False
            api_schema = {
                k: format_fields(v)
                for k, v in ResourceMixin.__dict__.items()
                if k.endswith('_fields')
            }
            return api_schema

        def flatten_dict(d):
            # Flatten nested dicts into dotted "a.b.c" keys.
            result = {}
            for k, v in d.items():
                if isinstance(v, dict):
                    v = flatten_dict(v)
                    v = {('%s.%s' % (k, kk)): vv for kk, vv in v.items()}
                    result.update(v)
                else:
                    result[k] = v
            return result

        def fix_array_access(d):
            # "a.[i]" -> "a[i]" so both sides use identical key spelling.
            return {k.replace('.[i]', '[i]'): v for k, v in d.items()}

        def key(entry):
            # Normalized comparison key: strip "_fields", lowercase.
            return entry[0].replace('_fields', '').lower()

        def consecutive_pairs(li):
            for i in range(1, len(li)):
                yield li[i-1], li[i]

        def match_type(t1, t2):
            # Accept the pairing in either order (schema vs definition).
            types = [
                ('fields.Integer', 'number'),
                ('fields.String', 'string'),
                ('fields.Boolean', 'boolean'),
                ('fields.DateTime', 'DateTime'),
            ]
            return (t1, t2) in types or (t2, t1) in types

        definition = parse_definition()
        definition = resolve_references(definition)
        definition = flatten_dict(definition)
        definition = fix_array_access(definition)
        api_schema = parse_api_schema()
        api_schema = flatten_dict(api_schema)
        api_schema = fix_array_access(api_schema)
        # Sorting by the normalized key makes matching entries adjacent,
        # so one pass over consecutive pairs finds all matches.
        entries = sorted(
            list(definition.items()) + list(api_schema.items()), key=key)
        errors = 0
        for entry1, entry2 in consecutive_pairs(entries):
            if key(entry1) == key(entry2):
                if not match_type(entry1[1], entry2[1]):
                    print('')
                    print('Type mismatch:')
                    print('%s: %s' % entry1)
                    print('%s: %s' % entry2)
                    errors += 1
                # Matched (or mismatched-but-reported) entries are removed;
                # whatever remains afterwards is one-sided.
                definition.pop(entry1[0], None)
                definition.pop(entry2[0], None)
                api_schema.pop(entry1[0], None)
                api_schema.pop(entry2[0], None)
        if definition:
            print('')
            print(
                'The following fields are present in Typescript definition'
                ' but are not present in ResourceMixin API schema:'
            )
            for item in definition.items():
                print('%s: %s' % item)
        if api_schema:
            print('')
            print(
                'The following fields are present in ResourceMixin API schema'
                ' but are not present in Typescript definition:'
            )
            for item in api_schema.items():
                print('%s: %s' % item)
        self.assertEqual(len(definition) + len(api_schema) + errors, 0)
class ResourceMixinTest(BaseTest):
    """Test ResourceMixin."""

    def test_to_json_empty_list(self):
        """to_json on an empty list yields empty meta and objects."""
        expected = {
            'meta': {},
            'objects': [],
        }
        response = ResourceMixin().to_json([])
        self.assertEqual(response.json, expected)
class SketchListResourceTest(BaseTest):
    """Test SketchListResource."""
    resource_url = u'/api/v1/sketches/'

    def test_sketch_list_resource(self):
        """Authenticated request to get list of sketches."""
        self.login()
        response = self.client.get(self.resource_url)
        self.assertEqual(len(response.json[u'objects'][0]), 2)
        result = sorted(i['name'] for i in response.json[u'objects'][0])
        self.assertEqual(result, [u'Test 1', u'Test 3'])
        self.assert200(response)

    def test_sketch_post_resource(self):
        """Authenticated request to create a sketch."""
        self.login()
        data = dict(name=u'test', description=u'test')
        response = self.client.post(
            self.resource_url,
            data=json.dumps(data, ensure_ascii=False),
            content_type=u'application/json')
        # assertEqual: assertEquals is a deprecated alias (removed in
        # Python 3.12); the rest of this module already uses assertEqual.
        self.assertEqual(response.status_code, HTTP_STATUS_CODE_CREATED)
class SketchResourceTest(BaseTest):
    """Test SketchResource."""
    resource_url = u'/api/v1/sketches/1/'

    def test_sketch_resource(self):
        """Authenticated request to get a sketch."""
        self.login()
        response = self.client.get(self.resource_url)
        self.assertEqual(len(response.json[u'objects']), 1)
        self.assertEqual(len(response.json[u'objects'][0][u'timelines']), 1)
        self.assertEqual(response.json[u'objects'][0][u'name'], u'Test 1')
        self.assert200(response)

    def test_sketch_acl(self):
        """
        Authenticated request to get a sketch that the user does not have
        read permission on.
        """
        self.login()
        # Sketch 2 presumably belongs to another user in the fixtures —
        # TODO confirm; a 403 is expected.
        response = self.client.get(u'/api/v1/sketches/2/')
        self.assert403(response)
class ViewListResourceTest(BaseTest):
    """Test ViewListResource."""
    resource_url = u'/api/v1/sketches/1/views/'

    def test_post_view_list_resource(self):
        """Authenticated request to create a view."""
        self.login()
        data = dict(
            name=u'test',
            new_searchtemplate=False,
            query=u'test',
            filter={},
            dsl={})
        response = self.client.post(
            self.resource_url,
            data=json.dumps(data, ensure_ascii=False),
            content_type=u'application/json')
        # Creating a view from an existing search template must also work.
        data[u'from_searchtemplate_id'] = 1
        response_with_searchtemplate = self.client.post(
            self.resource_url,
            data=json.dumps(data, ensure_ascii=False),
            content_type=u'application/json')
        # assertEqual: assertEquals is a deprecated unittest alias.
        self.assertEqual(response.status_code, HTTP_STATUS_CODE_CREATED)
        self.assertEqual(response_with_searchtemplate.status_code,
                         HTTP_STATUS_CODE_CREATED)
class ViewResourceTest(BaseTest):
    """Test ViewResource."""
    resource_url = u'/api/v1/sketches/1/views/1/'

    def test_view_resource(self):
        """Authenticated request to get a view."""
        self.login()
        response = self.client.get(self.resource_url)
        self.assertEqual(len(response.json[u'objects']), 1)
        self.assertEqual(response.json[u'objects'][0][u'name'], u'View 1')
        self.assert200(response)

    def test_post_view_resource(self):
        """Authenticated request to update a view."""
        self.login()
        data = dict(name=u'test', query=u'test', filter=u'{}')
        response = self.client.post(
            self.resource_url,
            data=json.dumps(data, ensure_ascii=False),
            content_type=u'application/json')
        # assertEqual: assertEquals is a deprecated unittest alias.
        self.assertEqual(response.status_code, HTTP_STATUS_CODE_CREATED)

    def test_invalid_user_in_view(self):
        """Authenticated request to get a view for another user."""
        self.login()
        response = self.client.get(u'/api/v1/sketches/1/views/3/')
        self.assert403(response)

    def test_invalid_view(self):
        """Authenticated request to get a view for non existing view."""
        self.login()
        response = self.client.get(u'/api/v1/sketches/1/views/2/')
        self.assert404(response)
class SearchTemplateResourceTest(BaseTest):
    """Test Search template resource."""
    resource_url = u'/api/v1/searchtemplate/1/'

    def test_searchtemplate_resource(self):
        """Authenticated request to get a search template."""
        self.login()
        response = self.client.get(self.resource_url)
        objects = response.json[u'objects']
        self.assertEqual(len(objects), 1)
        self.assertEqual(objects[0][u'name'], u'template')
        self.assert200(response)

    def test_invalid_searchtemplate(self):
        """Authenticated request to get a non existing search template."""
        self.login()
        response = self.client.get(u'/api/v1/searchtemplate/2/')
        self.assert404(response)
class ExploreResourceTest(BaseTest):
    """Test ExploreResource."""
    resource_url = u'/api/v1/sketches/1/explore/'
    # Canonical response produced by the mocked datastore for the query
    # below; compared verbatim with assertDictEqual in test_search.
    expected_response = {
        u'meta': {
            u'timeline_names': {
                u'test': u'Timeline 1'
            },
            u'timeline_colors': {
                u'test': u'FFFFFF'
            },
            u'es_total_count': 1,
            u'es_time': 5
        },
        u'objects': [{
            u'sort': [1410593223000],
            u'_type': u'plaso_event',
            u'_source': {
                u'timestamp': 1410593222543942,
                u'message': u'Test event',
                u'label': [u'__ts_star'],
                u'timestamp_desc': u'Content Modification Time',
                u'datetime': u'2014-09-13T07:27:03+00:00'
            },
            u'_score': u'null',
            u'selected': False,
            u'_index': u'test',
            u'_id': u'test'
        }]
    }

    # The real Elasticsearch datastore is replaced with a mock so no
    # cluster is needed during the test run.
    @mock.patch(u'timesketch.api.v1.resources.ElasticsearchDataStore',
                MockDataStore)
    def test_search(self):
        """Authenticated request to query the datastore."""
        self.login()
        data = dict(query=u'test', filter={})
        response = self.client.post(
            self.resource_url,
            data=json.dumps(data, ensure_ascii=False),
            content_type=u'application/json')
        self.assertDictEqual(response.json, self.expected_response)
        self.assert200(response)
class AggregationResourceTest(BaseTest):
    """Test AggregationResource."""
    # Docstring fixed: it previously said "Test ExploreResource"
    # (copy-paste from the class above).
    resource_url = u'/api/v1/sketches/1/aggregation/'

    @mock.patch(u'timesketch.api.v1.resources.ElasticsearchDataStore',
                MockDataStore)
    def test_heatmap_aggregation(self):
        """Authenticated request to get heatmap aggregation."""
        self.login()
        data = dict(query=u'test', filter={}, aggtype=u'heatmap')
        response = self.client.post(
            self.resource_url,
            data=json.dumps(data, ensure_ascii=False),
            content_type=u'application/json')
        self.assert200(response)
class EventResourceTest(BaseTest):
    """Test EventResource."""
    resource_url = u'/api/v1/sketches/1/event/'
    # Fields that must appear (with these values) in the event returned by
    # the mocked datastore; the response may contain additional keys.
    expected_response = {
        u'objects': {
            u'timestamp_desc': u'',
            u'timestamp': 1410895419859714,
            u'label': u'',
            u'source_long': u'',
            u'source_short': u'',
            u'es_index': u'',
            u'es_id': u'',
            u'message': u'',
            u'datetime': u'2014-09-16T19:23:40+00:00'
        }
    }

    @mock.patch(u'timesketch.api.v1.resources.ElasticsearchDataStore',
                MockDataStore)
    def test_get_event(self):
        """Authenticated request to get an event from the datastore."""
        self.login()
        response = self.client.get(self.resource_url +
                                   u'?searchindex_id=test&event_id=test')
        # Subset check without the deprecated assertDictContainsSubset:
        # merging the expected subset into the response must be a no-op.
        self.assertEqual(
            response.json, dict(response.json, **self.expected_response))
        self.assert200(response)

    @mock.patch(u'timesketch.api.v1.resources.ElasticsearchDataStore',
                MockDataStore)
    def test_invalid_index(self):
        """
        Authenticated request to get an event from the datastore, but in the
        wrong index.
        """
        self.login()
        response_400 = self.client.get(
            self.resource_url + u'?searchindex_id=wrong_index&event_id=test')
        self.assert400(response_400)
class EventAnnotationResourceTest(BaseTest):
    """Test EventAnnotationResource."""
    resource_url = u'/api/v1/sketches/1/event/annotate/'

    @mock.patch(u'timesketch.api.v1.resources.ElasticsearchDataStore',
                MockDataStore)
    def test_post_annotate_resource(self):
        """Authenticated request to create an annotation."""
        self.login()
        # Both supported annotation types go through the same endpoint.
        for annotation_type in [u'comment', u'label']:
            event = {
                u'_type': u'test_event',
                u'_index': u'test',
                u'_id': u'test'
            }
            data = dict(
                annotation=u'test',
                annotation_type=annotation_type,
                events=[event])
            response = self.client.post(
                self.resource_url,
                data=json.dumps(data),
                content_type=u'application/json')
            self.assertIsInstance(response.json, dict)
            # assertEqual: assertEquals is a deprecated unittest alias.
            self.assertEqual(response.status_code, HTTP_STATUS_CODE_CREATED)

    def test_post_annotate_invalid_index_resource(self):
        """
        Authenticated request to create an annotation, but in the wrong index.
        """
        self.login()
        data = dict(
            annotation=u'test',
            annotation_type=u'comment',
            event_id=u'test',
            searchindex_id=u'invalid_searchindex')
        response = self.client.post(
            self.resource_url,
            data=json.dumps(data),
            content_type=u'application/json')
        self.assertEqual(response.status_code, HTTP_STATUS_CODE_BAD_REQUEST)
class SearchIndexResourceTest(BaseTest):
    """Test SearchIndexResource."""
    resource_url = u'/api/v1/searchindices/'

    @mock.patch(u'timesketch.api.v1.resources.ElasticsearchDataStore',
                MockDataStore)
    def test_post_create_searchindex(self):
        """Authenticated request to create a searchindex."""
        self.login()
        data = dict(
            searchindex_name=u'test3', es_index_name=u'test3', public=False)
        response = self.client.post(
            self.resource_url,
            data=json.dumps(data),
            content_type=u'application/json')
        self.assertIsInstance(response.json, dict)
        # assertEqual: assertEquals is a deprecated unittest alias.
        self.assertEqual(response.status_code, HTTP_STATUS_CODE_CREATED)
class TimelineListResourceTest(BaseTest):
    """Test TimelineList resource."""
    resource_url = u'/api/v1/sketches/1/timelines/'

    def test_add_existing_timeline_resource(self):
        """Authenticated request to add an existing timeline to a sketch."""
        self.login()
        data = dict(timeline=1)
        response = self.client.post(
            self.resource_url,
            data=json.dumps(data, ensure_ascii=False),
            content_type=u'application/json')
        # Re-adding an already attached timeline returns 200, not 201.
        # assertEqual: assertEquals is a deprecated unittest alias.
        self.assertEqual(response.status_code, HTTP_STATUS_CODE_OK)

    def test_add_new_timeline_resource(self):
        """Authenticated request to add a new timeline to a sketch."""
        self.login()
        data = dict(timeline=2)
        response = self.client.post(
            self.resource_url,
            data=json.dumps(data, ensure_ascii=False),
            content_type=u'application/json')
        self.assertEqual(response.status_code, HTTP_STATUS_CODE_CREATED)
|
# From Michele's email:
# POST https://climbdev.smartcommunitylab.it/v2/api/event/TEST/adca3db3-68d1-4197-b834-a45d61cf1c21/vlab
# Header: "Authorization","Bearer 831a2cc0-48bd-46ab-ace1-c24f767af8af"
# Body:
# [
# {
# "wsnNodeId" : "<id>",
# "eventType" : 901,
# "timestamp" : <timestamp>,
# "payload" : {
# "passengerId" : "<PASSWORD>",
# "latitude" : 46.0678106,
# "longitude" : 11.1515548,
# "accuracy" : 18.5380001068115
# }
# }
# ]
# contact data array formed by:
# {
# "wsnNodeId" : <BeaconID> "string"
# "eventType" : 901,
# "timestamp" : <timestamp>,
# "payload" : {
# "EndNodeID" : "string"
# "lastRSSI" : <int>
# "maxRSSI" : <int>
# "pktCounter" : <int>
# }
# }
# working version
# payloadData = [{
# "wsnNodeId" : "Node01",
# "eventType" : 901,
# "timestamp" : 1511361257,
# "payload" : {
# "passengerId" : "a<PASSWORD>-ab7a-446f-a8c2-4c20<PASSWORD>5c5",
# "latitude" : 46.0678106,
# "longitude" : 11.1515548,
# "accuracy" : 18.5380001068115
# }
# }]
# POST requests with data
# case 1: header no specified content type, request with json=payloadData where payloadData is python list and dict
# headers = {'Authorization': 'Bearer 831a2cc0-48bd-46ab-ace1-c24f767af8af'}
# fakeDataPayload = [
# {
# "wsnNodeId" : "Beaconid_01",
# "eventType" : EVENT_BECON_CONTACT,
# "timestamp" : timeStart,
# "payload" : {
# "EndNodeID": "VelaLab_EndNode_03",
# "lastRSSI": -30,
# "maxRSSI": -20,
# "pktCounter" : 15
# }
# },
# {
# "wsnNodeId" : "Beaconid_01",
# "eventType" : EVENT_BECON_CONTACT,
# "timestamp" : timeStart,
# "payload" : {
# "EndNodeID" : "VelaLab_EndNode_04",
# "lastRSSI" : -31,
# "maxRSSI" : -21,
# "pktCounter" : 16
# }
# }
# ]
#
# r = requests.post(urlDev, json=fakeDataPayload, headers=headers)
# case 2: header specified content type 'application/json', encode payloadData to jsonData (from python list and dict to json), request with data=jsonData
# headers = {'Authorization': 'Bearer 831a2cc0-48bd-46ab-ace1-c24f767af8af', 'Content-Type': 'application/json'}
# fakeDataPayload = [
# {
# "wsnNodeId" : "Beaconid_01",
# "eventType" : EVENT_BECON_CONTACT,
# "timestamp" : timeStart,
# "payload" : {
# "EndNodeID": "VelaLab_EndNode_03",
# "lastRSSI": -30,
# "maxRSSI": -20,
# "pktCounter" : 15
# }
# },
# {
# "wsnNodeId" : "Beaconid_01",
# "eventType" : EVENT_BECON_CONTACT,
# "timestamp" : timeStart,
# "payload" : {
# "EndNodeID" : "VelaLab_EndNode_04",
# "lastRSSI" : -31,
# "maxRSSI" : -21,
# "pktCounter" : 16
# }
# }
# ]
#
# jsonData = json.dumps(fakeDataPayload)
# r = requests.post(urlDev, data=jsonData, headers=headers)
import requests
import json
import time

# Event type code used by the CLIMB API for a beacon-contact event.
EVENT_BECON_CONTACT = 901

print("Creating url and header...")

# CLIMB endpoints: two development URLs and one production URL.
urlDev_CLIMB = 'https://climbdev.smartcommunitylab.it/v2/api/event/TEST/adca3db3-68d1-4197-b834-a45d61cf1c21/vlab'
urlDev = 'https://climbdev.smartcommunitylab.it/v2/api/event/TEST/4220a8bb-3cf5-4076-b7bd-9e7a1ff7a588/vlab'
urlProd = ' https://climb.smartcommunitylab.it/v2/api/event/TEST/17ee8383-4cb0-4f58-9759-1d76a77f9eff/vlab'

headers = {'Authorization': 'Bearer 831a2cc0-48bd-46ab-ace1-c24f767af8af'}
# Alternative: set Content-Type explicitly and POST data=json.dumps(...).
# headers = {'Authorization': 'Bearer 831a2cc0-48bd-46ab-ace1-c24f767af8af', 'Content-Type': 'application/json'}

timeStart = int(time.time())
print("Timestamp :", timeStart)

lastRSSI = -30
maxRSSI = -20

fakeDataPayloadSingle = [{
    "wsnNodeId": 'Beaconid_01',
    "eventType": 901,
    "timestamp": 112345,
    "payload": {
        "EndNodeID": 'VelaLab_EndNode_05',
        "lastRSSI": -30,
        "maxRSSI": -20,
        "pktCounter": 15
    }
}]


def _contact_event(end_node_id, last_rssi, max_rssi, pkt_counter):
    """Build one beacon-contact event in the CLIMB wire format."""
    return {
        "wsnNodeId": "Beaconid_01",
        "eventType": EVENT_BECON_CONTACT,
        "timestamp": timeStart,
        "payload": {
            "EndNodeID": end_node_id,
            "lastRSSI": last_rssi,
            "maxRSSI": max_rssi,
            "pktCounter": pkt_counter
        }
    }


fakeDataPayload = [
    _contact_event("VelaLab_EndNode_03", -30, -20, 15),
    _contact_event("VelaLab_EndNode_04", -31, -21, 16),
    _contact_event("VelaLab_EndNode_05", -32, -22, 17),
]

print("\nData:", fakeDataPayload)

# requests serializes the list/dict payload and sets Content-Type itself.
r = requests.post(urlDev, json=fakeDataPayload, headers=headers)
# Alternative: pre-encode and send raw data with explicit Content-Type.
# jsonData = json.dumps(fakeDataPayload)
# print("\njsonData:", jsonData)
# r = requests.post(urlDev, data=jsonData, headers=headers)

print("\nResponse:", r.text)
|
"""VTA related intrinsics"""
from __future__ import absolute_import as _abs
import tvm
def gemm(env, mock=False):
    """Matrix-matrix multiply intrinsic

    Parameters
    ----------
    env : Environment
        The Environment

    mock : bool
        Whether create a mock version.

    Returns
    -------
    intrin : TensorIntrin
        Tensor intrinsic matching one (BATCH, BLOCK_IN) x (BLOCK_OUT,
        BLOCK_IN) -> (BATCH, BLOCK_OUT) GEMM tile on the VTA core.
    """
    # Sanity-check that the hardware element widths decompose exactly
    # into the expected (rows, cols) tile shapes.
    wgt_lanes = env.WGT_ELEM_BITS // env.WGT_WIDTH
    assert wgt_lanes == env.BLOCK_OUT * env.BLOCK_IN
    wgt_shape = (env.BLOCK_OUT, env.BLOCK_IN)
    assert wgt_shape[0] * wgt_shape[1] == wgt_lanes
    inp_lanes = env.INP_ELEM_BITS // env.INP_WIDTH
    assert inp_lanes == env.BATCH * env.BLOCK_IN
    inp_shape = (env.BATCH, env.BLOCK_IN)
    assert inp_shape[0] * inp_shape[1] == inp_lanes
    out_lanes = env.ACC_ELEM_BITS // env.ACC_WIDTH
    assert out_lanes == env.BATCH * env.BLOCK_OUT
    out_shape = (env.BATCH, env.BLOCK_OUT)
    assert out_shape[0] * out_shape[1] == out_lanes
    # Declare the single-tile compute pattern this intrinsic replaces.
    wgt = tvm.placeholder((wgt_shape[0], wgt_shape[1]),
                          dtype="int%d" % env.WGT_WIDTH,
                          name=env.wgt_scope)
    inp = tvm.placeholder((inp_shape[0], inp_shape[1]),
                          dtype="int%d" % env.INP_WIDTH,
                          name=env.inp_scope)
    k = tvm.reduce_axis((0, wgt_shape[1]), name="k")
    out_dtype = "int%d" % env.ACC_WIDTH
    # out[i, j] = sum_k inp[i, k] * wgt[j, k], accumulated in ACC width.
    out = tvm.compute((out_shape[0], out_shape[1]),
                      lambda i, j: tvm.sum(inp[i, k].astype(out_dtype) *
                                           wgt[j, k].astype(out_dtype),
                                           axis=[k]),
                      name="out")
    # Buffer declarations bind each operand to its on-chip scratchpad
    # scope; offset_factor/data_alignment are in units of lanes so
    # addresses stay element-aligned for the hardware.
    wgt_layout = tvm.decl_buffer(
        wgt.shape, wgt.dtype, env.wgt_scope,
        scope=env.wgt_scope, offset_factor=wgt_lanes, data_alignment=wgt_lanes)
    inp_layout = tvm.decl_buffer(
        inp.shape, inp.dtype, env.inp_scope,
        scope=env.inp_scope, offset_factor=inp_lanes, data_alignment=inp_lanes)
    out_layout = tvm.decl_buffer(
        out.shape, out.dtype, env.acc_scope,
        scope=env.acc_scope, offset_factor=out_lanes, data_alignment=out_lanes)

    def intrin_func(ins, outs):
        """Matrix-matrix multiply intrinsic function"""
        dinp, dwgt = ins
        dout = outs[0]

        def instr(index):
            """Generate matrix-matrix multiply VTA instruction"""
            irb = tvm.ir_builder.create()
            dev = env.dev
            # Tag the micro-op stream for the compute queue/coprocessor.
            irb.scope_attr(dev.vta_axis, "coproc_scope",
                           dev.get_task_qid(dev.QID_COMPUTE))
            irb.scope_attr(dev.vta_axis, "coproc_uop_scope",
                           dev.vta_push_uop)
            if index in (0, 2):
                # index 0 (body) and 2 (update): push a real GEMM uop
                # reading inp/wgt and accumulating into out.
                irb.emit(tvm.call_extern(
                    "int32", "VTAUopPush",
                    0, 0,
                    dout.access_ptr("rw", "int32"),
                    dinp.access_ptr("r", "int32"),
                    dwgt.access_ptr("r", "int32"),
                    0, 0, 0))
            else:
                # index 1 (reset): second argument 1 with null operand
                # pointers — presumably zero-initializes the accumulator
                # tile; confirm against the VTA runtime's VTAUopPush.
                irb.emit(tvm.call_extern(
                    "int32", "VTAUopPush",
                    0, 1,
                    dout.access_ptr("rw", "int32"),
                    0,
                    0,
                    0, 0, 0))
            return irb.get()
        # return a triple of normal-set, reset, update
        nop = tvm.make.Evaluate(0)
        if mock:
            # Mock mode: emit no-ops so schedules can be built/tested
            # without generating real hardware instructions.
            return (nop, nop, nop)
        return (instr(0), instr(1), instr(2))

    return tvm.decl_tensor_intrin(out.op, intrin_func,
                                  name="GEMM",
                                  binds={inp: inp_layout,
                                         wgt: wgt_layout,
                                         out: out_layout})
|
# Importing all modules
import shutil
from PIL import Image , ImageQt
from GalleryMan.themes.filters import Filters
from GalleryMan.assets.QtHelpers import PopUpMessage
from PyQt5.QtCore import QPropertyAnimation, Qt
from PyQt5.QtGui import QCursor, QImage, QPixmap
from PyQt5.QtWidgets import QDialog, QDialogButtonBox, QGraphicsOpacityEffect, QLabel, QPushButton, QVBoxLayout
from GalleryMan.assets.QtImageProcessor import ImageProcessor
import os
# Animation helpers for hiding (fade) and showing (unfade) widgets.
def _opacity_animation(holder, widget, start, end):
    """Install a fresh opacity effect on *widget* and return its animation.

    *holder* receives the ``effect``/``animation`` attributes. Callers in
    this file sometimes invoke the methods below unbound with the Animation
    class itself as ``self`` (e.g. ``Animation.fade(Animation, w)``), so
    attribute writes must go through the object the caller supplied.
    The returned QPropertyAnimation is NOT started.
    """
    holder.effect = QGraphicsOpacityEffect()
    widget.setGraphicsEffect(holder.effect)
    holder.animation = QPropertyAnimation(holder.effect, b"opacity")
    holder.animation.setDuration(200)  # every fade takes 200 ms
    holder.animation.setStartValue(start)
    holder.animation.setEndValue(end)
    return holder.animation


class Animation:
    """Factory for widget opacity fade-out / fade-in animations."""

    def fade(self, widget, start=1, end=0):
        """Return an animation fading *widget* out (opacity start -> end)."""
        return _opacity_animation(self, widget, start, end)

    def unfade(self, widget, start=0, end=1):
        """Return an animation fading *widget* in (reverse defaults of fade)."""
        # Previously a byte-for-byte copy of fade(); now shares one helper.
        return _opacity_animation(self, widget, start, end)
class PaletteView:
    """Image-editing actions (blur, sharpen, brightness, ...) applied with a
    fade-out / process / fade-in animation on the output widget.

    The five public editing methods previously duplicated the same 15-line
    body; they now share the private _apply() helper.
    """

    def __init__(self, main_window, image, out_widget, config) -> None:
        # Make all args global
        self.edited = False
        self.config = config
        self.out_widget = out_widget
        self.main_window = main_window
        # Initialize the image processor class and hand it the image.
        self.processors = ImageProcessor()
        self.processors.add_image(image)

    def _processed_image_path(self):
        """Path where the intermediate processed image is persisted."""
        return os.path.join(
            os.path.expanduser("~"), ".galleryman", "data", "processed_image.png")

    def _apply(self, processor_func):
        """Fade the widget to half opacity, run *processor_func*, show result.

        processor_func is a zero-argument callable returning a PIL image
        (one of the ImageProcessor methods). It is invoked only once the
        fade-out animation has finished.
        """
        # Mark the image as edited so callers can offer to save/discard.
        self.edited = True

        def callback():
            # Render the processed image into the output widget.
            self.out_widget.set_pixmap(self.createPixmap(processor_func()))
            # Persist the result so later stages (filters) pick it up.
            self.out_widget.pixmap().save(self._processed_image_path())
            # Partial unhide (original code passes the class as self).
            Animation.unfade(Animation, self.out_widget, 0.5).start()

        # Partial hide while the image is being processed.
        self.animations = Animation.fade(Animation, self.out_widget, end=0.5)
        self.animations.finished.connect(callback)
        self.animations.start()

    def blur(self):
        """Blur the image."""
        self._apply(self.processors.blur)

    def sharp(self):
        """Sharpen the image."""
        self._apply(self.processors.sharpen)

    def increaseBrightness(self):
        """Increase the image brightness."""
        self._apply(self.processors.increaseBrightness)

    def increaseContrast(self):
        """Increase the image contrast."""
        self._apply(self.processors.increaseContrast)

    def increaseExposure(self):
        """Increase the image exposure."""
        self._apply(self.processors.increaseExposure)

    def createPixmap(self, image: Image):
        """Convert a PIL image to a QPixmap.

        R and B channels are swapped before conversion to match QImage's
        Format_ARGB32 byte order, as in the original implementation.
        """
        if image.mode == "RGB":
            r, g, b = image.split()
            image = Image.merge("RGB", (b, g, r))
        elif image.mode == "RGBA":
            r, g, b, a = image.split()
            image = Image.merge("RGBA", (b, g, r, a))
        elif image.mode == "L":
            image = image.convert("RGBA")
        im2 = image.convert("RGBA")
        data = im2.tobytes("raw", "RGBA")
        qim = QImage(data, image.size[0], image.size[1], QImage.Format_ARGB32)
        pixmap = QPixmap.fromImage(qim)
        return pixmap
class FilterView:
    """Filter previews (sepia, grayscale, ...) applied to the displayed image.

    The twelve public filter methods previously duplicated the same body;
    they now share the private _apply_filter() helper.
    """

    # Path where the intermediate processed image is persisted.
    PROCESSED_IMAGE = os.path.join(
        os.path.expanduser("~"), ".galleryman", "data", "processed_image.png")

    def __init__(self, main_window, out_widget, scrollArea, icons, callback) -> None:
        # Make args global
        self.original = scrollArea.width()
        self.scrollArea = scrollArea
        self.out_widget = out_widget
        self.popup = PopUpMessage()
        self.animation = Animation()
        # Filters operate on the last persisted processed image.
        self.imageProcessor = Filters(
            Image.open(self.PROCESSED_IMAGE).convert("RGBA"))
        self.callback = callback
        self.edited = False
        self.main_window = main_window

    def partial_hide(self):
        """Return (not start) a fade to half opacity."""
        return Animation.fade(Animation, self.out_widget, 1, 0.5)

    def partial_unhide(self):
        """Fade the widget back from half to full opacity."""
        Animation.unfade(Animation, self.out_widget, 0.5).start()

    def _apply_filter(self, filter_func):
        """Fade out, run *filter_func* (a Filters method), display and persist.

        filter_func is invoked once the fade-out animation finishes; the
        result is rendered into the widget and saved to PROCESSED_IMAGE.
        """
        self.edited = True

        def callback():
            # Apply the effect and convert the PIL result for Qt display.
            self.image = ImageQt.ImageQt(filter_func())
            self.out_widget.setPixmap(QPixmap.fromImage(self.image))
            self.out_widget.pixmap().save(self.PROCESSED_IMAGE)
            self.partial_unhide()

        # Partial hide while the image is being processed.
        self.animation = self.partial_hide()
        self.animation.start()
        self.animation.finished.connect(callback)

    def shady(self):
        self._apply_filter(self.imageProcessor.shady)

    def sepia(self):
        self._apply_filter(self.imageProcessor.sepia)

    def cherry(self):
        self._apply_filter(self.imageProcessor.cherry)

    def underwater(self):
        self._apply_filter(self.imageProcessor.underwater)

    def purple(self):
        self._apply_filter(self.imageProcessor.purple)

    def pink(self):
        self._apply_filter(self.imageProcessor.pink)

    def dark(self):
        self._apply_filter(self.imageProcessor.dark)

    def clear(self):
        self._apply_filter(self.imageProcessor.clear)

    def realistic(self):
        self._apply_filter(self.imageProcessor.realistic)

    def cool_filter(self):
        self._apply_filter(self.imageProcessor.cool)

    def clear_filter(self):
        # NOTE(review): behaviorally identical to clear(); both names kept
        # because callers may bind either.
        self._apply_filter(self.imageProcessor.clear)

    def grayscale(self):
        self._apply_filter(self.imageProcessor.grayscale)

    def show_dialog(self, dialog: QDialog, parent, functions):
        """Show a modal "apply the viewed filter?" dialog.

        functions is a pair of callables bound, in order, to the
        "Continue Changes" and "Discard Changes" buttons.
        """
        self.dialog = dialog
        self.dialog.setParent(parent)
        # Set up the dialog
        layout = QVBoxLayout()
        label = QLabel(text="Do you want to apply the viewed filter?")
        label.setAlignment(Qt.AlignCenter)
        label.setStyleSheet("""
            QLabel{
                font-size: 20px;
                color: #D8DEE9;
                font-family: "SauceCodePro Nerd Font"
            }
        """)
        layout.addWidget(label)
        self.dialog.setFixedWidth(700)
        self.dialog.setFixedHeight(120)
        self.buttons = QDialogButtonBox()
        # Buttons
        for text, func in zip([" Continue Changes", " Discard Changes"], functions):
            button = QPushButton()
            button.clicked.connect(func)
            button.setText(text)
            self.buttons.addButton(button, QDialogButtonBox.ActionRole)
            button.setFlat(True)
            button.setCursor(QCursor(Qt.PointingHandCursor))
            button.setStyleSheet('font-size: 20px; color: #D8DEE9')
        layout.addWidget(self.buttons)
        self.dialog.setLayout(layout)
        self.dialog.exec_()
        return self.dialog

    def remove_self(self):
        """Close the filter panel, asking about unsaved edits first."""
        if (self.edited):
            self.show_dialog(
                QDialog(), self.main_window, [self.useUpdate, self.removeUpdated])
        else:
            self.scrollArea.setFixedWidth(self.original)
            self.callback()

    def useUpdate(self):
        """Keep the filtered image: copy the edit buffer over PROCESSED_IMAGE."""
        self.dialog.hide()
        self.callback()
        shutil.copy(
            os.path.join("GalleryMan", "assets", "currently_edited.png"),
            self.PROCESSED_IMAGE)
        # NOTE(review): set_pixmap here vs setPixmap in removeUpdated —
        # looks inconsistent; confirm which API the widget exposes.
        self.out_widget.set_pixmap(QPixmap(self.PROCESSED_IMAGE))

    def removeUpdated(self):
        """Discard the filtered image and delete the persisted buffer."""
        self.dialog.hide()
        self.out_widget.setPixmap(QPixmap(self.PROCESSED_IMAGE))
        self.callback()
        os.remove(self.PROCESSED_IMAGE)
import codecs, json
import csv, io
import string, random, secrets
from django.http import JsonResponse,HttpResponse
from django.core.validators import URLValidator
from django.core.exceptions import ValidationError
from django.views.decorators.http import require_POST, require_http_methods
from django.core.exceptions import ValidationError
from django.views.decorators.csrf import csrf_exempt
from django.contrib import messages
from django.core.files.storage import FileSystemStorage
from django.contrib.auth import login, authenticate
from django.contrib.auth.forms import UserCreationForm
from django.shortcuts import render, redirect
import pandas as pd
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
from .forms import SignUpForm
@csrf_exempt
@require_http_methods(['POST', 'GET'])
def analyze(request):
    """Accept an uploaded CSV of product reviews and return sentiment stats.

    GET returns a placeholder JSON telling the client to upload a file.
    POST expects a multipart upload under the key 'file', runs a saved
    Keras sentiment model over the 'Reviews' column and responds with
    positive/neutral/negative counts, rating counts and an overall verdict.

    NOTE(review): model, vocab and media paths are hard-coded absolute
    Windows paths ('G:/...') — this only works on the original dev machine;
    they should come from settings. TODO confirm and parameterize.
    """
    if request.method == "GET":
        return JsonResponse({
            'No Data': 'Upload a CSV File'
        })
    else:
        csv_file = request.FILES['file']
        # check if it is a csv file
        # NOTE(review): on a non-CSV upload this only records a flash message
        # and then continues processing anyway — presumably it should return
        # early; verify intended behavior.
        if not csv_file.name.endswith('.csv'):
            messages.error(request, 'THIS IS NOT A CSV FILE')
        # Build a random 20-char file name (14 chars via `random`, 6 via
        # `secrets`) so concurrent uploads cannot collide on disk.
        randString1 = ''.join(random.choices(string.ascii_uppercase + string.digits, k = 7))
        randString2 = ''.join(random.choices(string.ascii_uppercase + string.digits, k = 7))
        secureStr = ''.join((secrets.choice(string.ascii_letters) for i in range(6)))
        randomName= randString1 + randString2 + secureStr + '.csv'
        print("Single String: ", randomName)
        fname = csv_file.name  # NOTE(review): unused local
        filepath = r'G:/New folder (3)/sentimentVenv/scrapy-with-django-master/media/' + randomName
        fs = FileSystemStorage()
        # Persist the upload under the random name; `filepath` assumes the
        # storage root matches the hard-coded media folder — TODO confirm.
        fs.save(randomName, csv_file)
        reader = csv.reader(csv_file)  # NOTE(review): unused local
        data_set = csv_file.read().decode('UTF-8')  # NOTE(review): unused local
        print(" ::::::::::::::::::::::::::::::: HELLO ::::::::::::::::::::::")
        # Load the trained sentiment model and its token encoder from disk.
        sa_load = tf.keras.models.load_model('G:/New folder (3)/sentimentVenv/scrapy-with-django-master/boardCrawler/sentiment_analysis.hdf5', compile=False)
        encoder = tfds.features.text.TokenTextEncoder.load_from_file('G:/New folder (3)/sentimentVenv/scrapy-with-django-master/boardCrawler/sa_encoder.vocab')
        # Re-read the saved file with pandas; expects 'Rating' and 'Reviews'
        # columns — TODO confirm against the crawler's CSV schema.
        df = pd.read_csv(filepath,encoding='utf-8')
        df.head()
        df['Rating'].value_counts()
        print(df['Rating'].value_counts())
        print(">>>>>>>>>>>>>>>>> I am <NAME> <<<<<<<<<<<<<<<<<<<<<<<< ")
        rev = df['Reviews']

        def pad_to_size(vec, size):
            # Right-pad `vec` with zeros in place up to `size` tokens.
            zeros = [0] * (size -len(vec))
            vec.extend(zeros)
            return vec

        def predict_fn_one_sentiment(pred_text):
            # Score a single review (currently unused; kept for manual tests).
            # NOTE(review): the three threshold ranges below overlap
            # (>1, [0,2], <1) — only the first matching branch returns, so the
            # labels are inconsistent with predict_fn's thresholds; verify.
            encoded_pred_text = encoder.encode(pred_text)
            print(encoded_pred_text)
            encoded_pred_text = pad_to_size(encoded_pred_text,100)
            print(encoded_pred_text)
            encoded_pred_text = tf.cast(encoded_pred_text, tf.float32)
            prediction = sa_load.predict(tf.expand_dims(encoded_pred_text, 0))
            if prediction > 1:
                print("Sentiment of this review is positive :)")
                return prediction
            if prediction >= 0 and prediction <= 2:
                print("Sentiment of this review is Netural,just satisfying")
                return prediction
            if prediction < 1:
                print("Sentiment of this product is negetive :(")
                return prediction

        positiveCount=0
        negativeCount=0
        neutralCount=0

        def predict_fn(pred_text):
            # Score every review and bucket model outputs into
            # positive (>2), neutral ([0, 2]) and negative (<0) tallies.
            nonlocal positiveCount,negativeCount,neutralCount
            for rev in pred_text:
                #print(rev)
                encoded_pred_text = encoder.encode(rev)
                encoded_pred_text = pad_to_size(encoded_pred_text,300)
                #print(encoded_pred_text)
                encoded_pred_text = tf.cast(encoded_pred_text, tf.float32)
                predictions = sa_load.predict(tf.expand_dims(encoded_pred_text, 0))
                print(predictions)
                if predictions > 2:
                    positiveCount += 1
                if predictions >= 0 and predictions <= 2:
                    neutralCount += 1
                if predictions < 0:
                    negativeCount += 1
            return positiveCount,negativeCount,neutralCount

        predictions = predict_fn(rev)
        print("PREDICTION >>>...............::::: <<<<<<<<<<<<<<<< ", predictions)
        # pred_text=input("Write sentense to check sentiment: ")
        # prediction=predict_fn_one_sentiment(pred_text)
        # print(prediction)
        print(positiveCount)
        print(negativeCount)
        print(neutralCount)
        # Summarize the dominant sentiment as a human-readable verdict plus a
        # flag (1 positive, -1 negative, 0 neutral). Ties leave sentimentObj
        # empty — presumably intentional; verify with the frontend.
        sentimentString =""
        sentimentObj={}
        if positiveCount > negativeCount and positiveCount > neutralCount:
            sentimentString = "Most of the users are happy with your product."
            sentimentObj.update({
                'comment':sentimentString,
                'flag' : 1
            })
            print(sentimentObj)
        if negativeCount > positiveCount and negativeCount> neutralCount:
            sentimentString = "Most of the users are unhappy with your product"
            sentimentObj.update({
                'comment':sentimentString,
                'flag' : -1
            })
        if neutralCount > positiveCount and neutralCount> negativeCount:
            sentimentString = "Your product is just satisfactory"
            sentimentObj.update({
                'comment':sentimentString,
                'flag' : 0
            })
        # Rating histogram serialized via pandas' to_json, round-tripped
        # through json.loads to embed as a plain dict in the response.
        rating = df['Rating'].value_counts().to_json()
        ratingCounts = json.loads(rating)
        totalReviews = len(df.index)
        return JsonResponse({
            'posNegReviews' : {
                'positive':positiveCount,
                'negative': negativeCount,
                'neutral' :neutralCount
            },
            'ratingCounts': ratingCounts,
            'totalReviews' : totalReviews,
            'sentiment': sentimentObj,
        })
def signup(request):
    """Handle user registration.

    GET renders an empty sign-up form. POST validates the submitted form,
    creates the user, authenticates with the cleaned credentials, logs the
    new user in and redirects to 'home'. An invalid POST re-renders the
    bound form with its validation errors.
    """
    if request.method == 'POST':
        form = SignUpForm(request.POST)
        if form.is_valid():
            form.save()
            username = form.cleaned_data.get('username')
            raw_password = form.cleaned_data.get('password1')
            # Bug fix: the original called authenticate(password=<PASSWORD>),
            # a scrubbed placeholder that is not valid Python; use the
            # cleaned raw password from the form.
            user = authenticate(username=username, password=raw_password)
            login(request, user)
            return redirect('home')
    else:
        form = SignUpForm()
    return render(request, 'registration/signup.html', {'form': form})
|
<filename>examples/QKD/e91.py<gh_stars>10-100
from qunetsim.backends import EQSNBackend
from qunetsim.components import Host
from qunetsim.components import Network
from qunetsim.objects import Logger
from qunetsim.objects import Qubit
import random
import numpy as np
Logger.DISABLED = True
def expected_value(result_string_alice, result_string_bob, bases_string_alice, bases_string_bob, base_alice, base_bob):
    """Count joint measurement outcomes for one pair of measurement bases.

    For every round in which Alice measured in ``base_alice`` and Bob in
    ``base_bob``, tallies which of the four joint outcomes occurred.

    Parameters
    ----------
    result_string_alice, result_string_bob : sequence of str
        Per-round measurement outcomes, each element '0' or '1'.
    bases_string_alice, bases_string_bob : sequence of str
        Per-round basis labels ('a1'..'a3' / 'b1'..'b3'), aligned with the
        result sequences.
    base_alice, base_bob : str
        The basis pair to select rounds for.

    Returns
    -------
    list of int
        Counts ``[n00, n01, n10, n11]`` of the joint outcomes.
    """
    # Fix: the accumulator was named `list`, shadowing the builtin; also
    # replaced the index-based loop and if/elif ladder with a lookup table.
    outcome_index = {('0', '0'): 0, ('0', '1'): 1, ('1', '0'): 2, ('1', '1'): 3}
    counts = [0, 0, 0, 0]
    for res_a, res_b, bas_a, bas_b in zip(result_string_alice, result_string_bob,
                                          bases_string_alice, bases_string_bob):
        if bas_a == base_alice and bas_b == base_bob:
            idx = outcome_index.get((res_a, res_b))
            # Outcomes other than '0'/'1' are ignored, as in the original.
            if idx is not None:
                counts[idx] += 1
    return counts
def chsh(result_string_alice, result_string_bob, bases_string_alice, bases_string_bob):
    """Compute the CHSH correlation value S from the test measurement records.

    S = E(a1,b1) - E(a1,b3) + E(a3,b1) + E(a3,b3), where each E is the
    normalized correlation coefficient for one basis pair.
    """
    def correlation(base_a, base_b):
        # Correlation coefficient (n00 - n01 - n10 + n11) / N for one pair.
        n00, n01, n10, n11 = expected_value(
            result_string_alice, result_string_bob,
            bases_string_alice, bases_string_bob, base_a, base_b)
        return (n00 - n01 - n10 + n11) / (n00 + n01 + n10 + n11)

    return (correlation('a1', 'b1') - correlation('a1', 'b3')
            + correlation('a3', 'b1') + correlation('a3', 'b3'))
def alice(alice, bob, number_of_entanglement_pairs):
    """Alice's side of the E91 QKD protocol.

    For each round: prepares a singlet pair, sends one half to Bob, measures
    her half in a randomly chosen basis, exchanges basis choices classically,
    and either adds the result to the sifted key (compatible bases) or to the
    CHSH test record (test bases). Finally ships the test record to Bob.

    Parameters: `alice` is this Host, `bob` the peer host id, and
    `number_of_entanglement_pairs` the number of rounds.
    """
    # Alice's three measurement angles; basis k uses angles[k - 1].
    angles = [0, np.pi/4, np.pi/2]
    # Random basis choice in {1, 2, 3} for every round, drawn up front.
    bases_choice = [random.randint(1,3) for i in range(number_of_entanglement_pairs)]
    test_results_alice = []
    test_bases_alice = []
    sifted_key_alice = []
    for i in range(number_of_entanglement_pairs):
        qubit_a = Qubit(alice)
        qubit_b = Qubit(alice)
        # preparation of singlet state (1/sqrt(2))*(|01> - |10>)
        qubit_a.X()
        qubit_b.X()
        qubit_a.H()
        qubit_a.cnot(qubit_b)
        print('Sending EPR pair %d' % (i + 1))
        # send_qubit returns a (qubit_id, ack) pair; we only need the ack.
        _, ack_arrived = alice.send_qubit(bob, qubit_b, await_ack=True)
        if ack_arrived:
            #rotate qubit and measure
            base_a = bases_choice[i]
            qubit_a.rz(angles[base_a - 1])
            meas_a = qubit_a.measure()
            # Announce the basis used, then wait for Bob's basis.
            ack_arrived = alice.send_classical(bob, base_a, await_ack=True)
            if not ack_arrived:
                print("Send data failed!")
            message = alice.get_next_classical(bob, wait=2)
            if message is not None:
                base_b = message.content
                # Compatible basis pairs contribute to the sifted key.
                if (base_a == 2 and base_b == 1) or (base_a == 3 and base_b == 2):
                    sifted_key_alice.append(meas_a)
                # CHSH-test basis pairs are recorded for the security check.
                elif (base_a == 1 and base_b == 1) or (base_a == 1 and base_b == 3) or (base_a == 3 and base_b == 1) or (base_a == 3 and base_b == 3):
                    test_bases_alice.append('a'+str(base_a))
                    test_results_alice.append(str(meas_a))
            else:
                print("The message did not arrive")
        else:
            print('The EPR pair was not properly established')
    # Ship the full test record to Bob so he can compute the CHSH value.
    ack_arrived = alice.send_classical(bob, (test_results_alice, test_bases_alice), await_ack=True)
    if not ack_arrived:
        print("Send data failed!")
    print("Sifted_key_alice: ", sifted_key_alice)
def bob(bob, alice, number_of_entanglement_pairs):
    """Bob's side of the E91 QKD protocol.

    For each round: receives a qubit from Alice, measures it in a randomly
    chosen basis, exchanges basis choices classically, and either adds the
    (flipped) result to the sifted key or to the CHSH test record. After all
    rounds he receives Alice's test record and prints the CHSH value.

    Parameters: `bob` is this Host, `alice` the peer host id, and
    `number_of_entanglement_pairs` the number of rounds.
    """
    # Bob's three measurement angles; basis k uses angles[k - 1].
    angles = [np.pi/4, np.pi/2, 3*(np.pi/4)]
    bob_bases = [random.randint(1,3) for i in range(number_of_entanglement_pairs)]
    test_result_bob = []
    test_bases_bob = []
    sifted_key_bob = []
    for i in range(number_of_entanglement_pairs):
        qubit_b = bob.get_data_qubit(alice, wait=5)
        if qubit_b is not None:
            base_b = bob_bases[i]
            #rotate qubit and measure
            qubit_b.rz(angles[base_b - 1])
            meas_b = qubit_b.measure()
            # Wait for Alice's basis, then reply with ours.
            message = bob.get_next_classical(alice, wait=2)
            if message is not None:
                base_a = message.content
                ack_arrived = bob.send_classical(alice, base_b, await_ack=True)
                if not ack_arrived:
                    print("Send data failed!")
                # Compatible bases: the singlet anticorrelates outcomes, so
                # Bob flips his bit (1 - meas_b) to match Alice's key bit.
                if (base_a == 2 and base_b == 1) or (base_a == 3 and base_b == 2):
                    sifted_key_bob.append(1 - meas_b)
                # CHSH-test basis pairs are recorded for the security check.
                elif (base_a == 1 and base_b == 1) or (base_a == 1 and base_b == 3) or (base_a == 3 and base_b == 1) or (base_a == 3 and base_b == 3):
                    test_bases_bob.append('b'+str(base_b))
                    test_result_bob.append(str(meas_b))
            else:
                print("Host 2 did not receive the measurement base of alice")
        else:
            print('Host 2 did not receive an EPR pair')
    # Receive Alice's test record and evaluate the CHSH inequality.
    message = bob.get_next_classical(alice, wait=2)
    if message is not None:
        test_result_alice, test_bases_alice = message.content
        print(chsh(test_result_alice, test_result_bob, test_bases_alice, test_bases_bob))
        print("sifted_key_bob: ", sifted_key_bob)
    else:
        print("Host 2 did not receive the data to compute the chsh value")
def main():
    """Wire up a two-node network and run the E91 protocol on both hosts."""
    network = Network.get_instance()
    backend = EQSNBackend()
    pair_count = 50

    network.start(['A', 'B'], backend)
    network.delay = 0.1

    # Host 'A' plays Alice, host 'B' plays Bob; each connects to the other.
    alice_host = Host('A', backend)
    alice_host.add_connection('B')
    alice_host.delay = 0
    alice_host.start()

    bob_host = Host('B', backend)
    bob_host.add_connection('A')
    bob_host.delay = 0
    bob_host.start()

    network.add_host(alice_host)
    network.add_host(bob_host)

    # Run both protocol halves concurrently and wait for completion.
    alice_thread = alice_host.run_protocol(alice, (bob_host.host_id, pair_count))
    bob_thread = bob_host.run_protocol(bob, (alice_host.host_id, pair_count))
    alice_thread.join()
    bob_thread.join()

    network.stop(True)


if __name__ == '__main__':
    main()
|
<filename>conanfile.py
from conans import ConanFile, CMake, tools
from conans.tools import download, unzip
import os
class RttrConan(ConanFile):
    """Conan recipe that downloads, builds and packages rttr 0.9.6.

    Fetches the release zip from GitHub, injects the conan setup into the
    upstream CMakeLists, forwards all build options to CMake, and collects
    headers and built libraries into the package.
    """
    name = "rttr"
    version = "0.9.6"
    description = "Conan package for rttr."
    url = "https://github.com/rttrorg/rttr"
    license = "MIT"
    settings = "arch", "build_type", "compiler", "os"
    generators = "cmake"
    # Each option maps 1:1 onto an upstream CMake cache variable (see build()).
    options = {"shared": [True, False],
               "build_unit_tests": [True, False],
               "build_with_static_runtime_libs": [True, False],
               "build_with_rtti": [True, False],
               "build_benchmarks": [True, False],
               "build_examples": [True, False],
               "build_documentation": [True, False],
               "build_installer": [True, False],
               "build_package": [True, False],
               "use_pch": [True, False],
               "custom_doxygen_style": [True, False],
               "build_website_docu": [True, False]}
    default_options = "shared=True", "build_unit_tests=False", "build_with_static_runtime_libs=False", "build_with_rtti=True", "build_benchmarks=False", "build_examples=False", "build_documentation=False", "build_installer=True", "build_package=True", "use_pch=True", "custom_doxygen_style=True", "build_website_docu=False"

    def source(self):
        # Download and unpack the tagged release, then patch the upstream
        # CMakeLists so the conan toolchain is loaded before anything else.
        project_folder = "%s-%s" % (self.name, self.version)
        zip_name = "v%s.zip" % self.version
        download ("%s/archive/%s" % (self.url, zip_name), zip_name, verify=True)
        unzip (zip_name)
        os.unlink(zip_name)
        # The replacement text is a CMake snippet; it must stay at column 0
        # inside the string literal.
        tools.replace_in_file("%s/CMakeLists.txt" % (project_folder), '''project ("rttr" LANGUAGES CXX)''',
                              '''project ("rttr" LANGUAGES CXX)
include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)
conan_basic_setup()''')

    def build(self):
        # Translate every recipe option into the matching CMake definition.
        cmake = CMake(self)
        cmake.definitions["BUILD_STATIC"] = not self.options.shared
        cmake.definitions["BUILD_RTTR_DYNAMIC"] = self.options.shared
        cmake.definitions["BUILD_UNIT_TESTS"] = self.options.build_unit_tests
        cmake.definitions["BUILD_WITH_STATIC_RUNTIME_LIBS"] = self.options.build_with_static_runtime_libs
        cmake.definitions["BUILD_WITH_RTTI"] = self.options.build_with_rtti
        cmake.definitions["BUILD_BENCHMARKS"] = self.options.build_benchmarks
        cmake.definitions["BUILD_EXAMPLES"] = self.options.build_examples
        cmake.definitions["BUILD_DOCUMENTATION"] = self.options.build_documentation
        cmake.definitions["BUILD_INSTALLER"] = self.options.build_installer
        cmake.definitions["BUILD_PACKAGE"] = self.options.build_package
        cmake.definitions["USE_PCH"] = self.options.use_pch
        cmake.definitions["CUSTOM_DOXYGEN_STYLE"] = self.options.custom_doxygen_style
        cmake.definitions["BUILD_WEBSITE_DOCU"] = self.options.build_website_docu
        project_folder = "%s-%s" % (self.name, self.version)
        cmake.configure( source_folder="%s" % (project_folder))
        cmake.build()

    def package(self):
        # Copy public headers plus any built library artifacts.
        # NOTE(review): the two "*.h" copy lines look redundant (the second
        # src path "src/rttr" does not include the versioned folder) —
        # presumably only one is effective; verify against the build tree.
        project_folder = "%s-%s" % (self.name, self.version)
        include_folder = "%s/src/rttr" % (project_folder)
        self.copy("*.h" , dst="include/rttr", src=include_folder)
        self.copy("registration", dst="include/rttr", src=include_folder)
        self.copy("type", dst="include/rttr", src=include_folder)
        self.copy("*.h", dst="include/rttr", src="src/rttr")
        self.copy("*.a" , dst="lib", keep_path=False)
        self.copy("*.so*" , dst="lib", keep_path=False)
        self.copy("*.lib", dst="lib", keep_path=False)
        self.copy("*.dylib", dst="lib", keep_path=False)
        self.copy("*.dll", dst="bin", keep_path=False)

    def package_info(self):
        # Consumers link against whatever was produced; on Linux rttr also
        # needs libdl for plugin loading.
        self.cpp_info.libdirs = ["lib", "bin"]
        self.cpp_info.libs = tools.collect_libs(self)
        if self.settings.os == "Linux":
            self.cpp_info.libs += ["dl"]
|
<reponame>ddomhoff/tmtoolkit
# -*- coding: utf-8 -*-
"""
requires "europarl_raw" corpus to be downloaded via `nltk.download()`
"""
import os
import time
import logging
from random import sample
import nltk
import lda
from tmtoolkit.corpus import Corpus
from tmtoolkit.preprocess import TMPreproc
from tmtoolkit.dtm import save_dtm_to_pickle, load_dtm_from_pickle
from tmtoolkit.lda_utils.common import print_ldamodel_topic_words, print_ldamodel_doc_topics, save_ldamodel_to_pickle
# Ten German Europarl session transcripts used as the example corpus.
FILES = """ep-00-01-17.de
ep-00-01-18.de
ep-00-01-19.de
ep-00-01-20.de
ep-00-01-21.de
ep-00-02-02.de
ep-00-02-03.de
ep-00-02-14.de
ep-00-02-15.de
ep-00-02-16.de""".split('\n')
# Corpus file ids are relative to the "german/" subfolder of europarl_raw.
FILEIDS = ['german/' + f for f in FILES]
# Cache locations for the document-term matrix and the fitted LDA model.
DTM_PICKLE = 'data/read_preproc_lda_de_dtm.pickle'
LDA_PICKLE = 'data/read_preproc_lda_de_lda.pickle'

# Verbose logging so tmtoolkit's (multiprocess) progress is visible.
logging.basicConfig(level=logging.DEBUG)
tmtoolkit_log = logging.getLogger('tmtoolkit')
tmtoolkit_log.setLevel(logging.DEBUG)
tmtoolkit_log.propagate = True

if __name__ == '__main__': # this is necessary for multiprocessing on Windows!
    if os.path.exists(DTM_PICKLE):
        # Reuse a previously computed document-term matrix if available.
        print("loading DTM data from pickle file '%s'..." % DTM_PICKLE)
        dtm, vocab, doc_labels = load_dtm_from_pickle(DTM_PICKLE)
    else:
        # Load raw documents via NLTK's europarl_raw corpus reader.
        europarl = nltk.corpus.util.LazyCorpusLoader('europarl_raw',
                                                     nltk.corpus.EuroparlCorpusReader,
                                                     fileids=FILEIDS)
        corpus = Corpus({f: europarl.raw(f_id) for f, f_id in zip(FILES, FILEIDS)})
        print("all loaded documents:")
        for dl, text in corpus.docs.items():
            print("%s: %d chars" % (dl, len(text)))
        print("-----")
        # Full preprocessing pipeline: tokenize -> POS tag -> lemmatize ->
        # lowercase -> clean, timed for reference.
        start_time = time.time()
        preproc = TMPreproc(corpus.docs, language=u'german')
        print('tokenizing...')
        preproc.tokenize()
        print('POS tagging...')
        preproc.pos_tag()
        print('lemmatization...')
        preproc.lemmatize()
        print('lowercase transform...')
        preproc.tokens_to_lowercase()
        print('cleaning...')
        preproc.clean_tokens()
        proc_time = time.time() - start_time
        print('-- processing took %f sec. so far' % proc_time)
        preproc.save_state('data/read_preproc_lda_de_state.pickle')
        print('token samples:')
        for dl, tokens in preproc.tokens_with_pos_tags.items():
            print("> %s:" % dl)
            print(">>", sample(tokens, 10))
        print('generating DTM...')
        doc_labels, vocab, dtm = preproc.get_dtm()
        print("saving DTM data to pickle file '%s'..." % DTM_PICKLE)
        save_dtm_to_pickle(dtm, vocab, doc_labels, DTM_PICKLE)
    # Fit a 30-topic LDA model on the (loaded or freshly built) DTM.
    print("running LDA...")
    model = lda.LDA(n_topics=30, n_iter=500)
    model.fit(dtm)
    # print topic-word distributions with respective probabilities
    print_ldamodel_topic_words(model, vocab)
    # print document-topic distributions with respective probabilities
    print_ldamodel_doc_topics(model, doc_labels)
    print("saving LDA model to pickle file '%s'..." % LDA_PICKLE)
    save_ldamodel_to_pickle(LDA_PICKLE, model, vocab, doc_labels)
    print("done.")
|
# (C) Copyright IBM Corp. 2016
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
__author__ = "<NAME>, <NAME>"
from collections import deque
from six.moves import xrange, zip
from copy import deepcopy
from functools import partial
from .. import arraymath as amath
from ..time_series.time_series_model import StochasticTimeSeriesModel
from ..base.sgd import AdaGrad
DEBUG = False
def sigmoid(x):
    """Numerically stable elementwise sigmoid.

    Evaluates sigmoid(x) = 1 / (1 + exp(-x)) without overflowing for large
    |x|, by writing it as exp(min(0, x)) / (1 + exp(-|x|)):

        x >= 0:  1 / (1 + exp(-x))
        x <  0:  exp(x) / (1 + exp(x))
    """
    numerator = amath.exp(amath.minimum(0, x))
    denominator = 1 + amath.exp(-abs(x))
    return numerator / denominator
class VectorRegression(StochasticTimeSeriesModel):
    """
    Vector regression for time series with unit variance

    Parameters
    ----------
    in_dim : int
        dimension of input time-series
    out_dim : int, optional
        dimension of target time-series
    order : int, optional
        order of the auto-regressive model
    SGD : Instance of SGD.SGD, optional
        Instance of a stochastic gradient method, default to AdaGrad()
    L1 : float, optional
        strength of L1 regularization
    L2 : float, optional
        strength of L2 regularization
    use_bias: boolean, optional
        whether to use bias parameters
    sigma : float, optional
        standard deviation of initial values of weight parameters
    random : arraymath.random, optional
        random number generator

    Attributes
    ----------
    len_fifo : int
        order
    in_dim : int
        in_dim
    out_dim : int
        out_dim
    variables : dict
        dictionary of model parameters
        variables["W"] : array, shape (len_fifo, in_dim, out_dim)
            variables["W"][l] corresponds to the weight from the input observed at
            time step t - l - 1 to the mean at time step t (current time).
        variables["b"] : array, shape (out_dim,)
            variables["b"] corresponds to the bias to out_pattern.
    fifo : deque
        FIFO queue storing len_fifo in_patterns, each in_pattern has shape (in_dim,).
    """

    def __init__(self, in_dim, out_dim=None, order=1, SGD=None, L1=0.,
                 L2=0., use_bias=True, sigma=0, random=None):
        if out_dim is None:
            out_dim = in_dim
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.len_fifo = order
        self.init_state()
        # Fix: keep the RNG on the instance — _get_sample() reads
        # self.random, which the original never set (AttributeError).
        if random is None:
            random = amath.random.RandomState(0)
        self.random = random
        # y ~ N(mu, s)
        # mu = b + sum_n W[n] x[n]
        self.variables = dict()
        if self.len_fifo > 0 and self.in_dim > 0 and self.out_dim > 0:
            if sigma <= 0:
                # Deterministic zero initialization of the weights.
                self.variables["W"] \
                    = amath.zeros((self.len_fifo, self.in_dim, self.out_dim),
                                  dtype=float)
            else:
                self.variables["W"] \
                    = random.normal(0, sigma,
                                    (self.len_fifo, self.in_dim, self.out_dim))
        if use_bias and self.out_dim > 0:
            self.variables["b"] = amath.zeros((self.out_dim,), dtype=float)
        if SGD is None:
            SGD = AdaGrad()
        self.SGD = SGD.set_shape(self.variables)
        # Per-variable regularization strengths.
        self.L2 = dict()
        self.L1 = dict()
        for key in self.variables:
            self.L1[key] = L1
            self.L2[key] = L2
        StochasticTimeSeriesModel.__init__(self)

    def init_state(self):
        """
        Initializing FIFO queues
        """
        self.fifo = amath.FIFO((max(0, self.len_fifo), self.in_dim))

    def _update_state(self, in_pattern):
        """
        Updating FIFO queue by appending in_pattern

        Parameters
        ----------
        in_pattern : array, shape (in_dim,)
            in_pattern to be appended to fifo.

        Returns
        -------
        popped_in_pattern : array, shape (in_dim,)
            in_pattern popped from fifo.
        """
        assert in_pattern.shape == (self.in_dim,), \
            "in_pattern must have shape (in_dim,):" + str(in_pattern.shape)
        if len(self.fifo) > 0:
            popped_in_pattern = self.fifo.push(in_pattern)
            return popped_in_pattern
        if len(self.fifo) == 0:
            # Zero-capacity queue (order 0): nothing is stored, so the
            # incoming pattern "pops" straight through.
            return in_pattern

    def _get_delta(self, out_pattern, expected=None, weightLL=False):
        """
        Getting deltas, how much we change parameters by learning a given
        out_pattern

        Parameters
        ----------
        out_pattern : array, shape (out_dim,)
            out_pattern observed at the current time
        expected : array, shape (out_dim,), optional
            out_pattern expected by the current model.
            to be computed if not given.
        weightLL : boolean, optional
            whether to weight the delta by log-likelihood of out_pattern

        Returns
        -------
        dict
            dictionary of deltas with name of a variable as a key
        """
        assert out_pattern.shape == (self.out_dim,), \
            "out_pattern must have shape (out_dim,)"
        if expected is not None:
            assert expected.shape == (self.out_dim,), \
                "expected must have shape (out_dim,)"
        if self.SGD.get_learning_rate() == 0:
            return None
        gradient = self._get_gradient(out_pattern, expected)
        func_gradients = partial(
            self._func_gradients,
            order=self.len_fifo, in_dim=self.in_dim, out_dim=self.out_dim, fifo=self.fifo, out_pattern=out_pattern)
        self.SGD.update_state(gradient, self.variables, func_gradients)
        delta = self.SGD.get_delta()
        if weightLL:
            LL = self.get_LL(out_pattern)
            for key in delta:
                delta[key] *= LL
        return delta

    @staticmethod
    def _func_gradients(params, order, in_dim, out_dim, fifo, out_pattern):
        """
        Compute gradient with given output pattern.

        Parameters
        ----------
        params : Dictionary[str, amath.ndarray]
            Dictionary of parameters
        order : int
            Order of regression
        in_dim : int
            Dimensionality of input
        out_dim : int
            Dimensionality of output
        fifo : Iterable[amath.ndarray]
            FIFO queue containing past observations
        out_pattern : amath.ndarray
            Expected pattern of output

        Returns
        -------
        gradients : Dictionary[str, amath.ndarray]
            Dictionary of gradients
        """
        fifo_array = amath.array(fifo)
        L = order
        N = out_dim
        mu = amath.zeros((N, ))
        if "b" in params:
            mu[:] = params["b"].ravel()
        if L > 0:
            mu += amath.tensordot(fifo_array, params["W"], axes=2)
        if DEBUG:
            if "b" in params:
                mu_naive = deepcopy(params["b"]).ravel()
            else:
                mu_naive = amath.zeros((N,))
            for d in xrange(L):
                mu_naive = mu_naive + fifo[d].dot(params["W"][d])
            assert amath.allclose(mu, mu_naive), "ERROR: mu has a bug"
        expected = mu
        dx = out_pattern - expected  # just to avoid redundant computation
        gradient = dict()
        if "b" in params:
            gradient["b"] = dx
        if "W" in params:
            # Outer product of each lagged input with the residual.
            # Fix: was dx[np.newaxis, ...] — `np` is not imported in this
            # module; use amath.newaxis consistently.
            gradient["W"] = fifo_array[:, :, amath.newaxis] \
                * dx[amath.newaxis, amath.newaxis, :]
        if DEBUG:
            grad_W_naive = amath.array([fifo[d].reshape((in_dim, 1)) * dx
                                        for d in range(len(fifo))])
            assert amath.allclose(gradient["W"], grad_W_naive), \
                "gradient[\"W\"] has a bug. \n{}\n{}\n{}".format(
                    gradient["W"], grad_W_naive, fifo)
        return gradient

    def _get_gradient(self, out_pattern, expected=None, applyL2=True):
        """
        Computing the gradient of log likelihood

        Parameters
        ----------
        out_pattern : array, shape (out_dim,)
            out_pattern observed at the current time
        expected : array, shape (out_dim,), optional
            out_pattern expected by the current model.
            to be computed if not given.
        applyL2 : boolean, optional
            if False, do not apply L2 regularization even if self.L2 > 0

        Returns
        -------
        dict
            dictionary of gradients with name of a variable as a key
        """
        assert out_pattern.shape == (self.out_dim,), \
            "out_pattern must have shape (out_dim,)"
        if expected is not None:
            assert expected.shape == (self.out_dim,), \
                "expected must have shape (out_dim,)"
        if expected is None:
            expected = self._get_mean()
        dx = out_pattern - expected  # just to avoid redundant computation
        gradient = dict()
        if "b" in self.variables:
            gradient["b"] = dx
        if "W" in self.variables:
            gradient["W"] = amath.op.mult_2d_1d_to_3d(self.fifo.to_array(), dx)
        if DEBUG:
            # Fix: the naive check referenced undefined self.M / self.L;
            # the actual attributes are in_dim / len_fifo.
            grad_W_naive = [self.fifo.to_array()[d].reshape((self.in_dim, 1))
                            * dx for d in range(self.len_fifo)]
            assert amath.allclose(gradient["W"], grad_W_naive), \
                "gradient[\"W\"] has a bug. \n{}\n{}\n{}".format(
                    gradient["W"], grad_W_naive, self.fifo)
        if applyL2:
            self.SGD.apply_L2_regularization(gradient, self.variables, self.L2)
        return gradient

    def _update_parameters(self, delta):
        """
        Updating parameters by delta

        Parameters
        ----------
        delta : dict
            dictionary of deltas for all variables
        """
        if delta is not None:
            self.SGD.update_with_L1_regularization(self.variables, delta, self.L1)

    def get_LL(self, out_pattern):
        """
        Computing the total LL of an out_pattern

        Parameters
        ----------
        out_pattern : array, length out_dim
            out_pattern observed at the current time

        Returns
        -------
        float
            total log likelihood of the out_pattern
        """
        if not out_pattern.shape == (self.out_dim,):
            raise ValueError("out_pattern must have shape (out_dim,)")
        mu = self._get_mean()
        # Gaussian log-density with unit variance, summed over dimensions.
        LL = - 0.5 * (out_pattern-mu)**2 - 0.5 * amath.log(2 * amath.pi)
        return amath.sum(LL)

    def predict_next(self):
        """
        Predicting next out_pattern with the estimated mean

        Returns
        -------
        array, shape (out_dim, )
            prediction
        """
        return self._get_mean()

    def _get_mean(self):
        """
        Computing estimated mean

        Returns
        -------
        array, shape (out_dim,)
            estimated mean, or expected out_pattern in this case
        """
        return self._get_conditional_negative_energy()

    def _get_conditional_negative_energy(self):
        """
        Computing the conditional negative energy given fired

        Returns
        -------
        array, shape (out_dim,)
            fundamental output
        """
        mu = amath.zeros((self.out_dim, ))
        if "b" in self.variables:
            mu[:] = amath.array(self.variables["b"]).ravel()
        if "W" in self.variables:
            mu += amath.tensordot(self.fifo.to_array(),
                                  self.variables["W"],
                                  axes=2)
        if DEBUG:
            if "b" in self.variables:
                mu_naive = deepcopy(self.variables["b"]).ravel()
            else:
                mu_naive = amath.zeros((self.out_dim,))
            for d in xrange(self.len_fifo):
                mu_naive = mu_naive + self.fifo.to_array()[d].dot(
                    self.variables["W"][d])
            assert amath.allclose(mu, mu_naive), "ERROR: mu has a bug"
        return mu

    def set_learning_rate(self, rate):
        """
        Setting the learning rate

        Parameters
        ----------
        rate : float
            learning rate
        """
        self.SGD.set_learning_rate(rate)

    def _get_sample(self):
        """
        getting the next sample

        Returns
        -------
        array, shape (out_dim,)
            mu + n, where n is sampled from the standard normal distribution.
        """
        mu = self._get_mean()
        sample = self.random.normal(mu)
        return sample

    def get_sparsity(self, exclude=[]):
        """
        getting the sparsity of variables

        Parameters
        ----------
        exclude : list, optional
            list of the name of variables that should not be considered

        Returns
        -------
        float
            fraction of variables that are zeros
        """
        # NOTE: `exclude` is read-only, so the mutable default is harmless.
        nnz = 0  # number of nonzero elements
        nz = 0  # number of zero elements
        for key in self.variables:
            if key in exclude:
                continue
            nnz += amath.sum(self.variables[key] != 0)
            nz += amath.sum(self.variables[key] == 0)
        sparsity = float(nz) / (nnz + nz)
        return sparsity

    def get_input_dimension(self):
        """
        Getting the dimension of the input sequence

        Returns
        -------
        in_dim : int
            dimension of input sequence
        """
        return self.in_dim

    def get_target_dimension(self):
        """
        Getting the dimension of the target sequence

        Returns
        -------
        out_dim : int
            dimension of target sequence
        """
        return self.out_dim
class MultiTargetVectorRegression(StochasticTimeSeriesModel):
    """
    Vector regression for multiple target time series with unit variance

    Parameters
    ----------
    in_dim : int
        dimension of input time-series
    out_dims : list of int
        list of dimensions of target time-series
    SGDs : list of SGD
        list of objects of stochastic gradient method
    order : int, optional
        order of the auto-regressive model
    L1 : float, optional
        strength of L1 regularization
    L2 : float, optional
        strength of L2 regularization
    use_bias: boolean, optional
        whether to use bias parameters

    Attributes
    ----------
    layers : list of VectorRegression
    """

    def __init__(self, in_dim, out_dims, SGDs, order=1, L1=0., L2=0.,
                 use_bias=True):
        self.layers = [VectorRegression(in_dim, out_dim, order, SGD, L1, L2,
                                        use_bias)
                       for (out_dim, SGD) in zip(out_dims, SGDs)]
        # Only layer 0 has the internal states, which are shared among
        # all layers
        for i in xrange(1, len(self.layers)):
            self.layers[i].fifo = self.layers[0].fifo
        StochasticTimeSeriesModel.__init__(self)

    def init_state(self):
        """
        Initializing FIFO queues
        """
        self.layers[0].init_state()
        # Fix: layer 0's init_state() rebinds its fifo to a new object, so
        # the remaining layers must be re-pointed at it to keep the state
        # shared (otherwise they keep observing the stale queue).
        for i in xrange(1, len(self.layers)):
            self.layers[i].fifo = self.layers[0].fifo

    def _update_state(self, in_pattern):
        """
        Updating FIFO queue by appending in_pattern

        Parameters
        ----------
        in_pattern : array, shape (in_dim,)
            in_pattern to be appended to fifo.

        Returns
        -------
        popped_in_pattern : array, shape (in_dim,)
            in_pattern popped from fifo.
        """
        # The fifo is shared, so updating layer 0 updates every layer.
        return self.layers[0]._update_state(in_pattern)

    def _get_delta(self, out_patterns, expecteds=None, weightLLs=None):
        """
        Getting deltas, how much we change parameters by learning a given
        out_pattern

        Parameters
        ----------
        out_patterns : list, length len(layers)
            out_patterns[l] : array, shape (layers[l].out_dim,)
            out_pattern of layer l observed at the current time
        expecteds : list
            expecteds[l] : array, shape (layers[l].out_dim,), optional
            out_pattern of layer l expected by the current model.
            to be computed if not given.
        weightLLs : list, optional
            weightLLs[l] : whether to weight delta by log likelihood of
            out_patterns[l]

        Returns
        -------
        list of dict
            list of dictionary of deltas with name of a variable as a key
        """
        assert len(out_patterns) == len(self.layers), \
            "length of out_patterns must match number of layers"
        if expecteds is None:
            expecteds = [None] * len(self.layers)
        if weightLLs is None:
            weightLLs = [False] * len(self.layers)
        assert len(expecteds) == len(self.layers), \
            "length of expected must match number of layers"
        return [layer._get_delta(out_pattern, expected, weightLL)
                for (layer, out_pattern, expected, weightLL)
                in zip(self.layers, out_patterns, expecteds, weightLLs)]

    def _get_gradient(self, out_patterns, expecteds=None):
        """
        Computing the gradient of log likelihood

        Parameters
        ----------
        out_patterns : list, length len(layers)
            out_patterns[l] : array, shape (layers[l].out_dim,)
            out_pattern of layer l observed at the current time
        expecteds : list
            expecteds[l] : array, shape (layers[l].out_dim,), optional
            out_pattern of layer l expected by the current model.
            to be computed if not given.

        Returns
        -------
        list of dict
            list of dictionary of gradients with name of a variable as a key
        """
        assert len(out_patterns) == len(self.layers), \
            "length of out_patterns must match number of layers"
        if expecteds is not None:
            # Fix: was `self.laners` (typo), which raised AttributeError.
            assert len(expecteds) == len(self.layers), \
                "length of expected must match number of layers"
        else:
            # Fix: zip() over None raised TypeError; default to per-layer
            # None as _get_delta does.
            expecteds = [None] * len(self.layers)
        return [layer._get_gradient(out_pattern, expected)
                for (layer, out_pattern, expected)
                in zip(self.layers, out_patterns, expecteds)]

    def _get_gradient_for_layer(self, out_pattern, layer, expected):
        """
        Computing the gradient of log likelihood

        Parameters
        ----------
        out_pattern : array, shape (layers[layer].out_dim,)
            out_pattern of the layer observed at the current time
        layer : int
            index of the layer
        expected : array, shape (layers[layer].out_dim)
            expected pattern of the layer

        Returns
        -------
        list of dict
            list of dictionary of gradients with name of a variable as a key
        """
        return self.layers[layer]._get_gradient(out_pattern, expected)

    def _update_parameters(self, deltas):
        """
        Updating parameters by deltas

        Parameters
        ----------
        deltas : list of dict, or None
            list of dictionary of deltas for all variables
        """
        # Fix: the None check must come before len(deltas) — the original
        # asserted on the length first, raising TypeError for None.
        if deltas is None:
            return
        assert len(deltas) == len(self.layers), \
            "length of deltas must match number of layers"
        for (layer, delta) in zip(self.layers, deltas):
            layer._update_parameters(delta)

    def get_LL(self, out_patterns):
        """
        Computing the total LL of an out_pattern

        Parameters
        ----------
        out_patterns : list, length len(layers)
            out_patterns[l] : array, shape (layers[l].out_dim,)
            out_pattern of layer l observed at the current time

        Returns
        -------
        list of float
            list of total log likelihood of the out_pattern
        """
        if not len(out_patterns) == len(self.layers):
            raise ValueError("length of out_patterns must match number of "
                             "layers")
        return [layer.get_LL(out_pattern)
                for (layer, out_pattern) in zip(self.layers, out_patterns)]

    def predict_next(self):
        """
        Predicting next out_pattern with the estimated mean

        Returns
        -------
        list of array, length len(layers)
            list of prediction
        """
        return [layer._get_mean() for layer in self.layers]

    def _get_mean(self):
        """
        Computing estimated mean

        Returns
        -------
        mu : list of array, length len(layers)
            list of estimated mean
        """
        return [layer._get_mean() for layer in self.layers]

    def _get_conditional_negative_energy(self):
        """
        Computing the conditional negative energy given fired

        Returns
        -------
        list of array, length len(layers)
            list of fundamental output
        """
        return [layer._get_conditional_negative_energy()
                for layer in self.layers]

    def set_learning_rate(self, rates):
        """
        Setting the learning rate

        Parameters
        ----------
        rates : list of float, length len(layers)
            list of learning rate
        """
        for (layer, rate) in zip(self.layers, rates):
            layer.set_learning_rate(rate)

    def get_sparsity(self, excludes=[]):
        """
        getting the sparsity of variables

        Parameters
        ----------
        excludes : list, optional
            list of the name of variables that should not be considered

        Returns
        -------
        list of float
            list of fraction of variables that are zeros
        """
        return [layer.get_sparsity(excludes) for layer in self.layers]

    def get_input_dimension(self):
        """
        Getting the dimension of input sequence

        Returns
        -------
        in_dim : int
            dimension of input sequence
        """
        return self.layers[0].get_input_dimension()

    def get_target_dimension(self):
        """
        Getting the dimension of target sequence

        Returns
        -------
        out_dim : list of int
            dimension of target sequence for each layer
        """
        return [layer.get_target_dimension() for layer in self.layers]

    def _get_sample(self):
        """
        Getting samples from each layer

        Returns
        -------
        list of arrays, length len(layers)
        """
        return [layer._get_sample() for layer in self.layers]
class VectorRegressionWithVariance(VectorRegression):
    """
    Vector regression for time series.
    The variance is also a model parameter to be estimated.

    Parameters
    ----------
    in_dim : int
        dimension of input time-series
    out_dim : int, optional
        dimension of target time-series
    order : int, optional
        order of the auto-regressive model
    SGD : object of SGD.SGD, optional
        object of a stochastic gradient method, default to AdaGrad()
    L1 : float, optional
        strength of L1 regularization
    L2 : float, optional
        strength of L2 regularization

    Attributes
    ----------
    len_fifo : int
        order
    in_dim : int
        in_dim
    out_dim : int
        out_dim
    variables : dict
        dictionary of model parameters
    variables["W"] : array, shape (len_fifo, in_dim, out_dim)
        variables["W"][l] corresponds to the weight from the input observed at
        time step t - l - 1 to the mean at time step t (current time).
    variables["b"] : array, shape (1, out_dim)
        variables["b"] corresponds to the bias to out_pattern.
    variables["s"] : array, shape (1, out_dim)
        variables["s"][n] corresponds to the standard deviation of
        out_pattern[n] (or scale parameter in other words)
    fifo : deque
        FIFO queue storing len_fifo in_patterns, each in_pattern has shape
        (in_dim,).
    """

    def __init__(self, in_dim, out_dim=None, order=1, SGD=None, L1=0.,
                 L2=0.):
        VectorRegression.__init__(self, in_dim, out_dim, order)
        # "s": per-output standard deviation, initialized to one.
        self.variables["s"] = amath.ones((1, self.out_dim), dtype=float)
        if SGD is None:
            SGD = AdaGrad()
        # Re-register shapes so the optimizer also tracks "s".
        self.SGD = SGD.set_shape(self.variables)
        for key in self.variables:
            self.L1[key] = L1
            self.L2[key] = L2
        # The scale parameter "s" is deliberately never regularized.
        self.L1["s"] = 0.
        self.L2["s"] = 0.

    def _get_gradient(self, out_pattern, expected=None):
        """
        Computing the gradient of log likelihood

        Parameters
        ----------
        out_pattern : array, length out_dim
            out_pattern observed at the current time
        expected : array, length out_dim, optional
            out_pattern expected by the current model.
            to be computed if not given.

        Returns
        -------
        dict
            dictionary of gradients with name of a variable as a key
        """
        assert out_pattern.shape == (self.out_dim,), \
            "out_pattern must have shape (out_dim,)"
        if expected is None:
            expected = self._get_mean()
        # NOTE(review): the trailing False presumably tells the parent not to
        # apply L2 regularization there, since it is applied below -- confirm.
        gradient = VectorRegression._get_gradient(self, out_pattern,
                                                  expected, False)
        x = out_pattern.reshape((1, self.out_dim))
        NATURAL_GRADIENT = True
        if NATURAL_GRADIENT:
            # Natural-gradient update for the scale parameter "s".
            dxdx = (x - expected)**2
            gradient["s"] \
                = 0.5 * dxdx / self.variables["s"] \
                - 0.5 * self.variables["s"]
            # standard deviation "s" can be replaced with variance or precision
            # gradient["variance"] = dxdx - self.variables["variance"]
            # gradient["precision"] = self.variables["precision"] \
            #     - dxdx * self.variables["precision"]**2
        else:
            # Plain gradient: "b" and "W" gradients are rescaled by 1/s**2.
            if "b" in self.variables:
                gradient["b"] = amath.op.divide_by_pow(gradient["b"],
                                                       self.variables["s"],
                                                       2)
            gradient["s"] = amath.op.vecreg_gradient_s(self.variables["s"],
                                                       out_pattern,
                                                       expected)
            if self.len_fifo > 0:
                gradient["W"] = amath.op.divide_3d_by_1d_pow(
                    gradient["W"], self.variables["s"], 2)
        self.SGD.apply_L2_regularization(gradient, self.variables, self.L2)
        return gradient

    def _update_parameters(self, delta):
        """
        Updating parameters by delta

        Parameters
        ----------
        delta : dict
            dictionary of arraymath array of amount of changes to the
            variables
        """
        # Only difference from the parent: a None delta is silently ignored.
        if delta is not None:
            VectorRegression._update_parameters(self, delta)

    def get_LL(self, out_pattern):
        """
        Computing the total LL of an out_pattern

        Parameters
        ----------
        out_pattern : array, length out_dim
            out_pattern observed at the current time

        Returns
        -------
        float
            total log likelihood
        """
        if not out_pattern.shape == (self.out_dim,):
            raise ValueError("out_pattern must have shape (out_dim,)")
        x = out_pattern.reshape((1, self.out_dim))
        mu = self._get_mean().reshape((1, self.out_dim))
        s = self.variables["s"]
        # Elementwise Gaussian log density N(x; mu, s**2), summed over dims.
        LL = - 0.5 * (x - mu)**2 / s**2 - 0.5 * amath.log(2 * s**2 * amath.pi)
        return amath.sum(LL)

    def _get_sample(self):
        """
        Getting the next sample

        Returns
        -------
        array, shape (out_dim)
            mu + n, where n ~ N(0, variables["s"] ** 2)
        """
        mu = self._get_mean().reshape((1, self.out_dim))
        sigma = self.variables["s"]
        sample = self.random.normal(mu, sigma)
        sample = sample.reshape(self.out_dim)
        return sample
class VectorLogisticRegression(VectorRegression):
    """
    Vector logistic regression for time series.

    out_pattern ~ Bern(sigmoid(mu)), where mu is updated according to the same
    rule as VectorRegression.

    Parameters
    ----------
    in_dim : int
        dimension of input time-series
    out_dim : int, optional
        dimension of target time-series
    order : int, optional
        order of the auto-regressive model
    SGD : object of SGD.SGD, optional
        object of a stochastic gradient method
    L1 : float, optional
        strength of L1 regularization
    L2 : float, optional
        strength of L2 regularization

    Attributes
    ----------
    len_fifo : int
        order
    in_dim : int
        in_dim
    out_dim : int
        out_dim
    variables : dict
        dictionary of model parameters
    variables["W"] : array, shape (len_fifo, in_dim, out_dim)
        variables["W"][l] corresponds to the weight from the input observed at
        time step t - l - 1 to mu at time step t (current time).
    variables["b"] : array, shape (out_dim,)
        variables["b"] corresponds to the bias to mu.
    fifo : deque
        FIFO queue storing len_fifo in_patterns, each in_pattern has shape
        (in_dim,).
    """

    def predict_next(self):
        """
        Predicting next out_pattern with the estimated mean
        (equivalently, firing probabilities)

        Returns
        -------
        array, shape (out_dim,)
            prediction
        """
        return self._get_mean()

    def get_LL(self, out_pattern):
        """
        Computing the total LL of an out_pattern

        Parameters
        ----------
        out_pattern : array, length out_dim
            out_pattern observed at the current time

        Returns
        -------
        float
            total log likelihood
        """
        if not out_pattern.shape == (self.out_dim,):
            raise ValueError("out_pattern must have shape (out_dim,)")
        mu = self._get_conditional_negative_energy()
        # Bernoulli log likelihood summed over dimensions.
        # NOTE(review): as written this equals the Bernoulli LL for
        # p = sigmoid(-mu), whereas _get_mean() returns sigmoid(mu) --
        # confirm the sign convention of sigmoid() and
        # _get_conditional_negative_energy().
        LL = -mu * out_pattern - amath.log(1. + amath.exp(-mu))
        return amath.sum(LL)

    def _get_mean(self):
        """
        Computing estimated mean (firing probabilities)

        Returns
        -------
        array, shape (out_dim,)
            estimated mean
        """
        mu = self._get_conditional_negative_energy()
        return sigmoid(mu)

    def _get_sample(self):
        """
        Getting the next sample

        Returns
        -------
        array, shape (out_dim,)
            sampled from Bernoulli distributions with the estimated means
            (a boolean array: u < p with u uniform on [0, 1))
        """
        p = self._get_mean()
        u = self.random.random_sample(p.shape)
        sample = u < p
        return sample
class VectorRegressionWithHidden(VectorRegression):
    """
    Vector regression with one hidden layer

    Parameters
    ----------
    in_dim : int
        dimension of input time-series
    out_dim : int, optional
        dimension of target time-series
    dim_hidden : int, optional
        dimension of a hidden layer
    order : int, optional
        order of the auto-regressive model. order >= 1.
    SGD : object of SGD.SGD, optional
        object of a stochastic gradient method, default to AdaGrad()
    L1 : float, optional
        strength of L1 regularization
    L2 : float, optional
        strength of L2 regularization
    use_bias : boolean, optional
        whether to use bias parameters
    sigma : float, optional
        standard deviation of initial values of weight parameters
    random : arraymath.random, optional
        random number generator

    Attributes
    ----------
    len_fifo : int
        order
    in_dim : int
        in_dim
    out_dim : int
        out_dim
    dim_hidden : int
        dimension of a hidden layer
    variables : dict
        dictionary of model parameters
    variables["W"] : array, shape (len_fifo, in_dim, out_dim)
        variables["W"][l] corresponds to the weight from the input observed at
        time step t - l - 1 to the mean at time step t (current time).
    variables["U"] : array, shape (len_fifo, in_dim, dim_hidden)
        variables["U"][l] corresponds to the weight from the input observed at
        time step t - l - 1 to the hidden layer at time step t - 1.
    variables["V"] : array, shape (dim_hidden, out_dim)
        variables["V"] corresponds to the weight from the hidden variables at
        time step t - 1 to the output layer at time step t (current time).
    variables["b"] : array, shape (out_dim,)
        variables["b"] corresponds to the bias to out_pattern.
    variables["b_h"] : array, shape (dim_hidden,)
        variables["b_h"] corresponds to the bias to the hidden layer.
    fifo : deque
        FIFO queue storing len_fifo in_patterns, each in_pattern has shape
        (in_dim,).
    """

    def __init__(self, in_dim, out_dim=None, dim_hidden=1, order=1,
                 SGD=None, L1=0., L2=0., use_bias=True, sigma=0, random=None):
        if not order >= 1:
            raise ValueError("order must satisfy `order >= 1`.")
        super(VectorRegressionWithHidden, self).__init__(in_dim, out_dim,
                                                         order, SGD, L1, L2,
                                                         use_bias, sigma)
        self.dim_hidden = dim_hidden
        if sigma <= 0:
            # Deterministic zero initialization.
            self.variables["b_h"] = amath.zeros((self.dim_hidden,),
                                                dtype=float)
            self.variables["U"] = amath.zeros(
                (self.len_fifo, self.in_dim, self.dim_hidden),
                dtype=float)
            self.variables["V"] = amath.zeros((self.dim_hidden, self.out_dim),
                                              dtype=float)
        else:
            # Random Gaussian initialization with standard deviation sigma.
            if random is None:
                random = amath.random.RandomState(0)
            self.variables["b_h"] = random.normal(0, sigma, (self.dim_hidden,))
            self.variables["U"] \
                = random.normal(0, sigma,
                                (self.len_fifo, self.in_dim, self.dim_hidden))
            self.variables["V"] \
                = random.normal(0, sigma, (self.dim_hidden, self.out_dim))
        if SGD is None:
            SGD = AdaGrad()
        # Re-register shapes so the optimizer also tracks U, V and b_h.
        self.SGD = SGD.set_shape(self.variables)
        self.L2 = dict()
        self.L1 = dict()
        for key in self.variables:
            self.L1[key] = L1
            self.L2[key] = L2

    def get_LL(self, out_pattern):
        """ get the lower-bound of the log-likelihood

        Parameters
        ----------
        out_pattern : array, shape (out_dim,)
            out_pattern observed at the current time

        Returns
        -------
        float
            the lower-bound of the log-likelihood
        """
        return self._get_obj(out_pattern)

    def _get_obj(self, out_pattern):
        """ compute the lower-bound of the log-likelihood.

        Parameters
        ----------
        out_pattern : array, shape (out_dim,)
            out_pattern observed at the current time

        Returns
        -------
        float
            the lower-bound of the log-likelihood
        """
        x_tilde = self._get_x_tilde(out_pattern)
        # exp(log_sigmoid(u_tilde)) = sigmoid(u_tilde), computed via the
        # log for numerical stability.
        sig_u_tilde = amath.exp(self._get_u_tilde(log_sigmoid=True))
        V_times_sig_u_tilde = sig_u_tilde.dot(self.variables["V"])
        obj = - 0.5 * self.out_dim * amath.log(2.0 * amath.pi) \
            - 0.5 * amath.inner(x_tilde, x_tilde)
        obj = obj + amath.inner(x_tilde, V_times_sig_u_tilde)
        obj = obj - 0.5 * amath.inner(V_times_sig_u_tilde, V_times_sig_u_tilde)
        obj = obj - 0.5 * amath.inner(sig_u_tilde,
                                      (1.0 - sig_u_tilde)
                                      * amath.diag(self.variables["V"].dot(
                                          self.variables["V"].transpose())))
        return obj

    def _get_gradient(self, out_pattern, expected=None):
        """ compute the gradient of the lower bound with respect to each
        parameter

        Parameters
        ----------
        out_pattern : array, shape (out_dim,)
            out_pattern observed at the current time
        expected : array, shape (out_dim,)
            expected pattern (unused; kept for interface compatibility)

        Returns
        -------
        dict
            dictionary of gradients with name of a variable as a key
        """
        assert out_pattern.shape == (self.out_dim,), \
            "out_pattern must have shape ({},), not {}.".format(
                self.out_dim, out_pattern.shape)
        x_tilde = self._get_x_tilde(out_pattern)
        sig_u_tilde = amath.exp(self._get_u_tilde(log_sigmoid=True))
        V_times_sig_u_tilde = sig_u_tilde.dot(self.variables["V"])
        mu = self._get_mean()
        gradient = dict()
        gradient["b"] = out_pattern - mu
        # Outer product of each FIFO slice with the bias gradient,
        # vectorized via broadcasting.
        gradient["W"] \
            = self.fifo.to_array()[:, :, amath.newaxis] \
            * gradient["b"][amath.newaxis, amath.newaxis, :]
        if DEBUG:
            # range (not the Python-2-only xrange) so the DEBUG path also
            # runs under Python 3.
            grad_W_naive = amath.array([amath.outer(self.fifo.to_array()[d],
                                                    gradient["b"])
                                        for d in range(self.len_fifo)])
            assert amath.allclose(gradient["W"], grad_W_naive), \
                "gradient[\"W\"] has a bug."
        gradient["V"] \
            = amath.outer(sig_u_tilde, x_tilde) \
            - (amath.outer(sig_u_tilde, sig_u_tilde)
               + amath.diag(sig_u_tilde
                            * (1.0 - sig_u_tilde))).dot(self.variables["V"])
        grad_u_tilde \
            = sig_u_tilde * (1.0 - sig_u_tilde) \
            * (self.variables["V"].dot(
                x_tilde - sig_u_tilde.dot(self.variables["V"]))
               + amath.diag(self.variables["V"].dot(
                   self.variables["V"].transpose()))
               * (sig_u_tilde - 0.5))
        gradient["b_h"] = grad_u_tilde
        gradient["U"] \
            = self.fifo.to_array()[:, :, amath.newaxis] \
            * grad_u_tilde[amath.newaxis, amath.newaxis, :]
        if DEBUG:
            grad_U_naive = amath.array([amath.outer(self.fifo.to_array()[d],
                                                    grad_u_tilde)
                                        for d in range(self.len_fifo)])
            assert amath.allclose(gradient["U"], grad_U_naive), \
                "gradient[\"U\"] has a bug."
        return gradient

    def init_state(self):
        """ init fifo
        """
        super(VectorRegressionWithHidden, self).init_state()

    def _update_state(self, in_pattern):
        """
        Updating FIFO queue by appending in_pattern

        Parameters
        ----------
        in_pattern : array, shape (in_dim,)
            in_pattern to be appended to fifo.

        Returns
        -------
        popped_in_pattern : array, shape (in_dim,)
            in_pattern popped from fifo.
        """
        popped_in_pattern \
            = super(VectorRegressionWithHidden, self)._update_state(in_pattern)
        return popped_in_pattern

    def _get_conditional_negative_energy(self):
        """ compute mu, which can be used for prediction of the next pattern.

        Returns
        -------
        array, shape (out_dim,)
            mu, the mean of the output layer
        """
        mu = amath.zeros((self.out_dim, ))
        if "b" in self.variables:
            mu[:] = self.variables["b"]
        # sum_d fifo[d] . W[d], contracted over (len_fifo, in_dim).
        mu += amath.tensordot(self.fifo.to_array(), self.variables["W"],
                              axes=2)
        if DEBUG:
            if "b" in self.variables:
                mu_naive = deepcopy(self.variables["b"])
            else:
                mu_naive = amath.zeros((self.out_dim,))
            for d in range(self.len_fifo):
                mu_naive = mu_naive + self.fifo.to_array()[d].dot(
                    self.variables["W"][d])
            assert amath.allclose(mu, mu_naive), "ERROR: mu has a bug"
        # Contribution of the (marginalized) hidden layer.
        sig_u_tilde = amath.exp(self._get_u_tilde(log_sigmoid=True))
        mu = mu + sig_u_tilde.dot(self.variables["V"])
        return mu

    def _get_u_tilde(self, log_sigmoid=False):
        """ Compute u_tilde, which determines the energy of the hidden
        variables. The energy is defined as the inner product of hidden
        variables and u_tilde.

        Parameters
        ----------
        log_sigmoid : bool
            if True, return log_sigmoid(u_tilde)

        Returns
        -------
        array, shape (dim_hidden,)
            u_tilde
        """
        u_tilde = amath.zeros(self.dim_hidden)
        u_tilde += self.variables["b_h"]
        u_tilde += amath.tensordot(self.fifo.to_array(),
                                   self.variables["U"], axes=2)
        if DEBUG:
            u_tilde_naive = amath.zeros(self.dim_hidden)
            u_tilde_naive = u_tilde_naive + self.variables["b_h"]
            for d in range(self.len_fifo):
                u_tilde_naive = u_tilde_naive \
                    + self.fifo.to_array()[d].dot(self.variables["U"][d])
            assert amath.allclose(u_tilde, u_tilde_naive), \
                "ERROR: u_tilde has a bug."
        if log_sigmoid:
            ll = amath.log_logistic(u_tilde)
            u_tilde = amath.array(ll).reshape(u_tilde.shape)
        return u_tilde

    def _get_x_tilde(self, out_pattern):
        """ Compute x_tilde, the difference between out_pattern and mu
        (without hidden units)

        Parameters
        ----------
        out_pattern : array, shape (out_dim,)

        Returns
        -------
        array, shape (out_dim,)
            x[t] - b - sum_d W[d] x[t-d]
        """
        assert out_pattern.shape == (self.out_dim,), \
            "out_pattern must have shape ({},), not {}.".format(
                self.out_dim, out_pattern.shape)
        x_tilde = amath.zeros(self.out_dim)
        x_tilde += out_pattern
        if "b" in self.variables:
            x_tilde -= self.variables["b"]
        if DEBUG:
            x_tilde_naive = deepcopy(x_tilde)
            for d in range(self.len_fifo):
                x_tilde_naive = x_tilde_naive \
                    - self.fifo.to_array()[d].dot(self.variables["W"][d])
        x_tilde -= amath.tensordot(self.fifo.to_array(),
                                   self.variables["W"], axes=2)
        if DEBUG:
            assert amath.allclose(x_tilde, x_tilde_naive), \
                "ERROR: x_tilde has a bug."
        return x_tilde

    def get_sparsity(self, exclude=None):
        """ Not implemented for this model.

        Parameters
        ----------
        exclude : list, optional
            names of variables that should not be considered (unused).

        Raises
        ------
        NotImplementedError
            always.
        """
        # TODO: implement get_sparsity
        raise NotImplementedError(
            "get_sparsity not implemented for VectorRegressionWithHidden")
|
<reponame>Acidburn0zzz/luci
# Copyright 2019 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""API for interacting with the ResultDB service.
Requires `rdb` command in `$PATH`:
https://godoc.org/go.chromium.org/luci/resultdb/cmd/rdb
"""
from google.protobuf import json_format
from recipe_engine import recipe_api
from PB.go.chromium.org.luci.resultdb.proto.v1 import recorder
from . import common
class ResultDBAPI(recipe_api.RecipeApi):
  """A module for interacting with ResultDB."""

  # Maximum number of requests in a batch RPC.
  _BATCH_SIZE = 500

  # Prefix of an invocation name.
  _INVOCATION_NAME_PREFIX = 'invocations/'

  # Expose serialize and deserialize functions.
  serialize = staticmethod(common.serialize)
  deserialize = staticmethod(common.deserialize)
  Invocation = common.Invocation

  @property
  def current_invocation(self):
    """Name of the ResultDB invocation of the current build."""
    return self.m.buildbucket.build.infra.resultdb.invocation

  @property
  def enabled(self):
    """True if ResultDB integration is enabled for this build."""
    return self.current_invocation != ''

  def assert_enabled(self):
    """Raises AssertionError if ResultDB integration is not enabled."""
    assert self.enabled, (
        'ResultDB integration was not enabled for this build. '
        'See go/lucicfg#luci.builder and go/lucicfg#resultdb.settings'
    )

  def include_invocations(self, invocations, step_name=None):
    """Shortcut for resultdb.update_included_invocations()."""
    return self.update_included_invocations(
        add_invocations=invocations, step_name=step_name)

  def exclude_invocations(self, invocations, step_name=None):
    """Shortcut for resultdb.update_included_invocations()."""
    return self.update_included_invocations(
        remove_invocations=invocations, step_name=step_name)

  def update_included_invocations(self,
                                  add_invocations=None,
                                  remove_invocations=None,
                                  step_name=None):
    """Add and/or remove included invocations to/from the current invocation.

    Args:
      add_invocations (list of str): invocation id's to add to the current
        invocation.
      remove_invocations (list of str): invocation id's to remove from the
        current invocation.
      step_name (str): name of the step.

    This updates the inclusions of the current invocation specified in the
    LUCI_CONTEXT.
    """
    self.assert_enabled()
    if not (add_invocations or remove_invocations):
      # Nothing to do.
      return

    # Convert bare ids to full invocation names.
    names = lambda ids: ['invocations/%s' % inv_id for inv_id in ids or []]
    req = recorder.UpdateIncludedInvocationsRequest(
        including_invocation=self.current_invocation,
        add_invocations=names(add_invocations),
        remove_invocations=names(remove_invocations),
    )
    self._rpc(
        step_name or 'resultdb.update_included_invocations',
        'luci.resultdb.v1.Recorder',
        'UpdateIncludedInvocations',
        json_format.MessageToDict(req),
        include_update_token=True,
        step_test_data=lambda: self.m.raw_io.test_api.stream_output('{}'))

  def exonerate(self, test_exonerations, step_name=None):
    """Exonerates test variants in the current invocation.

    Args:
      test_exonerations (list): A list of test_result_pb2.TestExoneration.
      step_name (str): name of the step.
    """
    def args(test_exonerations, step_name):
      # Builds the positional argument list for a single _rpc call.
      req = recorder.BatchCreateTestExonerationsRequest(
          invocation=self.current_invocation,
          request_id=self.m.uuid.random(),
      )
      for te in test_exonerations:
        req.requests.add(test_exoneration=te)
      return [
          step_name, 'luci.resultdb.v1.Recorder', 'BatchCreateTestExonerations',
          json_format.MessageToDict(req),
          True, lambda: self.m.raw_io.test_api.stream_output('{}')
      ]

    if not test_exonerations:
      return
    self.assert_enabled()
    step_name = step_name or 'resultdb.exonerate'

    if len(test_exonerations) <= self._BATCH_SIZE:
      self._rpc(*args(test_exonerations, step_name))
      return

    # Sends requests in batches, in parallel.
    remaining = test_exonerations
    i = 0
    with self.m.step.nest(step_name):
      while remaining:
        batch = remaining[:self._BATCH_SIZE]
        remaining = remaining[self._BATCH_SIZE:]
        self.m.futures.spawn(self._rpc, *args(batch, 'batch (%d)' % i))
        i += 1

  def invocation_ids(self, inv_names):
    """Returns invocation ids by parsing invocation names.

    Args:
      inv_names (list of str): resultdb invocation names.

    Returns:
      A list of invocation_ids.
    """
    assert all(isinstance(name, str) for name in inv_names), inv_names
    assert all(name.startswith(
        self._INVOCATION_NAME_PREFIX) for name in inv_names), inv_names
    return [name[len(self._INVOCATION_NAME_PREFIX):] for name in inv_names]

  def query(self,
            inv_ids,
            variants_with_unexpected_results=False,
            limit=None,
            step_name=None):
    """Returns test results in the invocations.

    Most users will be interested only in results of test variants that had
    unexpected results. This can be achieved by passing
    variants_with_unexpected_results=True. This significantly reduces output
    size and latency.

    Example:
      results = api.resultdb.query(
          [
            # invocation id for a swarming task.
            'task-chromium-swarm.appspot.com-deadbeef',
            # invocation id for a buildbucket build.
            'build-234298374982'
          ],
          variants_with_unexpected_results=True,
      )

    Args:
      inv_ids (list of str): ids of the invocations.
      variants_with_unexpected_results (bool): if True, return only test
        results from variants that have unexpected results.
      limit (int): maximum number of test results to return.
        Defaults to 1000.
      step_name (str): name of the step.

    Returns:
      A dict {invocation_id: api.Invocation}.
    """
    assert len(inv_ids) > 0
    # Avoid shadowing the builtin `id` in the assertion below.
    assert all(isinstance(inv_id, str) for inv_id in inv_ids), inv_ids
    assert limit is None or limit >= 0
    limit = limit or 1000

    args = [
        '-json',
        '-n', str(limit),
    ]
    if variants_with_unexpected_results:
      args += ['-u']
    args += list(inv_ids)

    step_res = self._run_rdb(
        subcommand='query',
        args=args,
        step_name=step_name,
        stdout=self.m.raw_io.output(add_output_log=True),
        step_test_data=lambda: self.m.raw_io.test_api.stream_output(''),
    )
    return common.deserialize(step_res.stdout)

  ##############################################################################
  # Implementation details.

  def _rpc(self,
           step_name,
           service,
           method,
           req,
           include_update_token=False,
           step_test_data=None):
    """Makes a ResultDB RPC.

    Args:
      step_name (str): name of the step.
      service (string): the full name of a service, e.g.
        "luci.resultdb.v1.ResultDB".
      method (string): the name of the method, e.g. "GetInvocation".
      req (dict): request message.
      include_update_token (bool): A flag to indicate if the RPC requires the
        update token of the invocation.
      step_test_data: placeholder test data for the step.

    Returns:
      A dict representation of the response message.
    """
    args = [service, method]
    if include_update_token:
      args.append('-include-update-token')
    step_res = self._run_rdb(
        subcommand='rpc',
        step_name=step_name,
        args=args,
        stdin=self.m.json.input(req),
        stdout=self.m.json.output(),
        step_test_data=step_test_data,
    )
    return step_res.stdout

  def _run_rdb(self,
               subcommand,
               step_name=None,
               args=None,
               stdin=None,
               stdout=None,
               step_test_data=None,
               timeout=None):
    """Runs rdb tool."""
    cmdline = ['rdb', subcommand] + (args or [])
    return self.m.step(
        step_name or ('rdb ' + subcommand),
        cmdline,
        infra_step=True,
        stdin=stdin,
        stdout=stdout,
        step_test_data=step_test_data,
        timeout=timeout,
    )

  def wrap(self,
           cmd,
           test_id_prefix='',
           base_variant=None,
           test_location_base='',
           base_tags=None,
           coerce_negative_duration=False,
           include=False,
           realm='',
           location_tags_file='',
           require_build_inv=True,
          ):
    """Wraps the command with ResultSink.

    Returns a command that, when executed, runs cmd in a go/result-sink
    environment. For example:

      api.step('test', api.resultdb.wrap(['./my_test']))

    Args:
      cmd (list of strings): the command line to run.
      test_id_prefix (str): a prefix to prepend to test IDs of test results
        reported by cmd.
      base_variant (dict): variant key-value pairs to attach to all test
        results reported by cmd. If both base_variant and a reported variant
        have a value for the same key, the reported one wins.
        Example:
          base_variant={
            'bucket': api.buildbucket.build.builder.bucket,
            'builder': api.buildbucket.builder_name,
          }
      test_location_base (str): the base path to prepend to the test location
        file name with a relative path. The value must start with "//".
      base_tags (list of (string, string)): tags to attach to all test results
        reported by cmd. Each element is a tuple of (key, value), and a key
        may be repeated.
      coerce_negative_duration (bool): If true, negative duration values will
        be coerced to 0. If false, tests results with negative duration values
        will be rejected with an error.
      include (bool): If true, a new invocation will be created and included
        in the parent invocation.
      realm (str): realm used for the new invocation created if `include=True`.
        Default is the current realm used in buildbucket.
      location_tags_file (str): path to the file that contains test location
        tags in JSON format.
      require_build_inv (bool): flag to control if the build is required to
        have an invocation.
    """
    if require_build_inv:
      self.assert_enabled()
    assert isinstance(test_id_prefix, (type(None), str)), test_id_prefix
    assert isinstance(base_variant, (type(None), dict)), base_variant
    assert isinstance(cmd, (tuple, list)), cmd
    assert isinstance(test_location_base, (type(None), str)), test_location_base
    assert not test_location_base or test_location_base.startswith(
        '//'), test_location_base
    assert isinstance(base_tags, (type(None), list)), base_tags
    assert isinstance(coerce_negative_duration, bool), coerce_negative_duration
    assert isinstance(include, bool), include
    assert isinstance(realm, (type(None), str)), realm
    assert isinstance(location_tags_file, (type(None), str)), location_tags_file

    ret = ['rdb', 'stream']
    if test_id_prefix:
      ret += ['-test-id-prefix', test_id_prefix]
    # .items() (not the Python-2-only .iteritems()) so this also works
    # under Python 3; sorted for deterministic command lines.
    for k, v in sorted((base_variant or {}).items()):
      ret += ['-var', '%s:%s' % (k, v)]
    if test_location_base:
      ret += ['-test-location-base', test_location_base]
    for k, v in sorted(base_tags or []):
      ret += ['-tag', '%s:%s' % (k, v)]
    if coerce_negative_duration:
      ret += ['-coerce-negative-duration']
    if include:
      ret += [
          '-new', '-realm', realm or self.m.buildbucket.builder_realm,
          '-include'
      ]
    if location_tags_file:
      ret += ['-location-tags-file', location_tags_file]
    ret += ['--'] + list(cmd)
    return ret

  def config_test_presentation(self, column_keys=(), grouping_keys=('status',)):
    """Specifies how the test results should be rendered.

    Args:
      column_keys:
        A list of keys that will be rendered as 'columns'. status is always the
        first column and name is always the last column (you don't need to
        specify them). A key must be one of the following:
        1. 'v.{variant_key}': variant.def[variant_key] of the test variant
           (e.g. v.gpu).
      grouping_keys:
        A list of keys that will be used for grouping tests. A key must be one
        of the following:
        1. 'status': status of the test variant.
        2. 'name': name of the test variant.
        3. 'v.{variant_key}': variant.def[variant_key] of the test variant
           (e.g. v.gpu).
        Caveat: test variants with only expected results are not affected by
        this setting and are always in their own group.
    """
    # To be consistent with the lucicfg implementation, set the test
    # presentation config only when it's not the default value.
    if list(column_keys) == [] and list(grouping_keys) == ['status']:
      return

    # Validate column_keys.
    for k in column_keys:
      assert k.startswith('v.')

    # Validate grouping_keys.
    for k in grouping_keys:
      assert k in ['status', 'name'] or k.startswith('v.')

    # The fact that it sets a property value is an implementation detail.
    res = self.m.step('set test presentation config', cmd=None)
    prop_name = '$recipe_engine/resultdb/test_presentation'
    res.presentation.properties[prop_name] = {
        'column_keys': column_keys,
        'grouping_keys': grouping_keys,
    }
|
##
# <NAME>
# SoundBoard runs on an OSX machine and plays sounds from the command line
# when told to by HTTP requests. Multiple clients can control it and
# play sound effects through the included HTML interface.
# V 2.0
##
import os
import subprocess
import sys

import web

if sys.version_info < (2, 6):
    import simplejson as json
else:
    import json
# Absolute directory containing this script; all sound paths are relative
# to it.
localPath = os.path.dirname( os.path.realpath( __file__ ) )

## ------------SETTINGS----------------------
if os.name == 'nt':
    # Windows: helper executables are expected next to this script.
    soundPlayer = "playwav" # Update "playwav" if you want to use a different audio player.
    osSpeakCommand = "SayStatic"
else: #Assuming it's OSX
    soundPlayer = "play" # Update "play" if you want to use a different audio player.
    osSpeakCommand = "say"

# Directory (relative to this file) that holds the sound effect files.
soundEffectsDirectory = 'sounds'
# Absolute path of the command-line audio player.
cmdLinePlaySoundCommand = os.path.join(localPath, soundPlayer)
## ------------SETTINGS----------------------
def walkSoundDirectories(soundEffectsDirectory):
    """Walk the sound-effects tree and build the catalogue sent to clients.

    Parameters
    ----------
    soundEffectsDirectory : str
        path of the directory containing the sound files (one catalogue
        entry per folder, including the top-level folder itself).

    Returns
    -------
    list of dict
        one dict per folder with keys "folderName" (the folder's base name)
        and "soundFiles", a list of [file_name, display_name] pairs.
        Hidden files (names starting with ".") are skipped.
    """
    folderArray = []
    for dirName, subdirList, fileList in os.walk(soundEffectsDirectory):
        responseObject = {}
        responseObject["folderName"] = os.path.basename(dirName)
        responseObject["soundFiles"] = []
        for fname in fileList:
            if not fname.startswith("."):
                # os.path.splitext handles extensions of any length; the old
                # fname[:-4] silently mangled names like "foo.aiff".
                displayName = (os.path.splitext(fname)[0]
                               .replace("_", " ").title())
                responseObject["soundFiles"].append([fname, displayName])
        folderArray.append(responseObject)
    return folderArray
class playLocalSound:
    """HTTP handler that plays a sound on the machine running the server."""

    def playSound(self, sndName):
        print(sndName)
        if sndName.startswith(soundEffectsDirectory):
            sndName = sndName[len(soundEffectsDirectory) + 1:]
        soundRoot = os.path.realpath(
            os.path.join(localPath, soundEffectsDirectory))
        soundPath = os.path.realpath(os.path.join(soundRoot, sndName))
        # Refuse paths that escape the sounds directory (e.g. "../../etc/x");
        # sndName comes straight from the URL and is untrusted.
        if not soundPath.startswith(soundRoot + os.sep):
            return
        if os.path.exists(soundPath):
            # An argument list with shell=False prevents shell injection via
            # a crafted file name (the old os.popen built a shell string).
            subprocess.Popen([cmdLinePlaySoundCommand, soundPath])

    def GET(self, name):
        if not name:
            return "Missing Sound Name"
        self.playSound(name)
        return "Sound played."
class playRemoteSound:
    """HTTP handler that streams a sound file's bytes back to the client."""

    def GET(self, sndName):
        if sndName.startswith(soundEffectsDirectory):
            sndName = sndName[len(soundEffectsDirectory) + 1:]
        soundRoot = os.path.realpath(
            os.path.join(localPath, soundEffectsDirectory))
        soundPath = os.path.realpath(os.path.join(soundRoot, sndName))
        # sndName is untrusted URL input: refuse anything that resolves
        # outside the sounds directory (arbitrary-file-read otherwise).
        if not soundPath.startswith(soundRoot + os.sep):
            return ''
        try:
            if os.path.exists(soundPath):
                # 'with' guarantees the handle is closed (the old code
                # leaked it); narrow except instead of a bare one.
                with open(soundPath, 'rb') as f:
                    return f.read()
        except (IOError, OSError):
            return ''  # you can send an 404 error here if you want
class getSoundList:
    """HTTP handler returning the sound catalogue as a JSON document."""

    def GET(self):
        catalogue = walkSoundDirectories(soundEffectsDirectory)
        return json.dumps(catalogue)
class speak:
    """HTTP handler that speaks the posted words via the OS speech command."""

    def POST(self, words):
        user_data = web.input()
        print(user_data.words)
        if user_data.words:
            # The words come straight from the client: pass them as a single
            # argument with shell=False. The old os.popen string allowed
            # arbitrary command injection (e.g. "hello; rm -rf ~").
            subprocess.Popen([osSpeakCommand, user_data.words])
        raise web.seeother('/static/index.html')
class index:
    """Redirect the root URL to the static HTML interface."""

    def GET(self):
        raise web.seeother('/static/index.html')
class Upload:
    """HTTP handler that saves an uploaded file into the sounds directory."""

    def POST(self):
        x = web.input(myfile={})
        # basename() strips any directory components, so an uploaded
        # filename like "../../evil" cannot escape the sounds directory.
        fileName = os.path.basename(x['myfile'].filename)
        # 'with' closes the file even if the write fails.
        with open(os.path.join(localPath, soundEffectsDirectory,
                               fileName), 'wb') as f:
            f.write(x['myfile'].value)
        raise web.seeother('/static/index.html')
def notfound():
    """404 handler: send the client back to the main interface."""
    raise web.seeother('/static/index.html')
# URL routing table: maps request path patterns to the handler classes above.
urls = (
    '/getSounds/', 'getSoundList',
    '/speak/(.*)', 'speak',
    '/upload/', 'Upload',
    '/preview/(.*)', 'playRemoteSound',
    '/play/(.*)', 'playLocalSound',
    '/', 'index'
)

app = web.application(urls, globals())
# Any unknown URL falls back to the main interface.
app.notfound = notfound

if __name__ == "__main__":
    # (A stray "|" artifact after app.run() in the original made this
    # line a syntax error; removed.)
    app.run()
<gh_stars>0
from django.db import models
from dictionary.views import get_def_for_tooltip
import json
from search.models import TableNames
from tools.app_utils import parse_form_type
import settings
import opus_support
class ParamInfo(models.Model):
"""
This model describes every searchable param in the database.
Each has attributes like display, display order, query type, slug, etc.
"""
category_name = models.CharField(max_length=150)
name = models.CharField(max_length=87)
form_type = models.CharField(max_length=100, blank=True, null=True)
display = models.CharField(max_length=1)
display_results = models.IntegerField()
disp_order = models.IntegerField()
label = models.CharField(max_length=240, blank=True, null=True)
label_results = models.CharField(max_length=240, blank=True, null=True)
slug = models.CharField(max_length=255, blank=True, null=True)
old_slug = models.CharField(max_length=255, blank=True, null=True)
units = models.CharField(max_length=75, blank=True, null=True)
ranges = models.TextField()
field_hints1 = models.CharField(max_length=255, blank=True, null=True)
field_hints2 = models.CharField(max_length=255, blank=True, null=True)
intro = models.CharField(max_length=1023, blank=True, null=True)
tooltip = models.CharField(max_length=255, blank=True, null=True)
dict_context = models.CharField(max_length=255, blank=True, null=True)
dict_name = models.CharField(max_length=255, blank=True, null=True)
dict_context_results = models.CharField(max_length=255, blank=True, null=True)
dict_name_results = models.CharField(max_length=255, blank=True, null=True)
sub_heading = models.CharField(max_length=150, blank=True, null=True)
timestamp = models.DateTimeField()
class Meta:
db_table = ('param_info')
ordering = ('category_name', 'sub_heading', 'disp_order')
def __unicode__(self):
return u"%s" % self.name
def param_qualified_name(self):
return self.category_name + '.' + self.name
def get_tooltip(self):
definition = get_def_for_tooltip(self.dict_name, self.dict_context)
return definition
def get_tooltip_results(self):
if self.dict_name_results:
definition = get_def_for_tooltip(self.dict_name_results,
self.dict_context_results)
else:
definition = get_def_for_tooltip(self.dict_name, self.dict_context)
return definition
def body_qualified_label(self):
# Append "[Ring]" or "[<Surface Body>]" or "[Mission]" or "[Instrument]"
if self.label is None: # pragma: no cover
return None
append_to_label = None
pretty_name = (TableNames.objects
.get(table_name=self.category_name).label)
pretty_name = pretty_name.replace(' Surface Geometry Constraints', '')
pretty_name = pretty_name.replace(' Geometry Constraints', '')
pretty_name = pretty_name.replace(' Mission Constraints', '')
pretty_name = pretty_name.replace(' Constraints', '')
if pretty_name == 'Surface':
return self.label
return self.label + ' [' + pretty_name + ']'
def body_qualified_label_results(self):
    """Return the results label qualified with its table's pretty name.

    Same qualification as body_qualified_label(), but a wider set of
    generic tables is left unqualified. Returns None when no results
    label is configured.
    """
    if self.label_results is None:
        return None
    pretty_name = (TableNames.objects
                   .get(table_name=self.category_name).label)
    # Strip boilerplate suffixes; longest first.
    # (FIX: removed the unused local `append_to_label`.)
    for suffix in (' Surface Geometry Constraints',
                   ' Geometry Constraints',
                   ' Mission Constraints',
                   ' Constraints'):
        pretty_name = pretty_name.replace(suffix, '')
    if pretty_name in ('General', 'PDS', 'Wavelength', 'Image',
                       'Occultation', 'Surface'):
        return self.label_results
    return self.label_results + ' [' + pretty_name + ']'
def get_units(self):
    """Return the display units wrapped in parentheses, or ''."""
    if not self.units:
        return ''
    display = opus_support.UNIT_CONVERSION[self.units]['display_name']
    return '(' + display + ')'
def fully_qualified_label_results(self):
    """Results label plus units, e.g. 'Time [General] (secs)'."""
    label = self.body_qualified_label_results()
    if label is None:  # pragma: no cover
        return None
    units = self.get_units()
    return (label + ' ' + units) if units else label
def is_string(self):
    """True when this parameter's form type is STRING."""
    form_type, _, _ = parse_form_type(self.form_type)
    return form_type == 'STRING'
def is_string_or_mult(self):
    """True for STRING parameters or any multiple-choice form type."""
    form_type = parse_form_type(self.form_type)[0]
    return form_type == 'STRING' or form_type in settings.MULT_FORM_TYPES
def get_ranges_info(self):
    """Return the parsed `ranges` JSON (units & qtype excluded), or {}."""
    return json.loads(self.ranges) if self.ranges else {}
|
# Copyright 2014 OpenCore LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from ferry.fabric.com import robust_com
import json
import logging
import os
import re
import sys
from subprocess import Popen, PIPE
DOCKER_SOCK='unix:////var/run/ferry.sock'
class DockerInstance(object):
    """In-memory record of a single Docker container: identity,
    addresses, port map, and the ssh key material used to reach it."""

    def __init__(self, json_data=None):
        if json_data:
            # Rehydrate from a dict previously produced by json().
            self.container = json_data['container']
            self.vm = json_data['vm']
            self.service_type = json_data['type']
            self.host_name = json_data['hostname']
            self.manage_ip = json_data['manage_ip']
            self.external_ip = json_data['external_ip']
            self.internal_ip = json_data['internal_ip']
            self.ports = json_data['ports']
            self.image = json_data['image']
            self.default_user = json_data['user']
            self.name = json_data['name']
            self.args = json_data['args']
            self.keydir = json_data['keydir']
            self.keyname = json_data['keyname']
            self.privatekey = json_data['privatekey']
            self.volumes = json_data['volumes']
            self.tunnel = json_data['tunnel']
        else:
            # Fresh, empty, local instance.
            self.container = ''
            self.vm = 'local'
            self.service_type = None
            self.host_name = None
            self.manage_ip = None
            self.external_ip = None
            self.internal_ip = None
            self.ports = {}
            self.image = ''
            self.keydir = None
            self.keyname = None
            self.privatekey = None
            self.volumes = None
            self.default_user = None
            self.name = None
            self.args = None
            self.tunnel = False

    def json(self):
        """Serialize to a dict (the inverse of __init__'s json_data)."""
        return {'_type': 'docker',
                'manage_ip': self.manage_ip,
                'external_ip': self.external_ip,
                'internal_ip': self.internal_ip,
                'ports': self.ports,
                'hostname': self.host_name,
                'container': self.container,
                'vm': self.vm,
                'image': self.image,
                'type': self.service_type,
                'keydir': self.keydir,
                'keyname': self.keyname,
                'privatekey': self.privatekey,
                'volumes': self.volumes,
                'user': self.default_user,
                'name': self.name,
                'args': self.args,
                'tunnel': self.tunnel}
""" Alternative API for Docker that uses external commands """
class DockerCLI(object):
    """Shells out to the `docker` binary (locally or over ssh) instead of
    using the Docker HTTP API."""
    def __init__(self, registry=None):
        """Record the docker invocation, sub-command names, and flag
        spellings used to assemble command lines; `registry` is an
        optional external Docker registry URL."""
        # self.docker = 'docker-ferry -H=' + DOCKER_SOCK
        self.docker = 'docker -H=' + DOCKER_SOCK
        self.version_cmd = 'version'
        self.start_cmd = 'start'
        # NOTE(review): single-dash long flags (-privileged, -expose,
        # -lxc-conf, ...) are pre-1.0 docker CLI syntax — confirm the
        # targeted docker version still accepts them.
        self.run_cmd = 'run -privileged'
        self.build_cmd = 'build -privileged'
        self.inspect_cmd = 'inspect'
        self.images_cmd = 'images'
        self.commit_cmd = 'commit'
        self.push_cmd = 'push'
        self.pull_cmd = 'pull'
        self.stop_cmd = 'stop'
        self.tag_cmd = 'tag'
        self.rm_cmd = 'rm'
        self.ps_cmd = 'ps'
        self.info_cmd = 'info'
        self.login_cmd = 'login'
        self.daemon = '-d'
        self.interactive = '-i'
        self.tty = '-t'
        self.port_flag = ' -p'
        self.expose_flag = ' -expose'
        self.volume_flag = ' -v'
        self.cid_file = ' --cidfile'
        self.lxc_flag = ' -lxc-conf'
        self.disable_net = ' -n=false'
        self.host_flag = ' -h'
        self.fs_flag = ' -s'
        self.env_flag = ' -e'
        self.registry = registry
        self.docker_user = 'root'
    def _execute_cmd(self, cmd, server=None, user=None, read_output=True):
        """
        Execute the command on the server via ssh.

        Without `server` the command runs locally (`user` is ignored in
        that path). Returns (stdout, stderr) when read_output is True,
        otherwise the Popen handle of the still-running process.
        """
        if not server:
            # The server is not supplied, so just execute
            # the command locally.
            proc = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
            if read_output:
                out = proc.stdout.read()
                err = proc.stderr.read()
        else:
            # Do not store results in hosts file or warn about
            # changing ssh keys. Also use the key given to us by the fabric.
            flags = " -o ConnectTimeout=10 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null "
            # NOTE(review): self.key is never assigned in __init__; the
            # remote path assumes the fabric sets `cli.key` before use —
            # confirm, otherwise this line raises AttributeError.
            flags += " -i " + self.key
            # If the user is given explicitly use that. Otherwise use the
            # default user (which is probably root).
            if user:
                ip = user + '@' + server
            else:
                ip = self.docker_user + '@' + server
            flags += " -t -t " + ip
            # Wrap the command around an ssh command.
            ssh = 'ssh ' + flags + ' \'%s\'' % cmd
            logging.warning(ssh)
            # All the possible errors that might happen when
            # we try to connect via ssh.
            if read_output:
                out, err, success = robust_com(ssh)
            else:
                # The user does not want us to read the output.
                # That means we can't really check for errors :(
                proc = Popen(ssh, stdout=PIPE, stderr=PIPE, shell=True)
        if read_output:
            # Read both the standard out and error.
            return out, err
        else:
            # The user does not want to read the output.
            return proc
    def get_fs_type(self, server=None):
        """
        Get the backend driver docker is using.
        """
        cmd = self.docker + ' ' + self.info_cmd + ' | grep Driver | awk \'{print $2}\''
        logging.warning(cmd)
        # NOTE(review): `server` is accepted but not forwarded, so this
        # always runs locally — confirm whether that is intended.
        output, _ = self._execute_cmd(cmd)
        return output.strip()
    def version(self, server=None):
        """
        Fetch the current docker version.
        """
        cmd = self.docker + ' ' + self.version_cmd + ' | grep Client | awk \'{print $3}\''
        logging.warning(cmd)
        output, _ = self._execute_cmd(cmd, server)
        return output.strip()
    def list(self, server=None):
        """
        List all the containers.
        """
        cmd = self.docker + ' ' + self.ps_cmd + ' -q'
        logging.warning(cmd)
        output, _ = self._execute_cmd(cmd, server)
        output = output.strip()
        # There is a container ID for each line
        return output.split()
    def images(self, image_name=None, server=None):
        """
        List all images that match the image name
        """
        cmd = self.docker + ' ' + self.images_cmd + ' | awk \'{print $1}\''
        if image_name:
            cmd = cmd + ' | grep ' + image_name
        output, _ = self._execute_cmd(cmd, server)
        logging.warning(cmd)
        return output.strip()
    def build(self, image, docker_file=None, server=None):
        """
        Build a new image from a Dockerfile
        """
        # Default to the current directory when no Dockerfile path given.
        path = '.'
        if docker_file != None:
            path = docker_file
        cmd = self.docker + ' ' + self.build_cmd + ' -t %s %s' % (image, path)
        logging.warning(cmd)
        output, _ = self._execute_cmd(cmd, server)
    def _get_default_run(self, container):
        """Return the container's default command as a JSON string of the
        form '{"Cmd": ...}', read via `docker inspect`."""
        cmd = self.docker + ' ' + self.inspect_cmd + ' ' + container.container
        logging.warning(cmd)
        output, _ = self._execute_cmd(cmd)
        data = json.loads(output.strip())
        cmd = data[0]['Config']['Cmd']
        return json.dumps( {'Cmd' : cmd} )
    def login(self, user, password, email, registry, server=None):
        """
        Login to a remote registry.

        Returns True on "Login Succeeded", otherwise logs the output and
        returns False.
        """
        cmd = self.docker + ' ' + self.login_cmd + ' -u %s -p %s -e %s %s' % (user, password, email, registry)
        logging.warning(cmd)
        output, _ = self._execute_cmd(cmd)
        if output.strip() == "Login Succeeded":
            return True
        else:
            logging.error(output.strip())
            return False
    def _continuous_print(self, process, msg):
        """Stream `process` stdout to the log until EOF, then check one
        line of stderr; False when stderr had content.
        NOTE(review): `msg` is currently unused — confirm intent."""
        while True:
            try:
                out = process.stdout.read(161)
                if out == '':
                    break
                else:
                    logging.warning(out)
            except IOError as e:
                # Transient read errors do not end the stream; keep looping.
                logging.warning(e)
        try:
            errmsg = process.stderr.readline()
            if errmsg and errmsg != '':
                logging.error(errmsg)
                return False
            else:
                logging.warning("downloaded image!")
        except IOError:
            pass
        return True
    def push(self, image, registry=None, server=None):
        """
        Push an image to a remote registry.
        """
        if registry:
            # Re-tag the image under the registry prefix before pushing.
            raw_image_name = image.split("/")[1]
            new_image = "%s/%s" % (registry, raw_image_name)
            tag = self.docker + ' ' + self.tag_cmd + ' ' + image + ' ' + new_image
            logging.warning(tag)
            self._execute_cmd(tag, server, read_output=False)
        else:
            new_image = image
        push = self.docker + ' ' + self.push_cmd + ' ' + new_image
        logging.warning(push)
        child = self._execute_cmd(push, server, read_output=False)
        return self._continuous_print(child, "uploading image...")
    def pull(self, image, server=None):
        """
        Pull a remote image to the local registry.
        """
        pull = self.docker + ' ' + self.pull_cmd + ' ' + image
        logging.warning(pull)
        child = self._execute_cmd(pull, server, read_output=False)
        return self._continuous_print(child, "downloading image...")
    def commit(self, container, snapshot_name, server=None):
        """
        Commit a container
        """
        # Preserve the container's default run command in the snapshot.
        default_run = self._get_default_run(container)
        run_cmd = "-run='%s'" % default_run
        # Construct a new container using the given snapshot name.
        cmd = self.docker + ' ' + self.commit_cmd + ' ' + run_cmd + ' ' + container.container + ' ' + snapshot_name
        logging.warning(cmd)
        self._execute_cmd(cmd, server)
    def stop(self, container, server=None):
        """
        Stop a running container
        """
        cmd = self.docker + ' ' + self.stop_cmd + ' ' + container
        logging.warning(cmd)
        self._execute_cmd(cmd, server)
    def remove(self, container, server=None):
        """
        Remove a container
        """
        cmd = self.docker + ' ' + self.rm_cmd + ' ' + container
        logging.warning(cmd)
        self._execute_cmd(cmd, server)
    def start(self, image, container, service_type, keydir, keyname, privatekey, volumes, args, server=None, user=None, inspector=None, background=False):
        """
        Start a stopped container.

        Returns whatever inspector.inspect(...) yields for the started
        container (container id is None in background mode).
        """
        cmd = self.docker + ' ' + self.start_cmd + ' ' + container
        logging.warning(cmd)
        if background:
            proc = self._execute_cmd(cmd, server, user, False)
            container = None
        else:
            output, _ = self._execute_cmd(cmd, server, user, True)
            container = output.strip()
        # Now parse the output to get the IP and port
        return inspector.inspect(image = image,
                                 container = container,
                                 keydir = keydir,
                                 keyname = keyname,
                                 privatekey = privatekey,
                                 volumes = volumes,
                                 service_type = service_type,
                                 args = args)
    def run(self, service_type, image, volumes, keydir, keyname, privatekey, open_ports, host_map=None, expose_group=None, hostname=None, default_cmd=None, args=None, lxc_opts=None, server=None, user=None, inspector=None, background=False, simulate=False):
        """
        Start a brand new container
        """
        flags = self.daemon
        # Specify the hostname (this is optional)
        if hostname != None:
            flags += self.host_flag
            flags += ' %s ' % hostname
        # Add all the bind mounts
        if volumes != None:
            for v in volumes.keys():
                flags += self.volume_flag
                flags += ' %s:%s' % (v, volumes[v])
        # Add the key directory
        if keydir != None:
            for v in keydir.keys():
                flags += self.volume_flag
                flags += ' %s:%s' % (keydir[v], v)
            flags += self.env_flag
            flags += ' \"KEY=%s\"' % keyname
        # Add the lxc options
        if lxc_opts != None:
            flags += self.disable_net
            for o in lxc_opts:
                flags += self.lxc_flag
                flags += ' \"%s\"' % o
        # See if we need to pass in the external
        # Docker registry URL.
        if self.registry:
            flags += self.env_flag
            flags += ' \"DOCKER_REGISTRY=%s\"' % self.registry
        # If there's not a default command, just
        # make it blank.
        if not default_cmd:
            default_cmd = ''
        # The user does not want to print out the output (makes sense
        # if the container "eats" up the physical network device). However
        # we should still store the container ID somewhere.
        if background:
            flags += self.cid_file + " /ferry/containers/container.pid"
        # Now construct the final docker command.
        cmd = self.docker + ' ' + self.run_cmd + ' ' + flags + ' ' + image + ' ' + default_cmd
        logging.warning(cmd)
        # Check if this is a simulated run. If so,
        # just return None.
        if simulate:
            return None
        if background:
            proc = self._execute_cmd(cmd, server, user, False)
            container = None
        else:
            output, error = self._execute_cmd(cmd, server, user, True)
            err = error.strip()
            # Recognize the common daemon-down / missing-image failures.
            if re.compile('[/:\s\w]*Can\'t connect[\'\s\w]*').match(err):
                logging.error("Ferry docker daemon does not appear to be running")
                return None
            elif re.compile('Unable to find image[\'\s\w]*').match(err):
                logging.error("%s not present" % image)
                return None
            container = output.strip()
        return inspector.inspect(image, container, keydir, keyname, privatekey, volumes, hostname, open_ports, host_map, service_type, args, server)
    def _get_lxc_net(self, lxc_tuples):
        """Extract the IPv4 address (without the /prefix) from docker's
        LxcConf key/value list; None when absent."""
        for l in lxc_tuples:
            if l['Key'] == 'lxc.network.ipv4':
                ip = l['Value'].split('/')[0]
                return ip
        return None
class DockerInspector(object):
    """Runs `docker inspect` through a DockerCLI and converts the JSON
    result into a populated DockerInstance."""
    def __init__(self, cli):
        # cli: the DockerCLI used to execute the inspect command.
        self.cli = cli
    def inspect(self, image, container, keydir=None, keyname=None, privatekey=None, volumes=None, hostname=None, open_ports=None, host_map=None, service_type=None, args=None, server=None):
        """
        Inspect a container and return information on how
        to connect to the container.

        Returns a DockerInstance, or None when the container is not
        running.
        """
        # FIX: the original used a mutable default ([]) for open_ports;
        # None keeps the call signature backward-compatible without
        # sharing one list object across calls.
        if open_ports is None:
            open_ports = []
        cmd = self.cli.docker + ' ' + self.cli.inspect_cmd + ' ' + container
        logging.warning(cmd)
        output, _ = self.cli._execute_cmd(cmd, server)
        data = json.loads(output.strip())
        instance = DockerInstance()
        # `docker inspect` wraps its result in a one-element list.
        if type(data) is list:
            data = data[0]
        # Check if the container is running. It is an error
        # if the container is not running.
        if not bool(data['State']['Running']):
            logging.error("container for %s is not running" % image)
            return None
        # Otherwise start collecting the various container information.
        instance.container = container
        instance.image = data['Config']['Image']
        instance.internal_ip = data['NetworkSettings']['IPAddress']
        # If we've used the lxc config, then the networking information
        # will be located somewhere else.
        if instance.internal_ip == "":
            instance.internal_ip = self.cli._get_lxc_net(data['HostConfig']['LxcConf'])
        if hostname:
            instance.host_name = hostname
        else:
            # Need to inspect to get the hostname.
            instance.host_name = data['Config']['Hostname']
        instance.service_type = service_type
        instance.args = args
        if len(open_ports) == 0:
            # No explicit ports requested: take docker's port bindings.
            port_mapping = data['HostConfig']['PortBindings']
            if port_mapping:
                instance.ports = port_mapping
        else:
            for p in open_ports:
                if host_map and p in host_map:
                    instance.ports[p] = host_map[p]
                else:
                    instance.ports[p] = []
        # Add any data volume information.
        if volumes:
            instance.volumes = volumes
        else:
            instance.volumes = data['Volumes']
        # Store the key information.
        instance.keydir = keydir
        instance.keyname = keyname
        instance.privatekey = privatekey
        return instance
|
<filename>well_plate_project/data_etl/_3f_cluster_hough.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 2 12:52:01 2020
@author: modal
"""
import cv2
import matplotlib.pyplot as plt
def cluster_hough(warped):
    """Detect well circles in a (cropped) well-plate photo.

    Pipeline: LAB b-channel -> histogram equalization -> k-means color
    quantization (K=9) -> Hough circle detection on the quantized image.
    Intermediate images are shown with matplotlib as a side effect.

    Returns the preprocessed single-channel image with any detected
    circles drawn on it (unchanged when no circles are found).
    """
    import numpy as np
    # Work on the LAB b-channel; it separates the wells from the plate
    # better than plain grayscale here.
    test = warped.copy()
    l, a, b = cv2.split(cv2.cvtColor(test, cv2.COLOR_BGR2LAB))
    test = b
    test = cv2.equalizeHist(test)
    # TODO: consider local equalization (cv2.createCLAHE) instead of the
    # global equalizeHist above.
    plt.figure(figsize=(20, 20))
    plt.imshow(test)
    plt.xticks([])
    plt.yticks([])
    plt.show()
    # k-means expects an (N, channels) float32 sample matrix.
    if len(test.shape) < 3:
        test = np.expand_dims(test, axis=2)
    Z = test.reshape((-1, test.shape[2]))
    Z = np.float32(Z)
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    K = 9
    res, label, center = cv2.kmeans(Z, K, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
    # Map every pixel back onto its cluster centre to quantize the image.
    center = np.uint8(center)
    res = center[label.flatten()]
    res2 = res.reshape(test.shape)
    plt.figure(figsize=(10, 10))
    plt.imshow(res2)
    plt.show()
    # Hough parameters (dp, minDist, radii) are hand-tuned for this
    # image scale — TODO confirm for other resolutions.
    circles = cv2.HoughCircles(res2.astype('uint8'), cv2.HOUGH_GRADIENT, 2.7, 85,
                               param1=30, param2=90, minRadius=40, maxRadius=45)
    # FIX: HoughCircles returns None when nothing is detected; the
    # original crashed on np.uint16(np.around(None)).
    if circles is None:
        return test
    circles = np.uint16(np.around(circles))
    for i in circles[0, :]:
        # draw the outer circle
        cv2.circle(test, (i[0], i[1]), i[2], (0, 255, 0), 2)
        # draw the center of the circle
        cv2.circle(test, (i[0], i[1]), 2, (0, 0, 255), 3)
    plt.figure(figsize=(15, 15))
    plt.imshow(test)
    plt.xticks([])
    plt.yticks([])
    plt.show()
    return test
#%% INIT
def clear_all():
    """Delete every plain variable from this module's global namespace.

    Names with a leading underscore, functions and modules are kept
    (the latter two detected via their str() representation).
    """
    for name, value in list(globals().items()):
        if name.startswith('_'):
            continue
        if 'func' in str(value) or 'module' in str(value):
            continue
        del globals()[name]
def load_test_file():
    """Load the hard-coded test image from the raw data directory.

    Shows the image with matplotlib as a side effect and returns it as
    a BGR array (cv2.imread result).

    Raises FileNotFoundError when the image is missing.
    """
    image_file_name = 'a2_a_cropped.jpg'
    from well_plate_project.config import data_dir
    raw_data_dir = data_dir / 'raw'
    # FIX: removed the unused `path = raw_data_dir / 'EXPERIMENTS'` local.
    image_file = raw_data_dir / image_file_name
    # FIX: raise explicitly instead of `assert`, which is stripped
    # under `python -O`.
    if not image_file.is_file():
        raise FileNotFoundError("Test image not found: %s" % image_file)
    img = cv2.imread(str(image_file))
    plt.imshow(img)
    plt.show()
    return img
if __name__ == "__main__":
    # Manual smoke test: reset the namespace, load the sample image and
    # run the circle-detection pipeline on it.
    clear_all()
    img = load_test_file()
    print("Testing ... ")
    result = cluster_hough(img)
    print("Plotting... ")
    plt.imshow(cv2.cvtColor(result, cv2.COLOR_BGR2RGB))
    plt.xticks([])
    plt.yticks([])
    plt.show()
|
import sys, math, logging
# Public names exported by `from config import *`.
__all__ = 'defaults,byteorder,partial'.split(',')
# Setup some version-agnostic types that we can perform checks with
# (`long`/`unicode` exist only on Python 2; the conditional keeps them
# out of the Python 3 branch, where they are never evaluated).
integer_types = (int, long) if sys.version_info.major < 3 else (int,)
string_types = (str, unicode) if sys.version_info.major < 3 else (str,)
class field:
    """Factory for configuration-entry descriptors.

    Each classmethod below manufactures a brand-new descriptor
    *instance* whose class is built on the fly with type(...), so every
    configuration entry carries its own name and docstring.
    """
    class descriptor(object):
        """Base data descriptor storing one value per owner instance."""
        def __init__(self):
            # Values are kept per owning instance, keyed by the instance.
            self.__value__ = {}
        def __set__(self, instance, value):
            self.__value__[instance] = value
        def __get__(self, instance, type=None):
            # None until a value has been assigned for this instance.
            return self.__value__.get(instance)
        def __delete__(self, instance):
            # Configuration entries cannot be deleted.
            raise AttributeError
    class __enum_descriptor(descriptor):
        """Descriptor whose value must belong to a fixed set of option
        types (populated by field.enum / option())."""
        def option(self, name, documentation=''):
            """Create a new option type, register it, and return it."""
            cls = type(self)
            # FIX: type() requires a *tuple* of bases; the original
            # passed `cls` bare, so option() always raised TypeError.
            res = type(name, (cls,), {'__doc__': documentation})
            self.__option__.add(res)
            return res
        def __set__(self, instance, value):
            if value in self.__option__:
                return field.descriptor.__set__(self, instance, value)
            raise ValueError("{!r} is not a member of {!r}".format(value, self.__option__))
    class __type_descriptor(descriptor):
        """Descriptor that type-checks assignments against __type__
        (a single type or an iterable of types)."""
        __type__ = type
        def __set__(self, instance, value):
            if (hasattr(self.__type__, '__iter__') and type(value) in self.__type__) or isinstance(value, self.__type__):
                return field.descriptor.__set__(self, instance, value)
            raise ValueError("{!r} is not an instance of {!r}".format(value, self.__type__))
    class __set_descriptor(descriptor):
        """Descriptor that forwards reads/writes to user callables."""
        set, get = None, None
        def __init__(self):
            # State lives behind the callables; no local storage needed.
            return
        def __set__(self, instance, value):
            res = self.__getattribute__('set')
            # Unwrap the bound method to reach the raw callable (py2/py3).
            return res.im_func(value) if sys.version_info.major < 3 else res.__func__(value)
        def __get__(self, instance, type=None):
            res = self.__getattribute__('get')
            return res.im_func() if sys.version_info.major < 3 else res.__func__()
    class __bool_descriptor(descriptor):
        """Descriptor that coerces every assignment to bool, warning on
        non-bool rvalues."""
        def __set__(self, instance, value):
            if not isinstance(value, bool):
                logging.warning("rvalue {!r} is not of boolean type. Coercing it into one : ({:s} != {:s})".format(value, type(value).__name__, bool.__name__))
            return field.descriptor.__set__(self, instance, bool(value))
    class option_t(object):
        """Marker base class for every option built by field.option."""
    @classmethod
    def enum(cls, name, options=(), documentation=''):
        """Build a descriptor restricted to the given option types."""
        base = cls.__enum_descriptor
        attrs = dict(base.__dict__)
        attrs['__option__'] = set(options)
        attrs['__doc__'] = documentation
        cons = type(name, (base,), attrs)
        return cons()
    @classmethod
    def option(cls, name, documentation=''):
        """Create a stand-alone option *type* (usable with field.enum)."""
        base = field.option_t
        return type(name, (base,), {'__doc__': documentation})
    @classmethod
    def type(cls, name, subtype, documentation=''):
        """Build a descriptor accepting only instances of `subtype`."""
        base = cls.__type_descriptor
        attrs = dict(base.__dict__)
        attrs['__type__'] = subtype
        attrs['__doc__'] = documentation
        cons = type(name, (base,), attrs)
        return cons()
    @classmethod
    def set(cls, name, fetch, store, documentation=''):
        """Build a descriptor backed by fetch()/store(value) callables."""
        base = cls.__set_descriptor
        attrs = dict(base.__dict__)
        attrs['__doc__'] = documentation
        attrs['set'] = store
        attrs['get'] = fetch
        cons = type(name, (base,), attrs)
        return cons()
    @classmethod
    def constant(cls, name, value, documentation=''):
        """Build a read-only descriptor that always yields `value`."""
        base = cls.descriptor
        attrs = dict(base.__dict__)
        def raiseAttributeError(self, instance, value):
            raise AttributeError
        attrs['__set__'] = raiseAttributeError
        # FIX: the original discarded `value`, so constants always read
        # back as None; expose the supplied value through __get__.
        attrs['__get__'] = lambda self, instance, type=None: value
        attrs['__doc__'] = documentation
        cons = type(name, (base,), attrs)
        return cons()
    @classmethod
    def bool(cls, name, documentation=''):
        """Build a bool-coercing descriptor."""
        base = cls.__bool_descriptor
        attrs = dict(base.__dict__)
        attrs['__doc__'] = documentation
        cons = type(name, (base,), attrs)
        return cons()
def namespace(cls):
    """Class decorator: turn `cls` into a read-only constant namespace.

    Non-underscore attributes become read-only properties; nested
    classes are recursively wrapped; a custom __repr__ renders the
    entries with their docstrings. Returns an *instance* of the
    rebuilt class, not the class itself.
    """
    # turn all instances of things into read-only attributes
    readonly = []
    if hasattr(property, '__isabstractmethod__'):
        readonly.append(property.__isabstractmethod__)
    readonly.append(property.deleter)
    attributes, properties, subclass = {}, {}, {}
    for name, value in cls.__dict__.items():
        # Qualify the value's __name__ with the owning class for nicer
        # display (skipped for the read-only property machinery above).
        if hasattr(value, '__name__') and all(not isinstance(value, item.__class__) for item in readonly):
            value.__name__ = '.'.join([cls.__name__, name])
        if name.startswith('_') or isinstance(value, property):
            attributes[name] = value
        elif not callable(value) or issubclass(value, field.option_t):
            # Plain values and option types are exposed as properties.
            properties[name] = value
        else:
            # Remaining callables are nested classes; wrap recursively.
            # NOTE(review): a plain function here would make issubclass
            # above raise TypeError — confirm namespaces never hold them.
            subclass[name] = namespace(value)
        continue
    def collectproperties(object):
        # Build (name, formatted-value, first-doc-line) triples.
        result = []
        for name, value in object.items():
            if isinstance(value, type):
                fmt = '<iota>'
            elif hasattr(value, '__class__'):
                fmt = "{!s}".format(value)
            else:
                raise ValueError(name)
            doc = value.__doc__.split('\n')[0] if value.__doc__ else None
            result.append((name, fmt, doc))
        return result
    def formatproperties(items):
        # Align names and values into columns for the repr.
        namewidth = max(len(name) for name, _, _ in items) if items else 0
        formatwidth = max(len(fmt) for _, fmt, _ in items) if items else 0
        result = []
        for name, value, documentation in items:
            fmt = ("{name:{:d}s} : {value:{:d}s} # {documentation:s}" if documentation else "{name:{:d}s} : {value:{:d}s}").format
            result.append(fmt(namewidth, formatwidth, name=name, value=value, documentation=documentation))
        return result
    def __repr__(self):
        formatdescription = ("{{{!s}}} # {}\n" if cls.__doc__ else "{{{!s}}}\n").format
        items = collectproperties(properties)
        props = formatproperties(items)
        subclasses = ["{{{:s}}}\n...".format('.'.join([cls.__name__, name])) for name in subclass.keys()]
        res = formatdescription(cls.__name__, cls.__doc__) + '\n'.join(props)
        if subclasses:
            return res + '\n' + '\n'.join(subclasses) + '\n'
        return res + '\n'
    def __setattr__(self, name, value):
        # Only pre-existing (private/property) attributes may be set.
        if name in attributes:
            object.__setattr__(self, name, value)
            return
        raise AttributeError("Configuration '{:s}' does not have field named '{:s}'".format(cls.__name__, name))
    attributes['__repr__'] = __repr__
    attributes['__setattr__'] = __setattr__
    # Expose every collected property/subclass as a read-only property
    # (the name=name default binds each loop value eagerly).
    attributes.update((name, property(fget=lambda _, name=name: properties[name])) for name in properties)
    attributes.update((name, property(fget=lambda _, name=name: subclass[name])) for name in subclass)
    cons = type(cls.__name__, cls.__bases__, attributes)
    result = cons()
    # Go through the attributes and fix their names so that they display
    # properly on both Python 2 and Python 3 (Py3 does not include the
    # full contents of the .__name__ property in its output).
    for name in properties:
        value = getattr(result, name)
        fullname = name if not hasattr(value, '__name__') and isinstance(value, object) else value.__name__
        components = fullname.rsplit('.', 1)
        if len(components) > 1:
            prefix, name = components
            value.__module__, value.__name__ = '.'.join([value.__module__, prefix]), name
        continue
    return result
def configuration(cls):
    """Class decorator: turn `cls` into a live configuration object.

    field.descriptor entries keep their validation on assignment;
    nested classes are wrapped recursively and exposed read-only; a
    custom __repr__ renders current values. Returns an *instance* of
    the rebuilt class.
    """
    attributes, properties, subclass = dict(cls.__dict__), {}, {}
    for name, value in attributes.items():
        if name.startswith('_'):
            continue
        elif isinstance(value, field.descriptor):
            properties[name] = value
        elif not hasattr(value, '__class__') or (object.__sizeof__(value) == object.__sizeof__(type)):
            # NOTE(review): crude "is a class" heuristic — compares the
            # object's basic size to that of `type` — confirm it holds
            # on all supported interpreters.
            subclass[name] = configuration(value)
        continue
    def collectproperties(object, values):
        # Build (name, current-value, first-doc-line) triples.
        result = []
        for name, value in object.items():
            documentation = value.__doc__.split('\n')[0] if value.__doc__ else None
            result.append((name, values[name], documentation))
        return result
    def formatproperties(items):
        # Align names and repr'd values into columns for the repr.
        namewidth = max(len(name) for name, _, _ in items)
        formatwidth = max(len("{!r}".format(format)) for _, format, _ in items)
        result = []
        for name, value, documentation in items:
            fmt = ("{name:{:d}s} = {value:<{:d}s} # {doc:s}" if documentation else "{name:{:d}s} = {value:<{:d}s}").format
            result.append(fmt(namewidth, formatwidth, name=name, value="{!r}".format(value), doc=documentation))
        return result
    def __repr__(self):
        formatdescription = ('[{!s}] # {}\n' if cls.__doc__ else '[{!s}]\n').format
        values = {name : getattr(self, name, None) for name in properties}
        items = collectproperties(properties, values)
        res = formatdescription(cls.__name__, cls.__doc__.split('\n')[0] if cls.__doc__ else None) + '\n'.join(formatproperties(items))
        subclasses = ["[{:s}]\n...".format('.'.join([cls.__name__, name])) for name in subclass.keys()]
        if subclasses:
            return res + '\n' + '\n'.join(subclasses) + '\n'
        return res + '\n'
    def __setattr__(self, name, value):
        # Assignment still routes through the field descriptors because
        # object.__setattr__ honours data descriptors on the class.
        if name in attributes:
            object.__setattr__(self, name, value)
            return
        raise AttributeError("Namespace '{:s}' does not have a field named '{:s}'".format(cls.__name__, name))
    attributes['__repr__'] = __repr__
    attributes['__setattr__'] = __setattr__
    attributes.update({name : property(fget=lambda _, name=name: subclass[name]) for name in subclass})
    result = type(cls.__name__, cls.__bases__, attributes)
    return result()
### constants that can be used as options
@namespace
class byteorder:
    '''Byte order constants'''
    # Each attribute is a distinct option *type* for use with field.enum.
    bigendian = field.option('bigendian', 'Specify big-endian ordering')
    littleendian = field.option('littleendian', 'Specify little-endian ordering')
@namespace
class partial:
    # Options describing how to render the sub-byte offset of a binary
    # field. (No class docstring added: namespace() folds __doc__ into
    # the repr output.)
    fractional = field.option('fractional', 'Display the sub-offset as a fraction of a bit (0.0, 0.125, 0.25, ..., 0.875)')
    hex = field.option('hex', 'Display the sub-offset in hexadecimal (0.0, 0.2, 0.4, ..., 0.c, 0.e)')
    bit = field.option('bit', 'Display the sub-offset as just the bit number (0.0, 0.1, 0.2, ..., 0.7)')
### new-config
@configuration
class defaults:
    # Package-wide configuration tree; values are assigned at module
    # load below. (No class docstring added: configuration() folds
    # __doc__ into the repr output.)
    log = field.type('default-logger', logging.Filterer, 'Default logging facility and level.')
    class integer:
        size = field.type('integersize', integer_types, 'The word-size of the architecture.')
        order = field.enum('byteorder', (byteorder.bigendian, byteorder.littleendian), 'The byteorder to use for new integers and pointers.')
    class ptype:
        clone_name = field.type('clone_name', string_types, 'The formatspec to use when mangling the name during the cloning a type (will only affect newly cloned).')
        noncontiguous = field.bool('noncontiguous', 'Allow optimization for non-contiguous ptype.container elements.')
    class pint:
        bigendian_name = field.type('bigendian_name', string_types, 'The formatspec to use when mangling the names for integers that are big-endian.')
        littleendian_name = field.type('littleendian_name', string_types, 'The formatspec to use when mangling the names for integers that are little-endian.')
    class parray:
        break_on_max_count = field.bool('break_on_max_count', 'If a dynamic array is larger than max_count, then raise an exception.')
        max_count = field.type('max_count', integer_types, 'Notify via a warning (exception if \'break_on_max_count\') when length is larger than max_count.')
    class pstruct:
        use_offset_on_duplicate = field.bool('use_offset_on_duplicate', 'If a name is duplicated, suffix it with the field offset (otherwise its index).')
    class display:
        show_module_name = field.bool('show_module_name', 'Include the full module name when displaying a summary.')
        show_parent_name = field.bool('show_parent_name', 'Include the parent name when displaying a summary.')
        mangle_with_attributes = field.bool('mangle_with_attributes', 'Allow instance attribute names to be used in the name-mangling formatspecs (cloning or byteorder).')
        class hexdump:
            '''Formatting for a hexdump'''
            width = field.type('width', integer_types)
            threshold = field.type('threshold', integer_types)
        class threshold:
            '''Width and Row thresholds for displaying summaries'''
            summary = field.type('summary_threshold', integer_types, 'Maximum number of bytes for a summary before shortening it with \'summary_message\'.')
            summary_message = field.type('summary_threshold_message', string_types, 'Formatspec to use before summary has reached its threshold.')
            details = field.type('details_threshold', integer_types, 'Maximum number of bytes for details before replacing it with \'details_message\'.')
            details_message = field.type('details_threshold_message', string_types, 'Formatspec to use before details have reached their threshold.')
    class pbinary:
        '''How to display attributes of an element containing binary fields which might not be byte-aligned'''
        offset = field.enum('offset', (partial.bit, partial.fractional, partial.hex), 'The format to use when displaying the sub-offset for binary types.')
        bigendian_name = field.type('bigendian_name', string_types, 'The formatspec to use for elements which are read most-significant to least-significant.')
        littleendian_name = field.type('littleendian_name', string_types, 'The formatspec to use for elements which are read least-significant to most-significant.')
    # Accessors for the default data source, resolved at call time
    # against the module-level `ptype` imported further down.
    def __getsource():
        global ptype
        return ptype.source
    def __setsource(value):
        global ptype
        # NOTE(review): `provider` is not defined anywhere in this
        # chunk; if the hasattr() check fails this raises NameError —
        # confirm `provider` is injected elsewhere.
        if all(hasattr(value, method) for method in ('seek','store','consume')) or isinstance(value, provider.base):
            ptype.source = value
            return
        raise ValueError("Invalid source object")
    source = field.set('default-source', __getsource, __setsource, 'Default source used that data will be load from or committed to in new instances.')
# Resolve the ptype module: relative import inside the package, plain
# import when run flat.
try:
    from . import ptype
except ImportError:
    # XXX: recursive
    import ptype
### defaults
# logging
defaults.log = log = logging.getLogger('ptypes')
log.setLevel(logging.root.level)
log.propagate = 1
res = logging.StreamHandler(None)
res.setFormatter(logging.Formatter("[%(created).3f] <%(process)x.%(thread)x> [%(levelname)s:%(name)s] %(message)s", None))
log.addHandler(res)
del(res, log)
# general integers
# Word size in bytes, derived from the bit width of sys.maxsize.
defaults.integer.size = math.trunc(math.log(2 * (sys.maxsize + 1), 2) // 8)
defaults.integer.order = byteorder.littleendian if sys.byteorder == 'little' else byteorder.bigendian if sys.byteorder == 'big' else None
# display
defaults.display.show_module_name = False
defaults.display.show_parent_name = False
defaults.display.hexdump.width = 16
defaults.display.hexdump.threshold = 8
defaults.display.threshold.summary = 80
defaults.display.threshold.details = 8
defaults.display.threshold.summary_message = ' ..skipped ~{leftover} bytes.. '
defaults.display.threshold.details_message = ' ..skipped {leftover} rows, {skipped} bytes.. '
defaults.display.mangle_with_attributes = False
# array types
defaults.parray.break_on_max_count = False
defaults.parray.max_count = sys.maxsize
# structures
defaults.pstruct.use_offset_on_duplicate = True
# root types
defaults.ptype.noncontiguous = False
#defaults.ptype.clone_name = 'clone({})'
#defaults.pint.bigendian_name = 'bigendian({})'
#defaults.pint.littleendian_name = 'littleendian({})'
defaults.ptype.clone_name = 'c({})'
# integer types
# Only non-native byte orders get a mangled name.
defaults.pint.bigendian_name = 'be({})' if sys.byteorder.startswith('little') else '{}'
defaults.pint.littleendian_name = 'le({})' if sys.byteorder.startswith('big') else '{}'
# pbinary types
defaults.pbinary.offset = partial.hex
defaults.pbinary.bigendian_name = 'pb({})'
defaults.pbinary.littleendian_name = 'pble({})'
if __name__ == '__main__':
    # Smoke test / demonstration of the namespace and configuration decorators.
    @namespace
    class consts:
        # A mix of option fields, plain values, and nested namespaces to
        # exercise the namespace rendering.
        bigendian = field.option('bigendian', 'Big-endian integers')
        littleendian = field.option('littleendian', 'Little-endian integers')
        size = 20
        whatever = object()
        class huh:
            what = 5
            default = 10
            blah = object()
            class more:
                whee = object()
        class blah:
            pass

    import logging

    @configuration
    class config(object):
        byteorder = field.enum('byteorder', (consts.bigendian, consts.littleendian), 'The endianness of integers/pointers')
        integersize = field.type('integersize', integer_types, 'The word-size of the architecture')

        class display:
            summary = field.type('single-line', integer_types)
            details = field.type('multi-line', integer_types)
            show_module = field.bool('show-module-name')

        def __getlogger():
            return logging.root
        def __setlogger(value):
            logging.root = value
        logger = field.set('default-logger', __getlogger, __setlogger, 'Default place to log progress')
        #logger = field.type('default-logger', logging.Filterer, 'Default place to log progress')

        def __getsource():
            return ptype.source
        def __setsource(value):
            # Stricter than the module-level setter earlier in this file:
            # this one only accepts a provider.base instance.
            if not isinstance(value, provider.base):
                raise ValueError("Invalid source object")
            ptype.source = value
        source = field.set('default-source', __getsource, __setsource, 'Default source to load/commit data from/to')

    #ptypes.config.logger = logging.root
    print("{!r}".format(consts))
    print("{!r}".format(consts.blah))
    print("{!r}".format(consts.huh))
    print("{!r}".format(config))
    print("{!r}".format(config.display))
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
__author__ = 'isparks'
from datetime import datetime
from random import choice
from string import ascii_letters
from rwslib import RWSConnection
from rwslib.builders import *
from rwslib.rws_requests import PostDataRequest
class Folder(object):
    """A study folder (event) that groups one or more forms, keyed by form OID."""

    def __init__(self, oid, repeat=None):
        self.oid = oid        # folder OID
        self.repeat = repeat  # repeat key, if any
        self.forms = {}       # form_oid -> Form
class Form(object):
    """A form within a folder; sorts incoming rows into log-line records."""

    def __init__(self, oid, repeat=None):
        self.oid = oid
        self.repeat = repeat
        self.records = {}  # keyed on context item value, or None for non-log forms

    def add_field(self, row):
        """Place one mapping row into the record identified by its log context value."""
        field_oid = row['field_oid']
        context_oid = row.get('log_context_field', None)
        context_value = row.get('log_context_value', None)
        # True when this very field is the unique identifying (context) field
        # used to locate the log line.
        is_context = field_oid == context_oid

        record = self.records.setdefault(context_value, Record())
        record.fields.append(Field(
            field_oid,
            row.get('value', None),
            row.get('specify_value', None),
            row.get('measurement_unit', None),
            raw=row.get('esource_value'),
            context_item=is_context,
            is_new=row.get('is_new', False),
        ))
        # Remember whether the context field itself was ever supplied; if not,
        # make_odm() has to synthesize it later.
        record.has_context_field = record.has_context_field or is_context
        # The record keeps whatever context oid/value it saw last.
        record.context_field_oid = context_oid
        record.context_field_value = context_value
class Record(object):
    """One log line (or the sole record) on a form, plus its context metadata."""

    def __init__(self):
        self.fields = []                # Field objects in insertion order
        self.has_context_field = False  # set once the identifying field is seen
        self.context_field_oid = None
        self.context_field_value = None
class Field(object):
    """A single item value destined for an ODM ItemData element."""

    def __init__(self, oid, value, specify_value, measurement_unit, raw, context_item=False, is_new=False):
        self.oid = oid
        self.value = value
        self.specify_value = specify_value
        self.measurement_unit = measurement_unit
        self.context_item = context_item  # is this the record-identifying field?
        self.raw = raw                    # unadulterated eSource value
        self.is_new = is_new              # on a context item: insert vs locate-and-update
def make_odm(study_oid, environment, site_oid, subject_oid, mapping,
             retrieved_datetime, transfer_user, transfer_identifier, freeze=True):
    """Receives a mapping like:

    [
        dict(folder_oid="SCRN", form_oid="DM", field_oid="SEX", value="M", cdash_domain="DM", cdash_element="SEX"),
        dict(folder_oid="SCRN", form_oid="DM", field_oid="DOB", value="1965-02-09", cdash_domain="DM",
             cdash_element="DOB"),
        ...
    ]

    Unpacks this into a ODM Message broken up by [folder][form][record][field]
    """
    # Sort unstructured dicts into hierarchy of objects to send
    folders = {}  # Map of folders to forms to records to fields
    for row in mapping:
        folder_oid = row.get('folder_oid', 'SUBJECT')
        folder = folders.get(folder_oid, False)
        if not folder:
            folder = Folder(folder_oid)
            folders[folder_oid] = folder
        form_oid = row.get('form_oid')
        form = folder.forms.get(form_oid, False)
        if not form:
            form = Form(form_oid)
            folder.forms[form_oid] = form
        # add_field sorts into appropriate records
        form.add_field(row)

    # Now loop through our structure and build ODM
    study_events = []
    for folder_oid in folders:
        folder = folders[folder_oid]
        study_event = StudyEventData(folder.oid, study_event_repeat_key=None)  # TODO: Folder repeat key?
        study_events.append(study_event)
        # Loop through forms in folder
        for form_oid in folder.forms:
            form = folder.forms[form_oid]
            # Add formdata to study event
            formdata = FormData(form.oid, transaction_type="Update")
            study_event << formdata
            # Loop through records we gathered
            for record_context in form.records:
                record = form.records[record_context]
                params = {}
                if record_context is not None:
                    # Log line?
                    # NOTE(review): `params` is built here but never passed to
                    # ItemGroupData() below -- dead code, or a missing
                    # ItemGroupData(**params). Confirm intent before changing.
                    params['oid'] = "{0}_LOG_LINE".format(form_oid)
                ig = ItemGroupData()
                # Add itemgroupdata to formdata
                formdata << ig
                # Add all items to itemgroupdata along with external audits to show where they came from
                for field in record.fields:
                    transaction_type = None
                    if field.context_item:
                        if field.is_new:
                            ig.transaction_type = 'Upsert'
                        else:
                            # We want to do a seek an update
                            transaction_type = "Context"
                            ig.transaction_type = 'Update'
                            ig.item_group_repeat_key = '@CONTEXT'
                    ehr_message = "Import from EHR: EHR Source Value %s -> Submitted value: %s" % (field.raw, field.value)
                    item_data = ItemData(field.oid,
                                         field.value,
                                         specify_value=field.specify_value,
                                         transaction_type=transaction_type,
                                         freeze=freeze)(
                        AuditRecord(used_imputation_method=False,
                                    identifier=transfer_identifier,
                                    include_file_oid=False)(
                            UserRef(transfer_user),
                            LocationRef(site_oid),
                            ReasonForChange(ehr_message),
                            # Any string, just becomes part of documentation in Audit trail
                            DateTimeStamp(retrieved_datetime)
                        )
                    )
                    # Measurement unit related to this value?
                    if field.measurement_unit is not None:
                        item_data << MeasurementUnitRef(field.measurement_unit)
                    # Add to itemgroup
                    ig << item_data
                # In context update situation we need to pass the value of the conext field with transaction type
                # of context. So if that is not one of the fields passed in we need to include it for this record
                if not record.has_context_field and record_context is not None:
                    # create the itemdata element, add the mdsol:Freeze attribute
                    ig << ItemData(record.context_field_oid, record.context_field_value, transaction_type="Context",
                                   freeze=freeze)
                    ig.item_group_repeat_key = '@CONTEXT'

    odm = ODM("EHRImport")(
        ClinicalData(study_oid, environment)(
            SubjectData(site_oid, subject_oid, transaction_type="Update", subject_key_type='SubjectUUID')(*study_events)
        )
    )
    return odm
def audit_id():
    """Return a pseudo-random external-audit identifier ('audit_' + 15 letters).

    I don't think this is required but it's good to have the traceability from
    Rave data back to the EHR import/transaction that sent it.
    """
    suffix = ''.join(choice(ascii_letters) for _ in range(15))
    return 'audit_{0}'.format(suffix)
if __name__ == '__main__':
    # Example payload exercising plain fields, units, specify-values and
    # log-line context fields.
    mapping_values = [
        dict(folder_oid="SCREEN", form_oid="DM", field_oid="BRTHDTC", value="01 JAN 1980"),
        # NB. Need to know date format
        dict(folder_oid="SCREEN", form_oid="DM", field_oid="SEX", value="MALE"),  # Male
        dict(folder_oid="SCREEN", form_oid="DM", field_oid="RACE", value="5", specify_value="Mixed Race"),
        # 3=White 5=Other Specify
        dict(folder_oid="VISIT01", form_oid="VS", field_oid="PULSE", value="82"),
        dict(folder_oid="VISIT01", form_oid="VS", field_oid="TEMP", value="33.1", measurement_unit="Celsius"),
        # For AE's going to use AEACNOTH as a context value for now. Later we'll need a surrogate. AEACNOTH is just a handy text field.
        dict(folder_oid="SUBJECT", form_oid="AE", field_oid="AETERM", value="AE 1", log_context_field="AEACNOTH",
             log_context_value="XX1"),
        dict(folder_oid="SUBJECT", form_oid="AE", field_oid="AESTDTC", value="02 Jan 1970",
             log_context_field="AEACNOTH", log_context_value="XX1"),
        # This is a context field. When is_new = True the record get inserted. If is_new = True then it's used to identify the record to update
        dict(folder_oid="SUBJECT", form_oid="AE", field_oid="AEACNOTH", value="XX1", log_context_field="AEACNOTH",
             log_context_value="XX1", is_new=False),
        dict(folder_oid="SUBJECT", form_oid="AE", field_oid="AETERM", value="AE 3", log_context_field="AEACNOTH",
             log_context_value="XX3"),
        dict(folder_oid="SUBJECT", form_oid="AE", field_oid="AESTDTC", value="03 Feb 1981",
             log_context_field="AEACNOTH", log_context_value="XX3"),
        dict(folder_oid="SUBJECT", form_oid="AE", field_oid="AEACNOTH", value="XX3", log_context_field="AEACNOTH",
             log_context_value="XX3", is_new=True),
        # Must send a line like this if you want to CREATE a record.
    ]
    # Build the ODM document and POST it to Rave Web Services.
    odm = make_odm("Mediflex", "Dev", "MDSOL", "222 IJS", mapping_values, datetime.now(), "EHR Import User", audit_id())
    request = PostDataRequest(str(odm))
    print(str(odm))
    # NOTE(review): credentials are hard-coded placeholders; load real ones
    # from the environment or a config file before use.
    rave = RWSConnection('innovate', 'username', 'password')
    print(rave.send_request(request))
|
# project_server.py
# Flask + pymodm patient server: stores patients (keyed by medical record
# number) with heart rates, medical images, ECG images and timestamps.
from flask import Flask, request, jsonify
from datetime import datetime
from pymodm import connect, MongoModel, fields
import PIL  # NOTE(review): imported but never referenced in this module -- confirm needed
# Connect to the MongoDB Atlas cluster at import time.
# NOTE(review): credentials are embedded in the URI (placeholder shown);
# consider loading them from the environment instead.
connect("mongodb+srv://brad_howard:<EMAIL>"
        "/final_database?retryWrites=true&w=majority")
app = Flask(__name__)
class Patient(MongoModel):
    # Persistent patient document; the medical record number is the _id.
    mr_number = fields.IntegerField(primary_key=True)  # medical record number (_id)
    name = fields.CharField()
    heart_rates = fields.ListField()     # appended alongside `datetimes` entries
    medical_images = fields.ListField()  # entries indexed by [0]/[1] elsewhere in this file
    ECG_images = fields.ListField()
    datetimes = fields.ListField()       # "%Y-%m-%d %H:%M:%S" strings recorded with heart rates
def __init__():
    # NOTE(review): despite the name, this is a plain module-level function
    # (it takes no `self`); it is called once from the __main__ guard as a
    # startup banner. Consider renaming (e.g. init_server) together with its caller.
    print("Server is on.")
def add_new_patient_to_db(in_dict):
    """Create and save a new patient document from the request dictionary.

    Recognized keys are "medical_record_number" (the minimum requirement),
    "patient_name", "medical_image", "heart_rate" (which also records the
    current timestamp), and "ECG_image"; each present key initializes the
    corresponding field on a fresh Patient.

    Parameters
    ----------
    in_dict : dict
        Gives the patient information

    Returns
    -------
    bool
        True if successful
    """
    patient = Patient()
    if "medical_record_number" in in_dict:
        patient.mr_number = in_dict["medical_record_number"]
    if "patient_name" in in_dict:
        patient.name = in_dict["patient_name"]
    if "medical_image" in in_dict:
        patient.medical_images = [in_dict["medical_image"]]
    if "heart_rate" in in_dict:
        patient.heart_rates = [in_dict["heart_rate"]]
        # Stamp the heart-rate entry with the time it was recorded.
        now = datetime.now()
        patient.datetimes = [datetime.strftime(now, "%Y-%m-%d %H:%M:%S")]
    if "ECG_image" in in_dict:
        patient.ECG_images = [in_dict["ECG_image"]]
    patient.save()
    return True
def edit_existing_patient(in_dict):
    """Edits a patient that already exists in the database

    If the user wants to edit a patient that already exists
    in the database, they must use this function. This
    function searches the database for the patient
    with the medical record number matching the one
    within the input dictionary. Once it finds this
    patient, it updates their information, adding
    whatever is contained within the input dictionary.
    If there is a problem with the information,
    the function will notify the user. Otherwise,
    it will return True.

    Parameters
    ----------
    in_dict : dict
        Gives the patient information

    Returns
    -------
    bool
        True if successful
    """
    keys_present = check_keys(in_dict)
    for key in keys_present:
        if key == "patient_name":
            # Name is replaced outright: fetch the document and save it back.
            existing_patient = Patient.objects.raw({"_id": in_dict
                                                    ["medical_record_number"]
                                                    }).first()
            existing_patient.name = in_dict["patient_name"]
            existing_patient.save()
        elif key == "medical_image":
            # List fields are appended to via a $push update on the queryset.
            existing_patient = Patient.objects.raw({"_id": in_dict
                                                    ["medical_record_number"]
                                                    })
            existing_patient.update({"$push": {"medical_images":
                                               in_dict['medical_image']}})
        elif key == "heart_rate":
            existing_patient = Patient.objects.raw({"_id": in_dict
                                                    ["medical_record_number"]
                                                    })
            existing_patient.update({"$push": {"heart_rates":
                                               in_dict['heart_rate']}})
            # Each new heart rate also gets a parallel timestamp entry.
            recorded_datetime = datetime.now()
            string_recorded_datetime = datetime.strftime(
                recorded_datetime, "%Y-%m-%d %H:%M:%S")
            existing_patient.update({"$push":
                                     {"datetimes": string_recorded_datetime}})
        elif key == "ECG_image":
            existing_patient = Patient.objects.raw({"_id": in_dict
                                                    ["medical_record_number"]
                                                    })
            existing_patient.update({"$push": {"ECG_images":
                                               in_dict['ECG_image']}})
    return True
def check_keys(in_dict):
    """Return the keys of *in_dict* as a list, preserving insertion order.

    Parameters
    ----------
    in_dict : dict
        Gives the patient information

    Returns
    -------
    list
        Contains the keys within the dictionary
    """
    return list(in_dict)
def validate_inputs(in_dict):
    """Validate (and normalize) the values of the incoming patient dictionary.

    Checks each recognized key's value type, converting where the server
    accepts alternates: a string medical_record_number is converted to int,
    and a list image entry is converted to a tuple. Unrecognized keys are
    ignored.

    Bug fixed: the original image branches used ``type(in_dict[key] == list)``
    (the type of a bool -- always truthy), so any non-str/int/tuple value
    (e.g. a dict) slipped through the list branch instead of being rejected.

    Parameters
    ----------
    in_dict : dict
        Gives the patient information; may be mutated in place (type
        normalization of medical_record_number and image entries).

    Returns
    -------
    bool or str
        True if every value is acceptable, otherwise an error message string.
    """
    error = "There was an unacceptable input, try again"
    for key in list(in_dict.keys()):
        value = in_dict[key]
        if key == "medical_record_number":
            # Accept numeric strings by converting; otherwise require int.
            if type(value) is str:
                in_dict[key] = int(value)
            elif type(value) is not int:
                return error
        elif key == "patient_name":
            if type(value) is not str:
                return error
        elif key in ("medical_image", "ECG_image"):
            # Images travel as (filename, data) pairs: accept tuple, or a
            # list which is normalized to a tuple; reject everything else.
            if type(value) is list:
                in_dict[key] = tuple(value)
            elif type(value) is not tuple:
                return error
        elif key == "heart_rate":
            if type(value) is not int:
                return error
    return True
@app.route("/add_new_patient", methods=["POST"])
def post_add_patient_to_db():
    """POST /add_new_patient: validate the JSON body and upsert a patient.

    Validates the posted dictionary; if valid, updates the existing patient
    with that medical record number, or creates a new one when no such
    patient exists.

    Returns
    -------
    tuple
        (message string, HTTP status code): 200 on success, 400 on bad input.
    """
    in_dict = request.get_json()
    var = validate_inputs(in_dict)
    print(var)
    if var is True:
        # Probe for an existing document so we can choose update vs insert.
        try:
            presence_check = Patient.objects.get({"_id":
                                                  in_dict
                                                  ["medical_record_number"]})
        except Patient.DoesNotExist:
            presence_check = False
        if presence_check is not False:
            edit_existing_patient(in_dict)
            return "Good post made to database", 200
        else:
            add_new_patient_to_db(in_dict)
            return "Good new post made to database", 200
    else:
        # validate_inputs returned an error string; reject the request.
        return "Not an acceptable post, try again", 400
def patient_list():
    """Return every medical record number stored in the database.

    Returns
    -------
    list
        Contains all medical record numbers
    """
    return [patient.mr_number for patient in Patient.objects.raw({})]
@app.route("/patient_list", methods=["GET"])
def get_patient_list():
    """GET /patient_list: all stored medical record numbers as JSON.

    Returns
    -------
    tuple
        (JSON list of medical record numbers, HTTP status 200)
    """
    numbers = patient_list()
    print(numbers)
    return jsonify(numbers), 200
def name_latest_hr_and_ECG_image(mr_num):
    """Return a patient's name plus their latest heart rate, ECG image and datetime.

    Looks up the patient by medical record number and pulls the last entry
    from each relevant list field.

    Parameters
    ----------
    mr_num: int or String
        Contains the medical record number of a patient

    Returns
    -------
    dict
        Contains "name", "latest_hr", "latest_ECG_image" and "latest_datetime"
    """
    mr_num = int(mr_num)
    patient = Patient.objects.raw({"_id": mr_num}).first()
    patient_name = patient.name
    patient_heart_rates = patient.heart_rates
    patient_ECG_images = patient.ECG_images
    patient_datetimes = patient.datetimes
    size_of_hr_list = len(patient_heart_rates)
    size_of_patient_ECG_images = len(patient_ECG_images)
    # NOTE(review): these indexes assume heart_rates/datetimes are parallel
    # lists and raise IndexError when any list is empty -- confirm callers
    # only hit this for patients with at least one entry of each kind.
    latest_hr = patient_heart_rates[size_of_hr_list-1]
    latest_ECG_image = patient_ECG_images[size_of_patient_ECG_images-1]
    # The datetime is looked up with the heart-rate list's length, not its own.
    latest_datetime = patient_datetimes[size_of_hr_list-1]
    out_dict = {"name": patient_name,
                "latest_hr": latest_hr,
                "latest_ECG_image": latest_ECG_image,
                "latest_datetime": latest_datetime}
    return out_dict
@app.route("/name_hr_ecg/<mr_num>", methods=["GET"])
def get_name_latest_hr_and_ECG_image(mr_num):
    """GET /name_hr_ecg/<mr_num>: name, latest HR/ECG image and datetime as JSON.

    Parameters
    ----------
    mr_num: int
        Contains the medical record number of a patient (from the URL)

    Returns
    -------
    tuple
        (JSON dict of patient information, 200) or an error message with 400
    """
    contents = name_latest_hr_and_ECG_image(mr_num)
    # NOTE(review): a failed lookup raises inside the helper before reaching
    # this point, so the 400 branch is effectively unreachable -- confirm
    # whether the helper should catch and return None instead.
    if contents:
        return jsonify(contents), 200
    else:
        return "Unable to return the contents, try again", 400
def timestamps_list(mr_num):
    """Return all datetime strings stored for the given patient.

    Parameters
    ----------
    mr_num : int
        Contains the medical record number of a patient

    Returns
    -------
    list
        Contains all timestamps within the database for given patient
    """
    patient = Patient.objects.raw({"_id": int(mr_num)}).first()
    return patient.datetimes
def ECG_image_list(mr_num):
    """Return element [0] of every stored ECG-image entry for a patient.

    Fixes two naming defects in the original: the accumulator ``patient_list``
    shadowed the module-level ``patient_list()`` function, and the loop
    variable reused (and clobbered) the ``patient`` document binding.
    Behavior is unchanged.

    Parameters
    ----------
    mr_num : int
        Contains the medical record number of a patient

    Returns
    -------
    list
        Contains all ECG images within the database for given patient
    """
    patient = Patient.objects.raw({"_id": int(mr_num)}).first()
    # Each stored entry is a sequence; index 0 holds the portion this
    # endpoint exposes.
    return [entry[0] for entry in patient.ECG_images]
@app.route("/ECG_timestamps/<mr_num>", methods=["GET"])
def get_timestamps_list(mr_num):
    """GET /ECG_timestamps/<mr_num>: timestamps and ECG images for a patient.

    Returns a JSON dict with both the patient's timestamp list and the list
    of ECG-image names, so a client can pair them up.

    Parameters
    ----------
    mr_num : int
        Contains the medical record number of a patient (from the URL)

    Returns
    -------
    tuple
        (JSON dict with "timestamps" and "ECG_images", 200) or an error, 400
    """
    timestamps = timestamps_list(mr_num)
    images = ECG_image_list(mr_num)
    contents = {"timestamps": timestamps,
                "ECG_images": images}
    # NOTE(review): `contents` is always a non-empty dict here, so the 400
    # branch is effectively unreachable -- failures raise in the helpers.
    if contents:
        return jsonify(contents), 200
    else:
        return "Unable to retrieve list of timestamps", 400
def medical_image_list(mr_num):
    """Return element [0] of every stored medical-image entry for a patient.

    Fixes the same naming defects as ECG_image_list had: the accumulator
    ``patient_list`` shadowed the module-level ``patient_list()`` function,
    and the loop variable clobbered the ``patient`` document binding.
    Behavior is unchanged.

    Parameters
    ----------
    mr_num : int
        Contains the medical record number of a patient

    Returns
    -------
    list
        Contains all medical images within the database for given patient
    """
    patient = Patient.objects.raw({"_id": int(mr_num)}).first()
    # Each stored entry is a sequence; index 0 holds the portion this
    # endpoint exposes.
    return [entry[0] for entry in patient.medical_images]
@app.route("/medical_images/<mr_num>", methods=["GET"])
def get_medical_image_list(mr_num):
    """GET /medical_images/<mr_num>: the patient's medical-image names as JSON.

    Parameters
    ----------
    mr_num : int
        Contains the medical record number of a patient (from the URL)

    Returns
    -------
    tuple
        (JSON list of medical images, 200) or an error message with 400
    """
    contents = medical_image_list(mr_num)
    # An empty image list is falsy and yields the 400 branch.
    if contents:
        return jsonify(contents), 200
    else:
        return "Unable to retrieve list of medical images", 400
def validate_ECG_image_timestamp(in_dict):
    """Validate a {"patient", "timestamp"} request dictionary.

    "patient" must carry an int and "timestamp" a str; any other key (or a
    wrong value type) yields an explanatory error string instead of True.

    Parameters
    ----------
    in_dict : dict
        Gives the patient medical record number and timestamp

    Returns
    -------
    bool or str
        True if successful, otherwise an error message string
    """
    for key, value in in_dict.items():
        if key == "patient":
            if type(value) is not int:
                return "A valid patient id was not provided, try again"
        elif key == "timestamp":
            if type(value) is not str:
                return "A valid timestamp was not provided, try again"
        else:
            return "The input dictionary has unusable information, try again"
    return True
def ECG_image_timestamp(in_dict):
    """Look up the ECG image saved at the timestamp named in *in_dict*.

    Finds the patient by medical record number, locates the requested
    timestamp's position within the patient's datetime list, and returns the
    ECG-image entry stored at that same position.

    Parameters
    ----------
    in_dict : dict
        Gives the patient medical record number ("patient") and "timestamp"

    Returns
    -------
    tuple
        Contains file name and its corresponding base64 string
    """
    patient = Patient.objects.raw({"_id": in_dict["patient"]}).first()
    # Timestamps and ECG images are stored as parallel lists.
    position = patient.datetimes.index(in_dict["timestamp"])
    return patient.ECG_images[position]
@app.route("/ECG_image_timestamp", methods=["POST"])
def post_ECG_image_timestamp():
    """POST /ECG_image_timestamp: fetch the ECG image saved at a timestamp.

    Validates the posted {"patient", "timestamp"} dictionary, then returns
    the ECG-image entry recorded at that timestamp for that patient.

    Returns
    -------
    tuple
        (JSON tuple of file name and base64 string, 200) or an error, 400
    """
    in_dict = request.get_json()
    tester = validate_ECG_image_timestamp(in_dict)
    print(tester)
    if tester is True:
        patient_ECG_output = ECG_image_timestamp(in_dict)
        # Ensure a tuple so jsonify produces a stable JSON array.
        patient_ECG_output = tuple(patient_ECG_output)
        return jsonify(patient_ECG_output), 200
    else:
        return "Not a valid input, try again", 400
def validate_medical_image_specific(in_dict):
    """Validate a {"patient", "file_name"} request dictionary.

    "patient" must carry an int and "file_name" a str; any other key (or a
    wrong value type) yields an explanatory error string instead of True.

    Parameters
    ----------
    in_dict : dict
        Gives the patient medical record number and file name

    Returns
    -------
    bool or str
        True if successful, otherwise an error message string
    """
    for key, value in in_dict.items():
        if key == "patient":
            if type(value) is not int:
                return "A valid patient id was not provided, try again"
        elif key == "file_name":
            if type(value) is not str:
                return "A valid filename was not provided, try again"
        else:
            return "The input dictionary has unusable information, try again"
    return True
def medical_image_filename(in_dict):
    """Return the medical-image entry whose stored file name matches the request.

    Finds the patient by medical record number, then locates the image entry
    whose first element equals the requested file name. Fixes dead code in
    the original: a parallel ``patient_images`` list was built and never used.

    Parameters
    ----------
    in_dict : dict
        Gives the patient medical record number ("patient") and "file_name"

    Returns
    -------
    The stored medical-image entry (file name plus image data) that matches.

    Raises
    ------
    ValueError
        If no stored entry carries the requested file name (same behavior as
        the original list.index call).
    """
    patient = Patient.objects.raw({"_id": in_dict["patient"]}).first()
    images = patient.medical_images
    # Entry [0] holds the file name; find its position and return the whole entry.
    file_names = [entry[0] for entry in images]
    return images[file_names.index(in_dict["file_name"])]
@app.route("/get_medical_image", methods=["POST"])
def retrieve_medical_image():
    """POST /get_medical_image: fetch a medical image by its stored file name.

    Validates the posted {"patient", "file_name"} dictionary, then returns
    the matching medical-image entry for that patient.

    Returns
    -------
    tuple
        (JSON medical-image entry, 200) or an error message with 400
    """
    in_dict = request.get_json()
    var = validate_medical_image_specific(in_dict)
    if var is True:
        patient_image = medical_image_filename(in_dict)
        return jsonify(patient_image), 200
    else:
        return "Unable to retrieve image", 400
if __name__ == '__main__':
    # Print the startup banner (the module-level function oddly named
    # __init__), then run the Flask development server.
    __init__()
    app.run()
    # patient_list()
|
from collections import defaultdict
from abc import ABC, abstractmethod
import matplotlib.pylab as plot
import numpy as np
# Base URLs used to translate between GitHub web links and raw-content links.
github_normal_header = 'https://github.com/'
github_raw_content_header = 'https://raw.githubusercontent.com/'
class ReportGen(ABC):
def __init__(self, max_number_of_classes, repo_location, github_branch):
    # max_number_of_classes: per-project cap on classes shown in a report
    # dimensions: the report dimensions; starts as None -- presumably set by
    #   subclasses before the other methods run (TODO confirm)
    # repo_location / github_branch: identify the repository being reported on
    self.max_number_of_classes = max_number_of_classes
    self.dimensions = None
    self.repo_location = repo_location
    self.github_branch = github_branch
def get_categories_pair(self, my_dict):
"""
Function to get the get the dimensions values in the same order they are stored in the self.dimension variable
:param my_dict: Is a dictionary containing a projects dimensions as keys and it's values as the content
:return: Array containing the values of the different dimensions
"""
category_value = []
for category in self.dimensions:
category_value.append(my_dict[category])
return category_value
def generate_radarchart(self, project_name, portfolio_info):
    """
    Function to generate the radarchart images, storing them into a default folder where all the radarchart will be stored
    :param portfolio_info: PorfotolioData structure containing all necessary information for the generation of the radarchar
    :param project_name: String containing the name of the project that you want to generate the radarchart for
    """
    ax = plot.subplot(polar="True")
    values = self.get_categories_pair(portfolio_info.get_analysis_projects_info()[project_name])
    N = len(self.dimensions)
    # Close the polygon by repeating the first value/angle at the end.
    values += values[:1]
    angles = [n / float(N) * 2 * np.pi for n in range(N)]
    angles += angles[:1]
    plot.polar(angles, values)
    plot.fill(angles, values, alpha=0.3)
    # Label each spoke with its dimension name (skip the duplicated closing angle).
    plot.xticks(angles[:-1], self.dimensions)
    ax.set_rlabel_position(0)
    ax.set_title([project_name])
    # Radial axis fixed to the 0-5 score range.
    plot.yticks([0, 1, 2, 3, 4, 5], color="grey", size=7)
    # Saved under a fixed relative path; assumes the directory already exists
    # and the process CWD is the expected one -- TODO confirm.
    plot.savefig('../data/reports/radarchart/' + project_name + '.jpg')
    plot.close()
def cluster_issues_per_class(self, portfolio_info):
    """
    This function will cluster the issues per class which are stored in the object portfolio_info
    :param portfolio_info: PorfotolioData structure containing all necessary information for the generation of the radarchar
    :return: dict keyed by component, each value holding project/component,
             per-dimension issue counts, and the total 'issue_sum'
    """
    issues = portfolio_info.get_arch_issues()
    D = defaultdict(dict)
    D_with_rules = defaultdict(dict)
    # First pass: one counter dict per component.
    # NOTE(review): iterates portfolio_info.get_arch_issues() again although
    # `issues` already caches the same call -- presumably identical; confirm.
    for obj in portfolio_info.get_arch_issues():
        D[issues[obj]['component']] = defaultdict(int)
        D_with_rules[issues[obj]['component']] = defaultdict(int)
    # Second pass: record identity fields, zero the per-dimension counters,
    # and tally issues per rule.
    for obj in issues:
        D[issues[obj]["component"]]['project'] = issues[obj]['project']
        D[issues[obj]["component"]]['component'] = issues[obj]["component"]
        D[issues[obj]["component"]]['issue_sum'] = 0
        D_with_rules[issues[obj]["component"]][issues[obj]['rule']] += 1
        for dimension in self.dimensions:
            D[issues[obj]["component"]][dimension] = 0
    # Third pass: fold rule counts into the dimension buckets that contain
    # the rule, accumulating the overall issue_sum as well.
    for k, v in D_with_rules.items():
        for k1, v1 in v.items():
            for dimension in self.dimensions:
                if k1 in self.dimensions[dimension]:
                    D[k][dimension] += v1
                    D[k]['issue_sum'] += v1
    return D
def sort_by_max_sums_per_project(self, class_ATD_values):
    """
    Sort the clustered issues by total issue count and group them by project.

    :param class_ATD_values: DataFrame containing the clustered issues
    :return: pandas GroupBy over the capitalized table, grouped by 'Project'
    """
    # Drop classes that have no violations at all.
    with_issues = class_ATD_values[class_ATD_values['issue_sum'] > 0]
    # Keep only the max_number_of_classes worst offenders per project.
    ranked = with_issues.sort_values(['project', 'issue_sum'], ascending=False)
    top = ranked.groupby('project').head(self.max_number_of_classes)
    # Derive the short class name from the fully qualified component path.
    top['class'] = top['component'].str.split('/').str[-1]
    capitalized = self.capitalize_table(top)
    return capitalized.groupby('Project')
def capitalize_table(self, class_ATD_values):
    """
    Select the report columns and capitalize their names for markdown export.

    :param class_ATD_values: DataFrame with 'class', dimension and 'project' columns
    :return: a new DataFrame with capitalized column names
    """
    # Column order for the report: class name, one column per dimension, project.
    columns = ['class'] + list(self.dimensions) + ['project']
    table = class_ATD_values[columns]
    # Default mapping: plain capitalization of every selected column...
    rename_map = {name: name.capitalize() for name in columns}
    # ...with a few human-friendly overrides.
    rename_map['class'] = 'Class name'
    rename_map['component'] = 'Fully qualified class name'
    rename_map['issue_sum'] = 'Total issues'
    return table.rename(columns=rename_map, inplace=False)
@abstractmethod
def generate_report(self, project, portfolio_info):
    """
    Generate the report for *project* and store it in a default location.

    Concrete subclasses decide the report format and destination.

    :param portfolio_info: PortfolioData object containing the already analysed data
    :param project: Name of the project to make a report for
    """
    pass
@abstractmethod
def get_table_for_project(self, project):
    """
    Build a table for the report that respects max_number_of_classes.

    :param project: Name of the project we want the table for
    :return: the table in the format suitable for the concrete ReportGen
    """
    pass
@abstractmethod
def get_body_comment(self, atdx_value, project_name):
    """
    Build the body text of the report for one project.

    :param atdx_value: presumably the aggregated ATDx score for the project —
        TODO confirm against the concrete subclasses
    :param project_name: Name of the project the comment is generated for
    """
    pass
@staticmethod
@abstractmethod
def get_git_command(name, number):
    """
    Return the git command string used by the concrete report generator.

    NOTE(review): the semantics of *name* and *number* are defined by the
    subclasses and cannot be confirmed from this file alone.
    """
    pass
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Learning.ui'
#
# Created by: PyQt5 UI code generator 5.7
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
    """
    PyQt5 UI class auto-generated from 'Learning.ui' (pyuic5).

    Do not edit by hand: regenerate from the .ui file instead, as the header
    warning states that manual changes will be lost.
    """
    def setupUi(self, Dialog):
        """Create and position all widgets on *Dialog* (fixed-geometry layout)."""
        Dialog.setObjectName("Dialog")
        Dialog.resize(1046, 507)
        # Bottom row of control buttons (save/pause start disabled).
        self.resultSave_Button = QtWidgets.QPushButton(Dialog)
        self.resultSave_Button.setEnabled(False)
        self.resultSave_Button.setGeometry(QtCore.QRect(330, 460, 150, 40))
        self.resultSave_Button.setCheckable(False)
        self.resultSave_Button.setObjectName("resultSave_Button")
        self.pause_Button = QtWidgets.QPushButton(Dialog)
        self.pause_Button.setEnabled(False)
        self.pause_Button.setGeometry(QtCore.QRect(170, 460, 150, 40))
        self.pause_Button.setObjectName("pause_Button")
        # "Status" group box hosting graphs and learning-progress read-outs.
        self.Status_GroupBox = QtWidgets.QGroupBox(Dialog)
        self.Status_GroupBox.setGeometry(QtCore.QRect(10, 10, 1021, 441))
        self.Status_GroupBox.setObjectName("Status_GroupBox")
        self.displayMode_Label = QtWidgets.QLabel(self.Status_GroupBox)
        self.displayMode_Label.setGeometry(QtCore.QRect(890, 100, 81, 16))
        self.displayMode_Label.setObjectName("displayMode_Label")
        # Progress-metric selector; item texts are set in retranslateUi.
        self.displayMode_ComboBox = QtWidgets.QComboBox(self.Status_GroupBox)
        self.displayMode_ComboBox.setGeometry(QtCore.QRect(890, 120, 121, 22))
        self.displayMode_ComboBox.setObjectName("displayMode_ComboBox")
        self.displayMode_ComboBox.addItem("")
        self.displayMode_ComboBox.addItem("")
        self.displayMode_ComboBox.addItem("")
        # Y-axis range controls for the progress graph.
        self.yAxis_label = QtWidgets.QLabel(self.Status_GroupBox)
        self.yAxis_label.setGeometry(QtCore.QRect(890, 150, 47, 13))
        self.yAxis_label.setObjectName("yAxis_label")
        self.label_3 = QtWidgets.QLabel(self.Status_GroupBox)
        self.label_3.setGeometry(QtCore.QRect(940, 170, 21, 16))
        self.label_3.setObjectName("label_3")
        self.yAxisMin_LineEdit = QtWidgets.QLineEdit(self.Status_GroupBox)
        self.yAxisMin_LineEdit.setGeometry(QtCore.QRect(890, 170, 41, 20))
        self.yAxisMin_LineEdit.setAlignment(QtCore.Qt.AlignCenter)
        self.yAxisMin_LineEdit.setObjectName("yAxisMin_LineEdit")
        self.yAxisMax_LineEdit = QtWidgets.QLineEdit(self.Status_GroupBox)
        self.yAxisMax_LineEdit.setGeometry(QtCore.QRect(960, 170, 41, 20))
        self.yAxisMax_LineEdit.setAlignment(QtCore.Qt.AlignCenter)
        self.yAxisMax_LineEdit.setObjectName("yAxisMax_LineEdit")
        self.cycle_CheckBox = QtWidgets.QCheckBox(self.Status_GroupBox)
        self.cycle_CheckBox.setGeometry(QtCore.QRect(890, 200, 101, 17))
        self.cycle_CheckBox.setObjectName("cycle_CheckBox")
        self.result_Display_Button = QtWidgets.QPushButton(self.Status_GroupBox)
        self.result_Display_Button.setGeometry(QtCore.QRect(942, 217, 70, 23))
        self.result_Display_Button.setObjectName("result_Display_Button")
        # Read-only status fields (epochs, learning setup, macro state).
        self.totalEpoch_Label = QtWidgets.QLabel(self.Status_GroupBox)
        self.totalEpoch_Label.setGeometry(QtCore.QRect(890, 290, 120, 13))
        self.totalEpoch_Label.setObjectName("totalEpoch_Label")
        self.currentLearningSetup_Label = QtWidgets.QLabel(self.Status_GroupBox)
        self.currentLearningSetup_Label.setGeometry(QtCore.QRect(890, 340, 120, 13))
        self.currentLearningSetup_Label.setObjectName("currentLearningSetup_Label")
        self.currentEpoch_Label = QtWidgets.QLabel(self.Status_GroupBox)
        self.currentEpoch_Label.setGeometry(QtCore.QRect(890, 390, 120, 13))
        self.currentEpoch_Label.setObjectName("currentEpoch_Label")
        self.currentEpoch_LineEdit = QtWidgets.QLineEdit(self.Status_GroupBox)
        self.currentEpoch_LineEdit.setGeometry(QtCore.QRect(890, 410, 121, 20))
        self.currentEpoch_LineEdit.setAlignment(QtCore.Qt.AlignCenter)
        self.currentEpoch_LineEdit.setReadOnly(True)
        self.currentEpoch_LineEdit.setObjectName("currentEpoch_LineEdit")
        self.totalEpoch_LineEdit = QtWidgets.QLineEdit(self.Status_GroupBox)
        self.totalEpoch_LineEdit.setGeometry(QtCore.QRect(890, 310, 121, 20))
        self.totalEpoch_LineEdit.setAlignment(QtCore.Qt.AlignCenter)
        self.totalEpoch_LineEdit.setReadOnly(True)
        self.totalEpoch_LineEdit.setObjectName("totalEpoch_LineEdit")
        self.currentLearningSetup_LineEdit = QtWidgets.QLineEdit(self.Status_GroupBox)
        self.currentLearningSetup_LineEdit.setGeometry(QtCore.QRect(890, 360, 121, 20))
        self.currentLearningSetup_LineEdit.setAlignment(QtCore.Qt.AlignCenter)
        self.currentLearningSetup_LineEdit.setReadOnly(True)
        self.currentLearningSetup_LineEdit.setObjectName("currentLearningSetup_LineEdit")
        # Two side-by-side graph areas: weights (left) and progress (right).
        self.graph_Widget = QtWidgets.QWidget(self.Status_GroupBox)
        self.graph_Widget.setGeometry(QtCore.QRect(10, 20, 871, 411))
        self.graph_Widget.setObjectName("graph_Widget")
        self.verticalLayoutWidget = QtWidgets.QWidget(self.graph_Widget)
        self.verticalLayoutWidget.setGeometry(QtCore.QRect(0, 0, 431, 411))
        self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
        self.weightGraphLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
        self.weightGraphLayout.setContentsMargins(0, 0, 0, 0)
        self.weightGraphLayout.setObjectName("weightGraphLayout")
        self.verticalLayoutWidget_2 = QtWidgets.QWidget(self.graph_Widget)
        self.verticalLayoutWidget_2.setGeometry(QtCore.QRect(440, 0, 421, 411))
        self.verticalLayoutWidget_2.setObjectName("verticalLayoutWidget_2")
        self.progressGraphLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget_2)
        self.progressGraphLayout.setContentsMargins(0, 0, 0, 0)
        self.progressGraphLayout.setObjectName("progressGraphLayout")
        self.macro_Label = QtWidgets.QLabel(self.Status_GroupBox)
        self.macro_Label.setGeometry(QtCore.QRect(890, 240, 120, 13))
        self.macro_Label.setObjectName("macro_Label")
        self.macro_LineEdit = QtWidgets.QLineEdit(self.Status_GroupBox)
        self.macro_LineEdit.setGeometry(QtCore.QRect(890, 260, 121, 20))
        self.macro_LineEdit.setAlignment(QtCore.Qt.AlignCenter)
        self.macro_LineEdit.setReadOnly(True)
        self.macro_LineEdit.setObjectName("macro_LineEdit")
        # Weight-display selector and button.
        self.displayWeight_Label = QtWidgets.QLabel(self.Status_GroupBox)
        self.displayWeight_Label.setGeometry(QtCore.QRect(890, 20, 81, 16))
        self.displayWeight_Label.setObjectName("displayWeight_Label")
        self.weightName_ComboBox = QtWidgets.QComboBox(self.Status_GroupBox)
        self.weightName_ComboBox.setGeometry(QtCore.QRect(890, 40, 121, 22))
        self.weightName_ComboBox.setObjectName("weightName_ComboBox")
        self.weight_Display_Button = QtWidgets.QPushButton(self.Status_GroupBox)
        self.weight_Display_Button.setGeometry(QtCore.QRect(940, 70, 70, 23))
        self.weight_Display_Button.setObjectName("weight_Display_Button")
        self.start_Button = QtWidgets.QPushButton(Dialog)
        self.start_Button.setGeometry(QtCore.QRect(10, 460, 150, 40))
        self.start_Button.setObjectName("start_Button")
        self.exit_Button = QtWidgets.QPushButton(Dialog)
        self.exit_Button.setGeometry(QtCore.QRect(880, 460, 150, 40))
        self.exit_Button.setObjectName("exit_Button")
        self.retranslateUi(Dialog)
        QtCore.QMetaObject.connectSlotsByName(Dialog)
    def retranslateUi(self, Dialog):
        """Assign all user-visible strings (kept separate for runtime retranslation)."""
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
        self.resultSave_Button.setText(_translate("Dialog", "Result Save"))
        self.pause_Button.setText(_translate("Dialog", "Pause"))
        self.Status_GroupBox.setTitle(_translate("Dialog", "Status"))
        self.displayMode_Label.setText(_translate("Dialog", "Progress Display"))
        self.displayMode_ComboBox.setItemText(0, _translate("Dialog", "Mean Squared Error"))
        self.displayMode_ComboBox.setItemText(1, _translate("Dialog", "Cross Entropy"))
        self.displayMode_ComboBox.setItemText(2, _translate("Dialog", "Semantic Stress"))
        self.yAxis_label.setText(_translate("Dialog", "Y-Axis"))
        self.label_3.setText(_translate("Dialog", "~"))
        self.yAxisMin_LineEdit.setText(_translate("Dialog", "-0.01"))
        self.yAxisMax_LineEdit.setText(_translate("Dialog", "1.01"))
        self.cycle_CheckBox.setText(_translate("Dialog", "Use Cycle"))
        self.result_Display_Button.setText(_translate("Dialog", "Display"))
        self.totalEpoch_Label.setText(_translate("Dialog", "Total Epoch"))
        self.currentLearningSetup_Label.setText(_translate("Dialog", "Current Learning Setup"))
        self.currentEpoch_Label.setText(_translate("Dialog", "Current Epoch"))
        self.currentEpoch_LineEdit.setText(_translate("Dialog", "-"))
        self.totalEpoch_LineEdit.setText(_translate("Dialog", "-"))
        self.currentLearningSetup_LineEdit.setText(_translate("Dialog", "-"))
        self.macro_Label.setText(_translate("Dialog", "Macro Status"))
        self.macro_LineEdit.setText(_translate("Dialog", "-"))
        self.displayWeight_Label.setText(_translate("Dialog", "Weight Display"))
        self.weight_Display_Button.setText(_translate("Dialog", "Display"))
        self.start_Button.setText(_translate("Dialog", "Start"))
        self.exit_Button.setText(_translate("Dialog", "Exit"))
if __name__ == "__main__":
    import sys

    # Standalone preview of the generated dialog.
    app = QtWidgets.QApplication(sys.argv)
    main_dialog = QtWidgets.QDialog()
    ui = Ui_Dialog()
    ui.setupUi(main_dialog)
    main_dialog.show()
    sys.exit(app.exec_())
|
<reponame>skkapoor/MiningSubjectiveSubgraphPatterns<filename>src/BackgroundDistributions/MaxEntMulti2.py
###################################################################################################################################################################
###################################################################################################################################################################
###################################################################################################################################################################
###################################################################################################################################################################
import numpy as np
import math
import networkx as nx
import os
import sys
path = os.getcwd().split('MiningSubjectiveSubgraphPatterns')[0]+'MiningSubjectiveSubgraphPatterns/'
if path not in sys.path:
sys.path.append(path)
from src.BackgroundDistributions.PDClass import PDClass
###################################################################################################################################################################
class MaxEntMulti2U(PDClass):
    """
    Background distribution for multigraphs if type of prior belief is 'm' and type of graph is 'undirected'

    Parameters
    ----------
    PDClass : src.BackgroundDistributions.PDClass
        base class
    """
    def __init__(self, G = None):
        """
        :param G: optional networkx (multi)graph; when supplied, the MaxEnt
            distribution is fitted immediately via findMaxEntDistribution().
        """
        super().__init__(G)
        # Lagrange multipliers: la constrains degrees, mu constrains numbers
        # of distinct neighbors — one entry per unique (degree, #neighbors) row.
        self.la = None
        self.mu = None
        self.degreeNeighbor = None  # per-node (degree, #neighbors) pairs
        self.degrees = None
        self.Neighbors = None
        self.jrows = None           # node index -> unique-row index
        self.errors = None          # gradient-norm history of the Newton solve
        self.ps_la = None
        self.ps_mu = None
        self.gla = None             # final gradient vector at convergence
        self.lprevUpdate = dict()   # pattern id -> (lambda, nodes, numEdges) of past updates
        if G is not None:
            self.findMaxEntDistribution()
    # ------------------------------------------------------------------------------
    def findMaxEntDistribution(self):
        """
        Fit the maximum-entropy distribution by a damped Newton iteration on
        the Lagrange multipliers (la, mu), grouping nodes that share the same
        (degree, #neighbors) pair into a single unique row.
        """
        self.degrees = np.array(list(dict(sorted(dict(self.G.degree()).items())).values()))
        self.Neighbors = []
        for i in range(self.G.number_of_nodes()):
            self.Neighbors.append(len(list(self.G.neighbors(i))))
        self.Neighbors = np.array(self.Neighbors)
        self.degreeNeighbor = []
        for i in range(len(self.degrees)):
            self.degreeNeighbor.append(tuple([self.degrees[i], self.Neighbors[i]]))
        self.degreeNeighbor = np.array(self.degreeNeighbor)
        ##############################
        # Collapse identical (degree, #neighbors) rows; vrows counts multiplicity.
        prows = self.degreeNeighbor
        prowsunique,irows,self.jrows,vrows = np.unique(prows, axis=0, return_index=True, return_inverse=True, return_counts=True)
        nunique = len(prowsunique)
        self.la = -np.ones(nunique)
        self.mu = -np.ones(nunique)
        h = np.zeros(nunique)
        nit = 1000      # max Newton iterations
        tol = 1e-14     # gradient-norm convergence threshold
        self.errors = np.empty(0)
        ##############################
        lb = -5  # lower exponent bound of the line-search grid (widened on stall)
        for k in range(nit):
            # R/S: pairwise exp of la/mu sums; ps_* are the expectation terms.
            R = np.multiply(np.outer(np.ones(nunique).T, np.exp(self.la/2)),np.outer(np.exp(self.la/2), np.ones(nunique).T))
            S = np.multiply(np.outer(np.ones(nunique).T, np.exp(self.mu/2)),np.outer(np.exp(self.mu/2), np.ones(nunique).T))
            ps_la = np.divide(np.multiply(R,S), np.multiply(1-R, 1-np.multiply(R,1-S)))
            ps_mu = np.divide(np.multiply(R,S), 1-np.multiply(R,1-S))
            # Gradient: expected minus observed constraint values, per unique row.
            gla_la = np.multiply(-prowsunique[:,0]+np.dot(ps_la, vrows)-np.diag(ps_la), vrows)
            gla_mu = np.multiply(-prowsunique[:,1]+np.dot(ps_mu, vrows)-np.diag(ps_mu), vrows)
            self.gla = np.append(gla_la, gla_mu)
            self.errors = np.append(self.errors, np.linalg.norm(self.gla))
            # Hessian blocks (H1: la-la, H2: mu-mu, H3: la-mu cross term).
            H1_u1 = np.dot(np.dot(np.diag(vrows), np.divide(np.multiply(np.multiply(R,S), 1 - np.multiply(np.square(R), 1-S)), np.square(np.multiply(1-R, 1-np.multiply(R,1-S))))), np.diag(vrows))
            H1_u2 = np.diag(np.sum(H1_u1, 0)) - np.diag(np.divide(np.diag(H1_u1), vrows))
            H1 = H1_u1 + H1_u2
            H2_u1 = np.dot(np.dot(np.diag(vrows), np.divide(np.multiply(np.multiply(R,S), 1 - R), np.square(1-np.multiply(R,1-S)))), np.diag(vrows))
            H2_u2 = np.diag(np.sum(H2_u1, 0)) - np.diag(np.divide(np.diag(H2_u1), vrows))
            H2 = H2_u1 + H2_u2
            H3_u1 = np.dot(np.dot(np.diag(vrows), np.divide(np.multiply(R,S), np.square(1-np.multiply(R,1-S)))), np.diag(vrows))
            H3_u2 = np.diag(np.sum(H3_u1, 0)) - np.diag(np.divide(np.diag(H3_u1), vrows))
            H3 = H3_u1 + H3_u2
            H = 0.5 * np.append(np.append(H1, H3, 1), np.append(H3, H2, 1), 0)
            # Newton step via least squares (H may be singular).
            delta = np.linalg.lstsq(- H, self.gla, rcond=max(H.shape)*np.finfo(H.dtype).eps)[0]
            delta_la = delta[0:nunique]
            delta_mu = delta[nunique:nunique+nunique+1]
            # Backtracking line search over step sizes f on a log grid.
            fbest = 0;
            errorbest = self.errors[k];
            for f in np.logspace(lb,1,20):
                latry=self.la+f*delta_la
                mutry=self.mu+f*delta_mu
                Rtry = np.multiply(np.outer(np.ones(nunique).T, np.exp(latry/2)),np.outer(np.exp(latry/2), np.ones(nunique).T))
                Stry = np.multiply(np.outer(np.ones(nunique).T, np.exp(mutry/2)),np.outer(np.exp(mutry/2), np.ones(nunique).T))
                ps_latry = np.divide(np.multiply(Rtry,Stry), np.multiply(1-Rtry, 1-np.multiply(Rtry,1-Stry)))
                ps_mutry = np.divide(np.multiply(Rtry,Stry), 1-np.multiply(Rtry,1-Stry))
                gla_latry = np.multiply(-prowsunique[:,0]+np.dot(ps_latry, vrows)-np.diag(ps_latry), vrows)
                gla_mutry = np.multiply(-prowsunique[:,1]+np.dot(ps_mutry, vrows)-np.diag(ps_mutry), vrows)
                glatry = np.append(gla_latry, gla_mutry)
                errortry = np.linalg.norm(glatry)
                if errortry < errorbest:
                    fbest = f
                    errorbest = errortry
            # No improving step found: widen the search grid, or give up.
            if fbest == 0:
                if lb>-1000:
                    lb = lb*2
                else:
                    break
            self.la = self.la+fbest*delta_la
            self.mu = self.mu+fbest*delta_mu
            if self.errors[k] < tol:
                break
        # Recompute final expectations and gradient with the converged multipliers.
        R = np.multiply(np.outer(np.ones(nunique).T, np.exp(self.la/2)),np.outer(np.exp(self.la/2), np.ones(nunique).T))
        S = np.multiply(np.outer(np.ones(nunique).T, np.exp(self.mu/2)),np.outer(np.exp(self.mu/2), np.ones(nunique).T))
        self.ps_la = np.divide(np.multiply(R,S), np.multiply(1-R, 1-np.multiply(R,1-S)))
        self.ps_mu = np.divide(np.multiply(R,S), 1-np.multiply(R,1-S))
        gla_la = np.multiply(-prowsunique[:,0]+np.dot(self.ps_la, vrows)-np.diag(self.ps_la), vrows)
        gla_mu = np.multiply(-prowsunique[:,1]+np.dot(self.ps_mu, vrows)-np.diag(self.ps_mu), vrows)
        self.gla = np.append(gla_la, gla_mu)
        self.errors = np.append(self.errors, np.linalg.norm(self.gla))
    # ------------------------------------------------------------------------------
    def explambda(self, i, j): #This is indeed explambdaR
        """Return exp((la_i + la_j)/2), i.e. the R factor for node pair (i, j)."""
        R = math.exp(self.la[self.jrows[i]]/2)*math.exp(self.la[self.jrows[j]]/2)
        return R
    # ------------------------------------------------------------------------------
    def explambdaS(self, i, j):
        """Return exp((mu_i + mu_j)/2), i.e. the S factor for node pair (i, j)."""
        S = math.exp(self.mu[self.jrows[i]]/2)*math.exp(self.mu[self.jrows[j]]/2)
        return S
    # ------------------------------------------------------------------------------
    def returnExpectation(self, R, S):
        """Expected multiplicity of an edge given its R and S factors."""
        E = R*S/ ((1-R)*(1-R*(1-S)))
        return E
    # ------------------------------------------------------------------------------
    def getExpectation(self, i, j, **kwargs):
        """Expected number of edges between nodes i and j under the current model."""
        kwargs['isSimple'] = False
        # getPOS / explambdaIncLprev come from PDClass — NOTE(review): not
        # visible here; presumably they fold in lprevUpdate corrections.
        R = self.getPOS(i, j, **kwargs)
        S = self.explambdaS(i, j)
        E = self.returnExpectation(R, S)
        return E
    # ------------------------------------------------------------------------------
    def updateDistribution(self, pat, idx=None, val_return='save'): #lprevUpdate = list() Each item is a tuple (a, b); a = lambda; b = listofnodes()
        """
        Find (by bisection) the lambda correction for pattern *pat* so the
        expected number of edges inside the pattern matches its actual edge
        count, and optionally record it under *idx* in lprevUpdate.

        :param pat: networkx (sub)graph of the pattern
        :param idx: key under which to store the update when saving
        :param val_return: when it contains 'save', the update is persisted
        :return: the fitted lambda (rounded to 10 decimals)
        """
        numNodes = pat.number_of_nodes()
        numEdges = pat.number_of_edges()
        nodes = sorted(list(pat.nodes()))
        # Initial bracket [a, b] for the bisection, derived from the extreme la values.
        mSmallestLambda = np.min(self.la)
        mLargestLambda = np.max(self.la)
        epsilon = 1e-7
        if math.fabs(mSmallestLambda) > math.fabs(mLargestLambda):
            a = epsilon
            b = 4*math.fabs(mSmallestLambda)
        else:
            a = epsilon
            b = 4*math.fabs(mLargestLambda)
        expLambdaR = [None]*numNodes
        expLambdaS = [None]*numNodes
        for i in range(numNodes):
            expLambdaR[i] = [0.0]*numNodes
            expLambdaS[i] = [0.0]*numNodes
        # Precompute pairwise R (including previous updates) and S factors;
        # shrink b so exp(b)*R stays below 1 for every pair.
        for i in range(numNodes):
            for j in range(i+1, numNodes):
                expLambdaR[i][j] = self.explambdaIncLprev(nodes[i], nodes[j])
                expLambdaS[i][j] = self.explambdaS(nodes[i], nodes[j])
                if math.fabs(b) > math.fabs(math.log(expLambdaR[i][j])):
                    b = math.fabs(math.log(expLambdaR[i][j]))
        b = b - epsilon
        # Bisection on the expected-edge-count excess f(lambda) = E[edges] - numEdges.
        while b-a > 1e-11:
            f_a = 0.0
            f_b = 0.0
            f_c = 0.0
            c = (a+b)/2
            for i in range(numNodes):
                for j in range(i+1, numNodes):
                    try:
                        v_aR=expLambdaR[i][j]*math.exp(a)
                        v_bR=expLambdaR[i][j]*math.exp(b)
                        v_cR=expLambdaR[i][j]*math.exp(c)
                        f_a+=self.returnExpectation(v_aR, expLambdaS[i][j])
                        f_b+=self.returnExpectation(v_bR, expLambdaS[i][j])
                        f_c+=self.returnExpectation(v_cR, expLambdaS[i][j])
                    except OverflowError as error:
                        print(error,a,b)
            f_a=f_a-numEdges
            f_b=f_b-numEdges
            f_c=f_c-numEdges
            print('f_a:', f_a, '\t at a:', a)
            print('f_c:', f_c, '\t at c:', c)
            print('f_b:', f_b, '\t at b:', b,'\n')
            if f_c < 0:
                a = c
            else:
                b = c
        lambdac = round((a + b) / 2, 10)
        if 'save' in val_return:
            self.lprevUpdate[idx] = tuple([lambdac, nodes, numEdges])
        # Residual check with the fitted lambda (diagnostic only).
        f_c = 0.0
        for i in range(numNodes):
            for j in range(i+1, numNodes):
                v_cR=expLambdaR[i][j]*math.exp(lambdac)
                f_c+=self.returnExpectation(v_cR, expLambdaS[i][j])
        f_c = f_c-numEdges
        # print('Final lamdba: ',lambdac, f_c, numEdges)
        return lambdac
###################################################################################################################################################################
###################################################################################################################################################################
###################################################################################################################################################################
###################################################################################################################################################################
class MaxEntMulti2D(PDClass):
    """
    Background distribution for multigraphs if type of prior belief is 'm' and type of graph is 'directed'

    Parameters
    ----------
    PDClass : src.BackgroundDistributions.PDClass
        base class
    """
    def __init__(self, G = None):
        """
        :param G: optional networkx directed (multi)graph; when supplied, the
            MaxEnt distribution is fitted immediately.
        """
        super().__init__(G)
        self.tp = 'D'  # marks the directed variant
        # Row (out) and column (in) Lagrange multipliers; la constrains
        # degrees, mu constrains numbers of distinct predecessors/successors.
        self.la_r = None
        self.la_c = None
        self.mu_r = None
        self.mu_c = None
        self.jrows = None   # node -> unique (out-degree, #successors) row index
        self.jcols = None   # node -> unique (in-degree, #predecessors) row index
        self.errors = None  # gradient-norm history of the Newton solve
        self.ps_la = None
        self.ps_mu = None
        self.gla = None
        self.lprevUpdate = dict()  # pattern id -> (lambda, inNL, outNL, numEdges)
        self.indegrees = None
        self.outdegrees = None
        self.predcount = None
        self.succount = None
        self.inpred = None
        self.outsucc = None
        if G is not None:
            self.findMaxEntDistribution()
    # ------------------------------------------------------------------------------
    def findMaxEntDistribution(self):
        """
        Fit the maximum-entropy distribution by a damped Newton iteration on
        (la_r, la_c, mu_r, mu_c), grouping nodes with identical
        (out-degree, #successors) and (in-degree, #predecessors) pairs.
        """
        self.indegrees = np.array(list(dict(sorted(dict(self.G.in_degree()).items())).values()))
        self.outdegrees = np.array(list(dict(sorted(dict(self.G.out_degree()).items())).values()))
        fac = math.log(nx.density(self.G)/(1+nx.density(self.G)))
        self.predcount = []
        for i in range(self.G.number_of_nodes()):
            self.predcount.append(len(list(self.G.predecessors(i))))
        self.predcount = np.array(self.predcount)
        self.succount = []
        for i in range(self.G.number_of_nodes()):
            self.succount.append(len(list(self.G.successors(i))))
        self.succount = np.array(self.succount)
        self.inpred = []
        for i in range(len(self.indegrees)):
            self.inpred.append(tuple([self.indegrees[i], self.predcount[i]]))
        self.inpred = np.array(self.inpred)
        self.outsucc = []
        for i in range(len(self.outdegrees)):
            self.outsucc.append(tuple([self.outdegrees[i], self.succount[i]]))
        self.outsucc = np.array(self.outsucc)
        n = len(self.indegrees)
        m = len(self.outdegrees)
        # Collapse identical row/column constraint vectors; v* are multiplicities.
        prows = self.outsucc
        prowsunique, irows, self.jrows, vrows = np.unique(prows, axis=0, return_index=True, return_inverse=True, return_counts=True)
        rownunique = len(prowsunique)
        self.la_r = -math.fabs(fac)*np.ones(rownunique)
        self.mu_r = np.zeros(rownunique)
        rowh = np.zeros(rownunique)
        pcols = self.inpred
        pcolsunique, icols, self.jcols, vcols = np.unique(pcols, axis=0, return_index=True, return_inverse=True, return_counts=True)
        colnunique = len(pcolsunique)
        self.la_c = -math.fabs(fac)*np.ones(colnunique)
        self.mu_c = np.zeros(colnunique)
        colh = np.zeros(colnunique)
        # loops[i][j] counts nodes falling in both row group i and column group j,
        # used to exclude self-loop pairs from the pair-count matrix.
        loops = np.outer(np.zeros(rownunique), np.zeros(colnunique).T)
        for i in range(rownunique):
            for j in range(colnunique):
                loops[i][j] = len(set(np.where(self.jrows==i)[0]).intersection(set(np.where(self.jcols==j)[0])))
        finalmat = np.outer(vrows, vcols) - loops
        nit = 1000      # max Newton iterations
        tol = 1e-14     # per-node convergence threshold
        self.errors = np.empty(0)
        lb = -5  # lower exponent bound of the line-search grid (widened on stall)
        for k in range(nit):
            R = np.multiply(np.outer(np.exp(self.la_r/2), np.ones(colnunique).T),np.outer(np.ones(rownunique).T, np.exp(self.la_c/2)))
            S = np.multiply(np.outer(np.exp(self.mu_r/2), np.ones(colnunique).T),np.outer(np.ones(rownunique).T, np.exp(self.mu_c/2)))
            self.ps_la = np.divide(np.multiply(R,S), np.multiply(1-R, 1-np.multiply(R,1-S)))
            self.ps_mu = np.divide(np.multiply(R,S), 1-np.multiply(R,1-S))
            # Gradient: expected minus observed constraints, per row/column group.
            gla_t_la = np.multiply(self.ps_la, finalmat)
            gla_r_la = np.sum(gla_t_la, 1) - np.multiply(prowsunique[:,0], vrows)
            gla_c_la = np.sum(gla_t_la, 0) - np.multiply(pcolsunique[:,0], vcols)
            gla_t_mu = np.multiply(self.ps_mu, finalmat)
            gla_r_mu = np.sum(gla_t_mu, 1) - np.multiply(prowsunique[:,1], vrows)
            gla_c_mu = np.sum(gla_t_mu, 0) - np.multiply(pcolsunique[:,1], vcols)
            self.gla = np.append(np.append(gla_r_la, gla_c_la), np.append(gla_r_mu, gla_c_mu))
            self.errors = np.append(self.errors, np.linalg.norm(self.gla))
            # Hessian assembled from second-derivative blocks.
            # NOTE(review): H6/H7/H8/H9/H10 use the raw H*_u matrices rather than
            # the finalmat-weighted H*_t ones — verify against the derivation.
            H1_u = np.divide(np.multiply(np.multiply(R,S), 1 - np.multiply(np.square(R), 1-S)), np.square(np.multiply(1-R, 1-np.multiply(R,1-S))))
            H2_u = np.divide(np.multiply(np.multiply(R,S), 1 - R), np.square(1-np.multiply(R,1-S)))
            H3_u = np.divide(np.multiply(R,S), np.square(1-np.multiply(R,1-S)))
            H1_t = np.multiply(H1_u, finalmat)
            H2_t = np.multiply(H2_u, finalmat)
            H3_t = np.multiply(H3_u, finalmat)
            H1 = np.diag(np.sum(H1_t, 1))
            H2 = np.diag(np.sum(H1_t, 0))
            H3 = np.diag(np.sum(H2_t, 1))
            H4 = np.diag(np.sum(H2_t, 0))
            H5 = H1_t
            H6 = np.diag(np.sum(H3_u, 1))
            H7 = H3_u
            H8 = H7.T
            H9 = np.diag(np.sum(H3_u, 0))
            H10 = H2_u
            R1 = np.append(np.append(H1, H5, 1), np.append(H6, H7, 1), 1)
            R2 = np.append(np.append(H5.T, H2, 1), np.append(H8, H9, 1), 1)
            R3 = np.append(np.append(H6.T, H8.T, 1), np.append(H3, H10, 1), 1)
            R4 = np.append(np.append(H7.T, H9.T, 1), np.append(H10.T, H4, 1), 1)
            H = np.append(np.append(R1, R2, 0), np.append(R3, R4, 0), 0)
            # Newton step via least squares (H may be singular).
            delta = np.linalg.lstsq(- H, self.gla, rcond=max(H.shape)*np.finfo(H.dtype).eps)[0]
            deltala_r = delta[0:rownunique]
            deltala_c = delta[rownunique:rownunique+colnunique]
            deltamu_r = delta[rownunique+colnunique:2*rownunique+colnunique]
            deltamu_c = delta[2*rownunique+colnunique:2*rownunique+2*colnunique]
            # Backtracking line search; candidate steps are rejected when they
            # would push la_r + la_c to (or past) zero for a populated pair.
            fbest = 0;
            errorbest = self.errors[k];
            for f in np.logspace(lb,1,20):
                la_rtry=self.la_r+f*deltala_r
                la_ctry=self.la_c+f*deltala_c
                mu_rtry=self.mu_r+f*deltamu_r
                mu_ctry=self.mu_c+f*deltamu_c
                flag = True
                for ind1 in range(len(la_rtry)):
                    for ind2 in range(len(la_ctry)):
                        if la_rtry[ind1]+la_ctry[ind2]>-1e-15 and finalmat[ind1][ind2]>0.0001:
                            flag = False
                if flag:
                    Rtry = np.multiply(np.outer(np.exp(la_rtry/2), np.ones(colnunique).T),np.outer(np.ones(rownunique).T, np.exp(la_ctry/2)))
                    Stry = np.multiply(np.outer(np.exp(mu_rtry/2), np.ones(colnunique).T),np.outer(np.ones(rownunique).T, np.exp(mu_ctry/2)))
                    ps_latry = np.divide(np.multiply(Rtry,Stry), np.multiply(1-Rtry, 1-np.multiply(Rtry,1-Stry)))
                    ps_mutry = np.divide(np.multiply(Rtry,Stry), 1-np.multiply(Rtry,1-Stry))
                    gla_t_latry = np.multiply(ps_latry, finalmat)
                    gla_r_latry = np.sum(gla_t_latry, 1) - np.multiply(prowsunique[:,0], vrows)
                    gla_c_latry = np.sum(gla_t_latry, 0) - np.multiply(pcolsunique[:,0], vcols)
                    gla_t_mutry = np.multiply(ps_mutry, finalmat)
                    gla_r_mutry = np.sum(gla_t_mutry, 1) - np.multiply(prowsunique[:,1], vrows)
                    gla_c_mutry = np.sum(gla_t_mutry, 0) - np.multiply(pcolsunique[:,1], vcols)
                    glatry = np.append(np.append(gla_r_latry, gla_c_latry), np.append(gla_r_mutry, gla_c_mutry))
                    errortry = np.linalg.norm(glatry)
                    if errortry < errorbest:
                        fbest = f
                        errorbest = errortry
            # No improving step found: widen the search grid, or give up.
            if fbest == 0:
                if lb>-1000:
                    lb = lb*2
                else:
                    break
            self.la_r = self.la_r+fbest*deltala_r;
            self.la_c = self.la_c+fbest*deltala_c;
            self.mu_r = self.mu_r+fbest*deltamu_r;
            self.mu_c = self.mu_c+fbest*deltamu_c;
            if self.errors[k]/n < tol:
                break
        # Recompute final expectations and gradient with the converged multipliers.
        R = np.multiply(np.outer(np.exp(self.la_r/2), np.ones(colnunique).T),np.outer(np.ones(rownunique).T, np.exp(self.la_c/2)))
        S = np.multiply(np.outer(np.exp(self.mu_r/2), np.ones(colnunique).T),np.outer(np.ones(rownunique).T, np.exp(self.mu_c/2)))
        self.ps_la = np.divide(np.multiply(R,S), np.multiply(1-R, 1-np.multiply(R,1-S)))
        self.ps_mu = np.divide(np.multiply(R,S), 1-np.multiply(R,1-S))
        gla_t_la = np.multiply(self.ps_la, finalmat)
        gla_r_la = np.sum(gla_t_la, 1) - np.multiply(prowsunique[:,0], vrows)
        gla_c_la = np.sum(gla_t_la, 0) - np.multiply(pcolsunique[:,0], vcols)
        gla_t_mu = np.multiply(self.ps_mu, finalmat)
        gla_r_mu = np.sum(gla_t_mu, 1) - np.multiply(prowsunique[:,1], vrows)
        gla_c_mu = np.sum(gla_t_mu, 0) - np.multiply(pcolsunique[:,1], vcols)
        self.gla = np.append(np.append(gla_r_la, gla_c_la), np.append(gla_r_mu, gla_c_mu))
        self.errors = np.append(self.errors, np.linalg.norm(self.gla))
    # ------------------------------------------------------------------------------
    def explambda(self, i, j): #This is indeed explambdaR
        """R factor for the directed pair (i -> j); 0 for self-pairs."""
        if i==j:
            return 0
        R = math.exp(self.la_r[self.jrows[i]]/2)*math.exp(self.la_c[self.jcols[j]]/2)
        return R
    # ------------------------------------------------------------------------------
    def explambdaIncLprevS(self, i, j):
        """S factor for the directed pair (i -> j); 0 for self-pairs."""
        if i==j:
            return 0
        S = math.exp(self.mu_r[self.jrows[i]]/2)*math.exp(self.mu_c[self.jcols[j]]/2)
        return S
    # ------------------------------------------------------------------------------
    def returnExpectation(self, R, S):
        """Expected multiplicity of an edge given its R and S factors."""
        E = R*S/ ((1-R)*(1-R*(1-S)))
        return E
    # ------------------------------------------------------------------------------
    def getExpectation(self, i, j, **kwargs):
        """
        Expected number of edges i -> j under the current model.

        NOTE(review): self.explambdaS is not defined on this class in this file
        (only explambdaIncLprevS is); verify it is inherited from PDClass,
        otherwise this raises AttributeError at runtime.
        """
        kwargs['isSimple'] = False
        R = self.getPOS(i, j, **kwargs)
        S = self.explambdaS(i, j)
        E = self.returnExpectation(R, S)
        return E
    # ------------------------------------------------------------------------------
    def updateBackground(self, pat, idx=None, val_return='save'):#(self, pat, idx): #lprevUpdate = list() Each item is a tuple (a, b); a = lambda; b = listofnodes()
        """
        Find (by bisection) the lambda correction for the directed pattern *pat*
        so the expected edge count over its out-node x in-node pairs matches the
        actual edge count, and optionally record it under *idx*.

        NOTE(review): the undirected class names this method updateDistribution;
        verify which name callers expect.

        :param pat: networkx directed (sub)graph of the pattern
        :param idx: key under which to store the update when saving
        :param val_return: when it contains 'save', the update is persisted
        :return: the fitted lambda (rounded to 10 decimals)
        """
        # Initial bracket [a, b] from the extreme row/column la values.
        mSmallestLambda = np.min(np.array(list(set(self.la_r).union(set(self.la_c)))))
        mLargestLambda = np.max(np.array(list(set(self.la_r).union(set(self.la_c)))))
        epsilon = 1e-7
        if math.fabs(mSmallestLambda) > math.fabs(mLargestLambda):
            a = epsilon
            b = 4*math.fabs(mSmallestLambda)
        else:
            a = epsilon
            b = 4*math.fabs(mLargestLambda)
        # Nodes that actually receive (inNL) or emit (outNL) edges in the pattern.
        inL = dict(pat.in_degree())
        outL = dict(pat.out_degree())
        inNL = []
        outNL = []
        for k,v in inL.items():
            if v!=0:
                inNL.append(k)
        for k,v in outL.items():
            if v!=0:
                outNL.append(k)
        numInNodes = len(inNL)
        numOutNodes = len(outNL)
        numEdges = pat.number_of_edges()
        expLambdaR = [None]*numOutNodes
        expLambdaS = [None]*numOutNodes
        for i in range(numOutNodes):
            expLambdaR[i] = [0.0]*numInNodes
            expLambdaS[i] = [0.0]*numInNodes
        # Precompute pairwise R (including previous updates) and S factors;
        # shrink b so exp(b)*R stays below 1 for every non-loop pair.
        for i in range(numOutNodes):
            for j in range(numInNodes):
                expLambdaS[i][j] = self.explambdaS(outNL[i], inNL[j])
                expLambdaR[i][j] = self.explambdaIncLprev(outNL[i], inNL[j])
                if outNL[i]!=inNL[j]:
                    if expLambdaR[i][j]>0.0 and math.fabs(b) > math.fabs(math.log(expLambdaR[i][j])):
                        b = math.fabs(math.log(expLambdaR[i][j]))
                else:
                    expLambdaR[i][j] = 0
        b = b - epsilon
        # Bisection on f(lambda) = E[edges] - numEdges.
        while b-a > 1e-15:
            f_a = 0.0
            f_b = 0.0
            f_c = 0.0
            c = (a+b)/2
            for i in range(numOutNodes):
                for j in range(numInNodes):
                    try:
                        v_aR=expLambdaR[i][j]*math.exp(a)
                        v_bR=expLambdaR[i][j]*math.exp(b)
                        v_cR=expLambdaR[i][j]*math.exp(c)
                        f_a+=self.returnExpectation(v_aR, expLambdaS[i][j])
                        f_b+=self.returnExpectation(v_bR, expLambdaS[i][j])
                        f_c+=self.returnExpectation(v_cR, expLambdaS[i][j])
                    except OverflowError as error:
                        print(error,a,b)
            f_a=f_a-numEdges
            f_b=f_b-numEdges
            f_c=f_c-numEdges
            print('f_a:', f_a, '\t at a:', a)
            print('f_c:', f_c, '\t at c:', c)
            print('f_b:', f_b, '\t at b:', b,'\n')
            if f_c < 0:
                a = c
            else:
                b = c
        lambdac = round((a + b) / 2, 10)
        if 'save' in val_return:
            self.lprevUpdate[idx] = tuple([lambdac, inNL, outNL, numEdges])
        # Residual check with the fitted lambda (diagnostic only).
        f_c = 0.0
        for i in range(numOutNodes):
            for j in range(numInNodes):
                v_cR=expLambdaR[i][j]*math.exp(lambdac)
                f_c+=self.returnExpectation(v_cR, expLambdaS[i][j])
        f_c = f_c-numEdges
        # print('Final lamdba: ',lambdac, f_c, numEdges)
        return lambdac
###################################################################################################################################################################
###################################################################################################################################################################
###################################################################################################################################################################
################################################################################################################################################################### |
from django.contrib.auth.hashers import make_password
from django.db import IntegrityError
from django.shortcuts import get_object_or_404
from django.contrib.auth import login, logout
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework.views import APIView
from rest_framework.viewsets import ModelViewSet
from rest_framework.response import Response
from common.pagination import XadminPageLimitPagination
from common.response import JsonResponse
from ..filters import UserFilter
from ..models import User, Role
from ..serializer import UserSerializer
from common.exceptions import CodeError
class LogoutApiView(APIView):
    """Terminate the current session via a plain GET request."""

    def get(self, request, **kwargs):
        # Invalidate the Django session, then answer in the common envelope.
        logout(request)
        payload = {"code": 0, "msg": "Success !"}
        return JsonResponse(payload)
class UserApiView(ModelViewSet):
    """CRUD endpoints for User accounts.

    ``create`` supports a bulk mode: when ``many == "true"``, ``username`` and
    ``realname`` are '|'-separated lists of equal length and one account is
    created per pair (all sharing the same password/phone/email).
    """

    queryset = User.objects.order_by("last_login").all()
    serializer_class = UserSerializer
    filter_backends = (DjangoFilterBackend,)
    filter_class = UserFilter
    pagination_class = XadminPageLimitPagination
    lookup_url_kwarg = "id"

    def create(self, request, *args, **kwargs):
        """Create one user, or many users when ``many == "true"``.

        Returns a JsonResponse with code 0 on success, code 1 on failure.
        """
        code = 0
        msg = None
        username = request.data.get("username", None)
        realname = request.data.get("realname", None)
        many = request.data.get("many", 'false')
        password = request.data.get("password", None)
        phone = request.data.get("phone", None)
        email = request.data.get("email", None)
        remark = request.data.get("remark", None)
        print(request.data)
        if many == "true":
            msg_list = []
            usernames = username.split("|")
            realnames = realname.split("|")
            if len(usernames) == len(realnames):
                for i in range(len(usernames)):
                    user = User()
                    try:
                        user.username = usernames[i]
                        user.realname = realnames[i]
                        # BUGFIX: original had a broken '<PASSWORD>' placeholder;
                        # store the salted hash, never the raw password.
                        user.password = make_password(password)
                        user.email = email
                        user.phone = phone
                        user.save()
                    except IntegrityError as e:
                        msg_list.append("用户" + realnames[i] + "创建失败 " + str(e))
                    else:
                        msg = "创建成功 ! ," + "|".join(msg_list)
                return JsonResponse(code=code, msg=msg)
            else:
                return JsonResponse(code=1, msg="账户名与姓名不匹配!")
        else:
            user = User()
            try:
                user.username = username
                user.realname = realname
                # BUGFIX: original line was syntactically broken ('<PASSWORD>)');
                # hash the password before persisting.
                user.password = make_password(password)
                user.email = email
                user.phone = phone
                user.info = remark
                user.save()
            except IntegrityError as e:
                code = 1
                msg = str(e)
            return JsonResponse(code=code, msg=msg)

    def update(self, request, *args, **kwargs):
        """Update basic profile fields of the user identified by URL kwarg ``id``."""
        code = 0
        msg = "修改成功"
        id = self.kwargs.get("id", None)
        user = User.objects.filter(id=id).first()
        username = request.data.get("username", None)
        realname = request.data.get("realname", None)
        phone = request.data.get("phone", None)
        email = request.data.get("email", None)
        remark = request.data.get("remark", None)
        if user:
            try:
                user.username = username
                user.realname = realname
                user.email = email
                user.phone = phone
                user.info = remark
                user.save()
            except IntegrityError as e:
                code = 1
                msg = str(e)
            return JsonResponse(code=code, msg=msg)
        # No matching user was found.
        msg = "未找到!"
        return JsonResponse(code=1, msg=msg)

    def delete(self, request, **kwargs):
        """Bulk-delete users by id list; the requesting account is never deleted."""
        print(request.data)
        if request.data:
            id_list = request.data.get("ids", [])
            for id in id_list:
                user = User.objects.filter(id=id).only("username").first()
                if user and user.username != request.user.username:
                    # 删除的用户不能是当前正在使用的用户
                    user.delete()
        return Response({"message": "Success"})
class UserChaneRole(APIView):
    """Read and assign the role set of a single user (URL kwarg ``key``)."""

    def get(self, request, **kwargs):
        # All available roles plus the subset currently held by this user.
        user = get_object_or_404(User, id=self.kwargs.get("key"))
        roles = [{"value": role.id, "title": role.title}
                 for role in Role.objects.all()]
        selected = [role.id for role in user.roles.all()]
        return Response({"roles": roles, "selected": selected})

    def put(self, request, **kwargs):
        # Replace the user's role set with the submitted id list.
        user = get_object_or_404(User, id=self.kwargs.get("key"))
        selected = request.data.get("selected")
        try:
            user.roles.set(selected)
        except Exception as e:
            raise CodeError("修改失败:" + str(e))
        return Response({"message": "success!"})
|
class WTAN(Bayes_net_PU):
    """Weighted Tree-Augmented Naive Bayes (TAN) for positive-unlabeled learning.

    Unlabeled samples receive soft weights w(x) derived from a fitted
    p(s=1|x) model; those weights replace hard counts when estimating the
    TAN conditional-probability tables.
    """
    name = "WTAN"
    def __init__(self,alpha = 1,starting_node = 0):
        # alpha: Laplace-smoothing pseudo-count.
        # starting_node: index of the feature used as the root of the tree.
        self.alpha = alpha
        self.starting_node = starting_node
    def Findparent(self, M):
        """Build a maximum-weight spanning tree (Prim-style) over the feature
        graph M and return it as a child -> parent dict (root maps to None)."""
        M = M.copy() # to avoid change global M
        np.fill_diagonal(M,0)
        p = int(M.shape[0])
        V = range(p) # set of all nodes
        st = self.starting_node
        Vnew = [st] # vertices that already found their parent; initiated with the starting node (TAN picks one arbitrarily)
        parent = {st:None} # use a dict to show nodes' interdependency
        while set(Vnew) != set(V): # while there are still nodes whose parents are unknown
            index_i = [] # after the loop, same length as Vnew: closest node not in Vnew for each member of Vnew
            max_inf = [] # corresponding distance
            for i in range(len(Vnew)): # can be parallelized
                vnew = Vnew[i]
                ListToSorted = [e for e in M[:,vnew]] # does not need int(e)
                index = sorted(range(len(ListToSorted)),key = lambda k: ListToSorted[k],reverse = True)
                index_i.append([ele for ele in index if ele not in Vnew][0])
                max_inf.append(M[index_i[-1],vnew])
            index1 = sorted(range(len(max_inf)),key = lambda k: max_inf[k],reverse = True)[0] ## relative position: Vnew[v1,v2] index_i[v4,v5] max_inf[s1,s2]; index1 is the position in those 3 lists
            Vnew.append(index_i[index1]) # add in that node
            parent[index_i[index1]] = Vnew[index1] # add direction; the newly added node must be the child, otherwise some node would have 2 parents, which is wrong
        return parent
    def fit(self,X_L, X_u, pri, M, case_control = True, model_class = LogisticRegression, **kwargs):
        """ Implementation of a fitting function.
        Get fitted model that predict p(s=1|x), not related to sampling scenario
        Parameters
        ----------
        X_L : {array-like, sparse matrix}, shape (n_samples, n_features)
            The training input positive labeled samples.
        X_u : {array-like, sparse matrix}, shape (n_samples, n_features)
            The training input unlabeled samples.
        pri : scalar
            The prevalence p(y=1)
        M : np.matrix, shape (n_features, n_features)
            contact matrix
        case_control : Bool
            Case control scenario or single-training data scenario
        model_class : a sklearn estimator, preferred logistic regression
            since it gives calibrated proba, predict p(s=1|x)
        **kwargs :
            extra parameters for model_class
        Returns self
        -------
        self
        """
        X_L = check_array(X_L)
        X_u = check_array(X_u)
        if X_L.shape[1] != X_u.shape[1]:
            raise ValueError('labeled data and unlabeled data have different number of features ')
        n_L,p = X_L.shape
        # parent
        parent = self.Findparent(M)
        # fit model g(x) = p(s=1|x) on labeled-vs-unlabeled as a proxy target
        X = np.concatenate((X_L,X_u), axis = 0)
        enc = preprocessing.OneHotEncoder(drop='first').fit(X)
        X = enc.transform(X).toarray()
        # X = pd.DataFrame(X).astype('category') # convert to categorical, for logistic regression to work
        y = np.concatenate( (np.repeat('1',X_L.shape[0] ), np.repeat('0',X_u.shape[0]) ),axis = 0)
        #
        model = model_class(**kwargs)
        model.fit(X,y)
        # estimate p(s=1)
        p_s_1 = X_L.shape[0]/(X_L.shape[0]+X_u.shape[0])
        # estimate c (label frequency), formula depends on the sampling scenario
        if case_control:
            c = p_s_1/(pri*(1-p_s_1) + p_s_1)
        else:
            c = p_s_1/pri
        # estimate w(x) for the unlabeled pool
        inx = list(model.classes_ ).index('1')
        g_U = model.predict_proba( X[n_L:] )[:,inx] # let us assume it is already calibrated ,it that already calibrated?
        w_U = ((1-c)/c) * (g_U/(1-g_U)) # maybe need to normalize
        w_U = w_U - min(w_U) # make non-negative
        w_U = w_U / max(w_U) # 0-1
        # learning the coef_, p(xij|1,xpal), p(xij|0,xpal)
        # extreme case: w_U correctly weight positive 1 and negative 0 in U, originally p(xij|1) = N_L(xij)/N_L,
        # List_count_1 = {}
        List_prob_1 = {} #
        #
        List_prob_0 = {} # P(xi = j|c=0)
        # for root node: weighted counts with Laplace smoothing (alpha)
        root_i = self.starting_node
        x_i_L_counter = Counter(X_L[:,root_i])
        x_i_values = list(set(X_L[:,root_i]).union(set(X_u[:,root_i])))
        X_i_U_1_counter = {val: w_U[X_u[:,root_i] == val].sum() for val in x_i_values}
        X_i_U_0_counter = {val: (1-w_U)[X_u[:,root_i] == val].sum() for val in x_i_values}
        # part 1, p(xi = j|1) = (N_L(xij) + sum_U_xij(w_U))/( n_L + sum(w_U))
        List_prob_1[root_i] = {key: (self.alpha + x_i_L_counter[key] + X_i_U_1_counter[key]) / (n_L + w_U.sum() + self.alpha*len(x_i_values) ) for key in x_i_values}
        # part 2, p(xi = j|0) from the complementary weights (1 - w_U)
        List_prob_0[root_i] = {key: ( self.alpha + X_i_U_0_counter[key])/ ((1-w_U).sum() + self.alpha*len(x_i_values) ) for key in x_i_values}
        # for other nodes: conditional tables given the tree parent's value
        for i in [e for e in range(0,p) if e != root_i]:
            x_i_values = list(set(X_L[:,i]).union(X_u[:,i]))
            x_i_parent_Value = list(set(X_L[:,parent[i]]).union(X_u[:,parent[i] ] ) )
            # part 1, p(xij|1,xkl)
            List_prob_1[i] = {v2: {v1: (self.alpha + X_L[(X_L[:,i] == v1) & (X_L[:,parent[i]] == v2)].shape[0] + w_U[(X_u[:,i] == v1) & (X_u[:,parent[i]] == v2)].sum() ) /
                                   ( X_L[(X_L[:,parent[i]] == v2)].shape[0] + w_U[(X_u[:,parent[i]] == v2)].sum()+ self.alpha*len(x_i_values) )
                                   for v1 in x_i_values} for v2 in x_i_parent_Value}
            # part 2 , p(xij|0,xkl)
            List_prob_0[i] = {v2: {v1: (self.alpha + (1-w_U)[(X_u[:,i] == v1) & (X_u[:,parent[i]] == v2)].sum() ) /
                                   ( (1-w_U)[(X_u[:,parent[i]] == v2)].sum() + self.alpha*len(x_i_values) )
                                   for v1 in x_i_values} for v2 in x_i_parent_Value}
        self.case_control_ = case_control
        self.is_fitted_ = True
        self.parent_ = parent
        self.case_control_ = case_control  # NOTE(review): duplicate assignment; harmless
        self.List_prob_1_, self.List_prob_0_, self.c_, self.n_features_, self.w_U_, self.prevalence_ = List_prob_1, List_prob_0, c, p, w_U, pri
        return self
    def predict_proba(self,X):
        """
        Return probability estimates for the test vector X. Usually it would be X_unlabeled
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
        Returns
        -------
        P(y=1|x) : array-like of shape (n_samples, )
            Returns the probability of the samples for positive class in
            the model.
        """
        check_is_fitted(self)
        X = check_array(X)
        Prob_1 = []
        root_i = self.starting_node
        for ins in X:
            # Chain-rule product over the tree: prior * root CPT * per-node CPTs
            P1 = self.prevalence_ # don't need copy, immutable
            P0 = 1 - P1
            # root_i
            P1 = P1 * (self.List_prob_1_[root_i][ins[root_i]])
            P0 = P0 * (self.List_prob_0_[root_i][ins[root_i]])
            for i in [e for e in range(0,self.n_features_) if e != root_i]:
                pValue = ins[self.parent_[i]]
                P1 = P1 * (self.List_prob_1_[i][pValue][ins[i]])
                P0 = P0 * (self.List_prob_0_[i][pValue][ins[i]])
            # normalize proba
            P = P1 + P0
            P1 = P1/P; P0 = P0/P
            Prob_1.append(P1)
        #
        Prob_1 = np.array(Prob_1) # for shap
        return Prob_1
|
#
#----------------------------------------------------------------------
# Copyright 2007-2011 Mentor Graphics Corporation
# Copyright 2007-2010 Cadence Design Systems, Inc.
# Copyright 2010-2013 Synopsys, Inc.
# Copyright 2013 NVIDIA Corporation
# Copyright 2013 Cisco Systems, Inc.
# Copyright 2019 <NAME>
#
# All Rights Reserved Worldwide
#
# Licensed under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in
# writing, software distributed under the License is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See
# the License for the specific language governing
# permissions and limitations under the License.
#
# python-uvm NOTE: All code ported from SystemVerilog UVM 1.2 to
# python. Original code structures (including comments)
# preserved where possible.
#----------------------------------------------------------------------
"""
Section: Phasing Definition classes
The following class are used to specify a phase and its implied functionality.
"""
from typing import Dict, List
import cocotb
from cocotb.triggers import Event, Combine
from ..macros.uvm_object_defines import uvm_object_utils
from ..macros.uvm_message_defines import uvm_fatal, uvm_info
from ..macros.uvm_callback_defines import uvm_do_callbacks
from .uvm_callback import UVMCallback
from .uvm_cmdline_processor import UVMCmdlineProcessor
from .uvm_debug import uvm_debug
from .uvm_globals import (get_cs, uvm_report_error, uvm_report_info,
uvm_wait_for_nba_region, uvm_zero_delay)
from .uvm_mailbox import UVMMailbox
from .uvm_object import UVMObject
from .uvm_object_globals import (UVM_ALL_DROPPED, UVM_DEBUG, UVM_EQ, UVM_GT, UVM_GTE, UVM_HIGH,
UVM_LOW, UVM_LT, UVM_LTE, UVM_MEDIUM, UVM_NE, UVM_PHASE2STR,
UVM_PHASE_CLEANUP, UVM_PHASE_DOMAIN, UVM_PHASE_DONE,
UVM_PHASE_DORMANT, UVM_PHASE_ENDED, UVM_PHASE_EXECUTING,
UVM_PHASE_IMP, UVM_PHASE_JUMPING, UVM_PHASE_NODE,
UVM_PHASE_READY_TO_END, UVM_PHASE_SCHEDULE, UVM_PHASE_SCHEDULED,
UVM_PHASE_STARTED, UVM_PHASE_SYNCING, UVM_PHASE_TERMINAL,
UVM_PHASE_UNINITIALIZED)
from .uvm_objection import UVMObjection
from .sv import sv
def UVM_PH_TRACE(ID,MSG,PH,VERB):
    """Emit a phase-trace info message for phase PH.

    Note: VERB is accepted for API compatibility but the message is always
    reported at UVM_LOW, as in the original.
    """
    prefix = sv.sformatf("Phase '%0s' (id=%0d) ", PH.get_full_name(),
                         PH.get_inst_id())
    uvm_info(ID, prefix + MSG, UVM_LOW)
def ph2str(state) -> str:
    """Map a phase-state constant to its printable name; None -> "<State: NONE>"."""
    if state is None:
        return "<State: NONE>"
    return UVM_PHASE2STR[state]
class UVMPhaseStateChange(UVMObject):
    """
    Class: UVMPhaseStateChange

    Phase state transition descriptor.

    Describes the phase transition that caused a
    `UVMPhaseCb.phase_state_changed()` callback to be invoked.
    """

    def __init__(self, name="uvm_phase_state_change"):
        UVMObject.__init__(self, name)
        # Implementation -- do not use directly
        self.m_phase = None       # phase whose state changed
        self.m_prev_state = None  # state before the transition
        self.m_jump_to = None     # jump target when caused by a phase jump

    def get_state(self):
        """Return the state the phase just transitioned to
        (equivalent to <uvm_phase::get_state()>)."""
        return self.m_phase.get_state()

    def get_prev_state(self):
        """Return the state the phase just transitioned from."""
        return self.m_prev_state

    def jump_to(self):
        """Return the jump-target phase when the transition was caused by a
        phase jump (state UVM_PHASE_ENDED or UVM_PHASE_JUMPING); None otherwise."""
        return self.m_jump_to


uvm_object_utils(UVMPhaseStateChange)
class UVMPhase(UVMObject):
"""
Class: UVMPhase
This base class defines everything about a phase: behavior, state, and context.
To define behavior, it is extended by UVM or the user to create singleton
objects which capture the definition of what the phase does and how it does it.
These are then cloned to produce multiple nodes which are hooked up in a graph
structure to provide context: which phases follow which, and to hold the state
of the phase throughout its lifetime.
UVM provides default extensions of this class for the standard runtime phases.
VIP Providers can likewise extend this class to define the phase functor for a
particular component context as required.
This base class defines everything about a phase: behavior, state, and context.
To define behavior, it is extended by UVM or the user to create singleton
objects which capture the definition of what the phase does and how it does it.
These are then cloned to produce multiple nodes which are hooked up in a graph
structure to provide context: which phases follow which, and to hold the state
of the phase throughout its lifetime.
UVM provides default extensions of this class for the standard runtime phases.
VIP Providers can likewise extend this class to define the phase functor for a
particular component context as required.
*Phase Definition*
Singleton instances of those extensions are provided as package variables.
These instances define the attributes of the phase (not what state it is in)
They are then cloned into schedule nodes which point back to one of these
implementations, and calls its virtual task or function methods on each
participating component.
It is the base class for phase functors, for both predefined and
user-defined phases. Per-component overrides can use a customized imp.
To create custom phases, do not extend uvm_phase directly: see the
three predefined extended classes below which encapsulate behavior for
different phase types: task, bottom-up function and top-down function.
Extend the appropriate one of these to create a uvm_YOURNAME_phase class
(or YOURPREFIX_NAME_phase class) for each phase, containing the default
implementation of the new phase, which must be a uvm_component-compatible
delegate, and which may be a ~null~ implementation. Instantiate a singleton
instance of that class for your code to use when a phase handle is required.
If your custom phase depends on methods that are not in uvm_component, but
are within an extended class, then extend the base YOURPREFIX_NAME_phase
class with parameterized component class context as required, to create a
specialized functor which calls your extended component class methods.
This scheme ensures compile-safety for your extended component classes while
providing homogeneous base types for APIs and underlying data structures.
*Phase Context*
A schedule is a coherent group of one or mode phase/state nodes linked
together by a graph structure, allowing arbitrary linear/parallel
relationships to be specified, and executed by stepping through them in
the graph order.
Each schedule node points to a phase and holds the execution state of that
phase, and has optional links to other nodes for synchronization.
The main operations are: construct, add phases, and instantiate
hierarchically within another schedule.
Structure is a DAG (Directed Acyclic Graph). Each instance is a node
connected to others to form the graph. Hierarchy is overlaid with m_parent.
Each node in the graph has zero or more successors, and zero or more
predecessors. No nodes are completely isolated from others. Exactly
one node has zero predecessors. This is the root node. Also the graph
is acyclic, meaning for all nodes in the graph, by following the forward
arrows you will never end up back where you started but you will eventually
reach a node that has no successors.
*Phase State*
A given phase may appear multiple times in the complete phase graph, due
to the multiple independent domain feature, and the ability for different
VIP to customize their own phase schedules perhaps reusing existing phases.
Each node instance in the graph maintains its own state of execution.
*Phase Handle*
Handles of this type uvm_phase are used frequently in the API, both by
the user, to access phasing-specific API, and also as a parameter to some
APIs. In many cases, the singleton phase handles can be
used (eg. <uvm_run_phase::get()>) in APIs. For those APIs that need to look
up that phase in the graph, this is done automatically.
"""
m_phase_trace = False
m_use_ovm_run_semantic = False
m_phase_hopper = UVMMailbox()
m_executing_phases: Dict['UVMPhase', bool] = {} # UVMPhase -> bool
#--------------------
# Group: Construction
#--------------------
# Function: new
#
# Create a new phase node, with a name and a note of its type
# name - name of this phase
# type - a value in <uvm_phase_type>
#
    def __init__(self, name="uvm_phase", phase_type=UVM_PHASE_SCHEDULE, parent=None):
        """
        Create a new phase node, with a name and a note of its type.

        Args:
            name: name of this phase
            phase_type: a value in <uvm_phase_type>
            parent: enclosing schedule/domain node, if any
        """
        UVMObject.__init__(self, name)
        self.m_phase_type = phase_type
        self.m_state = UVM_PHASE_UNINITIALIZED
        self.m_ready_to_end_count = 0
        self.max_ready_to_end_iter = 20
        self.m_successors: Dict['UVMPhase', bool] = {}  # UVMPhase -> bit
        self.m_predecessors: Dict['UVMPhase', bool] = {}  # UVMPhase -> bit
        self.m_end_node = None
        self.m_sync: List['UVMPhase'] = []  # UVMPhase
        self.m_imp = None  # UVMPhase to call when we execute this node
        # Events other parts of the phasing machinery wait on
        self.m_phase_done_event = Event(name + '_phase_done_event')
        self.m_phase_synced_event = Event(name + '_phase_synced')
        self.m_phase_set_state_event = Event(name + '_set_state_event')
        self.phase_done = None  # uvm_objection
        self.m_phase_proc = None  # TODO process
        self.m_num_procs_not_yet_returned = 0
        self.m_is_task_phase = False
        # Implementation - Jumping
        self.m_jump_bkwd = False
        self.m_jump_fwd = False
        self.m_jump_phase = None
        self.m_premature_end = False
        # The common domain is the only thing that initializes self.m_state. All
        # other states are initialized by being 'added' to a schedule.
        if (name == "common") and (phase_type == UVM_PHASE_DOMAIN):
            self.set_state(UVM_PHASE_DORMANT)
        self.m_run_count = 0
        self.m_parent = parent
        # Command-line plusargs toggle phase tracing / OVM run semantics globally
        clp = UVMCmdlineProcessor.get_inst()
        val = []
        if clp.get_arg_value("+UVM_PHASE_TRACE", val):
            UVMPhase.m_phase_trace = 1
        else:
            UVMPhase.m_phase_trace = 0
        val = []
        if clp.get_arg_value("+UVM_USE_OVM_RUN_SEMANTIC", val):
            UVMPhase.m_use_ovm_run_semantic = 1
        else:
            UVMPhase.m_use_ovm_run_semantic = 0
        # A standalone schedule/domain gets an implicit terminal end node
        if parent is None and (phase_type == UVM_PHASE_SCHEDULE or
                phase_type == UVM_PHASE_DOMAIN):
            #self.m_parent = self
            self.m_end_node = UVMPhase(name + ' end', UVM_PHASE_TERMINAL, self)
            self.m_successors[self.m_end_node] = True
            self.m_end_node.m_predecessors[self] = True
# tpoikela: Used instead of $cast() to check phase type
def is_task_phase(self):
"""
Returns True if the given phase is task (async) phase.
"""
return self.m_is_task_phase
# Function: get_phase_type
# Returns the phase type as defined by <uvm_phase_type>
def get_phase_type(self):
return self.m_phase_type
def get_phase_done_event(self):
return self.m_phase_done_event
def get_phase_synced_event(self):
return self.m_phase_synced_event
def set_state(self, state):
if state is None:
raise Exception('Proper state not given. Must be ' + str(UVM_PHASE2STR))
uvm_debug(self, 'set_state', (self.get_name() + ': ' +
ph2str(self.m_state) + ' => ' + ph2str(state)))
self.m_state = state
self.m_phase_set_state_event.set()
if self.m_state == UVM_PHASE_DONE:
self.m_phase_done_event.set()
if self.m_state >= UVM_PHASE_SYNCING:
self.m_phase_synced_event.set()
# //-------------
# // Group: State
# //-------------
#
# Function: get_state
# Accessor to return current state of this phase
def get_state(self):
return self.m_state
# // Function: get_run_count
# //
# // Accessor to return the integer number of times this phase has executed
# //
def get_run_count(self):
return self.m_run_count
# // Function: find_by_name
# //
# // Locate a phase node with the specified ~name~ and return its handle.
# // With ~stay_in_scope~ set, searches only within this phase's schedule or
# // domain.
# //
# extern function uvm_phase find_by_name(string name, bit stay_in_scope=1)
def find_by_name(self, name: str, stay_in_scope=1):
# TODO: full search
if self.get_name() == name:
return self
find_by_name = self.m_find_predecessor_by_name(name,stay_in_scope,self)
if find_by_name is None:
find_by_name = self.m_find_successor_by_name(name,stay_in_scope,self)
return find_by_name
# Function: find
#
# Locate the phase node with the specified ~phase~ IMP and return its handle.
# With ~stay_in_scope~ set, searches only within this phase's schedule or
# domain.
#
def find(self, phase, stay_in_scope=True):
uvm_debug(self, "find()", "called with self as {}, phase {}".format(self, phase))
if phase is None:
raise Exception('UVMPhase.find(): Phase is None')
# TBD full search
# in_scope = stay_in_scope ? " staying within scope" : ""
# print(("\nFIND node '" + phase.get_name() +"' within " + get_name()
#+ " (scope " + self.m_phase_type.name() +")" + in_scope))
found = None
if phase == self.m_imp or phase == self:
return phase
found = self.m_find_predecessor(phase, stay_in_scope, self)
if found is None:
found = self.m_find_successor(phase, stay_in_scope, self)
return found
# Function: is
#
# returns 1 if the containing uvm_phase refers to the same phase
# as the phase argument, 0 otherwise
#
# extern function bit is(uvm_phase phase)
def _is(self, phase):
return (self.m_imp == phase or self == phase)
# Function: is_before
#
# Returns 1 if the containing uvm_phase refers to a phase that is earlier
# than the phase argument, 0 otherwise
#
def is_before(self, phase):
# $display("this=%s is before phase=%s?",get_name(),phase.get_name())
# TODO: add support for 'stay_in_scope=1' functionality
return not self._is(phase) and self.m_find_successor(phase,0,self) is not None
# Function: is_after
#
# returns 1 if the containing uvm_phase refers to a phase that is later
# than the phase argument, 0 otherwise
#
def is_after(self, phase):
# //$display("this=%s is after phase=%s?",get_name(),phase.get_name())
# // TODO: add support for 'stay_in_scope=1' functionality
return not self._is(phase) and self.m_find_predecessor(phase,0,self) is not None
#-----------------
# Group: Callbacks
#-----------------
#
# Function: exec_func
#
# Implements the functor/delegate functionality for a function phase type
# comp - the component to execute the functionality upon
# phase - the phase schedule that originated this phase call
#
def exec_func(self, comp, phase):
raise Exception('virtual function')
# Function: exec_task
#
# Implements the functor/delegate functionality for a task phase type
# comp - the component to execute the functionality upon
# phase - the phase schedule that originated this phase call
#
# virtual task exec_task(uvm_component comp, uvm_phase phase); endtask
#----------------
# Group: Schedule
#----------------
#
# Function: add
#
# Build up a schedule structure inserting phase by phase, specifying linkage
#
# Phases can be added anywhere, in series or parallel with existing nodes
#
# phase - handle of singleton derived imp containing actual functor.
# by default the new phase is appended to the schedule
# with_phase - specify to add the new phase in parallel with this one
# after_phase - specify to add the new phase as successor to this one
# before_phase - specify to add the new phase as predecessor to this one
#
def add(self, phase, with_phase=None, after_phase=None, before_phase=None):
new_node = None
begin_node = None
end_node = None
tmp_node = None
state_chg = None
if phase is None:
uvm_fatal("PH/NULL", "add: phase argument is null")
if with_phase is not None and with_phase.get_phase_type() == UVM_PHASE_IMP:
nm = with_phase.get_name()
with_phase = self.find(with_phase)
if with_phase is None:
uvm_fatal("PH_BAD_ADD", ("cannot find with_phase '" + nm
+ "' within node '" + self.get_name() + "'"))
if before_phase is not None and before_phase.get_phase_type() == UVM_PHASE_IMP:
nm = before_phase.get_name()
before_phase = self.find(before_phase)
if before_phase is None:
uvm_fatal("PH_BAD_ADD", ("cannot find before_phase '" + nm
+ "' within node '" + self.get_name() + "'"))
if after_phase is not None and after_phase.get_phase_type() == UVM_PHASE_IMP:
nm = after_phase.get_name()
after_phase = self.find(after_phase)
if after_phase is None:
uvm_fatal("PH_BAD_ADD",("cannot find after_phase '" + nm
+ "' within node '" + self.get_name() + "'"))
if with_phase is not None and (after_phase is not None or before_phase
is not None):
uvm_fatal("PH_BAD_ADD",
"cannot specify both 'with' and 'before/after' phase relationships")
if before_phase == self or after_phase == self.m_end_node or with_phase == self.m_end_node:
uvm_fatal("PH_BAD_ADD",
"cannot add before begin node, after end node, or with end nodes")
# If we are inserting a new "leaf node"
if phase.get_phase_type() == UVM_PHASE_IMP:
uvm_debug(self, 'add', 'ph_type == UVM_PHASE_IMP ph_name: ' +
phase.get_name())
new_node = UVMPhase(phase.get_name(),UVM_PHASE_NODE,self)
new_node.m_imp = phase
begin_node = new_node
end_node = new_node
# The phase_done objection is only required
# for task-based nodes
#if ($cast(tp, phase)) begin
if phase.is_task_phase():
#if new_node.get_name() == "run":
# DEPRECATED
# new_node.phase_done = uvm_test_done_objection.get()
#else: # Other task based phase
uvm_debug(self, 'add', ("Adding objection to phase " +
phase.get_name()))
new_node.phase_done = UVMObjection(phase.get_name() + "_objection")
else:
uvm_debug(self, 'add', (phase.get_name() +
" is not task-based phase, so no objections"))
else: # We are inserting an existing schedule
uvm_debug(self, "add", "We are inserting an existing schedule")
begin_node = phase
end_node = phase.m_end_node
phase.m_parent = self
# If no before/after/with specified, insert at end of this schedule
if (with_phase is None) and (after_phase is None) and (before_phase is
None):
uvm_debug(self, 'add', 'All phases null, setting before phase')
before_phase = self.m_end_node
if UVMPhase.m_phase_trace:
typ = phase.get_phase_type()
uvm_report_info("PH/TRC/ADD_PH", (self.get_name() + " (" + self.m_phase_type.name()
+ ") ADD_PHASE: phase=" + phase.get_full_name() + " ("
+ typ.name() + ", inst_id=" + "{}".format(phase.get_inst_id())
+ ")"), UVM_DEBUG)
#" with_phase=", (with_phase == null) ? "null" : with_phase.get_name(),
#" after_phase=", (after_phase == null) ? "null" : after_phase.get_name(),
#" before_phase=", (before_phase == null) ? "null" : before_phase.get_name(),
#" new_node=", (new_node == null) ? "null" : {new_node.get_name(),
# " inst_id=",
# $sformatf("%0d",new_node.get_inst_id())},
#" begin_node=", (begin_node == null) ? "null" : begin_node.get_name(),
#" end_node=", (end_node == null) ? "null" : end_node.get_name()},UVM_DEBUG)
# INSERT IN PARALLEL WITH 'WITH' PHASE
if with_phase is not None:
begin_node.m_predecessors = with_phase.m_predecessors
end_node.m_successors = with_phase.m_successors
for pred in with_phase.m_predecessors:
pred.m_successors[begin_node] = 1
for succ in with_phase.m_successors:
succ.m_predecessors[end_node] = 1
# INSERT BEFORE PHASE
elif before_phase is not None and after_phase is None:
begin_node.m_predecessors = before_phase.m_predecessors
end_node.m_successors[before_phase] = 1
for pred in before_phase.m_predecessors:
del pred.m_successors[before_phase]
pred.m_successors[begin_node] = 1
before_phase.m_predecessors = {}
before_phase.m_predecessors[end_node] = 1
# INSERT AFTER PHASE
elif before_phase is None and after_phase is not None:
end_node.m_successors = after_phase.m_successors
begin_node.m_predecessors[after_phase] = 1
for succ in after_phase.m_successors:
del succ.m_predecessors[after_phase]
succ.m_predecessors[end_node] = 1
after_phase.m_successors = {}
after_phase.m_successors[begin_node] = 1
# IN BETWEEN 'BEFORE' and 'AFTER' PHASES
elif before_phase is not None and after_phase is not None:
if not after_phase.is_before(before_phase):
uvm_fatal("PH_ADD_PHASE", ("Phase '" + before_phase.get_name()
+ "' is not before phase '" + after_phase.get_name() + "'"))
# before and after? add 1 pred and 1 succ
begin_node.m_predecessors[after_phase] = 1
end_node.m_successors[before_phase] = 1
after_phase.m_successors[begin_node] = 1
before_phase.m_predecessors[end_node] = 1
if before_phase in after_phase.m_successors:
del after_phase.m_successors[before_phase]
del before_phase.m_successors[after_phase]
# Transition nodes to DORMANT state
if new_node is None:
tmp_node = phase
else:
tmp_node = new_node
uvm_debug(self, "add", "GOT here. tmp_node is: " + tmp_node.convert2string())
state_chg = UVMPhaseStateChange.type_id.create(tmp_node.get_name())
state_chg.m_phase = tmp_node
state_chg.m_jump_to = None
state_chg.m_prev_state = tmp_node.m_state
tmp_node.m_state = UVM_PHASE_DORMANT
uvm_do_callbacks(self, UVMPhaseCb, 'phase_state_change', tmp_node, state_chg)
# // Function: get_parent
# //
# // Returns the parent schedule node, if any, for hierarchical graph traversal
# //
def get_parent(self):
return self.m_parent
# Function: get_full_name
# Returns the full path from the enclosing domain down to this node.
# The singleton IMP phases have no hierarchy.
#
def get_full_name(self):
if self.m_phase_type == UVM_PHASE_IMP:
return self.get_name()
get_full_name = self.get_domain_name()
sch = self.get_schedule_name()
if sch != "":
get_full_name = get_full_name + "." + sch
if self.m_phase_type != UVM_PHASE_DOMAIN and self.m_phase_type != UVM_PHASE_SCHEDULE:
get_full_name = get_full_name + "." + self.get_name()
return get_full_name
# Function: get_schedule
#
# Returns the topmost parent schedule node, if any, for hierarchical graph traversal
#
def get_schedule(self, hier=False):
sched = self
if hier is True:
while (sched.m_parent is not None and (sched.m_parent.get_phase_type()
== UVM_PHASE_SCHEDULE)):
sched = sched.m_parent
if sched.m_phase_type == UVM_PHASE_SCHEDULE:
return sched
if sched.m_phase_type == UVM_PHASE_NODE:
if (self.m_parent is not None and self.m_parent.m_phase_type !=
UVM_PHASE_DOMAIN):
return self.m_parent
return None
# // Function: get_schedule_name
# //
# // Returns the schedule name associated with this phase node
# //
#
def get_schedule_name(self, hier=False):
#uvm_phase sched
s = ""
sched = self.get_schedule(hier)
if sched is None:
return ""
s = sched.get_name()
while (sched.m_parent is not None and sched.m_parent != sched and
(sched.m_parent.get_phase_type() == UVM_PHASE_SCHEDULE)):
sched = sched.m_parent
sep = ""
if len(s) > 0:
sep = "."
s = sched.get_name() + sep + s
return s
# Function: get_domain
# Returns the enclosing domain
#
def get_domain(self):
    """Return the enclosing domain, or None if this phase is not inside
    a domain."""
    node = self
    # Walk up the parent chain until a DOMAIN node (or the root) is hit.
    while node is not None and node.m_phase_type != UVM_PHASE_DOMAIN:
        node = node.m_parent
    return node
# if(!$cast(get_domain,phase))
# `uvm_fatal("PH/INTERNAL", "get_domain: self.m_phase_type is DOMAIN but $cast to uvm_domain fails")
#
# // Function: get_imp
# //
# // Returns the phase implementation for this node.
# // Returns ~null~ if this phase type is not a UVM_PHASE_LEAF_NODE.
# //
# extern function uvm_phase get_imp()
#
# // Function: get_domain_name
# //
# // Returns the domain name associated with this phase node
# //
# extern function string get_domain_name()
def get_domain_name(self):
    """Return the domain name associated with this phase node, or
    "unknown" when no enclosing domain exists."""
    domain = self.get_domain()
    return "unknown" if domain is None else domain.get_name()
# // Function: get_adjacent_predecessor_nodes
# //
# // Provides an array of nodes which are predecessors to
# // ~this~ phase node. A 'predecessor node' is defined
# // as any phase node which lies prior to ~this~ node in
# // the phase graph, with no nodes between ~this~ node and
# // the predecessor node.
# //
# extern function void get_adjacent_predecessor_nodes(ref uvm_phase pred[])
#
# // Function: get_adjacent_successor_nodes
# //
# // Provides an array of nodes which are successors to
# // ~this~ phase node. A 'successor node' is defined
# // as any phase node which comes after ~this~ node in
# // the phase graph, with no nodes between ~this~ node
# // and the successor node.
# //
#function void uvm_phase::get_adjacent_successor_nodes(ref uvm_phase succ[])
def get_adjacent_successor_nodes(self):
    """Return the list of phase nodes which are adjacent successors to
    this phase node.

    A 'successor node' is any UVM_PHASE_NODE phase which comes after
    this node in the phase graph, with no other phase nodes between
    this node and the successor.

    Returns:
        list: Adjacent successor phase nodes (UVM_PHASE_NODE only).
    """
    # Get all successors (including TERMINALS, SCHEDULES, etc.)
    successors = {}
    for s in self.m_successors:
        successors[s] = 1
    # Replace any terminal/schedule nodes with their successors,
    # recursively, until only real phase nodes remain.
    done = False
    while not done:
        done = True
        # Copy keys: the dict is modified during iteration.
        for s in list(successors.keys()):
            if s.get_phase_type() != UVM_PHASE_NODE:
                del successors[s]
                for next_s in s.m_successors:
                    successors[next_s] = 1
                done = False
    return list(successors.keys())
#endfunction : get_adjacent_successor_nodes
# //-----------------------
# // Group: Phase Done Objection
# //-----------------------
# //
# // Task-based phase nodes within the phasing graph provide a <uvm_objection>
# // based interface for prolonging the execution of the phase. All other
# // phase types do not contain an objection, and will report a fatal error
# // if the user attempts to ~raise~, ~drop~, or ~get_objection_count~.
#
# // Function- m_report_null_objection
# // Simplifies the reporting of ~null~ objection errors
def m_report_null_objection(self, obj, description, count, action):
    """Report a uvm error when an objection operation (`action`) is
    attempted on a phase that is not a task-based phase node.

    Simplifies the reporting of null-objection errors.
    """
    obj_name = "uvm_top"
    if obj is not None:
        obj_name = obj.get_full_name()

    if action in ("raise", "drop"):
        if count == 1:
            action_str = "{} an objection".format(action)
        else:
            action_str = "{} {} objections".format(action, count)
    elif action == "get_objection_count":
        action_str = "call get_objection_count"
    else:
        action_str = ""

    addon = ""
    if self.get_phase_type() == UVM_PHASE_IMP:
        addon = (" (This is a UVM_PHASE_IMP, you have to query the "
            + "schedule to find the UVM_PHASE_NODE)")

    uvm_report_error("UVM/PH/NULL_OBJECTION", (
        "'{}' attempted to {} on '{}', however '{}' is not a task-based phase node! {}".format(
            obj_name, action_str, self.get_name(), self.get_name(), addon)))
# // Function: get_objection
# //
# // Return the <uvm_objection> that gates the termination of the phase.
# //
def get_objection(self):
    """Return the uvm_objection that gates the termination of the phase
    (None for phases without an objection)."""
    objection = self.phase_done
    return objection
# // Function: raise_objection
# //
# // Raise an objection to ending this phase
# // Provides components with greater control over the phase flow for
# // processes which are not implicit objectors to the phase.
# //
# //| while(1) begin
# //| some_phase.raise_objection(this)
# //| ...
# //| some_phase.drop_objection(this)
# //| end
# //| ...
# //
def raise_objection(self, obj, description="", count=1):
    """Raise an objection to ending this phase.

    Provides components with greater control over the phase flow for
    processes which are not implicit objectors to the phase. Reports an
    error instead when this phase has no objection (not task-based).
    """
    if self.phase_done is None:
        self.m_report_null_objection(obj, description, count, "raise")
        return
    if obj is not None:
        uvm_debug(self, 'raise_objection', 'obj: {}'.format(obj.get_name()))
    self.phase_done.raise_objection(obj, description, count)
#
# // Function: drop_objection
# //
# // Drop an objection to ending this phase
# //
# // The drop is expected to be matched with an earlier raise.
# //
def drop_objection(self, obj, description="", count=1):
    """Drop an objection to ending this phase.

    The drop is expected to be matched with an earlier raise. Reports
    an error instead when this phase has no objection (not task-based).

    Args:
        obj: Object dropping the objection.
        description (str): Optional reason for the drop.
        count (int): Number of objections to drop.
    """
    # Removed leftover debug print()s that were hard-wired to the
    # 'reset' phase and polluted stdout on every reset drop.
    if self.phase_done is not None:
        self.phase_done.drop_objection(obj, description, count)
    else:
        self.m_report_null_objection(obj, description, count, "drop")
#
# // Function: get_objection_count
# //
# // Returns the current number of objections to ending this phase raised by the given ~object~.
# //
# extern virtual function int get_objection_count( uvm_object obj=null )
#
# //-----------------------
# // Group: Synchronization
# //-----------------------
# // The functions 'sync' and 'unsync' add soft sync relationships between nodes
# //
# // Summary of usage:
# //| my_phase.sync(.target(domain)
# //| [,.phase(phase)[,.with_phase(phase)]])
# //| my_phase.unsync(.target(domain)
# //| [,.phase(phase)[,.with_phase(phase)]])
# //
# // Components in different schedule domains can be phased independently or in sync
# // with each other. An API is provided to specify synchronization rules between any
# // two domains. Synchronization can be done at any of three levels:
# //
# // - the domain's whole phase schedule can be synchronized
# // - a phase can be specified, to sync that phase with a matching counterpart
# // - or a more detailed arbitrary synchronization between any two phases
# //
# // Each kind of synchronization causes the same underlying data structures to
# // be managed. Like other APIs, we use the parameter dot-notation to set
# // optional parameters.
# //
# // When a domain is synced with another domain, all of the matching phases in
# // the two domains get a 'with' relationship between them. Likewise, if a domain
# // is unsynched, all of the matching phases that have a 'with' relationship have
# // the dependency removed. It is possible to sync two domains and then just
# // remove a single phase from the dependency relationship by unsyncing just
# // the one phase.
#
#
# // Function: sync
# //
# // Synchronize two domains, fully or partially
# //
# // target - handle of target domain to synchronize this one to
# // phase - optional single phase in this domain to synchronize,
# // otherwise sync all
# // with_phase - optional different target-domain phase to synchronize with,
# // otherwise use ~phase~ in the target domain
# //
# extern function void sync(uvm_domain target,
# uvm_phase phase=null,
# uvm_phase with_phase=null)
#
# // Function: unsync
# //
# // Remove synchronization between two domains, fully or partially
# //
# // target - handle of target domain to remove synchronization from
# // phase - optional single phase in this domain to un-synchronize,
# // otherwise unsync all
# // with_phase - optional different target-domain phase to un-synchronize with,
# // otherwise use ~phase~ in the target domain
# //
# extern function void unsync(uvm_domain target,
# uvm_phase phase=null,
# uvm_phase with_phase=null)
#
#
# // Function: wait_for_state
# //
# // Wait until this phase compares with the given ~state~ and ~op~ operand.
# // For <UVM_EQ> and <UVM_NE> operands, several <!-- <uvm_phase_states> --> can be
# // supplied by ORing their enum constants, in which case the caller will
# // wait until the phase state is any of (UVM_EQ) or none of (UVM_NE) the
# // provided states.
# //
# // To wait for the phase to be at the started state or after
# //
# //| wait_for_state(UVM_PHASE_STARTED, UVM_GTE)
# //
# // To wait for the phase to be either started or executing
# //
# //| wait_for_state(UVM_PHASE_STARTED | UVM_PHASE_EXECUTING, UVM_EQ)
# //
# extern task wait_for_state(uvm_phase_state state, uvm_wait_op op=UVM_EQ)
async def wait_for_state(self, state, op=UVM_EQ):
    """Wait until this phase compares with the given `state` and `op`
    operand.

    For UVM_EQ and UVM_NE operands, several phase states can be
    supplied by ORing their enum constants; the caller then waits until
    the phase state is any of (UVM_EQ) or none of (UVM_NE) them.

    Raises:
        Exception: For unsupported comparison operands.
    """
    if op == UVM_EQ:
        cond = lambda: (state & self.m_state) != 0
    elif op == UVM_NE:
        cond = lambda: (state & self.m_state) == 0
    elif op == UVM_GTE:
        cond = lambda: self.m_state >= state
    elif op == UVM_LT:
        cond = lambda: self.m_state < state
    elif op == UVM_LTE:
        cond = lambda: self.m_state <= state
    elif op == UVM_GT:
        cond = lambda: self.m_state > state
    else:
        raise Exception('IMPL for wait_for_state not finished yet')
    await self._wait_state_change_func(cond)
async def _wait_state_change_func(self, func):
while True:
await self.m_phase_set_state_event.wait()
self.m_phase_set_state_event.clear()
if func():
break
#
#
# //---------------
# // Group: Jumping
# //---------------
#
# // Force phases to jump forward or backward in a schedule
# //
# // A phasing domain can execute a jump from its current phase to any other.
# // A jump passes phasing control in the current domain from the current phase
# // to a target phase. There are two kinds of jump scope:
# //
# // - local jump to another phase within the current schedule, back- or forwards
# // - global jump of all domains together, either to a point in the master
# // schedule outwith the current schedule, or by calling jump_all()
# //
# // A jump preserves the existing soft synchronization, so the domain that is
# // ahead of schedule relative to another synchronized domain, as a result of
# // a jump in either domain, will await the domain that is behind schedule.
# //
# // *Note*: A jump out of the local schedule causes other schedules that have
# // the jump node in their schedule to jump as well. In some cases, it is
# // desirable to jump to a local phase in the schedule but to have all
# // schedules that share that phase to jump as well. In that situation, the
# // jump_all static function should be used. This function causes all schedules
# // that share a phase to jump to that phase.
# // Function: jump
# //
# // Jump to a specified ~phase~. If the destination ~phase~ is within the current
# // phase schedule, a simple local jump takes place. If the jump-to ~phase~ is
# // outside of the current schedule then the jump affects other schedules which
# // share the phase.
# //
# extern function void jump(uvm_phase phase)
# // Function: set_jump_phase
# //
# // Specify a phase to transition to when phase is complete.
# // Note that this function is part of what jump() does; unlike jump()
# // it does not set the flag to terminate the phase prematurely.
# extern function void set_jump_phase(uvm_phase phase)
# // Function: end_prematurely
# //
# // Set a flag to cause the phase to end prematurely.
# // Note that this function is part of what jump() does; unlike jump()
# // it does not set a jump_phase to go to after the phase ends.
def end_prematurely(self):
    """Set a flag to cause the phase to end prematurely.

    Note that this function is part of what jump() does; unlike jump()
    it does not set a jump_phase to go to after the phase ends.
    """
    self.m_premature_end = 1
#endfunction
# // Function- jump_all
# //
# // Make all schedules jump to a specified ~phase~, even if the jump target is local.
# // The jump happens to all phase schedules that contain the jump-to ~phase~
# // i.e. a global jump.
# //
# extern static function void jump_all(uvm_phase phase)
# // Function: get_jump_target
# //
# // Return handle to the target phase of the current jump, or ~null~ if no jump
# // is in progress. Valid for use during the phase_ended() callback
# //
# extern function uvm_phase get_jump_target()
def get_jump_target(self):
    """Return the target phase of the current jump, or None if no jump
    is in progress. Valid for use during the phase_ended() callback."""
    target = self.m_jump_phase
    return target
#// m_find_predecessor
#// ------------------
#
def m_find_predecessor(self, phase: 'UVMPhase', stay_in_scope=True, orig_phase=None):
    """Recursively search the predecessor graph for `phase`.

    Args:
        phase (UVMPhase): Phase (node or IMP) to look for.
        stay_in_scope (bool): When True, only follow predecessors in
            the same schedule or domain as the originating phase.
        orig_phase (UVMPhase): Phase where the search originated, used
            for scope checks; defaults to self.

    Returns:
        UVMPhase: The matching phase node, or None when not found.
    """
    uvm_debug(self, 'm_find_pred', "called with phase as {}, orig_phase {}".format(
        phase, orig_phase))
    if phase is None:
        return None
    uvm_debug(self, 'm_find_pred', " Comparing now {} to {} and self {}".format(phase, self.m_imp,
        self))
    if phase == self.m_imp or phase == self:
        uvm_debug(self, 'm_find_pred', "returning self now from")
        return self
    orig = self if orig_phase is None else orig_phase
    for pred in self.m_predecessors.keys():
        uvm_debug(self, 'm_find_pred', " key is now {}".format(pred))
        uvm_debug(self, 'm_find_pred', "pred is {}, orig is {}".format(pred, orig))
        if (not stay_in_scope or
                (pred.get_schedule() == orig.get_schedule()) or
                (pred.get_domain() == orig.get_domain())):
            found = pred.m_find_predecessor(phase, stay_in_scope, orig)
            # Fix: keep searching the remaining predecessors when this
            # branch missed. The previous unconditional `return found`
            # aborted the search with None after the first in-scope
            # predecessor (m_find_predecessor_by_name and the SV
            # reference implementation both check for null here).
            if found is not None:
                return found
    uvm_debug(self, 'm_find_pred', "Did not find precessors for " +
        str(phase))
    return None
#// m_find_successor
#// ----------------
#
# @return uvm_phase
def m_find_successor(self, phase: 'UVMPhase', stay_in_scope=True, orig_phase=None):
    """Recursively search the successor graph for `phase`.

    Args:
        phase (UVMPhase): Phase (node or IMP) to look for.
        stay_in_scope (bool): When True, only follow successors in the
            same schedule or domain as the originating phase.
        orig_phase (UVMPhase): Phase where the search originated, used
            for scope checks; defaults to self.

    Returns:
        UVMPhase: The matching phase node, or None when not found.
    """
    if phase is None:
        return None
    if phase == self.m_imp or phase == self:
        return self
    orig = self if orig_phase is None else orig_phase
    for succ in self.m_successors.keys():
        uvm_debug(self, 'm_find_succ', "succ is now {}".format(succ))
        if (not stay_in_scope or
                (succ.get_schedule() == orig.get_schedule()) or
                (succ.get_domain() == orig.get_domain())):
            found = succ.m_find_successor(phase, stay_in_scope, orig)
            # Fix: continue scanning the remaining successors when this
            # branch does not contain the target. The previous
            # unconditional `return found` aborted the search with None
            # after the first in-scope successor (matches
            # m_find_successor_by_name and the SV reference).
            if found is not None:
                return found
    return None
# extern function uvm_phase m_find_predecessor_by_name(string name, bit stay_in_scope=1, uvm_phase orig_phase=null)
def m_find_predecessor_by_name(self, name, stay_in_scope=1, orig_phase=None):
    """Recursively search the predecessor graph for a phase whose name
    is `name`. Returns the matching phase or None."""
    if self.get_name() == name:
        return self
    origin = orig_phase if orig_phase is not None else self
    for pred in self.m_predecessors:
        if (not stay_in_scope or
                pred.get_schedule() == origin.get_schedule() or
                pred.get_domain() == origin.get_domain()):
            match = pred.m_find_predecessor_by_name(name, stay_in_scope, origin)
            if match is not None:
                return match
    return None
# extern function uvm_phase m_find_successor_by_name(string name, bit stay_in_scope=1, uvm_phase orig_phase=null)
def m_find_successor_by_name(self, name, stay_in_scope=1, orig_phase=None):
    """Recursively search the successor graph for a phase whose name is
    `name`. Returns the matching phase or None."""
    if self.get_name() == name:
        return self
    origin = orig_phase if orig_phase is not None else self
    for succ in self.m_successors:
        if (not stay_in_scope or
                succ.get_schedule() == origin.get_schedule() or
                succ.get_domain() == origin.get_domain()):
            match = succ.m_find_successor_by_name(name, stay_in_scope, origin)
            if match is not None:
                return match
    return None
# extern function void m_print_successors()
#
# // Implementation - Callbacks
# //---------------------------
# // Provide the required component traversal behavior. Called by execute()
# virtual function void traverse(uvm_component comp,
# uvm_phase phase,
# uvm_phase_state state)
# endfunction
# // Provide the required per-component execution flow. Called by traverse()
def execute(self, comp, phase):
    """Provide the required per-component execution flow. Called by
    traverse(). The base implementation is a no-op; phase IMP
    subclasses override this to invoke the phase method on `comp`.
    """
    pass
#
# // Implementation - Schedule
# //--------------------------
# // Track the currently executing real task phases (used for debug)
# function uvm_phase get_begin_node(); if (m_imp != null) return this; return null; endfunction
# function uvm_phase get_end_node(); return self.m_end_node; endfunction
# // Implementation - Synchronization
# //---------------------------------
def get_ready_to_end_count(self):
    """Return how many times this phase has entered the READY_TO_END
    state during the current execution (used for debug)."""
    count = self.m_ready_to_end_count
    return count
# Internal implementation, more efficient than calling get_predecessor_nodes on all
# of the successors returned by get_adjacent_successor_nodes
#function void uvm_phase::get_predecessors_for_successors(output bit pred_of_succ[uvm_phase])
def get_predecessors_for_successors(self, pred_of_succ):
    """Fill `pred_of_succ` with every real phase node that is a
    predecessor of this phase's adjacent successor nodes.

    Internal implementation, more efficient than calling
    get_predecessor_nodes on all successors returned by
    get_adjacent_successor_nodes.

    Args:
        pred_of_succ (dict): Output map keyed by UVMPhase (value 1).
    """
    successors = self.get_adjacent_successor_nodes()
    # Collect all direct predecessors of those successors.
    for succ in successors:
        for pred in succ.m_predecessors:
            if pred == self:
                uvm_debug(self, 'get_predecessors_for_successor', self.get_name()
                    + " ZZZ self found from pred_of_succ")
            pred_of_succ[pred] = 1
    # Replace any terminal nodes with their predecessors, recursively;
    # we are only interested in "real" phase nodes.
    done = False
    while not done:
        done = True
        # Copy keys: the dict is modified during iteration.
        for pred in list(pred_of_succ.keys()):
            if pred.get_phase_type() != UVM_PHASE_NODE:
                del pred_of_succ[pred]
                for next_pred in pred.m_predecessors:
                    pred_of_succ[next_pred] = 1
                done = False
    # Remove ourselves from the list. pop() with a default: self may be
    # absent when it is not a predecessor of its own successors, and
    # the previous unconditional `del` raised KeyError in that case
    # (the SV reference's delete() is silent).
    if self.get_name() != 'final':
        pred_of_succ.pop(self, None)
async def m_wait_for_pred(self):
    """Wait until every predecessor-of-successor phase node (real phase
    nodes, not terminals) has reached the READY_TO_END state, then
    yield once so any waiters can wake up."""
    pred_of_succ = {}
    self.get_predecessors_for_successors(pred_of_succ)
    await uvm_zero_delay()
    for pred in pred_of_succ:
        if UVMPhase.m_phase_trace:
            msg = sv.sformatf("Waiting for phase '%s' (%0d) to be READY_TO_END. Current state is %s",
                pred.get_name(), pred.get_inst_id(), pred.m_state.name())
            UVM_PH_TRACE("PH/TRC/WAIT_PRED_OF_SUCC", msg, self, UVM_HIGH)
        await pred.wait_for_state(UVM_PHASE_READY_TO_END, UVM_GTE)
        if UVMPhase.m_phase_trace:
            msg = sv.sformatf("Phase '%s' (%0d) is now READY_TO_END. Releasing phase",
                pred.get_name(), pred.get_inst_id())
            UVM_PH_TRACE("PH/TRC/WAIT_PRED_OF_SUCC", msg, self, UVM_HIGH)
    if UVMPhase.m_phase_trace:
        if len(pred_of_succ) > 0:
            names = "( " + "".join(p.get_full_name() + " " for p in pred_of_succ) + ")"
            UVM_PH_TRACE("PH/TRC/WAIT_PRED_OF_SUCC",
                "*** All pred to succ " + names + " in READY_TO_END state, so ending phase ***", self, UVM_HIGH)
        else:
            UVM_PH_TRACE("PH/TRC/WAIT_PRED_OF_SUCC",
                "*** No pred to succ other than myself, so ending phase ***", self, UVM_HIGH)
    # Zero-delay (#0) so that any waiters wake up.
    await uvm_zero_delay()
# extern function void clear(uvm_phase_state state = UVM_PHASE_DORMANT)
#// for internal graph maintenance after a forward jump
def clear(self, state=UVM_PHASE_DORMANT):
    """Reset this node for internal graph maintenance after a forward
    jump: set the given state, drop the phase process, and clear any
    phase-done objections held for this phase."""
    self.set_state(state)
    self.m_phase_proc = None
    done_objection = self.phase_done
    if done_objection is not None:
        done_objection.clear(self)
# extern function void clear_successors(
# uvm_phase_state state = UVM_PHASE_DORMANT,
# uvm_phase end_state=null)
#// clear_successors
#// ----------------
#// for internal graph maintenance after a forward jump
#// - called only by execute_phase()
#// - depth-first traversal of the DAG, calling clear() on each node
#// - do not clear the end phase or beyond
def clear_successors(self, state=UVM_PHASE_DORMANT, end_state=None):
    """Depth-first traversal of the DAG, calling clear() on this node
    and each successor. Does not clear the `end_state` phase or beyond.

    For internal graph maintenance after a forward jump; called only by
    execute_phase().
    """
    if self == end_state:
        return
    self.clear(state)
    for successor in self.m_successors:
        successor.clear_successors(state, end_state)
# m_run_phases
# ------------
# This task contains the top-level process that owns all the phase
# processes. By hosting the phase processes here we avoid problems
# associated with phase processes related as parents/children
@classmethod
async def m_run_phases(cls):
    """Top-level process that owns all the phase processes.

    By hosting the phase processes here we avoid problems associated
    with phase processes related as parents/children. Seeds the phase
    hopper with the common domain, then forks execute_phase() for each
    phase pulled from the hopper, forever.
    """
    cs = get_cs()
    top = cs.get_root()
    uvm_debug(cls, 'm_run_phases', 'Forking all phases in while-True')
    # Initiate by starting the first phase in the common domain.
    from .uvm_domain import UVMDomain
    common = UVMDomain.get_common_domain()
    uvm_debug(cls, 'm_run_phases', 'common domain OK')
    if not UVMPhase.m_phase_hopper.try_put(common):
        raise Exception('Could not add phase to phase_hopper mailbox')
    while True:
        item = []
        await UVMPhase.m_phase_hopper.get(item)
        phase = item[0]
        uvm_debug(cls, 'm_run_phases', 'Calling execute phase with |' +
            str(phase.get_name()) + '|')
        cocotb.fork(phase.execute_phase())
        # Zero-delay (#0) to let the forked process start running.
        await uvm_zero_delay()
# execute_phase
# -------------
async def execute_phase(self):
    """Execute this phase node from start to finish.

    Waits for predecessors and sync partners, runs the phase (function
    or task based), waits for end criteria, handles premature end and
    jumps, and finally schedules successor phases into the hopper. One
    instance of this coroutine is forked per phase by m_run_phases().
    """
    task_phase = None  # set only for task-based (time-consuming) phases
    state_chg = None
    cs = get_cs()
    top = cs.get_root()  # UVMRoot
    uvm_debug(self, 'execute_phase', 'Waiting predecessors to finish ' +
        self.get_name())
    # If we got here by jumping forward, we must wait for
    # all its predecessor nodes to be marked DONE.
    # (the next conditional speeds this up)
    # Also, this helps us fast-forward through terminal (end) nodes
    await self._wait_all_predecessors_done()
    uvm_debug(self, 'execute_phase', 'All predecessors are DONE ' +
        self.get_name())
    # If DONE (by, say, a forward jump), return immed
    if self.m_state == UVM_PHASE_DONE:
        uvm_debug(self, 'execute_phase', 'PHASE_DONE_REACHED - returning now')
        return
    state_chg = UVMPhaseStateChange(self.get_name())
    state_chg.m_phase = self
    state_chg.m_jump_to = None
    #---------
    # SYNCING:
    #---------
    # Wait for phases with which we have a sync()
    # relationship to be ready. Sync can be 2-way -
    # this additional state avoids deadlock.
    state_chg.m_prev_state = self.m_state
    self.set_state(UVM_PHASE_SYNCING)
    uvm_do_callbacks(self, UVMPhaseCb, 'phase_state_change', self, state_chg)
    await uvm_zero_delay()
    uvm_debug(self, 'execute_phase', 'Checking for wait_phases_synced')
    if len(self.m_sync) > 0:
        uvm_debug(self, 'execute_phase', 'Waiting for wait_phases_synced ' +
            self.get_name())
        await self._wait_phases_synced()
    self.m_run_count += 1
    if UVMPhase.m_phase_trace is True:
        UVM_PH_TRACE("PH/TRC/STRT","Starting phase",self,UVM_LOW)
    # If we're a schedule or domain, then "fake" execution
    if self.m_phase_type != UVM_PHASE_NODE:
        uvm_debug(self, 'execute_phase', 'schedule/domain, faking execution')
        state_chg.m_prev_state = self.m_state
        self.set_state(UVM_PHASE_STARTED)
        uvm_do_callbacks(self, UVMPhaseCb, 'phase_state_change', self, state_chg)
        await uvm_zero_delay()
        state_chg.m_prev_state = self.m_state
        self.set_state(UVM_PHASE_EXECUTING)
        uvm_do_callbacks(self, UVMPhaseCb, 'phase_state_change', self, state_chg)
        await uvm_zero_delay()
    else:  # PHASE NODE
        uvm_debug(self, 'execute_phase', 'PHASE_NODE, setting phase to started')
        #---------
        # STARTED:
        #---------
        state_chg.m_prev_state = self.m_state
        self.set_state(UVM_PHASE_STARTED)
        uvm_do_callbacks(self, UVMPhaseCb, 'phase_state_change', self, state_chg)
        # Task-phases must use yield here
        if not self.m_imp.is_task_phase():
            self.m_imp.traverse(top,self, UVM_PHASE_STARTED)
        else:
            await self.m_imp.traverse(top,self, UVM_PHASE_STARTED)
        self.m_ready_to_end_count = 0  # reset the ready_to_end count when phase starts
        await uvm_zero_delay()  # LET ANY WAITERS WAKE UP
        if not self.m_imp.is_task_phase():
            #-----------
            # EXECUTING: (function phases)
            #-----------
            uvm_debug(self, 'execute_phase', 'Exec non-task (function) phase now')
            state_chg.m_prev_state = self.m_state
            self.set_state(UVM_PHASE_EXECUTING)
            uvm_do_callbacks(self, UVMPhaseCb, 'phase_state_change', self, state_chg)
            await uvm_zero_delay()  # LET ANY WAITERS WAKE UP
            uvm_debug(self, 'execute_phase', 'Will traverse something now')
            self.m_imp.traverse(top,self,UVM_PHASE_EXECUTING)
        else:
            task_phase = self.m_imp  # was $cast(task_phase, m_imp)
            # Execute task phase (which can consume time/fork procs)
            UVMPhase.m_executing_phases[self] = 1
            state_chg.m_prev_state = self.m_state
            self.set_state(UVM_PHASE_EXECUTING)
            uvm_do_callbacks(self, UVMPhaseCb, 'phase_state_change', self, state_chg)
            #fork : master_phase_process
            #self.m_phase_proc = process::self()
            #-----------
            # EXECUTING: (task phases)
            #-----------
            uvm_debug(self, 'execute_phase', "Forking now task_phase for uvm_top")
            task_proc = cocotb.fork(task_phase.traverse(top, self, UVM_PHASE_EXECUTING))
            #wait(0); // stay alive for later kill
            #join_none
            await uvm_wait_for_nba_region()  # Give sequences, etc. a chance to object
            await self.wait_for_criterion_for_end_phase(state_chg)
            uvm_debug(self, 'execute_phase', "End criterion reached for " +
                top.get_name())
    # end # PHASE_NODE
    uvm_debug(self, 'execute_phase', 'Now deleting self from executing phases')
    if self in UVMPhase.m_executing_phases:
        del UVMPhase.m_executing_phases[self]
    else:
        pass
        #uvm_fatal('NOT_IN_EXEC', 'self not in executing phases')
    # ---------
    # JUMPING:
    # ---------
    #
    # If jump_to() was called then we need to kill all the successor
    # phases which may still be running and then initiate the new
    # phase. The return is necessary so we don't start new successor
    # phases. If we are doing a forward jump then we want to set the
    # state of this phase's successors to UVM_PHASE_DONE. This
    # will let us pretend that all the phases between here and there
    # were executed and completed. Thus any dependencies will be
    # satisfied preventing deadlocks.
    # GSA TBD insert new jump support
    #
    if self.m_phase_type == UVM_PHASE_NODE:
        if self.m_premature_end:
            if self.m_jump_phase is not None:
                state_chg.m_jump_to = self.m_jump_phase
                uvm_report_info("PH_JUMP", (
                    "phase {} (schedule {}, domain {}) is jumping to phase {}".format(
                        self.get_name(), self.get_schedule_name(), self.get_domain_name(),
                        self.m_jump_phase.get_name())), UVM_MEDIUM)
            else:
                uvm_report_info("PH_JUMP", (
                    "phase {} (schedule {}, domain {}) is ending prematurely".format(
                        self.get_name(), self.get_schedule_name(),
                        self.get_domain_name())), UVM_MEDIUM)
            #0; // LET ANY WAITERS ON READY_TO_END TO WAKE UP
            await uvm_zero_delay()
            if UVMPhase.m_phase_trace:
                UVM_PH_TRACE("PH_END","ENDING PHASE PREMATURELY",self,UVM_MEDIUM)
        else:
            # WAIT FOR PREDECESSORS: // WAIT FOR PREDECESSORS:
            # function phases only
            if task_phase is None:
                await self.m_wait_for_pred()
        #-------
        # ENDED:
        #-------
        # execute 'phase_ended' callbacks
        if UVMPhase.m_phase_trace:
            UVM_PH_TRACE("PH_END","ENDING PHASE",self,UVM_MEDIUM)
        state_chg.m_prev_state = self.m_state
        self.set_state(UVM_PHASE_ENDED)
        uvm_do_callbacks(self, UVMPhaseCb, 'phase_state_change', self, state_chg)
        if self.m_imp is not None:
            if self.m_imp.is_task_phase():
                await self.m_imp.traverse(top,self, UVM_PHASE_ENDED)
            else:
                self.m_imp.traverse(top,self, UVM_PHASE_ENDED)
        uvm_debug(self, "execute_phase", "MMM KKK SSS ZZZ before yield")
        await uvm_zero_delay()
        uvm_debug(self, "execute_phase", "Phase ended after yield")
        #0; // LET ANY WAITERS WAKE UP
        #---------
        # CLEANUP:
        #---------
        # kill this phase's threads
        uvm_debug(self, "execute_phase", "Starting cleanup of |"
            + self.m_imp.get_name() + "|")
        state_chg.m_prev_state = self.m_state
        if self.m_premature_end:
            self.set_state(UVM_PHASE_JUMPING)
        else:
            self.set_state(UVM_PHASE_CLEANUP)
        uvm_do_callbacks(self, UVMPhaseCb, 'phase_state_change', self, state_chg)
        if self.m_phase_proc is not None:
            self.m_phase_proc.kill()
            self.m_phase_proc = None
        await uvm_zero_delay()
        #0; // LET ANY WAITERS WAKE UP
        uvm_debug(self, "execute_phase", "Cleanup DONE |" + self.m_imp.get_name() + "|")
        if self.phase_done is not None:
            nn = self.get_name()
            uvm_debug(self, "execute_phase", nn + "| clear() now after DONE |" +
                self.m_imp.get_name() + "|")
            self.phase_done.clear()
    #------
    # DONE:
    #------
    self.m_premature_end = False
    if self.m_jump_fwd or self.m_jump_bkwd:
        if self.m_jump_fwd:
            self.clear_successors(UVM_PHASE_DONE,self.m_jump_phase)
        self.m_jump_phase.clear_successors()
    else:
        if UVMPhase.m_phase_trace:
            UVM_PH_TRACE("PH/TRC/DONE","Completed phase",self,UVM_LOW)
        state_chg.m_prev_state = self.m_state
        self.set_state(UVM_PHASE_DONE)
        uvm_do_callbacks(self, UVMPhaseCb, 'phase_state_change', self, state_chg)
        uvm_debug(self, 'exec_phase', 'DONE after uvm_callbacks done')
        self.m_phase_proc = None
    await uvm_zero_delay()  # 0; // LET ANY WAITERS WAKE UP
    await uvm_zero_delay()  # 0; // LET ANY WAITERS WAKE UP
    if self.phase_done is not None:
        self.phase_done.clear()
    #-----------
    # SCHEDULED:
    #-----------
    if self.m_jump_fwd or self.m_jump_bkwd:
        UVMPhase.m_phase_hopper.try_put(self.m_jump_phase)
        self.m_jump_phase = None
        self.m_jump_fwd = False
        self.m_jump_bkwd = False
    # If more successors, schedule them to run now
    elif len(self.m_successors) == 0:
        #top.m_phase_all_done= True
        uvm_debug(self, 'execute_phase', ('name: ' + self.get_name() +
            ' - notify phases done OK'))
        top.m_phase_all_done_event.set()
    else:
        # execute all the successors
        for key in self.m_successors.keys():
            uvm_debug(self, 'execute_phase', self.get_name() +
                ' has more successors')
            succ = key
            if succ.m_state < UVM_PHASE_SCHEDULED:
                state_chg.m_prev_state = succ.m_state
                state_chg.m_phase = succ
                succ.set_state(UVM_PHASE_SCHEDULED)
                uvm_do_callbacks(self, UVMPhaseCb, 'phase_state_change', succ, state_chg)
                await uvm_zero_delay()  # LET ANY WAITERS WAKE UP
                if not UVMPhase.m_phase_hopper.try_put(succ):
                    raise Exception('Failed try_put(succ). Should not ever fail')
                if UVMPhase.m_phase_trace:
                    UVM_PH_TRACE("PH/TRC/SCHEDULED", ("Scheduled from phase "
                        + self.get_full_name()), succ, UVM_LOW)
    uvm_debug(self, 'execute_phase', 'End of task reached. Yay!')
#endtask
async def _wait_all_predecessors_done(self):
    """Block until every predecessor phase has signalled its phase-done
    event; yields once (zero-delay) when there are no predecessors."""
    nn = self.get_name()
    if not self.has_predecessors():
        uvm_debug(self, '_wait_all_predecessors_done', nn + '| before yield Timer(0)')
        await uvm_zero_delay()
        uvm_debug(self, '_wait_all_predecessors_done', nn + '| after yield Timer(0)')
        return
    uvm_debug(self, '_wait_all_predecessors_done', nn + '| has predecessors() OK')
    waits = []
    for pred in self.m_predecessors:
        uvm_debug(self, '_wait_all_predecessors_done', 'pred is now ' + str(pred))
        # wait (pred.m_state == UVM_PHASE_DONE) - via the done event trigger
        waits.append(pred.get_phase_done_event().wait())
    uvm_debug(self, '_wait_all_predecessors_done', nn + "| Before combining events")
    await Combine(*waits)  # Combine expects *args, not list
    uvm_debug(self, '_wait_all_predecessors_done', nn + "| After combining events")
async def _wait_phases_synced(self):
events = []
for phase in self.m_sync:
if (phase.m_state < UVM_PHASE_SYNCING):
events.append(phase.get_phase_synced_event())
await Combine(*events)
def has_predecessors(self):
    """Return True if this phase has at least one predecessor node."""
    return bool(self.m_predecessors)
# extern local function void m_terminate_phase()
# extern local function void m_print_termination_state()
#//---------------------------------
#// Implementation - Overall Control
#//---------------------------------
#// This task loops until this phase instance and all its siblings, either
#// sync'd or sharing a common successor, have all objections dropped.
async def wait_for_self_and_siblings_to_drop(self):
    """Loop until this phase instance and all its siblings - either
    sync'd with it or sharing a common successor - have all objections
    dropped. Part of the overall end-of-phase control flow.
    """
    need_to_check_all = True
    top = None
    cs = None
    siblings = {}  # bit siblings[uvm_phase]
    from .uvm_coreservice import UVMCoreService
    cs = UVMCoreService.get()
    top = cs.get_root()
    # Siblings = predecessors of our successors, plus all sync'd phases.
    self.get_predecessors_for_successors(siblings)
    for ss in self.m_sync:
        siblings[ss] = 1
    await uvm_zero_delay()
    while need_to_check_all is True:
        need_to_check_all = False  # if all are dropped, we won't need to do this again
        # wait for own objections to drop
        if ((self.phase_done is not None) and
                (self.phase_done.get_objection_total(top) != 0)):
            self.set_state(UVM_PHASE_EXECUTING)
            await self.phase_done.wait_for(UVM_ALL_DROPPED, top)
            # An objection existed, so re-check everything afterwards.
            need_to_check_all = True
        # now wait for siblings to drop
        #foreach(siblings[sib]) begin
        for sib in siblings:
            # sibling must be at least executing
            await sib.wait_for_state(UVM_PHASE_EXECUTING, UVM_GTE)
            if ((sib.phase_done is not None) and
                    (sib.phase_done.get_objection_total(top) != 0)):
                self.set_state(UVM_PHASE_EXECUTING)
                # sibling must drop any objection
                await sib.phase_done.wait_for(UVM_ALL_DROPPED, top)
                need_to_check_all = True
# extern function void kill()
# extern function void kill_successors()
def convert2string(self):
    """Return a human-readable summary of this phase: name, parent
    schedule name, predecessors, and successors."""
    if self.m_parent is None:
        parent_name = 'null'
    else:
        parent_name = self.get_schedule_name()
    return "phase: {} parent={} pred={} succ={}".format(
        self.get_name(), parent_name, str(self.m_predecessors),
        str(self.m_successors))
# local function string m_aa2string(bit aa[uvm_phase]); // TBD tidy
# string s
# int i
# s = "'{ "
# foreach (aa[ph]) begin
# uvm_phase n = ph
# s = {s, (n == null) ? "null" : n.get_name(),
# (i == aa.num()-1) ? "" : ", "}
# i++
# end
# s = {s, " }"}
# return s
# endfunction
#
# function bit is_domain()
# return (self.m_phase_type == UVM_PHASE_DOMAIN)
# endfunction
#
# virtual function void m_get_transitive_children(ref uvm_phase phases[$])
# foreach (self.m_successors[succ])
# begin
# phases.push_back(succ)
# succ.m_get_transitive_children(phases)
# end
# endfunction
#endclass
#uvm_object_utils(UVMPhase)
#uvm_register_cb(UVMPhase, UVMPhaseCb)
async def wait_for_criterion_for_end_phase(self, state_chg):
    """Block until this phase's end criterion is satisfied.

    The only criterion implemented here is "all objections dropped";
    it delegates to `_wait_for_all_dropped`.

    Args:
        state_chg: phase state-change descriptor forwarded to callbacks.
    """
    await self._wait_for_all_dropped(state_chg)
async def _wait_for_all_dropped(self, state_chg):
    """Wait until every objection on this phase (and on synced sibling
    phases) is dropped, then iterate READY_TO_END callbacks until no new
    objection is raised or the iteration cap is reached.

    Args:
        state_chg: state-change descriptor; `m_prev_state` is refreshed
            before each `phase_state_change` callback round.
    """
    cs = get_cs()
    top = cs.get_root()
    # WAIT_FOR_ALL_DROPPED
    do_ready_to_end = False  # bit used for ready_to_end iterations
    # OVM semantic: don't end until objection raised or stop request
    if (self.phase_done.get_objection_total(top) or (UVMPhase.m_use_ovm_run_semantic
            and self.m_imp.get_name() == "run")):
        if not self.phase_done.m_top_all_dropped:
            await self.phase_done.wait_for(UVM_ALL_DROPPED, top)
        UVM_PH_TRACE("PH/TRC/EXE/ALLDROP","PHASE EXIT ALL_DROPPED",self,UVM_MEDIUM)
    else:
        if (UVMPhase.m_phase_trace):
            UVM_PH_TRACE("PH/TRC/SKIP","No objections raised, skipping phase",self,UVM_LOW)
    # Even with no local objections, synced siblings may still object.
    uvm_debug(self, '_wait_for_all_dropped', self.get_name() + ' waiting siblings to drop')
    await self.wait_for_self_and_siblings_to_drop()
    uvm_debug(self, '_wait_for_all_dropped', self.get_name() + ' all siblings have dropped')
    do_ready_to_end = True
    # --------------
    # READY_TO_END:
    # --------------
    while do_ready_to_end:
        # Let all siblings see no objections before traverse might raise another
        await uvm_wait_for_nba_region()
        UVM_PH_TRACE("PH_READY_TO_END","PHASE READY TO END",self,UVM_MEDIUM)
        self.m_ready_to_end_count += 1
        if (UVMPhase.m_phase_trace):
            UVM_PH_TRACE("PH_READY_TO_END_CB","CALLING READY_TO_END CB",self,UVM_MEDIUM)
        state_chg.m_prev_state = self.m_state
        self.set_state(UVM_PHASE_READY_TO_END)
        uvm_do_callbacks(self, UVMPhaseCb, 'phase_state_change', self, state_chg)
        if self.m_imp is not None:
            # Task phases traverse asynchronously; function phases do not.
            if self.m_imp.is_task_phase():
                await self.m_imp.traverse(top, self, UVM_PHASE_READY_TO_END)
            else:
                self.m_imp.traverse(top, self, UVM_PHASE_READY_TO_END)
        await uvm_wait_for_nba_region()  # Give traverse targets a chance to object
        await self.wait_for_self_and_siblings_to_drop()
        # Loop again only if a callback re-raised an objection (state went
        # back to EXECUTING) and we have iterations left.
        do_ready_to_end = ((self.m_state == UVM_PHASE_EXECUTING) and (self.m_ready_to_end_count
            < self.max_ready_to_end_iter))  # when we don't wait in task above, we drop out of while loop
#------------------------------------------------------------------------------
#
# Class: uvm_phase_cb
#
#------------------------------------------------------------------------------
#
# This class defines a callback method that is invoked by the phaser
# during the execution of a specific node in the phase graph or all phase nodes.
# User-defined callback extensions can be used to integrate data types that
# are not natively phase-aware with the UVM phasing.
class UVMPhaseCb(UVMCallback):
    """Callback hook invoked by the phaser during execution of a specific
    node in the phase graph (or of all phase nodes).

    User-defined extensions can integrate data types that are not natively
    phase-aware with the UVM phasing.
    """

    def __init__(self, name="unnamed-uvm_phase_cb"):
        """Constructor.

        Args:
            name (str): Name of this callback object.
        """
        # Bug fix: forward `name` to the base class instead of silently
        # dropping it, so the callback is reported under the caller's name.
        UVMCallback.__init__(self, name)

    def phase_state_change(self, phase, change):
        """Called whenever `phase` changes state.

        The `change` descriptor describes the transition that was just
        completed. Invoked immediately after the phase state has changed,
        but before the phase implementation is executed. An extension may
        interact with the phase (e.g. raise its objection to prolong it)
        in a manner consistent with the current phase state. Modifying the
        transition descriptor has no effect on the phasing schedule.

        Args:
            phase (UVMPhase): the phase whose state changed.
            change: transition descriptor.
        """
        pass
#------------------------------------------------------------------------------
#
# Class: uvm_phase_cb_pool
#
#------------------------------------------------------------------------------
# Convenience type for the uvm_callbacks#(uvm_phase, uvm_phase_cb) class.
#typedef uvm_callbacks#(uvm_phase, uvm_phase_cb) uvm_phase_cb_pool
##------------------------------------------------------------------------------
## IMPLEMENTATION
##------------------------------------------------------------------------------
#
#typedef class uvm_cmdline_processor
#//-----------------------------
#// Implementation - Construction
#//-----------------------------
#
#// get_imp
#// -------
#
#function uvm_phase uvm_phase::get_imp()
# return m_imp
#endfunction
#
#
#
#
#// m_print_successors
#// ------------------
#
#function void uvm_phase::m_print_successors()
# uvm_phase found
# static string spaces = " "
# static int level
# if (self.m_phase_type == UVM_PHASE_DOMAIN)
# level = 0
# `uvm_info("UVM/PHASE/SUCC",$sformatf("%s%s (%s) id=%0d",spaces.substr(0,level*2),get_name(), self.m_phase_type.name(),get_inst_id()),UVM_NONE)
# level++
# foreach (self.m_successors[succ]) begin
# succ.m_print_successors()
# end
# level--
#endfunction
#
#
#function void uvm_phase::get_adjacent_predecessor_nodes(ref uvm_phase pred[])
# bit done
# bit predecessors[uvm_phase]
# int idx
#
# // Get all predecessors (including TERMINALS, SCHEDULES, etc.)
# foreach (self.m_predecessors[p])
# predecessors[p] = 1
#
# // Replace any terminal / schedule nodes with their predecessors,
# // recursively.
# do begin
# done = 1
# foreach (predecessors[p]) begin
# if (p.get_phase_type() != UVM_PHASE_NODE) begin
# predecessors.delete(p)
# foreach (p.m_predecessors[next_p])
# predecessors[next_p] = 1
# done = 0
# end
# end
# end while (!done)
#
# pred = new [predecessors.size()]
# foreach (predecessors[p]) begin
# pred[idx++] = p
# end
#endfunction : get_adjacent_predecessor_nodes
#
#
#
#
#
#
#//---------------------------------
#// Implementation - Synchronization
#//---------------------------------
#
#
#
#// get_objection_count
#// -------------------
#
#function int uvm_phase::get_objection_count (uvm_object obj=null)
# if (self.phase_done != null)
# return self.phase_done.get_objection_count(obj)
# else begin
# m_report_null_objection(obj, "" , 0, "get_objection_count")
# return 0
# end
#endfunction : get_objection_count
#
#// sync
#// ----
#
#function void uvm_phase::sync(uvm_domain target,
# uvm_phase phase=null,
# uvm_phase with_phase=null)
# if (!this.is_domain()) begin
# `uvm_fatal("PH_BADSYNC","sync() called from a non-domain phase schedule node")
# end
# else if (target == null) begin
# `uvm_fatal("PH_BADSYNC","sync() called with a null target domain")
# end
# else if (!target.is_domain()) begin
# `uvm_fatal("PH_BADSYNC","sync() called with a non-domain phase schedule node as target")
# end
# else if (phase == null && with_phase != null) begin
# `uvm_fatal("PH_BADSYNC","sync() called with null phase and non-null with phase")
# end
# else if (phase == null) begin
# // whole domain sync - traverse this domain schedule from begin to end node and sync each node
# int visited[uvm_phase]
# uvm_phase queue[$]
# queue.push_back(this)
# visited[this] = 1
# while (queue.size()) begin
# uvm_phase node
# node = queue.pop_front()
# if (node.m_imp != null) begin
# sync(target, node.m_imp)
# end
# foreach (node.m_successors[succ]) begin
# if (!visited.exists(succ)) begin
# queue.push_back(succ)
# visited[succ] = 1
# end
# end
# end
# end else begin
# // single phase sync
# // this is a 2-way ('with') sync and we check first in case it is already there
# uvm_phase from_node, to_node
# int found_to[$], found_from[$]
# if(with_phase == null) with_phase = phase
# from_node = find(phase)
# to_node = target.find(with_phase)
# if(from_node == null || to_node == null) return
# found_to = from_node.m_sync.find_index(node) with (node == to_node)
# found_from = to_node.m_sync.find_index(node) with (node == from_node)
# if (found_to.size() == 0) from_node.m_sync.push_back(to_node)
# if (found_from.size() == 0) to_node.m_sync.push_back(from_node)
# end
#endfunction
#
#
#// unsync
#// ------
#
#function void uvm_phase::unsync(uvm_domain target,
# uvm_phase phase=null,
# uvm_phase with_phase=null)
# if (!this.is_domain()) begin
# `uvm_fatal("PH_BADSYNC","unsync() called from a non-domain phase schedule node")
# end else if (target == null) begin
# `uvm_fatal("PH_BADSYNC","unsync() called with a null target domain")
# end else if (!target.is_domain()) begin
# `uvm_fatal("PH_BADSYNC","unsync() called with a non-domain phase schedule node as target")
# end else if (phase == null && with_phase != null) begin
# `uvm_fatal("PH_BADSYNC","unsync() called with null phase and non-null with phase")
# end else if (phase == null) begin
# // whole domain unsync - traverse this domain schedule from begin to end node and unsync each node
# int visited[uvm_phase]
# uvm_phase queue[$]
# queue.push_back(this)
# visited[this] = 1
# while (queue.size()) begin
# uvm_phase node
# node = queue.pop_front()
# if (node.m_imp != null) unsync(target,node.m_imp)
# foreach (node.m_successors[succ]) begin
# if (!visited.exists(succ)) begin
# queue.push_back(succ)
# visited[succ] = 1
# end
# end
# end
# end else begin
# // single phase unsync
# // this is a 2-way ('with') sync and we check first in case it is already there
# uvm_phase from_node, to_node
# int found_to[$], found_from[$]
# if(with_phase == null) with_phase = phase
# from_node = find(phase)
# to_node = target.find(with_phase)
# if(from_node == null || to_node == null) return
# found_to = from_node.m_sync.find_index(node) with (node == to_node)
# found_from = to_node.m_sync.find_index(node) with (node == from_node)
# if (found_to.size()) from_node.m_sync.delete(found_to[0])
# if (found_from.size()) to_node.m_sync.delete(found_from[0])
# end
#endfunction
#
#
#
#//-------------------------
#// Implementation - Jumping
#//-------------------------
#
#// set_jump_phase
#// ----
#//
#// Specify a phase to transition to when phase is complete.
#
#function void uvm_phase::set_jump_phase(uvm_phase phase)
# uvm_phase d
#
# if ((self.m_state < UVM_PHASE_STARTED) ||
# (self.m_state > UVM_PHASE_ENDED) )
# begin
# `uvm_error("JMPPHIDL", { "Attempting to jump from phase \"",
# get_name(), "\" which is not currently active (current state is ",
# self.m_state.name(), "). The jump will not happen until the phase becomes ",
# "active."})
# end
#
#
#
# // A jump can be either forward or backwards in the phase graph.
# // If the specified phase (name) is found in the set of predecessors
# // then we are jumping backwards. If, on the other hand, the phase is in the set
# // of successors then we are jumping forwards. If neither, then we
# // have an error.
# //
# // If the phase is non-existant and thus we don't know where to jump
# // we have a situation where the only thing to do is to uvm_fatal
# // and terminate_phase. By calling this function the intent was to
# // jump to some other phase. So, continuing in the current phase doesn't
# // make any sense. And we don't have a valid phase to jump to. So we're done.
#
# d = m_find_predecessor(phase,0)
# if (d == null) begin
# d = m_find_successor(phase,0)
# if (d == null) begin
# string msg
# $sformat(msg,{"phase %s is neither a predecessor or successor of ",
# "phase %s or is non-existant, so we cannot jump to it. ",
# "Phase control flow is now undefined so the simulation ",
# "must terminate"}, phase.get_name(), get_name())
# `uvm_fatal("PH_BADJUMP", msg)
# end
# else begin
# m_jump_fwd = 1
# `uvm_info("PH_JUMPF",$sformatf("jumping forward to phase %s", phase.get_name()),
# UVM_DEBUG)
# end
# end
# else begin
# m_jump_bkwd = 1
# `uvm_info("PH_JUMPB",$sformatf("jumping backward to phase %s", phase.get_name()),
# UVM_DEBUG)
# end
#
# m_jump_phase = d
#endfunction
#
#
#// jump
#// ----
#//
#// Note that this function does not directly alter flow of control.
#// That is, the new phase is not initiated in this function.
#// Rather, flags are set which execute_phase() uses to determine
#// that a jump has been requested and performs the jump.
#
#function void uvm_phase::jump(uvm_phase phase)
# set_jump_phase(phase)
# end_prematurely()
#endfunction
#
#
#// jump_all
#// --------
#function void uvm_phase::jump_all(uvm_phase phase)
# `uvm_warning("NOTIMPL","uvm_phase::jump_all is not implemented and has been replaced by uvm_domain::jump_all")
#endfunction
#
#
#
#// kill
#// ----
#
#function void uvm_phase::kill()
#
# `uvm_info("PH_KILL", {"killing phase '", get_name(),"'"}, UVM_DEBUG)
#
# if (self.m_phase_proc != null) begin
# self.m_phase_proc.kill()
# self.m_phase_proc = null
# end
#
#endfunction
#
#
#// kill_successors
#// ---------------
#
#// Using a depth-first traversal, kill all the successor phases of the
#// current phase.
#function void uvm_phase::kill_successors()
# foreach (self.m_successors[succ])
# succ.kill_successors()
# kill()
#endfunction
#
#
#
#
#// terminate_phase
#// ---------------
#
#function void uvm_phase::m_terminate_phase()
# if (self.phase_done != null)
# self.phase_done.clear(this)
#endfunction
#
#
#// print_termination_state
#// -----------------------
#
#function void uvm_phase::m_print_termination_state()
# uvm_root top
# uvm_coreservice_t cs
# cs = uvm_coreservice_t::get()
# top = cs.get_root()
# if (self.phase_done != null) begin
# `uvm_info("PH_TERMSTATE",
# $sformatf("phase %s outstanding objections = %0d",
# get_name(), self.phase_done.get_objection_total(top)),
# UVM_DEBUG)
# end
# else begin
# `uvm_info("PH_TERMSTATE",
# $sformatf("phase %s has no outstanding objections",
# get_name()),
# UVM_DEBUG)
# end
|
from numba import njit
import numpy as np
from stingray.pulse.pulsar import _load_and_prepare_TOAs, get_model
from scipy.interpolate import interp1d
from astropy.table import Table
# Precomputed 1/6 factor for the t^3 (f-ddot) term of the Taylor phase expansion.
ONE_SIXTH = 1 / 6
@njit(nogil=True, parallel=False)
def _hist1d_numba_seq(H, tracks, bins, ranges):
    """Accumulate a 1-D histogram of `tracks` into the pre-allocated `H`.

    Numba-compiled kernel; the plain index loop is intentional (it is the
    form numba compiles efficiently).

    Args:
        H: pre-zeroed 1-D output array of length `bins`; modified in place.
        tracks: 1-D array of values to histogram.
        bins: number of bins.
        ranges: two-element array (lo, hi); values outside are ignored.

    Returns:
        H, with the counts added.
    """
    delta = 1 / ((ranges[1] - ranges[0]) / bins)  # bins per unit of value
    for t in range(tracks.size):
        i = (tracks[t] - ranges[0]) * delta  # fractional bin index
        if 0 <= i < bins:
            H[int(i)] += 1
    return H
def histogram(a, bins, ranges):
    """Fast 1-D histogram, numba-backed drop-in for this module.

    Examples
    --------
    >>> x = np.random.uniform(0., 1., 100)
    >>> H, xedges = np.histogram(x, bins=5, range=[0., 1.])
    >>> Hn = histogram(x, bins=5, ranges=[0., 1.])
    >>> assert np.all(H == Hn)
    """
    counts = np.zeros(bins, dtype=a.dtype)
    return _hist1d_numba_seq(counts, a, bins, np.asarray(ranges))
@njit(parallel=True)
def _fast_phase_fddot(ts, mean_f, mean_fdot=0, mean_fddot=0):
    """Fractional pulse phase from a Taylor expansion of the spin frequency.

    phi(t) = f*t + fdot*t^2/2 + fddot*t^3/6, returned modulo 1.

    Args:
        ts: array of times from the reference epoch.
        mean_f: spin frequency.
        mean_fdot: first frequency derivative (default 0).
        mean_fddot: second frequency derivative (default 0).

    Returns:
        Array of fractional phases in [0, 1).
    """
    tssq = ts * ts
    phases = ts * mean_f + 0.5 * tssq * mean_fdot + ONE_SIXTH * tssq * ts * mean_fddot
    return phases - np.floor(phases)
def calculate_phase(events_time, model):
    """Compute fractional pulse phases for event times using F0/F1/F2
    taken from a PINT timing model.

    NOTE(review): assumes `model.F0.value` (etc.) is a numpy scalar/array —
    a plain Python float has no `.astype`; confirm against the PINT version
    in use.

    Args:
        events_time: array of event times from the model's reference epoch.
        model: PINT timing model exposing F0, F1, F2 parameters.

    Returns:
        Array of fractional phases in [0, 1).
    """
    return _fast_phase_fddot(
        events_time,
        model.F0.value.astype(np.double),
        model.F1.value.astype(np.double),
        model.F2.value.astype(np.double),
    )
def calculate_profile(phase, nbin=512, expo=None):
    """Fold phases into a pulse profile table.

    Args:
        phase: array of fractional phases in [0, 1).
        nbin: number of profile bins (default 512).
        expo: optional per-bin exposure array; when given, the "profile"
            column is exposure-corrected and an "expo" column is added.

    Returns:
        astropy Table with columns "phase", "profile", "profile_raw"
        (and "expo" when supplied).
    """
    raw_counts = histogram(phase.astype(float), bins=nbin, ranges=[0, 1])
    corrected = raw_counts if expo is None else raw_counts / expo
    result = Table(
        {"phase": np.linspace(0, 1, nbin + 1)[:-1], "profile": corrected, "profile_raw": raw_counts}
    )
    if expo is not None:
        result["expo"] = expo
    return result
def prepare_TOAs(mjds, ephem):
    """Build a PINT TOAs object for the given MJDs, with BIPM and GPS
    clock corrections disabled."""
    toas = _load_and_prepare_TOAs(mjds, ephem=ephem)
    toas.clock_corr_info.update(
        {"include_bipm": False, "include_gps": False})
    return toas
def get_phase_from_ephemeris_file(mjdstart, mjdstop, parfile, ntimes=1000, ephem="DE405"):
    """Get an interpolated pulse-phase function from a pulsar parameter file.

    Parameters
    ----------
    mjdstart, mjdstop : float
        Start and end of the time interval where we want the phase solution
    parfile : str
        Any parameter file understood by PINT (Tempo or Tempo2 format)

    Other parameters
    ----------------
    ntimes : int
        Number of time intervals to use for interpolation. Default 1000
    ephem : str
        Solar-system ephemeris passed to PINT. Default "DE405"

    Returns
    -------
    correction_mjd : function
        Function that accepts times in MJDs and returns the absolute pulse
        phase (integer plus fractional part) at those times, linearly
        interpolated over `ntimes` samples and extrapolated outside
        [mjdstart, mjdstop].
    """
    mjds = np.linspace(mjdstart, mjdstop, ntimes)
    toalist = prepare_TOAs(mjds, ephem)
    m = get_model(parfile)
    # abs_phase=True yields (integer turns, fractional turns).
    phase_int, phase_frac = np.array(m.phase(toalist, abs_phase=True))
    phases = phase_int + phase_frac
    correction_mjd_rough = interp1d(mjds, phases, fill_value="extrapolate")
    return correction_mjd_rough
|
import bpy
from mathutils import Matrix, Vector, Quaternion
import numpy as np
import math
# Try to make faster the retargeting.
# The algorithm is based in 2 parts.
# 1. From an unknown armature, transfer bone rotations to a known armature. Since sometimes this rotation transfer
# has rotations that mess with the associated mesh of the armature
# 2. Once armatures are equal, use a function to align 2 vectors. This function guarantees shortest rotations.
# The method is slow due to compute vectors, we need current position of bones and this can only be done using
# view_layer.update() which at the same time, slows down loops a lot!
# Ideas to improve and make it faster. Try to remove bvh skeleton between steps 1 and 2, when bvh skeleton is
# not needed anymore.
# Use matrix_world.to_translation() instead of get_bone_head_position. Matrix world compute all matrices. The
# problem with this function or equivalents is that the positions obtained are slightly different. This causes
# method to fail. The different positions probably comes from the precision of the matrices in Blender. Not sure.
# Source animation rig, retarget destination avatar, and a working copy of
# the destination skeleton used as an intermediate during the transfer.
# All three objects must already exist in the open .blend file.
source = bpy.data.objects["walking"]
target = bpy.data.objects["Avatar"]
target_cp = bpy.data.objects["Skel_cp"]
# Alternative: create the working copy on the fly instead of requiring it:
# me_cp = target.data.copy()
# target_cp = bpy.data.objects.new("Skel_cp", me_cp)
# target_cp.location = target.location
# bpy.context.scene.collection.objects.link(target_cp)
# bpy.context.view_layer.update()
# Bone correspondence file: one "target_bone source_bone" pair per line.
#file_bone_correspondences = "/home/jsanchez/Software/gitprojects/avatar/motion/retarget_motion_mine/bone_correspondance_cmu.txt"
#file_bone_correspondences = "/home/jsanchez/Software/gitprojects/avatar/bone_correspondance_mixamo.txt"
file_bone_correspondences = "/Users/jsanchez/Software/gitprojects/avatar/motion/rigs/mixamo.txt"
def rigid_transform_3D(A, B):
    """Find the rigid transform (R, t) mapping point set A onto B (Kabsch).

    Solves ``B_i ~= R @ A_i + t`` in the least-squares sense.

    The original implementation relied on ``np.matrix`` '*' semantics
    (deprecated in NumPy) and silently computed element-wise products for
    plain ndarrays; this version uses ``@`` and accepts any array-like.

    Args:
        A, B: (N, 3) array-likes of corresponding 3-D points.

    Returns:
        R: (3, 3) ndarray rotation matrix with det(R) == +1.
        t: (3,) ndarray translation vector.
    """
    A = np.asarray(A, dtype=float)
    B = np.asarray(B, dtype=float)
    assert A.shape == B.shape
    centroid_A = A.mean(axis=0)
    centroid_B = B.mean(axis=0)
    # Centre both clouds on their centroids.
    AA = A - centroid_A
    BB = B - centroid_B
    # Cross-covariance and its SVD give the optimal rotation.
    H = AA.T @ BB
    U, S, Vt = np.linalg.svd(H)
    R = Vt.T @ U.T
    # Special reflection case: flip the axis of least variance.
    if np.linalg.det(R) < 0:
        Vt[2, :] *= -1
        R = Vt.T @ U.T
    t = -R @ centroid_A + centroid_B
    return R, t
def read_text_lines(filename):
    """Read a bone-correspondence file.

    Each non-empty line maps a target bone to a source bone as two
    whitespace-separated tokens; a line with a single token maps to the
    placeholder "none". Blank lines are skipped.

    Args:
        filename: path to the correspondence text file.

    Returns:
        List of two-element lists ``[target_bone, source_bone_or_"none"]``.
    """
    list_bones = []
    # `with` guarantees the handle is closed (the original leaked it).
    with open(filename, "r") as text_file:
        for line in text_file:
            line_split = line.split()
            if len(line_split) == 2:
                list_bones.append([line_split[0], line_split[1]])
            elif line_split:  # single token (or malformed): no counterpart
                list_bones.append([line_split[0], "none"])
            # blank line: the original crashed with IndexError; skip it
    return list_bones
def find_bone_match(list_bones, bone_name):
    """Return the source-rig bone paired with `bone_name` in the
    correspondence list, or "none" when no pair exists."""
    return next((pair[1] for pair in list_bones if pair[0] == bone_name), "none")
def get_bone_head_position(obj, bone_name):
    """World-space head position of `obj`'s pose bone named `bone_name`."""
    return (obj.matrix_world @ Matrix.Translation(obj.pose.bones[bone_name].head)).to_translation()
def get_pose_bone_head_position(obj, pose_bone):
    """World-space head position of an already-resolved pose bone."""
    return (obj.matrix_world @ Matrix.Translation(pose_bone.head)).to_translation()
def get_pose_bone_tail_position(obj, pose_bone):
    """World-space tail position of an already-resolved pose bone."""
    return (obj.matrix_world @ Matrix.Translation(pose_bone.tail)).to_translation()
def matrix_scale(scale_vec):
    """Build a 4x4 scale matrix from a 3-component scale vector."""
    sx, sy, sz = scale_vec[0], scale_vec[1], scale_vec[2]
    return Matrix(((sx, 0, 0, 0),
                   (0, sy, 0, 0),
                   (0, 0, sz, 0),
                   (0, 0, 0, 1)))
def matrix_for_bone_from_parent(bone, ao):
    """Rest-pose matrix of `bone` expressed relative to its parent.

    Args:
        bone: pose bone (must have a parent).
        ao: armature object owning the bone.

    Returns:
        4x4 Matrix E_parent^-1 @ E, both taken from rest data (matrix_local).
    """
    eb1 = ao.data.bones[bone.name]
    E = eb1.matrix_local  # * Matrix.Scale(eb1.length,4)
    ebp = ao.data.bones[bone.name].parent
    E_p = ebp.matrix_local  # * Matrix.Scale(ebp.length,4)
    return E_p.inverted() @ E
def matrix_the_hard_way(pose_bone, ao):
    """Recursively rebuild a pose bone's armature-space matrix from its
    location/rotation/scale channels, without relying on Blender's
    evaluated `pose_bone.matrix` (which may be stale until a depsgraph
    update).

    Args:
        pose_bone: the pose bone to evaluate.
        ao: armature object owning the bone.

    Returns:
        4x4 armature-space Matrix for the bone.
    """
    if pose_bone.rotation_mode == 'QUATERNION':
        mr = pose_bone.rotation_quaternion.to_matrix().to_4x4()
    else:
        mr = pose_bone.rotation_euler.to_matrix().to_4x4()
    # Local basis: translation @ rotation @ scale of the channel values.
    m1 = Matrix.Translation(pose_bone.location) @ mr @ matrix_scale(pose_bone.scale)
    E = ao.data.bones[pose_bone.name].matrix_local
    if pose_bone.parent is None:
        return E @ m1
    else:
        # Compose with the parent's matrix via the rest-pose offset.
        m2 = matrix_the_hard_way(pose_bone.parent, ao)
        E_p = ao.data.bones[pose_bone.parent.name].matrix_local
        return m2 @ E_p.inverted() @ E @ m1
def matrix_world(armature_ob, bone_name):
    """Recursively compute a pose bone's armature-space matrix from
    `matrix_basis` and the rest-pose data, independent of Blender's
    pending depsgraph updates.

    Args:
        armature_ob: armature object.
        bone_name: name of the pose bone.

    Returns:
        4x4 armature-space Matrix for the bone.
    """
    local = armature_ob.data.bones[bone_name].matrix_local
    basis = armature_ob.pose.bones[bone_name].matrix_basis
    parent = armature_ob.pose.bones[bone_name].parent
    if parent is None:  # idiom fix: identity test instead of `== None`
        return local @ basis
    else:
        parent_local = armature_ob.data.bones[parent.name].matrix_local
        return matrix_world(armature_ob, parent.name) @ (parent_local.inverted() @ local) @ basis
def worldMatrix(ArmatureObject, Bone):
    """World-space matrix of a pose bone via Blender's evaluated
    `pose.bones[...].matrix`.

    NOTE(review): `pose_bone.matrix` only reflects recent channel edits
    after a depsgraph update (e.g. view_layer.update()); without one the
    returned value may be stale — confirm before relying on it in loops.
    """
    _bone = ArmatureObject.pose.bones[Bone]
    _obj = ArmatureObject
    return _obj.matrix_world @ _bone.matrix
def trans_coord_system(p1, o1, o2, M1, M2):
    """Transform point `p1` from the frame (origin `o1`, basis `M1`) into
    the frame (origin `o2`, basis `M2`).

    `M2.transposed()` returns a new matrix, so the caller's `M2` is never
    mutated — the original copied and transposed in place for the same
    reason.
    """
    return M2.transposed() @ (o1 - o2 + M1 @ p1)
def compute_rotation(poseBone, pt0, pt1, pt2):
    """Quaternion rotating the direction pt0->pt1 onto pt0->pt2, expressed
    in `poseBone`'s local frame (shortest rotation).

    Args:
        poseBone: pose bone whose local frame the rotation is computed in.
        pt0: common origin (bone head), world space.
        pt1: current bone tail, world space.
        pt2: desired bone tail, world space.

    Returns:
        mathutils Quaternion; identity when the vectors are parallel.
    """
    M1 = Matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    M2 = poseBone.matrix.copy()
    # Express both directions in the bone's local coordinate system.
    v1 = trans_coord_system(pt1, Vector((0, 0, 0)), pt0, M1, M2)
    v2 = trans_coord_system(pt2, Vector((0, 0, 0)), pt0, M1, M2)
    a = v1.normalized()
    b = v2.normalized()
    c = a.cross(b)
    # |a x b| = sin(theta); floating error can push it slightly above 1,
    # which would make asin() fail — clamp to the valid domain.
    v_magnitude = min(c.magnitude, 1)
    # Disambiguate theta in [0, pi/2] vs [pi/2, pi] from the chord length
    # between pt1 and pt2 (the 90-degree chord is sqrt(2)*l).
    l = np.linalg.norm(pt1 - pt0)
    dist = np.linalg.norm(pt1 - pt2)
    dist_max = math.sqrt(2 * l * l)
    if dist < dist_max:
        theta = math.asin(v_magnitude)
    else:
        # Bug fix: was `theta = theta = ...` (redundant double assignment).
        theta = math.pi - math.asin(v_magnitude)
    if c.magnitude > 0:
        # Build the quaternion from the axis-angle form by hand.
        axis = c.normalized()
        st2 = math.sin(theta / 2)
        q = Quaternion([math.cos(theta / 2), st2 * axis.x, st2 * axis.y, st2 * axis.z])
    else:
        q = Quaternion([1, 0, 0, 0])  # parallel vectors: identity
    return q
# # create target animation
# target_cp.animation_data_clear()
# get frames of action
# --- Pass 1 setup: frame range, bone correspondences, cached matrices ------
# Frame range of the source action drives the scene's playback range.
act_size = source.animation_data.action.frame_range
print(act_size)
nfirst = int(act_size[0])
nlast = int(act_size[1])
bpy.context.scene.frame_start = nfirst
bpy.context.scene.frame_end = nlast
bone_corresp = read_text_lines(file_bone_correspondences)
# Snapshot target pose-bone basis matrices (copies, since matrix_basis
# objects are mutated while animating).
matrices_target = {}
for bone in target.pose.bones:
    matrices_target[bone.name] = bone.matrix_basis.copy()
# Snapshot source rest-pose matrices.
matrices_source = {}
for bone in source.data.bones:
    matrices_source[bone.name] = bone.matrix_local.copy()
# Target reference points (rest position) used for rigid registration of
# the root orientation: hips, both upper legs, and neck.
trg_bone_loc_hips = get_bone_head_position(target_cp, "Hips")
trg_bone_loc_lefthips = get_bone_head_position(target_cp, "LeftUpLeg")
trg_bone_loc_righthips = get_bone_head_position(target_cp, "RightUpLeg")
trg_bone_loc_neck = get_bone_head_position(target_cp, "Neck")
# read source animation
# --- Pass 1: transfer the source animation onto the working-copy skeleton --
for f in range(nfirst, nlast):
    bpy.context.scene.frame_set(f)
    # Current world positions of the source rig's four reference bones.
    source_bone_name = find_bone_match(bone_corresp, "Hips")
    src_bone_loc_hips = get_bone_head_position(source, source_bone_name)
    source_bone_name = find_bone_match(bone_corresp, "LeftUpLeg")
    src_bone_loc_lefthips = get_bone_head_position(source, source_bone_name)
    source_bone_name = find_bone_match(bone_corresp, "RightUpLeg")
    src_bone_loc_righthips = get_bone_head_position(source, source_bone_name)
    source_bone_name = find_bone_match(bone_corresp, "Neck")
    src_bone_loc_neck = get_bone_head_position(source, source_bone_name)
    # Cache the source pose matrix of every mapped bone at this frame.
    matrix_os = {}
    for bone in target_cp.data.bones:
        bone_match = find_bone_match(bone_corresp, bone.name)
        # Bug fix: compare strings with !=, not `is not` (identity test that
        # only works by accident of interning; SyntaxWarning on Python 3.8+).
        if bone_match != "none":
            ebp = source.pose.bones[bone_match]
            matrix_os[bone_match] = matrix_the_hard_way(ebp, source)
    # Key the copy skeleton from the cached source matrices.
    for pb in target_cp.pose.bones:
        bone_name = find_bone_match(bone_corresp, pb.name)
        if bone_name != "none":  # bug fix: was `is not "none"`
            goal_bone = bone_name
            if pb.parent is None:
                # Root bone: copy the world-space hip translation into the
                # bone's local channel space.
                loc = source.matrix_world @ source.pose.bones["mixamorig:Hips"].head
                pb.location = target_cp.matrix_world.inverted() @ pb.bone.matrix_local.inverted() @ loc
                pb.keyframe_insert('location', frame=f, group=pb.name)
                # Root orientation via rigid registration of the four
                # reference points (np.mat: rigid_transform_3D uses matrix
                # '*' semantics).
                A = np.mat((trg_bone_loc_hips, trg_bone_loc_lefthips, trg_bone_loc_righthips, trg_bone_loc_neck))  # my skeleton
                B = np.mat((src_bone_loc_hips, src_bone_loc_lefthips, src_bone_loc_righthips, src_bone_loc_neck))  # bvh skeleton
                R, T = rigid_transform_3D(A, B)
                mR = Matrix([[R[0, 0], R[0, 1], R[0, 2]],
                             [R[1, 0], R[1, 1], R[1, 2]],
                             [R[2, 0], R[2, 1], R[2, 2]]])
                mR.resize_4x4()
                # Express the world rotation in the bone's rest frame.
                boneRefPoseMtx = pb.bone.matrix_local.copy()
                rotMtx = boneRefPoseMtx.inverted() @ mR @ boneRefPoseMtx
                pb.rotation_mode = 'XYZ'
                pb.rotation_euler = rotMtx.to_euler()
                pb.keyframe_insert('rotation_euler', frame=f, group=pb.name)
            else:
                # We cannot assign pose_bone.matrix directly: Blender's
                # dependency graph lags behind and would use stale numbers.
                mp = matrix_the_hard_way(pb.parent, target_cp) @ matrix_for_bone_from_parent(pb, target_cp)
                m2 = mp.inverted() @ matrix_os[goal_bone]
                loc, rot, scale = m2.decompose()
                if 'QUATERNION' == pb.rotation_mode:
                    pb.rotation_quaternion = rot
                    pb.keyframe_insert('rotation_quaternion', frame=f, group=pb.name)
                else:
                    pb.rotation_euler = rot.to_euler(pb.rotation_mode)
                    pb.keyframe_insert('rotation_euler', frame=f, group=pb.name)
print("last debug")
print(rot)
# Empty objects used to visualise the three reference points while debugging.
ept0 = bpy.data.objects["ept0"]
ept1 = bpy.data.objects["ept1"]
ept2 = bpy.data.objects["ept2"]
# --- Pass 2: copy rotations from the working copy onto the real avatar ----
# now skeletons are equal (same name bones, same length bones)
#for f in range(nfirst, nlast):
for f in range(1, 2):  # debug: single frame only
    # set target in rest position
    for bone in target.pose.bones:
        bone.rotation_mode = 'XYZ'
        bone.rotation_euler = (0, 0, 0)
    bpy.context.scene.frame_set(f)
    pb_list = ["Hips", "LowerBack", "Spine", "Spine1", "RightShoulder", "RightArm", "RightForeArm", "RightHand"]
    # for pb in target.pose.bones:
    for pbname in pb_list:
        pb = target.pose.bones[pbname]
        print(pbname)
        # bpy.context.view_layer.update()
        pb_cp = target_cp.pose.bones[pb.name]
        if pb.parent is None:
            # Root bone: copy translation and rotation channels directly.
            pb.location = pb_cp.location
            pb.keyframe_insert('location', frame=f, group=pb.name)
            pb_cp.rotation_mode = 'XYZ'
            pb.rotation_mode = 'XYZ'
            pb.rotation_euler = pb_cp.rotation_euler
            pb.keyframe_insert('rotation_euler', frame=f, group=pb.name)
        else:
            if pb.children:
                # The same three reference points are computed four ways
                # (pose data, matrix_world, matrix_the_hard_way, worldMatrix)
                # to compare their results; only the LAST assignment feeds
                # compute_rotation below.
                # recalculate rotations to avoid strange mesh deformations
                pt0 = get_pose_bone_head_position(target, pb)
                pt1 = get_pose_bone_tail_position(target, pb)
                pt2 = get_pose_bone_tail_position(target_cp, pb_cp)
                print("points blender update")
                print(pt0)
                print(pt1)
                print(pt2)
                pb_child = pb.children[0]  # we assume each bone only have one children !!
                pb_cp_child = pb_cp.children[0]
                pt0 = matrix_world(target, pb.name).to_translation()
                pt1 = matrix_world(target, pb_child.name).to_translation()
                pt2 = matrix_world(target_cp, pb_cp_child.name).to_translation()
                print("points matrix_world update")
                print(pt0)
                print(pt1)
                print(pt2)
                pb_child = pb.children[0]  # we assume each bone only have one children !!
                pb_cp_child = pb_cp.children[0]
                pt0 = matrix_the_hard_way(pb, target).to_translation()
                pt1 = matrix_the_hard_way(pb_child, target).to_translation()
                pt2 = matrix_the_hard_way(pb_cp_child, target_cp).to_translation()
                print("points matrix_the_hard_way update")
                print(pt0)
                print(pt1)
                print(pt2)
                pb_child = pb.children[0]  # we assume each bone only have one children !!
                pb_cp_child = pb_cp.children[0]
                pt0 = worldMatrix(target, pb.name).to_translation()
                pt1 = worldMatrix(target, pb_child.name).to_translation()
                pt2 = worldMatrix(target_cp, pb_cp_child.name).to_translation()
                print("points worldMatrix update")
                print(pt0)
                print(pt1)
                print(pt2)
                # Move the debug empties to the reference points.
                ept0.location = pt0
                ept1.location = pt1
                ept2.location = pt2
                # Shortest rotation aligning this bone with its copy.
                q2 = compute_rotation(pb, pt0, pt1, pt2)
                pb.rotation_mode = 'QUATERNION'
                pb.rotation_quaternion = q2
                pb.keyframe_insert('rotation_quaternion', frame=f, group=pb.name)
|
from hermes.engines.databases import *
from datetime import datetime, date
import time
import simplejson
from pprint import pprint
import urllib
from sqlobject.sqlbuilder import *
from operator import itemgetter
import logging
import hermes.lib.util as Util
import sys
import re
def sortMultipleKeys(items, columns):
    """Sort a list of dicts by multiple keys.

    Each entry in `columns` is a dict key; a leading '-' means descending
    order for that key. Based on
    http://stackoverflow.com/questions/1143671

    Two fixes over the original:
    - Python 3 compatibility: the removed `cmp` builtin and the removed
      `sorted(cmp=...)` keyword are replaced with a local comparison helper
      and `functools.cmp_to_key`.
    - Bug fix: the original returned 0 as soon as the FIRST key compared
      equal, so secondary sort keys were never consulted; ties now fall
      through to the next key.

    Args:
        items: list of dicts (or other itemgetter-indexable objects).
        columns: iterable of key names, optionally '-'-prefixed.

    Returns:
        New sorted list; `items` is left unmodified.
    """
    from functools import cmp_to_key

    def _cmp(a, b):
        # Python 3 replacement for the removed cmp() builtin.
        return (a > b) - (a < b)

    comparers = [
        (itemgetter(col[1:].strip()), -1) if col.startswith('-')
        else (itemgetter(col.strip()), 1)
        for col in columns
    ]

    def comparer(left, right):
        for fn, mult in comparers:
            result = _cmp(fn(left), fn(right))
            if result:
                return mult * result
        return 0  # all keys equal

    return sorted(items, key=cmp_to_key(comparer))
def selectSpecial(mainTableClass, joinExp, fields, whereExp, **kwargs):
    """Run a hand-built sqlbuilder query and return its rows.

    Args:
        mainTableClass: SQLObject class whose connection is used (the
            "FROM" table of the query).
        joinExp: SQLExpression join object.
        fields: list of SQLExpression fields to return,
            e.g. [Datastore.q.id, SearchAttributes.q.fieldName].
        whereExp: SQLExpression where clause.
        **kwargs: extra Select() options (distinct=True, groupBy=..., see
            http://www.sqlobject.org/SQLBuilder.html#select).

    Returns:
        List of row tuples; for single-field selects the rows are
        flattened to a list of scalars.

    Example:
        selectSpecial(StoreAttributes,
                      joinExp=LEFTJOINOn(StoreAttributes, DataStore,
                                         StoreAttributes.q.datastore == DataStore.q.id),
                      fields=[StoreAttributes.q.fieldValue],
                      whereExp=AND(DataStore.q.dataType == 'record',
                                   StoreAttributes.q.fieldName == 'Prefix'),
                      distinct=True)
    """
    conn = mainTableClass._connection
    query = Select(fields, where=whereExp, join=joinExp, **kwargs)
    rows = conn.queryAll(conn.sqlrepr(query))
    if len(fields) < 2:
        # Flatten single-column results to a plain list of values.
        rows = [row[col] for row in rows for col in range(len(fields))]
    return rows
class HermesLegend(SQLObject) :
    """
    Hermes Legend
    ==========================
    The Hermes Legend connects the system template to the hermes map
    +-------------------+                  +----------------------+
    |     HermesMap     |                  |   System Template    |
    |-------------------|                  |----------------------|
    |                   |  +--------------+  |                    |
    |                   | <----+ | Legend | <----+ |              |
    |                   | +----> |        | +----> |              |
    |                   |  +--------------+  |                    |
    +-------------------+                  +----------------------+
    """
    class sqlmeta :
        table="hermes_legend"
    # id of the item in the external system this legend maps to
    systemItemId = IntCol(length=11, default=None)
    # NOTE(review): datetime.now() is evaluated once at class-definition time,
    # so the column default is the import time, not the row-creation time --
    # confirm this is intended (a callable default would re-evaluate per row).
    modified = DateTimeCol(default=datetime.now())
    # JSON-encoded legend fields (see saveLegend)
    details = StringCol()
    current = BoolCol(default=True)
    systemType = ForeignKey('HermesSystems')
    # Joins
    # Each legend has one map
    maps = RelatedJoin('HermesMap', joinColumn='legend_id', otherColumn='map_id', intermediateTable='hermes_map_to_hermes_legend', createRelatedTable=True)
    def _set_client(self, value=None):
        # Store the client identifier on the class/instance for later use.
        self.client = value
    def _set_client_connection(self, value=None):
        # Store the {'read': conn, 'write': conn} connection pair.
        self.client_connection = value
    @classmethod
    def _set_logging(self):
        # Initialise the class-level logger.
        self.logging = logging.getLogger('hermes.hermeslegend')
    """
    def _get_mapName(self) :
        ""
        Retrieve the Legend's Map's Name.
        ** returns map name **
        ""
        mapName = None
        if len(self.maps) > 0:
            return self.maps[0].name
        return mapName
    """
    def __repr__(self) :
        # Debug representation: id, system item id, details, current flag, system type.
        return "<HermesLegend('%i', '%s', '%s', '%s', '%s')>" % (self.id, self.systemItemId, self.details, self.current, self.systemType)
    @classmethod
    def saveLegend( self, legendFields, mapId=None, systemItemId=None, systemType=None, id=0) :
        """
        Save legend, find map of mapId, then add the legend to the map
        - id : required
        OR
        - Map id : required
        - systemItemId : required

        legendFields is JSON-serialised into the `details` column.
        Returns the saved legend's id, or 0 when the required arguments
        were not supplied.
        """
        # write Connection
        self._connection = self.client_connection['write']
        systemLegendId = 0
        # Map id and system id are required if the id was not passed
        if id < 1 and (mapId is None or systemItemId is None or systemType is None) :
            return systemLegendId
        legendDict = {
            'details' : simplejson.dumps(legendFields),
            'current' : True
        }
        if id > 0 :
            # Explicit legend id given: update that legend in place.
            systemLegend = HermesLegend.get(id)
            legendDict['modified'] = datetime.today()
            systemLegend.set(**legendDict)
            if mapId is None:
                mapId = systemLegend.maps[0].id
        else :
            """
            Search by mapId and systemItemId to ensure there is only one legend per map and systemItemId
            """
            existingLegends = list(HermesMap.get(mapId).importLegendSQL.filter(AND(
                HermesLegend.q.systemItemId == int(systemItemId),
                HermesLegend.q.systemType == int(systemType))))
            # if a legend matches by mapId, systemItemId, and systemType
            if len(existingLegends) > 0 :
                systemLegend = HermesLegend.get(existingLegends[0].id)
                legendDict['modified'] = datetime.today()
                existingLegends[0].set(**legendDict)
            else :
                legendDict['systemItemId'] = int(systemItemId)
                legendDict['systemType'] = int(systemType)
                # else add new legend
                systemLegend = HermesLegend(**legendDict)
        # Get New legends Id
        systemLegendId = systemLegend.id
        """
        Add legend to map if not associated
        """
        if mapId :
            legendMapIds = [ slmap.id for slmap in systemLegend.maps ]
            if int(mapId) not in legendMapIds :
                # Select Map
                legendMap = HermesMap.get(int(mapId))
                # Select Legend
                legend = HermesLegend.get(systemLegendId)
                # Add map to legend
                legend.addHermesMap(legendMap)
        return systemLegendId
    @classmethod
    def deleteLegend(self, id = None) :
        """
        Delete a Map Legend
        **Returns:**
        Bool: True/False
        **Usage:**
        Removes the connection of the Map to the system item
        """
        # write Connection
        self._connection = self.client_connection['write']
        if id and int(id) > 0:
            try:
                # remove map
                HermesLegend.delete(int(id))
                return True
            except:
                # NOTE(review): bare except silently swallows all errors and
                # falls through to return False -- deliberate best-effort.
                pass
        return False
    @classmethod
    def getLegend( self, id=None, systemItemId=None, systemType=None, mapId=None ):
        """
        Retrieve a Legend
        **Params:**
        - id : required
        ** Optional Params if you don't have the legendId:**
        - systemItemId
        - systemType
        - mapId
        **Returns:**
        A Dict:
        - legend_id : the legend id
        - legend : dict of the legend
        - fullname : fullname of the map

        Returns False when no matching legend is found.
        """
        # read Connection
        self._connection = self.client_connection['read']
        print 'get legend ( systemItemId : %s, mapId : %s, legendId : %s)' % (systemItemId, mapId, id)
        if id :
            """
            If they passed in the legend id
            """
            legend = HermesLegend.get(int(id))
            map = legend.maps[0]
            dbLegend = simplejson.loads(legend.details, encoding='utf-8')
            return {
                'legend_id' : int(legend.id),
                'details' : dbLegend,
                'name' : map.name,
                'current' : legend.current,
                'hermes_system' : {
                    'id' : legend.systemType.id,
                    'short' : legend.systemType.shortName,
                    'name' : legend.systemType.name,
                    'status': legend.systemType.status
                }
            }
        elif systemItemId and systemType and mapId:
            """
            If they passed in the map id, grab legend by that map id and system id
            """
            map = HermesMap.get(int(mapId))
            # Returns on the first matching legend.
            for legend in map.importLegendSQL.filter(AND(
                HermesLegend.q.systemItemId == int(systemItemId),
                HermesLegend.q.systemType == int(systemType)
                )):
                dbLegend = simplejson.loads(legend.details, encoding='utf-8')
                return {
                    'legend_id' : int(legend.id),
                    'details' : dbLegend,
                    'name' : map.name,
                    'current' : legend.current,
                    'hermes_system' : {
                        'id' : legend.systemType.id,
                        'short' : legend.systemType.shortName,
                        'name' : legend.systemType.name,
                        'status': legend.systemType.status
                    }
                }
        return False
    @classmethod
    def getScopedLegends( self, systemItemId=None, mapSystem=None, systemType=None, outdated=None, includeMaps=False, mapType=None ):
        """
        Select the Legends
        **Optional Params: **
        You may pass in the following parameters:
        * systemItemId : int
        * mapSystem : the system that the map is connected to
        * systemType : system short name or id that the legend points to
        * outdated : Boolean
        * includeMaps : also emit placeholder entries (id 0) for maps that
          have no legend yet, when systemItemId is given
        * mapType : restrict to maps of this type
        ** Returns: **
        { <SYSTEM SHORT NAME>: [{'current': bool,
        'system_id': int,
        'id': <LEGEND ID>,
        'name': <MAP NAME>,
        'map_id': <MAP ID>,
        'system_id': <SYSTEM ID>,
        'system_name': <SYSTEM NAME>}]
        Example:
        [{'banner': [{'current': False,
        'system_id': 11,
        'id': 3,
        'name': 'Banner Monthly Test Outdated',
        'map_id': 4,
        'system_id': 3,
        'system_name': 'Banner'}]
        """
        # read Connection
        self._connection = self.client_connection['read']
        print 'get legend ( systemItemId : %s, system : %s , outdated : %s )' % (systemItemId, systemType, outdated)
        #legends = []
        legends = {}
        # Only consider systems that are active or locked.
        query = OR(
            HermesSystems.q.status == 'active',
            HermesSystems.q.status == 'locked',
        )
        if mapSystem :
            # mapSystem may be an id (int-able) or a short name string.
            try :
                query = AND(
                    query,
                    HermesSystems.q.id == int(mapSystem)
                )
            except:
                query = AND(
                    query,
                    HermesSystems.q.shortName == mapSystem
                )
        # Loop systems, ordered by name
        for system in HermesSystems.select(query).orderBy(['name']) :
            filterMap = AND(
                HermesMap.q.status == 'active',
                HermesMap.q.current == True
            )
            if mapType :
                # catch incase they do not pass valid string as mapType
                try:
                    filterMap = AND(
                        filterMap,
                        HermesMap.q.type == mapType.lower()
                    )
                except:
                    pass
            # loop over each system children maps, ordered by name
            for map in system.childrenMaps.filter(filterMap).orderBy(['name']) :
                noLegends = True
                filterQuery = None
                if systemItemId :
                    try:
                        # Filter the legends by the system id if passed to function
                        filterQuery = HermesLegend.q.systemItemId == int(systemItemId)
                    except:
                        pass
                if systemType :
                    if filterQuery :
                        filterQuery = AND(
                            filterQuery,
                            HermesLegend.q.systemType == int(systemType)
                        )
                    else :
                        filterQuery = HermesLegend.q.systemType == int(systemType)
                # loop over each system's map's legends
                for legend in map.importLegendSQL.filter(filterQuery) :
                    # When only outdated legends are wanted, a current legend
                    # stops processing of this map's legends entirely.
                    if outdated and (outdated == True or outdated == "1" or outdated == "true") and legend.current :
                        break
                    """
                    safeToAdd = True
                    # check if older version exists
                    if system.shortName in legends:
                        for mapDict in legends[system.shortName]:
                            self.logging.info("legend system: %s" % (legend.systemItemId))
                            if map.name == mapDict.get('name') and legend.systemItemId == mapDict.get('system_item_id') :
                                safeToAdd = False
                    # If the legend has not been added
                    if safeToAdd :
                    """
                    if system.shortName not in legends:
                        legends[system.shortName] = []
                    legends[system.shortName].append({
                        'id' : int(legend.id),
                        'name' : map.name,
                        'system_id' : int(system.id),
                        'system_name' : system.name,
                        'system_item_id' : int(legend.systemItemId),
                        'map_id' : int(map.id),
                        'current' : legend.current,
                        'type' : map.type
                    })
                    noLegends = False
                """
                To display maps that have not been assigned a legend
                """
                if includeMaps and systemItemId and noLegends:
                    """
                    safeToAdd = True
                    if system.shortName in legends:
                        for mapDict in legends[system.shortName]:
                            if map.name == mapDict.get('name') and int(systemItemId) == mapDict.get('system_item_id') :
                                safeToAdd = False
                    If the legend has not been added
                    if safeToAdd :
                    """
                    # Placeholder entry (id 0, current False) for a map with no legend.
                    if system.shortName not in legends:
                        legends[system.shortName] = []
                    legends[system.shortName].append({
                        'id' : 0,
                        'name' : map.name,
                        'system_id' : int(system.id),
                        'system_name' : system.name,
                        'system_item_id': int(systemItemId),
                        'map_id' : int(map.id),
                        'current' : False,
                        'type' : map.type
                    })
        return legends
class HermesMap(SQLObject) :
    """
    Hermes Map
    ==========================
    Each map has 1 system, but may have multiple legends, and multiple dataStoreItems.
    +--------------+
    |    System    |                        +----------------+
    |--------------|                        |     Legend     |
    |              |                        |----------------|
    |              |  +-----------------+  +-------+ |       |
    +--------------+  |       Map       |  |  +-----> |      |
    ^ +               |-----------------|  |  |  +----------------+
    | |               |                 | <-------+ |
    | +---------->    |                 | +---------+
    +------------+    |                 | <-------+ |  +----------------+
    +-----------------+                 |  |     Legend     |
    ^ +               |                 |  |----------------|
    | |               |          +-----> |                  |
    | |               +-------+  |       |                  |
    | |                          +----------------+
    | |
    | |               +--------------------+
    | |               |     DataStore      |
    | |               |--------------------|
    | |               |                    |
    | +------>        |                    |
    +--------+        |                    |
                      +--------------------+
    """
    class sqlmeta:
        table="hermes_map"
    name = StringCol(length=50, default=None)
    # NOTE(review): datetime.now() is evaluated once at class-definition time,
    # so the default is the import time, not the row-creation time.
    modified = DateTimeCol(default=datetime.now())
    # JSON-encoded dict: delimiter / action / systemFieldNames / systemDefinitions (+ extras)
    details = StringCol()
    status = StringCol(length=20, default="active")
    type = StringCol(length=30, default="record")
    current = BoolCol(default=True)
    # Joins
    importLegend = RelatedJoin('HermesLegend', joinColumn='map_id', otherColumn='legend_id', intermediateTable='hermes_map_to_hermes_legend', createRelatedTable=True)
    importLegendSQL = SQLRelatedJoin('HermesLegend', joinColumn='map_id', otherColumn='legend_id', intermediateTable='hermes_map_to_hermes_legend', createRelatedTable=False)
    systemType = ForeignKey('HermesSystems')
    dataStoreItems = SQLMultipleJoin( 'DataStore', joinColumn='map' )
    def _set_client(self, value=None):
        # Store the client identifier for later use.
        self.client = value
    def _set_client_connection(self, value=None):
        # Store the {'read': conn, 'write': conn} connection pair.
        self.client_connection = value
    @classmethod
    def _set_logging(self):
        # Initialise the class-level logger.
        self.logging = logging.getLogger('hermes.hermesmap')
    def __repr__(self):
        # Debug representation: id, system type id, name, details.
        return "<HermesMap('%i','%s','%s','%s')" % (self.id, self.systemType.id, self.name, self.details)
    @classmethod
    def validateMap(self, system, name, systemFieldNames, systemDefinitions, id = 0, action='modify', status="active", details=None, mapType='record') :
        """
        Validate that a map's field names contain enough search-key
        information (id / system_item_id, or prefix+code+name, or name).

        Delegates to the system plugin's own validateMap when it defines
        one; otherwise applies the generic search-key regex check below.
        Returns True when valid, False otherwise.
        """
        self._connection = self.client_connection['read']
        #try:
        client = None
        if hasattr(self, 'client'):
            client = self.client
        systemDict = HermesSystems.getSystems(filter = system, returnMeta=True)
        if len(systemDict) > 0:
            systemId = systemDict[0]['id']
            systemName = systemDict[0]['short']
        # Dynamically import the system's plugin module by short name.
        name = "%s.%s" % ('plugins', systemName)
        mod = __import__(name, globals(), locals(), [name], -1)
        #plugin = eval("mod.%s" % systemName.title())
        plugin = eval("mod.%s('%s')" % (systemName.title(), client))
        searchKeys = {
            'prefix' : None,
            'code' : None,
            'name' : None,
            'id' : None,
            'system_item_id' : None,
        }
        if hasattr(plugin, 'validateMap' ) :
            #plugin._set_logging()
            #plugin.__init__(client=client)
            return plugin.validateMap(system, name, systemFieldNames, systemDefinitions, searchKeys, id = 0, action='modify', status="active", details=None, mapType='record')
        else :
            # Build one regex matching each search key either as the whole
            # attribute name or as a ": key" suffix (case-insensitive).
            zz = ['^%s$|: %s$' % (searchKey,searchKey) for searchKey in searchKeys.iterkeys()]
            # Check if attribute is "name" or ends with ": name"
            p = re.compile(r'%s' % '|'.join(zz), re.I)
            for a in systemFieldNames.iterkeys() :
                x = p.search(a)
                if x:
                    i = a[x.span()[0]: x.span()[1]].replace(': ', '').lower()
                    searchKeys[i] = x.string
            print "systemFieldNames"
            print systemFieldNames
            print "Search Keys"
            print searchKeys
            print "Search Key prefix"
            print searchKeys.get('prefix', None)
            """
            Store the attributes dict in the raw data column, convert None to "", convert ints and floats to string
            """
            attributesDict = {}
            for key, value in systemFieldNames.iteritems():
                #self.logging.info('attribute type: %s' % (type(value)))
                if value is None:
                    value = ""
                if type(value) is int or type(value) is float:
                    value = str(value)
                attributesDict[str(key.encode('utf-8'))] = value
            if searchKeys.get('id', None) or searchKeys.get('system_item_id', None) :
                """
                Add if it has a Id or system_item_id attribute
                """
                fieldKey = None
                if searchKeys.get('system_item_id', None) :
                    fieldKey = 'system_item_id'
                elif searchKeys.get('id', None) :
                    fieldKey = searchKeys['id']
                if fieldKey :
                    return True
            elif searchKeys.get('prefix', None) and searchKeys.get('code', None) and searchKeys.get('name', None) :
                """
                Add if it has a prefix / code / name
                """
                return True
            elif searchKeys.get('name', None) :
                """
                Add if it has a Name attribute
                """
                return True
            else:
                self.logging.info("addItem but it failed man")
                return False
    @classmethod
    def saveMap(self, system, name, systemFieldNames, systemDefinitions, id = 0, action='modify', status="active", details=None, mapType='record') :
        """
        Save a Hermes Map
        If a map of same name already exists, it will copy the legends of that map and mark them as current = False
        You may pass in the system shortName or the system id when you save a map.

        Returns the saved map's id, or False when the system cannot be
        resolved.
        """
        # write Connection
        self._connection = self.client_connection['write']
        self._connection.expireAll()
        id = int(id)
        systemId = 0
        systemName = None
        systemDict = HermesSystems.getSystems(filter = system, returnMeta=True)
        if len(systemDict) > 0:
            systemId = systemDict[0]['id']
            systemName = systemDict[0]['short']
            systemDict = systemDict[0]
        else :
            return False
        mapType = mapType.lower()
        print systemDict['meta']
        # Set Connection to the same as this function
        HM = HermesMap
        HM._connection = self._connection
        print "id : ", id
        # if id passed edit that map
        if id > 0 :
            existingMap = HM.get(id)
        else :
            params = []
            # else grab by system
            params.append(HermesMap.q.systemType == systemId)
            # grab by name and system if a Multiple Map System
            if systemDict.get('meta', None) and systemDict['meta'].get('mms','0') == '1':
                params.append(HermesMap.q.name == name)
            # Filter by map type
            params.append(HermesMap.q.type == mapType)
            print self.logging
            self.logging.info("test logging")
            self.logging.info("%s" % HermesMap.select(AND(
                *params
            ), orderBy = ['-name']))
            print "select map from db that matches filter"
            print HermesMap.select(AND(
                *params
            ), orderBy = ['-name'])
            # grab latest map to copy templates to new map
            latestMaps = HermesMap.select(AND(
                *params
            ), orderBy = ['-name'])
            if latestMaps.count() > 0 :
                print "found latestMaps"
                if details and 'external-map-id' in details:
                    # Prefer matching on the external-map-id detail when given.
                    for latestMap in latestMaps:
                        latestMapDetails = simplejson.loads(latestMap.details, encoding='utf-8')
                        if details['external-map-id'] == latestMapDetails['external-map-id'] :
                            print "found map by way of external-map-id"
                            existingMap = latestMap
                            id = existingMap.id
                else :
                    print "found map by way of grabbing first one"
                    existingMap = latestMaps[0]
                    id = existingMap.id
        """
        Create Dict for adding a new Map
        """
        print type(systemFieldNames), systemFieldNames
        # NOTE(review): the parsed result is discarded here -- looks like
        # this only validates that systemFieldNames is JSON; confirm intent.
        if type(systemFieldNames) != dict:
            simplejson.loads(systemFieldNames)
        newTranslatorMapDetails = {
            "delimiter" : "comma",
            "action" : action,
            "systemFieldNames" : systemFieldNames, #This defines the fieldnames of the columns, it may be used more than once, this is a lookup key.
            "systemDefinitions" : systemDefinitions # This maps the column positions to a systemFieldName, the order represents the columns in the system 1:1 ratio
        }
        if details is not None :
            newTranslatorMapDetails.update(details)
        newTranslatorMap = {
            'systemType' : systemId,
            'name' : name,
            'modified' : datetime.now(),
            'details' : simplejson.dumps(newTranslatorMapDetails),
            'type' : mapType,
            'current' : True
        }
        if status:
            # Catch that they must pass in active / inactive
            if status == 'active' or status is True:
                newTranslatorMap['status'] = 'active'
            else:
                newTranslatorMap['status'] = 'inactive'
        self.logging.info("map id: %s " % id)
        if id > 0 :
            print "id: ", id
            # if id passed, updated existing map
            existingMap.set(**newTranslatorMap)
            # update map's legends to not be current
            for legend in existingMap.importLegendSQL:
                """
                Find better way to do this, must update connection to the write connection
                """
                legend._connection = self._connection
                legend.set(**{'current' : False})
            # remove all datastore items that belong to this map
            for datastore in existingMap.dataStoreItems:
                DataStore.deleteItem(datastore.id)
            newMap = existingMap
        else :
            # Add new map
            print "add new map"
            newMap = HermesMap(**newTranslatorMap)
        self.client_connection['read'].expireAll()
        return newMap.id
    @classmethod
    def getMap(self, id=None) :
        """
        Get a Specific Map Details, including the count of how many legends are pointing to that map (without timestamp)
        **Return a dict:**
        * id : id of map
        * name : name
        * legend : map's legend
        * map : map's fileMap
        * system_type : id of system type
        * action : map's action - currently only 'modify'
        * has_legends : count of the legends referencing this map name

        Returns False when no usable id was given.
        """
        # read Connection
        self._connection = self.client_connection['read']
        print "connection", self._connection
        # Accept a dict payload with an 'id' key as well as a plain id.
        if type(id) is dict:
            id = id.get('id', 0)
        if id and int(id) > 0:
            """
            If they passed in a map id > 0
            """
            map = HermesMap.get(int(id))
            print "mapDetails", map.details
            dbLegend = simplejson.loads(map.details, encoding='utf-8')
            print "dbLegend", dbLegend
            # 'details' carries everything except the three well-known keys,
            # which are surfaced as top-level entries below.
            details = dbLegend.copy()
            del details['systemFieldNames']
            del details['systemDefinitions']
            del details['action']
            mapDict = {
                'id' : str(map.id),
                'name' : map.name,
                'systemFieldNames' : dbLegend['systemFieldNames'],
                'systemDefinitions' : dbLegend['systemDefinitions'],
                'system_type' : map.systemType.id,
                'action' : dbLegend['action'],
                'type' : map.type,
                'has_legends' : len(map.importLegend),
                'status' : map.status,
                'current' : map.current,
                'details' : details
            }
            print "mapDict", mapDict
            return mapDict
        """
        If they didn't pass a map id or map name
        """
        return False
    @classmethod
    def getScopedMaps(self, active=True, shortName=None, meta=None, filter=None, groupBySystem=False):
        """
        Get a list of maps, ordered by system
        **Params**
        - active : bool
        - shortName : short name to only grab maps from that system
        - filter : allows you to pass a key/value pair to search the map
        - groupBySystem : return the maps organized by the maps, includes a count of the amount of items available to map to
        **Example of Filter Argument:**
        Plugin can pass filter={'external-map-id' : proposal['approvalProcessID']} to find the map that connects to the external-map-id
        ** Returns: **
        List of Maps
        """
        # read Connection
        self._connection = self.client_connection['read']
        scopedMaps = []
        query = OR(
            HermesSystems.q.status == 'active',
            HermesSystems.q.status == 'locked',
        )
        if shortName :
            """
            If they passed in a shortName, then only return the maps that belong to that shortName
            """
            query = AND(
                query,
                HermesSystems.q.shortName == shortName)
        for system in HermesSystems.select(query, orderBy = HermesSystems.q.name) :
            systemMaps = []
            # loop the maps belonging to this system
            filterQuery = None
            print "Active passed for getScopedMaps: ", active
            if active:
                filterQuery = AND(
                    HermesMap.q.status == 'active',
                    HermesMap.q.current == True
                )
            for map in system.childrenMaps.filter(filterQuery).orderBy(['name', 'type']):
                """
                filtering = True; when you don't want to add the map
                filtering = False; when you want to add the map
                """
                filtering = False
                #self.logging.info("filter: %s" % filter)
                print "filter", filter
                if filter is not None:
                    details = simplejson.loads(map.details)
                    #self.logging.info("details: %s" % details)
                    for key, value in filter.iteritems():
                        # if still filtering out the map
                        print "key", key
                        if not filtering :
                            # if the filter key is in the details and matches
                            if key in details :
                                print "found key", type(details[key])
                                print "search for ", value, type(value)
                                if type(details[key]) is str :
                                    if str(details[key]) != str(value):
                                        filtering = True
                                elif type(details[key]) is int :
                                    if details[key] != int(value):
                                        filtering = True
                            elif key == 'type':
                                # 'type' filters on the map column, not details.
                                if map.type != str(value):
                                    filtering = True
                #self.logging.info("filter: %s" % filtering)
                # if filtering is false / You want to add the map
                if not filtering :
                    mapDict = {
                        'name' : map.name,
                        'id' : map.id,
                        'system_id' : int(system.id),
                        'status' : map.status,
                        'current' : map.current,
                        'type' : map.type
                    }
                    if groupBySystem :
                        """
                        When grouping by system, return the meta data for each map
                        """
                        meta = simplejson.loads(map.details, encoding='utf-8')
                        del meta['systemFieldNames']
                        del meta['systemDefinitions']
                        del meta['action']
                        mapDict['meta'] = meta
                        if meta :
                            fileSummary = {}
                            mapDict['system_name'] = system.name
                            """
                            Create summary of the file data for the datastore items connected to this map
                            """
                            for item in map.dataStoreItems.filter(None).orderBy('fileID'):
                                if str(item.fileSystem.id) not in fileSummary:
                                    fileSummary[str(item.fileSystem.id)] = {}
                                if item.fileID not in fileSummary[str(item.fileSystem.id)] :
                                    fileSummary[str(item.fileSystem.id)][str(item.fileID)] = datetime.strftime(item.createdAt, '%Y/%m/%d %H:%M:%S')
                            mapDict['files'] = fileSummary
                    # Add map dict to list of maps for this system
                    systemMaps.append(mapDict)
            if groupBySystem :
                """
                Group By System and return System Details
                """
                availableSystemItems = []
                print system.meta
                if system.meta is not None:
                    meta = simplejson.loads(system.meta)
                    print meta
                    if meta.get('mms','0') == '1':
                        print "call to get listSystemItems"
                        # Get count of available system items if mms is enabled
                        availableSystemItems = HermesSystems.listSystemItems(system.shortName)
                systemDict = {
                    'short' : system.shortName,
                    'name' : system.name,
                    'status' : system.status,
                    'id' : system.id,
                    'maps' : systemMaps,
                    'system_item_count' : len(availableSystemItems)
                }
                meta = system.meta
                if meta is not None:
                    meta = simplejson.loads(meta)
                    systemDict['meta'] = meta
                scopedMaps.append(systemDict)
            else :
                """
                Do not group the maps by system, return list of maps
                """
                scopedMaps.extend(systemMaps)
        return scopedMaps
    @classmethod
    def deleteMap(self, id=None):
        """
        When you delete a map, it will delete all children legends first

        Also deletes the map's datastore items.  Returns True on success,
        False otherwise (errors are swallowed).
        """
        # write Connection
        self._connection = self.client_connection['write']
        if id and int(id) > 0:
            try:
                map = HermesMap.get(int(id))
                for legend in map.importLegend:
                    HermesLegend.delete(legend.id)
                for datastore in map.dataStoreItems:
                    DataStore.deleteItem(datastore.id)
                HermesMap.delete(int(id))
                return True
            except:
                # NOTE(review): bare except hides partial-delete failures.
                pass
        return False
class HermesSystems(SQLObject):
    """
    Hermes Systems
    ==========================
    - Active Directory
    - MySQL
    - PostgreSQL
    - MSSQL
    """
    class sqlmeta:
        table="hermes_systems"
    shortName = StringCol(length=20, default=None)
    name = StringCol(length=20, default=None)
    # JSON-encoded per-system settings (e.g. 'mms', 'dmh', 'url')
    meta = StringCol(default=None)
    # NOTE(review): datetime.now() is evaluated once at class-definition time.
    modified = DateTimeCol(default=datetime.now())
    status = StringCol(length=20, default='inactive') # active / inactive / locked
    # Joins
    childrenMaps = SQLMultipleJoin( 'HermesMap', joinColumn='system_type' )
    def __repr__(self):
        # Debug representation: id, short name, name, meta.
        return "<HermesSystems('%i, %s','%s','%s')" % (self.id, self.shortName, self.name, self.meta)
    def _set_client(self, value=None):
        # Store the client identifier for later use.
        self.client = value
    def _set_client_connection(self, value=None):
        # Store the {'read': conn, 'write': conn} connection pair.
        self.client_connection = value
    @classmethod
    def _set_logging(self):
        # Initialise the class-level logger.
        self.logging = logging.getLogger('hermes.hermessystems')
    @classmethod
    def addDefaultSystems(self):
        """
        Will add the following records to the table:
        +----+-----------------+------------------+------+---------------------+----------+
        | id | short_name | name | meta | modified | status |
        +----+-----------------+------------------+------+---------------------+----------+
        | 1 | activeDirectory | Active Directory | NULL | 2012-10-08 11:06:16 | inactive |
        | 2 | mySQL | MySQL | NULL | 2012-10-08 11:06:16 | inactive |
        | 3 | postgre | PostgreSQL | NULL | 2012-10-08 11:06:16 | inactive |
        | 4 | MSSQL | MSSQL | NULL | 2012-10-08 11:06:16 | inactive |
        +----+-----------------+------------------+------+---------------------+----------+
        """
        systemsDefault = [
            {'shortName' : 'activeDirectory',
             'name' : 'Active Directory'},
            {'shortName' : 'mySQL',
             'name' : 'MySQL'},
            {'shortName' : 'postgre',
             'name' : 'PostgreSQL'},
            {'shortName' : 'MSSQL',
             'name' : 'MSSQL'}
        ]
        for system in systemsDefault:
            self.saveSystem(system)
        return True
    @classmethod
    def addNewSystem(self, systemDict={}):
        """
        Will add new system to available systems.
        The dict must contain the shortName and name keys.
        This is to be used by the api incase we want to add a new system later on
        """
        # NOTE(review): mutable default argument; harmless here since it is
        # only read, never mutated.
        if 'shortName' in systemDict and 'name' in systemDict:
            return self.saveSystem(systemDict)
        return False
    @classmethod
    def deleteSystem(self, id=None):
        # "Delete" is a soft delete: mark the system inactive, and only when
        # it has no child maps.  Returns True on success, False otherwise.
        # write Connection
        self._connection = self.client_connection['write']
        if id and int(id) > 0:
            try:
                system = HermesSystems.get(int(id))
                if len(list(system.childrenMaps)) < 1:
                    system.status = "inactive"
                    return True
            except:
                # NOTE(review): bare except swallows lookup/update errors.
                pass
        return False
    @classmethod
    def getSystems(self, active=True, returnMeta=False, filter=None):
        """
        Return a dict of systems
        if active is false, return the status of each system
        if returnMeta = True : return the meta in the dictionary of systems

        filter may be an int (system id) or a str/unicode short name; an
        int filter short-circuits and returns a one-element list.
        """
        # read Connection
        self._connection = self.client_connection['read']
        systems = [];
        query = None
        if active:
            query = OR(
                HermesSystems.q.status == 'active',
                HermesSystems.q.status == 'locked',
            )
        if filter:
            if type(filter) is int:
                # Direct primary-key lookup; returned as a one-element list
                # so callers can treat both paths uniformly.
                systemRecord = HermesSystems.get(filter)
                intSystemDict = {
                    'id' : systemRecord.id,
                    'short' : systemRecord.shortName,
                    'name' : systemRecord.name,
                    'status': systemRecord.status
                }
                if returnMeta :
                    meta = systemRecord.meta
                    if meta is not None:
                        meta = simplejson.loads(meta)
                    intSystemDict['meta'] = meta
                intSystemDict = [intSystemDict]
                print intSystemDict
                return intSystemDict
            elif type(filter) is str or type(filter) is unicode:
                # if they pass in the systemTypeId as the short name convert to id
                query = HermesSystems.q.shortName == filter
        for system in list(HermesSystems.select(query, orderBy = ['name'])):
            systemDict = {
                'id' : system.id,
                'short' : system.shortName,
                'name' : system.name,
                'status' : system.status
            }
            if returnMeta :
                meta = system.meta
                if meta is not None:
                    meta = simplejson.loads(meta)
                systemDict['meta'] = meta
            if active is not True:
                systemDict['status'] = system.status
            systems.append(systemDict)
        return systems
    @classmethod
    def listSystemItems(self, system, id=None, itemType='record', template=False, client=None):
        """
        List the items available in an external system by delegating to the
        system's plugin.  Requires the system's meta to have the Dynamic Map
        Helper ('dmh') setting enabled.  Returns a list (empty on failure or
        when no client is available).
        """
        if client == None:
            if hasattr(self, 'client'):
                client = self.client
        data = []
        if client :
            self.logging.info("Retrieve Data for System: %s" % system)
            # Get the system settings
            systemDict = HermesSystems.getSystems(filter = system, returnMeta=True )
            if len(systemDict) > 0:
                systemDict = systemDict[0]
                if systemDict.get('meta', None) and systemDict['meta'].get('dmh','0') == '1':
                    #try:
                    # Dynamically import the system's plugin module by short name.
                    name = "hermes.%s.%s" % ('plugins', system)
                    mod = __import__(name, globals(), locals(), [name], -1)
                    #plugin = eval("mod.%s" % system.title())
                    print "init the plugin"
                    #plugin = eval("mod.%s('%s', %s, %s, %s)" % (system.title(), client, None, None, None))
                    plugin = eval("mod.%s('%s')" % (system.title(), client))
                    #plugin._set_logging()
                    #plugin.__init__(client=client)
                    data = plugin.listSystemItems(id, itemType, template)
                    """
                    except Exception as e:
                        e = sys.exc_info()
                        self.logging.info("listSystemItems : %s" % e[0])
                        self.logging.info("%s" % e[1])
                        return data
                    """
                else :
                    self.logging.info("%s does not have the Dynamic Map Helper (dmh) setting enabled" % client)
        return data
    @classmethod
    def saveSystem(self, system):
        """
        Save / Update an System
        **Params:**
        Dictionary of key/value for system settings.
        **You must pass in the shortName**
        'status' : default 'inactive', may pass 'active'

        Updates in place when a system with the same shortName exists,
        otherwise inserts.  Returns True on success, False when shortName
        is missing.
        """
        # write Connection
        self._connection = self.client_connection['write']
        if 'shortName' in system :
            newSystem = {
                'shortName' : system['shortName']
            }
            if 'name' in system:
                newSystem['name'] = system['name']
            if 'status' in system:
                # Catch that they must pass in active / inactive
                if system['status'] == 'locked' :
                    newSystem['status'] = 'locked'
                elif system['status'] == 'active' or system['status'] is True:
                    newSystem['status'] = 'active'
                else:
                    newSystem['status'] = 'inactive'
            # Check if exist in database
            existingSystem = HermesSystems.select(HermesSystems.q.shortName == system['shortName'])
            if existingSystem.count() > 0 :
                # set modified date to today
                newSystem['modified'] = datetime.today()
                existingSystem[0].set(**newSystem)
            else :
                HermesSystems(**newSystem)
            # if they passed settings, and it is a dict
            if 'meta' in system and type(system['meta']) is dict:
                return self.saveSystemSettings({system['shortName'] : system['meta'] })
            return True
        return False
    @classmethod
    def saveSystemSettings(self, settings=None):
        """
        Pass a dict with keys as the short names and the values as a dict of settings

        Normalises any 'url' setting (prepends http://, appends trailing
        slash, URL-quotes it) and stores the settings JSON-encoded in the
        system's meta column.  Returns True only when every entry was saved.
        """
        # write Connection
        self._connection = self.client_connection['write']
        success = 0
        if settings:
            for system, systemSettings in settings.iteritems():
                if 'url' in systemSettings:
                    # URL should start with "http://"
                    url = systemSettings['url']
                    if url[0:7] != 'http://':
                        url = 'http://' + url
                    # Add slash to end of url
                    if url[-1:] != '/':
                        url += '/'
                    systemSettings['url'] = urllib.quote_plus(url)
                existingSystem = HermesSystems.select(HermesSystems.q.shortName == system)
                if existingSystem.count() > 0 :
                    existingSystem[0].set(**{'meta' : simplejson.dumps(systemSettings), 'modified': datetime.today()})
                    success += 1
        if success >= len(settings) :
            return True
        else :
            return False
class DataStore(SQLObject):
"""
DataStore
+--------------+
| Map |
|--------------|
| |
| | +----------------------+
| | | StoreAttributes |
+--------------+ |----------------------|
^ + +------------------+ | |
| | | DataStore | +----->| |
| | |------------------| | +---+| |
| +--------> | | | | +----------------------+
+----------+ | | +------+ |
| | <--------+
+------------------+
^ + +----------------------+
| | | DataStoreImported |
| | |----------------------|
| +-----------------> | |
+-------------------+ | |
+----------------------+
"""
class sqlmeta:
table="hermes_datastore"
fileID = IntCol(length=11, notNone=False, default=0)
fileSystem = ForeignKey('HermesSystems')
dataType = StringCol(length=255, default='')
map = ForeignKey('HermesMap')
# do not store since majority will be cron job
#createdBy = IntCol(length=11, notNone=False, default=0)
createdAt = DateTimeCol(default=datetime.now())
modifiedAt = DateTimeCol(default=datetime.now())
deletedAt = DateTimeCol(default=None)
# This system column is not needed
#system = StringCol(length=255, default='')
rawData = BLOBCol(length=16777215,default=None) # Medium Blob
children = RelatedJoin('DataStore', joinColumn='from_id', otherColumn='to_id',
intermediateTable='hermes_datastore_relationships')
parents = RelatedJoin('DataStore', joinColumn='to_id', otherColumn='from_id',
intermediateTable='hermes_datastore_relationships', createRelatedTable=False)
store_attributes = MultipleJoin('StoreAttributes', joinColumn='datastore_id') #joinColumn=?
_scoped_store_attributes = SQLMultipleJoin('StoreAttributes', joinColumn='datastore_id') #joinColumn?
SQLimported = SQLMultipleJoin('DataStoreImported', joinColumn='datastore_id')
def _set_client(self, value=None):
self.client = value
def _set_client_connection(self, value=None):
self.client_connection = value
@classmethod
def _set_logging(self):
self.logging = logging.getLogger('hermes.datastore')
@classmethod
def _set_search_cache(self):
self.found = {
'record' : {}
}
self.missing = {
'record' : {}
}
@classmethod
def addItem(self, datastoreDict, attributes ):
    """
    Add new if not exist or update.

    :param datastoreDict: DataStore column values; must contain 'mapID' and
        'dataType' ('rawData' is filled in below)
    :param attributes: dict of fieldName -> value for the item
    :returns: the DataStore id of the created/updated row, or False when no
        usable search key (id / system_item_id, prefix+code+name, or name)
        could be derived from the attribute names
    """
    # write Connection
    self._connection = self.client_connection['write']
    datastoreId = 0
    datastoreItem = []
    self.logging.info(" ")
    self.logging.info(" ")
    self.logging.info("addItem")
    """
    begin of new way
    """
    params = []
    joins = []
    # always match on map + data type
    params.append(DataStore.q.map == datastoreDict['mapID'])
    params.append(DataStore.q.dataType == datastoreDict['dataType'])
    """
    This block of commented out code is replaced by the regex following it
    searchKeys = []
    for a in map(lambda l: l.lower(), attributes.iterkeys()) :
        if a.find(': ') > -1 :
            searchKeys.append(a.split(': ')[-1])
        else :
            searchKeys.append(a)
    """
    """
    Search the attributes and pull out the searchKeys which are used to ensure that the items doesn't exist.
    It will search for if the attribute is "name" or ends with ": name"
    """
    searchKeys = {
        'prefix' : None,
        'code' : None,
        'name' : None,
        'id' : None,
        'system_item_id' : None,
    }
    zz = ['^%s$|: %s$' % (searchKey,searchKey) for searchKey in searchKeys.iterkeys()]
    # Check if attribute is "name" or ends with ": name"
    p = re.compile(r'%s' % '|'.join(zz), re.I)
    for a in attributes.iterkeys() :
        x = p.search(a)
        if x:
            # normalised key name (e.g. 'name'); value is the full attribute name
            i = a[x.span()[0]: x.span()[1]].replace(': ', '').lower()
            searchKeys[i] = x.string
    print "Attributes"
    print attributes
    print "Search Keys"
    print searchKeys
    """
    Store the attributes dict in the raw data column, convert None to "", convert ints and floats to string
    """
    attributesDict = {}
    for key, value in attributes.iteritems():
        #self.logging.info('attribute type: %s' % (type(value)))
        if value is None:
            value = ""
        if type(value) is int or type(value) is float:
            value = str(value)
        attributesDict[str(key.encode('utf-8'))] = value
    if searchKeys.get('id', None) or searchKeys.get('system_item_id', None) :
        """
        Add if it has a Id or system_item_id attribute
        """
        print "Had id or system_item_id"
        fieldKey = None
        # NOTE(review): the first branch stores the literal 'system_item_id'
        # while the second stores the matched attribute name from searchKeys --
        # confirm both are valid keys into attributesDict.
        if searchKeys.get('system_item_id', None) :
            fieldKey = 'system_item_id'
        elif searchKeys.get('id', None) :
            fieldKey = searchKeys['id']
        self.logging.info("adding item id, key : %s" % fieldKey)
        # join a StoreAttributes alias constrained to this field name/value
        daId = Alias(StoreAttributes, "daId")
        daIdQuery = daId.q.datastore == DataStore.q.id
        daIdQuery = AND(
            daIdQuery,
            daId.q.fieldName == fieldKey)
        params.append(daId.q.fieldValue == str(attributesDict[fieldKey]))
        joins.append( LEFTJOINOn(None, daId, daIdQuery))
    elif searchKeys.get('prefix', None) and searchKeys.get('code', None) and searchKeys.get('name', None) :
        """
        Add if it has a prefix / code / name
        """
        print "adding item prefix / code / name"
        self.logging.info("adding item prefix / code / name")
        # three attribute aliases, one per component of the composite key
        daPrefix = Alias(StoreAttributes, "daPrefix")
        daCode = Alias(StoreAttributes, "daCode")
        daName = Alias(StoreAttributes, "daName")
        """
        Join the Prefix
        """
        daPrefixQuery = daPrefix.q.datastore == DataStore.q.id
        daPrefixQuery = AND(
            daPrefixQuery,
            daPrefix.q.fieldName == searchKeys['prefix'])
        params.append(daPrefix.q.fieldValue == attributesDict[searchKeys['prefix']])
        joins.append( LEFTJOINOn(None, daPrefix, daPrefixQuery))
        """
        Join the Code
        """
        daCodeQuery = daCode.q.datastore == DataStore.q.id
        daCodeQuery = AND(
            daCodeQuery,
            daCode.q.fieldName == searchKeys['code'])
        params.append(daCode.q.fieldValue == attributesDict[searchKeys['code']])
        joins.append( LEFTJOINOn(None, daCode, daCodeQuery))
        """
        Join the Name
        """
        daNameQuery = daName.q.datastore == DataStore.q.id
        daNameQuery = AND(
            daNameQuery,
            daName.q.fieldName == searchKeys['name'])
        params.append(daName.q.fieldValue == attributesDict[searchKeys['name']])
        joins.append( LEFTJOINOn(None, daName, daNameQuery))
    elif searchKeys.get('name', None) :
        """
        Add if it has a Name attribute
        """
        print "adding item name"
        self.logging.info("adding item name")
        daName = Alias(StoreAttributes, "daName")
        # Join the Name
        daNameQuery = daName.q.datastore == DataStore.q.id
        daNameQuery = AND(
            daNameQuery,
            daName.q.fieldName == searchKeys['name'])
        params.append(daName.q.fieldValue == attributesDict[searchKeys['name']])
        joins.append( LEFTJOINOn(None, daName, daNameQuery))
    else:
        # no usable search key -> cannot safely de-duplicate, refuse to add
        self.logging.info("addItem but it failed man")
        return False
    print "check if item exists"
    print DataStore.select(AND(
        *params
    ),join=joins).distinct()
    datastoreItem = list(DataStore.select(AND(
        *params
    ),join=joins).distinct())
    # full attribute payload is mirrored into the rawData BLOB column
    datastoreDict['rawData'] = simplejson.dumps(attributesDict, encoding='utf-8')
    print datastoreItem
    print datastoreDict
    if datastoreItem != []:
        # Update record
        print "update record"
        datastoreDict['modifiedAt'] = datetime.today()
        datastoreId = datastoreItem[0].id
        datastoreItem[0].set(**datastoreDict)
    else:
        # Create a new record
        print "create record"
        datastoreId = int(DataStore(**datastoreDict).id)
    #self.logging.info("datastoreId: %s" % datastoreId)
    print "datastoreId ********"
    print datastoreId
    # Add items attributes
    if len(attributesDict) > 0 and datastoreId and datastoreId > 0:
        StoreAttributes.addAttributes(datastoreId, attributesDict)
    print "datastoreId ********"
    print datastoreId
    return datastoreId
@classmethod
def deleteItem(self, dataStoreId):
    """
    Delete a datastore item.

    Attribute rows and DataStoreImported rows are removed via the ForeignKey
    cascade on their `datastore` columns, so no per-child cleanup is needed.

    :param dataStoreId: primary key of the DataStore row to delete
    :returns: True on success, False if the delete raised
    """
    # write Connection
    self._connection = self.client_connection['write']
    try:
        DataStore.delete(dataStoreId)
        return True
    # BUGFIX: narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
    # are no longer swallowed; still best-effort for database errors.
    except Exception:
        return False
@classmethod
def getItem(self, id=None, showMeta=True, systemItemId=None, systemType=None, matchId=None):
    """
    Get the Datastore Item, including the attributes.

    If systemItemId and systemType are passed, search for matches of items.
    If matchId is passed, grab item and compare.

    :param id: DataStore primary key; an empty dict is returned when falsy
    :param showMeta: include the 'hermes_meta' block (timestamps, map, system)
    :param systemItemId: destination system item id (match lookup)
    :param systemType: destination system id (match lookup)
    :param matchId: unused here -- TODO confirm whether still needed
    :returns: dict containing the datastore item plus all its attributes
    """
    # read Connection
    self._connection = self.client_connection['read']
    item = {}
    if id :
        #try :
        datastoreRecord = DataStore.get(int(id))
        # Add custom attributes for item
        item.update( dict( ( attribute.fieldName, attribute.fieldValue ) for attribute in datastoreRecord.store_attributes ) )
        item['hermes_id'] = datastoreRecord.id
        item['hermes_type'] = datastoreRecord.dataType
        if showMeta :
            # metadata block: timestamps plus owning map and source system
            item['hermes_meta'] = {
                'hermes_created' : datastoreRecord.createdAt.isoformat(' '),
                'hermes_modified' : datastoreRecord.modifiedAt.isoformat(' '),
                'hermes_deleted' : None,
                'hermes_map' : datastoreRecord.map.id,
                'hermes_system' : {
                    'id' : datastoreRecord.map.systemType.id,
                    'short' : datastoreRecord.map.systemType.shortName,
                    'name' : datastoreRecord.map.systemType.name,
                    'status': datastoreRecord.map.systemType.status
                }
            }
            if datastoreRecord.deletedAt is not None:
                item['hermes_meta']['hermes_deleted'] = datastoreRecord.deletedAt.isoformat(' ')
        """
        if legendId and systemId are passed in, then create hermes_match attribute
        """
        if systemType and systemItemId and hasattr(self, 'client') :
            client = self.client
            if client :
                # set the variables used in this search function
                self._set_search_cache()
                system = HermesSystems.get(systemType)
                DestinationReport = None
                # NOTE(review): dynamic plugin import + eval() on the DB-sourced
                # shortName -- assumes trusted database content. DestinationReport
                # is built but never used below; the hermes_match feature looks
                # unfinished -- confirm intent.
                name = "hermes.%s.%s" % ('plugins', system.shortName)
                mod = __import__(name, globals(), locals(), [name], -1)
                DestinationReport = eval("mod.%s('%s',%s,%s,%s)" % (system.shortName.title(), client, None, None, None))
        """
        Recursively add children
        """
        if len(datastoreRecord.children) > 0 :
            #self.logging.info("datastoreRecord.children : %s" % ([child.id for child in datastoreRecord.children]))
            childItems = [child.id for child in datastoreRecord.children]
            if len(childItems) > 0 :
                item['children'] = []
                for childItem in childItems:
                    # children are fetched without their meta block (showMeta=False)
                    item['children'].append(self.getItem(childItem, False))
        """
        except:
            return {
                "status" : "Error",
                "message" : ["An error has occurred"] }
        """
    return item
@classmethod
def addFileData(self, mapId, data, fileID=0, fileSystem=None, dataType="record"):
    """
    Accept the file as a json object through the api, loop over each row and
    add the row as a datastore item according to mapId.

    Parse each row as a systemItem and process according to the HermesMap.

    ** Params: **
    - mapId : the id of the map to add to datastore
    - data : the contents of the file (iterable of rows)
    - fileID : this is optional
    - fileSystem : optional system id the file came from
    - dataType : for the data type, default is record

    :returns: True when the map id was valid and rows were processed,
        otherwise False
    """
    # write Connection
    self._connection = self.client_connection['write']
    if mapId and int(mapId) > 0 :
        # renamed from `map` so the builtin is no longer shadowed
        mapDefinition = HermesMap.getMap(int(mapId))
        #system = HermesSystems.get(mapDefinition['system_type'])
        #'system' : system.shortName,
        datastoreDict = {
            'dataType' : dataType,
            'mapID' : int(mapId),
            'fileID' : int(fileID)
        }
        if fileSystem:
            datastoreDict['fileSystem'] = int(fileSystem)
        self.logging.info("datadict : %s" % (datastoreDict))
        # translate each raw row into {fieldName: value} using the map's
        # column definitions; '0' marks an unmapped column, and short rows
        # yield an empty record
        report = []
        for record in data:
            dataRecord = {}
            if len(record) >= len(mapDefinition['systemDefinitions']):
                for key, value in enumerate(mapDefinition['systemDefinitions']):
                    if value != '0':
                        dataRecord[mapDefinition['systemFieldNames'][value].encode('utf-8')] = record[key]
            report.append(dataRecord)
        # loop over processed data and add items (addItem de-duplicates)
        for key, attributes in enumerate(report):
            #self.logging.info("attributes 22: %s" % (attributes))
            self.addItem(datastoreDict, attributes)
        return True
    return False
@classmethod
def getDatastoreFields(self, searchParameters, systemFieldIds={}, systemType=None):
    """
    This function should be used to get fields from the datastore.

    :param searchParameters: dict; must contain 'map_id' and 'system_id'
    :param systemFieldIds: dict of display name ('Prefix'/'Code'/'Name') ->
        legend field id (NOTE: mutable default is never mutated here)
    :param systemType: destination system id forwarded to getLegend
    :returns: {legend field name: [distinct values]}; False on a bad map_id
    """
    # read Connection
    self._connection = self.client_connection['read']
    results = {}
    if 'map_id' in searchParameters and 'system_id' in searchParameters and ('Prefix' in systemFieldIds or 'Code' in systemFieldIds or 'Name' in systemFieldIds):
        # Grab the legend for the map and system id
        legend = HermesLegend.getLegend( mapId=searchParameters['map_id'], systemItemId=searchParameters['system_id'], systemType=systemType )
        # NOTE(review): this guard runs after map_id has already been used by
        # getLegend above -- confirm whether it should precede that call.
        if searchParameters['map_id'] is None or int(searchParameters['map_id']) < 1 :
            return False
        map = HermesMap.getMap(id=searchParameters['map_id'])
        # keep only the system fields that actually appear in the legend details
        newSystemFieldIds = dict( (systemFieldKey, systemFieldValue) for systemFieldKey, systemFieldValue in systemFieldIds.iteritems() if systemFieldIds[systemFieldKey] in legend["details"] )
        kwargs = { "distinct" : True }
        for systemFieldKey, systemFieldValue in newSystemFieldIds.iteritems() :
            # one query per field: distinct (fieldName, fieldValue) pairs
            # for this map / data type
            joinExp = LEFTJOINOn(StoreAttributes, DataStore, StoreAttributes.q.datastore == DataStore.q.id)
            fields = [StoreAttributes.q.fieldName, StoreAttributes.q.fieldValue]
            whereExp = AND(
                DataStore.q.dataType == map['type'],
                DataStore.q.map == searchParameters['map_id'],
                StoreAttributes.q.fieldName == legend['details'][str(systemFieldValue)]
            )
            resultTuple = selectSpecial(StoreAttributes, joinExp, fields, whereExp, **kwargs)
            # the second column of each result row is the attribute value
            values = [resultTuple[p][1] for p in range(len(resultTuple))]
            results[legend['details'][str(systemFieldValue)]] = values
    return results
@classmethod
def searchDatastore(self, searchParameters, displayTitleFields={}, systemType=None, order=None, showMatch=False, client=None):
    """
    **Parameters**
    @searchParameters
    Example:
    searchParameters = {
        'map_id' : 7
        'system_id' : 11
    }
    *** Pass Filters ***
    #searchParameters['filter'] = [
        {"Map Field Name goes here" : "value it equals goes here"}
    ]
    Only pass what you want to search.
    If you pass 'code' = '', it will search where code = ''
    searchParameters['paginate'] = {
        'start' : 0
        'end' : 25
    }
    #searchParameters['include_imported'] = True
    ***When searching by date with a Start and End***
    #searchParameters["modified"] = [{"compare" : ">=", "value" : "2012/08/26" },{"compare" : "<=","value" : "2012/09/20"}]
    ***When searching by date with only a End Date***
    #searchParameters["modified"] = {"compare":"<=","value":"2012/09/19"}
    ***When searching by date with only a Start Date***
    #searchParameters["modified"] = {"compare":">=","value":"2012/08/27"}
    @displayTitleFields
    optional if the plugin has the "displayTitleFields" function defined
    Example:
    displayTitleFields={ 'Prefix' : '627', 'Code' : '629', 'Name' : '631' }
    @order Specifies the ordering of results, pass in the string name of a map field name or a list of strings of map field names
    @showMatch default: False
    if set to true, will find a match in current system software to see if already exists
    @systemType
    **Required
    This is the system id that the legend is going into
    system -> map -> legend -> systemType
    **Returns:**
    a dict {"total": <count>, "results": [<item dict>, ...]}
    """
    # read Connection
    self._connection = self.client_connection['read']
    if client == None:
        if hasattr(self, 'client'):
            client = self.client
    if 'type' in searchParameters :
        searchType = searchParameters['type']
    else:
        searchType = 'record'
    results = []
    params = []
    joins = []
    kwargs = { "distinct" : True }
    aliases = {}
    aliasQuery = []
    searchSummary = {
        "total" : 0,
        "results" : []
    }
    self.logging.info("searchParameters %s" % searchParameters)
    if 'map_id' in searchParameters and 'system_id' in searchParameters:
        # Grab the Map, Legend, System
        map = HermesMap.getMap(id=searchParameters['map_id'])
        legend = HermesLegend.getLegend( mapId=searchParameters['map_id'], systemItemId=searchParameters['system_id'], systemType=systemType )
        system = HermesSystems.get(legend['hermes_system']['id'])
        # dynamically import the per-system plugin module
        name = "hermes.%s.%s" % ('plugins', system.shortName)
        mod = __import__(name, globals(), locals(), [name], -1)
        #pluginParams = {
        #    'specialParam' : searchParameters['system_id']
        #    }
        pluginParams = None
        # NOTE(review): eval() on the DB-sourced shortName -- assumes trusted data
        plugin = eval("mod.%s('%s',%s,%s,%s)" % (system.shortName.title(), client, map, legend, pluginParams))
        """
        Setup the Joins and Conditions
        """
        params.append(DataStore.q.map == map['id'])
        params.append(DataStore.q.dataType == searchType)
        if 'include_imported' in searchParameters and searchParameters['include_imported']:
            # restrict to items with no import timestamp (not yet imported)
            params.append(DataStoreImported.q.importedAt == None)
        if 'modified' in searchParameters :
            value = searchParameters['modified']
            try :
                try :
                    if value['compare']:
                        """
                        If they passed in 1 date
                        """
                        filterDate = datetime.strptime(value['value'], '%Y/%m/%d')
                        if value['compare'] == '>=':
                            """
                            They passed in the start Date
                            """
                            params.append(func.date(DataStore.q.modifiedAt) >= filterDate.date())
                        elif value['compare'] == '<=':
                            """
                            They passed in the end Date
                            """
                            params.append(func.date(DataStore.q.modifiedAt) <= filterDate.date())
                except:
                    """
                    If they passed in a start and end date
                    """
                    startMonth = datetime.strptime(value[0]['value'], '%Y/%m/%d')
                    endMonth = datetime.strptime(value[1]['value'], '%Y/%m/%d')
                    params.append(func.date(DataStore.q.modifiedAt) >= startMonth.date())
                    params.append(func.date(DataStore.q.modifiedAt) <= endMonth.date())
                    pass
            except :
                # malformed date filters are silently ignored (best effort)
                pass
        # Datastore Imported
        joins.append( LEFTJOINOn(DataStore, DataStoreImported, AND(DataStore.q.id == DataStoreImported.q.datastore, DataStoreImported.q.systemItemId == int(searchParameters['system_id']) )))
        if 'filter' in searchParameters:
            for filterKey, filter in enumerate(searchParameters['filter']):
                print "search for : ", filter
                fieldName = filter.keys()[0]
                # only use the alphabetical characters in the keyName
                keyName = ''.join(e for e in fieldName if e.isalpha())
                # NOTE(review): len() can never be < 0, so this fallback alias
                # name is dead code -- was `== 0` intended?
                if len(keyName) < 0 :
                    filterKey += 1
                    keyName = "filter%s" % filterKey
                aliases[keyName] = Alias(StoreAttributes, keyName)
                params.append(aliases[keyName].q.fieldValue == filter[fieldName])
                joins.append( LEFTJOINOn(None, aliases[keyName], AND(
                    aliases[keyName].q.datastore == DataStore.q.id,
                    aliases[keyName].q.fieldName == fieldName)))
        """
        Sort by Map Field Name
        """
        if order is not None :
            if type(order) is not list :
                order = [order]
            print "order by for : ", order
            kwargs["orderBy"] = []
            for orderKey, orderField in enumerate(order):
                # one StoreAttributes alias per order column
                keyName = "OrderColumn%s" % orderKey
                aliases[keyName] = Alias(StoreAttributes, keyName)
                joins.append( LEFTJOINOn(None, aliases[keyName], AND(
                    aliases[keyName].q.datastore == DataStore.q.id,
                    aliases[keyName].q.fieldName == orderField)))
                kwargs["orderBy"].append(aliases[keyName].q.fieldValue)
        print "***************************************"
        print "***************************************"
        print "***************************************"
        joinExp = joins
        fields = [DataStore.q.id]
        whereExp = AND(
            *params
        )
        # find total of results (unpaginated count)
        resultTuple = selectSpecial(DataStore, joinExp, fields, whereExp, **kwargs)
        # return the total result
        searchSummary["total"] = len(resultTuple)
        # if a dict is passed in the searchParameters['paginate'] then add to the kwargs
        if 'paginate' in searchParameters and type(searchParameters['paginate']) is dict :
            kwargs.update( searchParameters['paginate'] )
        # if pagination passed then reset the resultTuple variable with the paginated results
        resultTuple = selectSpecial(DataStore, joinExp, fields, whereExp, **kwargs)
        # convert the resultTuple to a list of ints
        values = [int(value) for value in resultTuple]
        print "values", values
        if hasattr(plugin, 'displayTitleFields' ) :
            print "call function to displayTitleFields"
            # the plugin overrides the caller-supplied displayTitleFields
            displayTitleFields = plugin.displayTitleFields(id=searchParameters['system_id'], itemType=searchType, legend=legend)
        self.logging.info("legend details: %s" % legend['details'])
        self.logging.info("displayTitleFields: %s" % displayTitleFields)
        # Loop over results and build list
        self.logging.info("Loop over results and build list")
        for id in values :
            datastoreRecord = DataStore.get(id)
            self.logging.info("datastore id : %s " % datastoreRecord.id)
            # dict containing the title fields
            fields = {}
            attributes = dict( ( attribute.fieldName, attribute.fieldValue ) for attribute in datastoreRecord._scoped_store_attributes )
            # format the title of the item in the plugin
            if len(displayTitleFields) > 0 :
                for key, field in displayTitleFields.iteritems() :
                    if field in attributes :
                        fields[key.lower()] = attributes[field]
            # A Dict for each item
            item = {
                'id' : datastoreRecord.id,
                'modified' : datetime.strftime(datastoreRecord.modifiedAt, '%Y/%m/%d'),
                'imported' : None,
                'requirements_met' : True
            }
            # Get the imported Date
            for imported in datastoreRecord.SQLimported.filter(DataStoreImported.q.systemItemId == int(searchParameters['system_id'])):
                item['imported'] = datetime.strftime(imported.importedAt, '%Y/%m/%d')
            # add the fields dict to the item dict
            item.update(fields)
            if showMatch and len(fields) > 0 and hasattr(plugin, 'searchResultSupplementalInformation' ):
                item.update( plugin.searchResultSupplementalInformation(fields=fields, datastoreRecord=datastoreRecord, attributes=attributes) )
            results.append(item)
    searchSummary["results"] = results
    # return the summary of the search
    return searchSummary
@classmethod
def findChildrenMatchesToDatastore(self, report, dataStoreId, itemChildren=None, searchFields=None, legend=None):
    """
    Loop over the children of a datastore item and search to see if there is
    a match in the destination system.

    :param report: plugin report object exposing findMatchRequirements /
        findMatchToDatastore
    :param dataStoreId: DataStore primary key, or an already-fetched
        DataStore instance
    :param itemChildren: optional list of child items, forwarded on recursion
    :param searchFields: iterable of lower-cased attribute names used to build
        the match query.  BUGFIX: this parameter previously had no default but
        followed a defaulted one, which is a SyntaxError.
    :param legend: optional legend dict passed to findMatchRequirements
    :returns: dict of 'found' / 'missing' children keyed by data type and id
        (shape defined by report.findMatchRequirements)
    """
    # BUGFIX: no mutable defaults; normalise sentinels up front
    if itemChildren is None:
        itemChildren = []
    if searchFields is None:
        searchFields = ()
    results = {}
    # BUGFIX: accept either an id or a DataStore instance.  Previously the
    # lookup and the 'record' check were an if/elif pair, so the body never
    # ran for integer ids and `dataStoreItem` was unbound otherwise.
    if type(dataStoreId) is int:
        dataStoreItem = DataStore.get(dataStoreId)
    else:
        dataStoreItem = dataStoreId
    if dataStoreItem.dataType == 'record':
        self.logging.info("findChildrenMatchesToDatastore")
        # Check if fields match
        results = report.findMatchRequirements(dataStoreItem, legend)
        # Loop datastore children
        for child in dataStoreItem.children:
            # only search records
            if child.dataType == "record":
                # BUGFIX: the caches are nested {dataType: {id: ...}} (see
                # _set_search_cache), so membership and reads must go through
                # the per-type bucket.
                if str(child.id) in self.found.get(child.dataType, {}):
                    results['found'][child.dataType][str(child.id)] = self.found[child.dataType][str(child.id)]
                elif str(child.id) in self.missing.get(child.dataType, {}):
                    results['missing'][child.dataType][str(child.id)] = self.missing[child.dataType][str(child.id)]
                # if not cached yet, then make the api call
                else:
                    # Build Fields from select columns
                    fields = dict( ( attributes.fieldName.lower(), attributes.fieldValue ) for attributes in child.store_attributes if attributes.fieldName.lower() in searchFields )
                    matches = report.findMatchToDatastore(fields)
                    if matches != False and len(matches) > 0:
                        results['found'][child.dataType][str(child.id)] = matches.keys()[0]
                        self.found[child.dataType][str(child.id)] = results['found'][child.dataType][str(child.id)]
                    else:
                        results['missing'][child.dataType][str(child.id)] = fields
                        self.missing[child.dataType][str(child.id)] = results['missing'][child.dataType][str(child.id)]
            if len(child.children) > 0:
                # recurse into grandchildren and merge their results in
                # (BUGFIX: searchFields/legend are now forwarded)
                childrenResults = self.findChildrenMatchesToDatastore(report, child.id, itemChildren, searchFields, legend)
                for key, value in results.iteritems():
                    if key in childrenResults:
                        for key2, value2 in results[key].iteritems():  # found / missing
                            if key2 in childrenResults[key]:  # data type
                                results[key][key2].update(childrenResults[key][key2])
                            else:
                                results[key][key2] = childrenResults[key][key2]
    return results
class StoreAttributes(SQLObject):
"""
Store Attributes
Contains the attributes of the Datastore Item
"""
class sqlmeta:
table="hermes_datastore_attributes"
datastore = ForeignKey('DataStore', cascade=True)
fieldName = StringCol(length=255, default='')
fieldValue = StringCol(default=None)
dataType = StringCol(length=255, default='')
createdAt = DateTimeCol(default=datetime.now())
modifiedAt = DateTimeCol(default=datetime.now())
deletedAt = DateTimeCol(default=None)
def _set_client(self, value=None):
self.client = value
def _set_client_connection(self, value=None):
self.client_connection = value
@classmethod
def _set_logging(self):
self.logging = logging.getLogger('hermes.storeattributes')
@classmethod
def addAttributes(self, datastoreId, attributes, *itemType):
"""
Once an item has been added to the datastore, you can addAttributes
** Params: **
- datastoreId :
- attributes : dict of the attributes of item
- type : type of
"""
# write Connection
self._connection = self.client_connection['write']
print "Add Attributes"
# Grab the datastore record
datastoreRecord = DataStore.get(datastoreId)
# convert the rawData into a dict
try :
rawData = simplejson.loads(datastoreRecord.rawData.replace("'",'"'), encoding='utf-8')
except:
rawData = simplejson.loads(datastoreRecord.rawData, encoding='utf-8')
# update the rawData with the new attributes
rawData.update(attributes)
rawData = simplejson.dumps(rawData, encoding='utf-8')
# update the record
datastoreRecord.set(rawData=rawData)
# flag to know if the item was actually updated
updated = False
for key, value in attributes.iteritems():
attributeDict = {
'datastoreID' : datastoreId,
'fieldName' : key,
'fieldValue' : value
}
itemType = ''.join(itemType)
if itemType != '':
attributeDict['data_type'] = str(itemType)
datastoreItemAttribute = list(datastoreRecord._scoped_store_attributes.filter(StoreAttributes.q.fieldName == attributeDict['fieldName']))
"""
datastoreItemAttribute = list(StoreAttributes.select(AND(
StoreAttributes.q.datastoreID == attributeDict['datastoreID'],
StoreAttributes.q.fieldName == attributeDict['fieldName']
)))
"""
# if record exists
if datastoreItemAttribute != []:
# if value is different, update
if datastoreItemAttribute[0].fieldValue != attributeDict['fieldValue'] :
attributeDict['modifiedAt'] = datetime.today()
# Update record
datastoreItemAttribute[0].set(**attributeDict)
updated = True
else:
# Create a new record
StoreAttributes(
**attributeDict
)
updated = True
# remove the flag that this item was imported
if updated :
DataStoreImported.clearFlag(datastoreId)
return True
@classmethod
def getAttributes(self, datastoreId):
"""
retrieve a dict of the attributes of a datastore item
"""
# read Connection
self._connection = self.client_connection['read']
datastoreItemAttributes = list(StoreAttributes.select(
StoreAttributes.q.datastoreID == datastoreId
))
#, StoreAttributes.q.fieldName != 'Approval Process Name'
# if records exist
if datastoreItemAttributes != []:
fields = {}
for attribute in datastoreItemAttributes :
fields[attribute.fieldName] = attribute.field_value
return fields
return []
class DataStoreImported(SQLObject):
    """
    DataStoreImported

    Lookup table recording when a datastore item was imported into a
    destination system item.
    """
    class sqlmeta:
        table = "hermes_datastore_imported"

    datastore = ForeignKey('DataStore', cascade=True)
    systemItemId = IntCol(length=11, notNone=True)
    system = ForeignKey('HermesSystems')
    # BUGFIX: callable default so the timestamp is taken per-INSERT, not
    # once at import time.
    importedAt = DateTimeCol(default=datetime.now)

    def _set_client(self, value=None):
        self.client = value

    def _set_client_connection(self, value=None):
        self.client_connection = value

    @classmethod
    def _set_logging(self):
        self.logging = logging.getLogger('hermes.datastoreimported')

    @classmethod
    def markImported(self, datastoreId, systemItemId, system=None):
        """
        markImported
        Marks the item as imported into a system item or updates the imported date.

        **Params:**
        @systemItemId : this is the system id
        @system : this is the system short name

        :returns: the DataStoreImported row id, or False when no system
            short name was supplied
        """
        # write Connection
        self._connection = self.client_connection['write']
        # a system short name is required
        if system is None :
            return False
        # resolve the short name to a system id (0 when unknown)
        systemDict = HermesSystems.getSystems(filter = system)
        if len(systemDict) > 0:
            system = systemDict[0]['id']
        else :
            system = 0
        importedItem = DataStoreImported.select(AND(
            DataStoreImported.q.datastore == int(datastoreId),
            DataStoreImported.q.systemItemId == int(systemItemId),
            DataStoreImported.q.system == int(system)))
        if importedItem.count() > 0 :
            # refresh the imported timestamp on the existing row
            importedItem[0].set(**{'importedAt' : datetime.today()})
            return importedItem[0].id
        # Create a new record
        # (the previous trailing `return False` after this if/else was
        # unreachable and has been removed)
        datastoreImportedRecord = {
            'datastoreID' : int(datastoreId),
            'systemItemId' : int(systemItemId),
            'system' : int(system)
        }
        return int(DataStoreImported(**datastoreImportedRecord).id)

    @classmethod
    def isImported(self, datastoreId, systemItemId, system=None):
        """
        Return True when an import record exists for the item/system pair,
        otherwise (or when no system is given) False.
        """
        # read Connection
        self._connection = self.client_connection['read']
        if system is None :
            return False
        importedItem = DataStoreImported.select(AND(
            DataStoreImported.q.datastore == datastoreId,
            DataStoreImported.q.systemItemId == systemItemId,
            DataStoreImported.q.system == system))
        return importedItem.count() > 0

    @classmethod
    def clearFlag(self, datastoreId):
        """
        Delete every import record for the item (called after its attributes
        change so the item will be re-exported).
        """
        # write Connection
        self._connection = self.client_connection['write']
        DataStoreImported.deleteMany(where = DataStoreImported.q.datastoreID == datastoreId )
|
<reponame>tony/libvcs<filename>libvcs/cmd/git.py
import pathlib
import shlex
from typing import Any, Literal, Optional, Sequence, Union
from libvcs._internal.run import run
from libvcs._internal.types import StrOrBytesPath, StrPath
_CMD = Union[StrOrBytesPath, Sequence[StrOrBytesPath]]
class Git:
def __init__(self, *, dir: StrPath):
    """Typed, minimal pythonic wrapper around the git(1) command line.

    Parameters
    ----------
    dir :
        Operates as PATH in the corresponding git subcommand.

    Examples
    --------
    >>> Git(dir=tmp_path)
    <Git dir=...>
    """
    #: Directory to check out
    self.dir: pathlib.Path
    self.dir = dir if isinstance(dir, pathlib.Path) else pathlib.Path(dir)
def __repr__(self):
    """Return a debug representation naming the checkout directory."""
    return "<Git dir={}>".format(self.dir)
def run(
    self,
    args: _CMD,
    *,
    # Print-and-exit flags
    version: Optional[bool] = None,
    help: Optional[bool] = None,
    html_path: Optional[bool] = None,
    man_path: Optional[bool] = None,
    info_path: Optional[bool] = None,
    # Normal flags
    C: Optional[Union[StrOrBytesPath, list[StrOrBytesPath]]] = None,
    cwd: Optional[StrOrBytesPath] = None,
    git_dir: Optional[StrOrBytesPath] = None,
    work_tree: Optional[StrOrBytesPath] = None,
    namespace: Optional[StrOrBytesPath] = None,
    super_prefix: Optional[StrOrBytesPath] = None,
    exec_path: Optional[StrOrBytesPath] = None,
    bare: Optional[bool] = None,
    no_replace_objects: Optional[bool] = None,
    literal_pathspecs: Optional[bool] = None,
    global_pathspecs: Optional[bool] = None,
    noglob_pathspecs: Optional[bool] = None,
    icase_pathspecs: Optional[bool] = None,
    no_optional_locks: Optional[bool] = None,
    config: Optional[str] = None,
    config_env: Optional[str] = None,
    **kwargs,
):
    """Run a git command, building the global-option prefix from keywords.

    Passing None to a subcommand option, the flag won't be passed unless
    otherwise stated.

    `git help` and `git help [cmd]`

    Wraps git's `Options <https://git-scm.com/docs/git#_options>`_.

    Parameters
    ----------
    cwd : :attr:`libvcs._internal.types.StrOrBytesPath`, optional, passed to
        subprocess's ``cwd`` the command runs from. Defaults to :attr:`~.cwd`.
    C : :attr:`libvcs._internal.types.StrOrBytesPath`, optional
        ``-C <path>`` (may be a list; git accepts repeated ``-C``)
    git_dir : :attr:`libvcs._internal.types.StrOrBytesPath`, optional
        ``--git-dir <path>``
    work_tree : :attr:`libvcs._internal.types.StrOrBytesPath`, optional
        ``--work-tree <path>``
    namespace : :attr:`libvcs._internal.types.StrOrBytesPath`, optional
        ``--namespace <path>``
    super_prefix : :attr:`libvcs._internal.types.StrOrBytesPath`, optional
        ``--super-prefix <path>``
    exec_path : :attr:`libvcs._internal.types.StrOrBytesPath`, optional
        ``--exec-path=<path>``
    bare : bool
        ``--bare``
    no_replace_objects : bool
        ``--no-replace-objects``
    literal_pathspecs : bool
        ``--literal-pathspecs``
    global_pathspecs : bool
        ``--glob-pathspecs``
    noglob_pathspecs : bool
        ``--noglob-pathspecs``
    icase_pathspecs : bool
        ``--icase-pathspecs``
    no_optional_locks : bool
        ``--no-optional-locks``
    version : bool
        ``--version``
    html_path : bool
        ``--html-path``
    man_path : bool
        ``--man-path``
    info_path : bool
        ``--info-path``
    help : bool
        ``-h / --help``
    config :
        ``--config=<name>=<value>``
    config_env :
        ``--config-env=<name>=<envvar>``

    Examples
    --------
    >>> git = Git(dir=tmp_path)
    >>> git.run(['help'])
    "usage: git [--version] [--help] [-C <path>]..."
    """
    # BUGFIX: str/bytes are themselves Sequences; without the extra check a
    # bare string argument would be splatted into single characters.
    if isinstance(args, Sequence) and not isinstance(args, (str, bytes)):
        cli_args = ["git", *args]
    else:
        cli_args = ["git", args]

    if "cwd" not in kwargs:
        kwargs["cwd"] = self.dir

    #
    # Print-and-exit
    #
    if version is True:
        cli_args.append("--version")
    if help is True:
        cli_args.append("--help")
    if html_path is True:
        cli_args.append("--html-path")
    if man_path is True:
        cli_args.append("--man-path")
    if info_path is True:
        cli_args.append("--info-path")

    #
    # Flags
    #
    if C is not None:
        if not isinstance(C, list):
            C = [C]
        # BUGFIX: each path needs its own "-C <path>" pair; previously the
        # whole list was appended as one nested (invalid) argument.
        for c in C:
            cli_args.extend(["-C", str(c)])
    if git_dir is not None:
        cli_args.extend(["--git-dir", str(git_dir)])
    if work_tree is not None:
        cli_args.extend(["--work-tree", str(work_tree)])
    if namespace is not None:
        cli_args.extend(["--namespace", str(namespace)])
    if super_prefix is not None:
        cli_args.extend(["--super-prefix", str(super_prefix)])
    if exec_path is not None:
        cli_args.extend(["--exec-path", str(exec_path)])
    if bare is True:
        cli_args.append("--bare")
    if no_replace_objects is True:
        cli_args.append("--no-replace-objects")
    if literal_pathspecs is True:
        cli_args.append("--literal-pathspecs")
    if global_pathspecs is True:
        cli_args.append("--global-pathspecs")
    if noglob_pathspecs is True:
        cli_args.append("--noglob-pathspecs")
    if icase_pathspecs is True:
        cli_args.append("--icase-pathspecs")
    if no_optional_locks is True:
        cli_args.append("--no-optional-locks")
    # BUGFIX: config / config_env were accepted and documented but never
    # emitted on the command line.
    if config is not None:
        cli_args.append("--config={}".format(config))
    if config_env is not None:
        cli_args.append("--config-env={}".format(config_env))

    return run(args=cli_args, **kwargs)
def clone(
    self,
    *,
    url: str,
    separate_git_dir: Optional[StrOrBytesPath] = None,
    template: Optional[str] = None,
    depth: Optional[str] = None,
    branch: Optional[str] = None,
    origin: Optional[str] = None,
    upload_pack: Optional[str] = None,
    shallow_since: Optional[str] = None,
    shallow_exclude: Optional[str] = None,
    reference: Optional[str] = None,
    reference_if_able: Optional[str] = None,
    server_option: Optional[str] = None,
    jobs: Optional[str] = None,
    force: Optional[bool] = None,
    local: Optional[bool] = None,
    all: Optional[bool] = None,
    no_hardlinks: Optional[bool] = None,
    hardlinks: Optional[bool] = None,
    shared: Optional[bool] = None,
    progress: Optional[bool] = None,
    no_checkout: Optional[bool] = None,
    no_reject_shallow: Optional[bool] = None,
    reject_shallow: Optional[bool] = None,
    sparse: Optional[bool] = None,
    shallow_submodules: Optional[bool] = None,
    no_shallow_submodules: Optional[bool] = None,
    remote_submodules: Optional[bool] = None,
    no_remote_submodules: Optional[bool] = None,
    verbose: Optional[bool] = None,
    quiet: Optional[bool] = None,
    # Special behavior
    make_parents: Optional[bool] = True,
    **kwargs,
):
    """Clone a working copy from a git repo.

    Wraps `git clone <https://git-scm.com/docs/git-clone>`_. The clone target
    directory is always :attr:`self.dir`.

    Parameters
    ----------
    url : str
        Repository URL to clone from.
    separate_git_dir : StrOrBytesPath
        Separate repository (``.git/``) from working tree.
    make_parents : bool, default: ``True``
        Creates checkout directory (:attr:`self.dir`) if it doesn't already
        exist.

    Examples
    --------
    >>> git = Git(dir=tmp_path)
    >>> git_remote_repo = create_git_remote_repo()
    >>> git.clone(url=f'file://{git_remote_repo}')
    ''
    >>> git.dir.exists()
    True
    """
    # NOTE(review): ``force`` and ``all`` are accepted but currently mapped to
    # no git flag — confirm intended mapping before relying on them.
    required_flags: list[str] = [url, str(self.dir)]
    local_flags: list[str] = []

    if template is not None:
        local_flags.append(f"--template={template}")
    if separate_git_dir is not None:
        # Plain interpolation, not !r: repr() would embed quotes into the
        # argument git receives (e.g. --separate-git-dir='/tmp/x').
        local_flags.append(f"--separate-git-dir={separate_git_dir}")
    # ``filter`` shadows the builtin, so it arrives via **kwargs.
    if (filter := kwargs.pop("filter", None)) is not None:
        local_flags.append(f"--filter={filter}")
    if depth is not None:
        local_flags.extend(["--depth", depth])
    if branch is not None:
        local_flags.extend(["--branch", branch])
    if origin is not None:
        local_flags.extend(["--origin", origin])
    if upload_pack is not None:
        local_flags.extend(["--upload-pack", upload_pack])
    if shallow_since is not None:
        local_flags.append(f"--shallow-since={shallow_since}")
    if shallow_exclude is not None:
        local_flags.append(f"--shallow-exclude={shallow_exclude}")
    if reference is not None:
        local_flags.extend(["--reference", reference])
    if reference_if_able is not None:
        # Was previously (incorrectly) passed as ``--reference``.
        local_flags.extend(["--reference-if-able", reference_if_able])
    if server_option is not None:
        local_flags.append(f"--server-option={server_option}")
    if jobs is not None:
        local_flags.extend(["--jobs", jobs])
    if local is True:
        local_flags.append("--local")
    if hardlinks is True:
        local_flags.append("--hardlinks")
    if no_hardlinks is True:
        local_flags.append("--no-hardlinks")
    if shared is True:
        local_flags.append("--shared")
    if quiet is True:
        local_flags.append("--quiet")
    if verbose is True:
        local_flags.append("--verbose")
    if progress is True:
        local_flags.append("--progress")
    if no_checkout is True:
        local_flags.append("--no-checkout")
    if no_reject_shallow is True:
        local_flags.append("--no-reject-shallow")
    if reject_shallow is True:
        local_flags.append("--reject-shallow")
    if sparse is True:
        local_flags.append("--sparse")
    if shallow_submodules is True:
        local_flags.append("--shallow-submodules")
    if no_shallow_submodules is True:
        local_flags.append("--no-shallow-submodules")
    if remote_submodules is True:
        local_flags.append("--remote-submodules")
    if no_remote_submodules is True:
        local_flags.append("--no-remote-submodules")

    # libvcs special behavior: optionally create the checkout directory.
    if make_parents and not self.dir.exists():
        self.dir.mkdir(parents=True)
    return self.run(
        ["clone", *local_flags, "--", *required_flags], check_returncode=False
    )
def fetch(
    self,
    *,
    reftag: Optional[Any] = None,
    deepen: Optional[str] = None,
    depth: Optional[str] = None,
    branch: Optional[str] = None,
    origin: Optional[str] = None,
    upload_pack: Optional[str] = None,
    shallow_since: Optional[str] = None,
    shallow_exclude: Optional[str] = None,
    negotiation_tip: Optional[str] = None,
    jobs: Optional[str] = None,
    server_option: Optional[str] = None,
    recurse_submodules: Optional[
        Union[bool, Literal["yes", "on-demand", "no"]]
    ] = None,
    recurse_submodules_default: Optional[
        Union[bool, Literal["yes", "on-demand"]]
    ] = None,
    submodule_prefix: Optional[StrOrBytesPath] = None,
    #
    all: Optional[bool] = None,
    force: Optional[bool] = None,
    keep: Optional[bool] = None,
    multiple: Optional[bool] = None,
    dry_run: Optional[bool] = None,
    append: Optional[bool] = None,
    atomic: Optional[bool] = None,
    ipv4: Optional[bool] = None,
    ipv6: Optional[bool] = None,
    progress: Optional[bool] = None,
    quiet: Optional[bool] = None,
    verbose: Optional[bool] = None,
    unshallow: Optional[bool] = None,
    update_shallow: Optional[bool] = None,
    negotiate_tip: Optional[bool] = None,
    no_write_fetch_head: Optional[bool] = None,
    write_fetch_head: Optional[bool] = None,
    no_auto_maintenance: Optional[bool] = None,
    auto_maintenance: Optional[bool] = None,
    no_write_commit_graph: Optional[bool] = None,
    write_commit_graph: Optional[bool] = None,
    prefetch: Optional[bool] = None,
    prune: Optional[bool] = None,
    prune_tags: Optional[bool] = None,
    no_tags: Optional[bool] = None,
    tags: Optional[bool] = None,
    no_recurse_submodules: Optional[bool] = None,
    set_upstream: Optional[bool] = None,
    update_head_ok: Optional[bool] = None,
    show_forced_updates: Optional[bool] = None,
    no_show_forced_updates: Optional[bool] = None,
    negotiate_only: Optional[bool] = None,
    **kwargs,
):
    """Download from repo. Wraps `git fetch <https://git-scm.com/docs/git-fetch>`_.

    Parameters
    ----------
    reftag : Any, optional
        Repository / refspec passed positionally after ``--``.

    Examples
    --------
    >>> git = Git(dir=git_local_clone.dir)
    >>> git_remote_repo = create_git_remote_repo()
    >>> git.fetch()
    ''
    >>> git = Git(dir=git_local_clone.dir)
    >>> git_remote_repo = create_git_remote_repo()
    >>> git.fetch(reftag=f'file://{git_remote_repo}')
    ''
    >>> git.dir.exists()
    True
    """
    # NOTE(review): ``deepen``, ``negotiation_tip``, ``recurse_submodules``,
    # ``ipv4``/``ipv6`` and a few others are accepted but not yet mapped to
    # git flags — confirm before relying on them.
    required_flags: list[str] = []
    if reftag:
        required_flags.insert(0, reftag)
    local_flags: list[str] = []

    if submodule_prefix is not None:
        # Plain interpolation, not !r: repr() would embed quotes into the
        # argument git receives.
        local_flags.append(f"--submodule-prefix={submodule_prefix}")
    # ``filter`` shadows the builtin, so it arrives via **kwargs.
    if (filter := kwargs.pop("filter", None)) is not None:
        local_flags.append(f"--filter={filter}")
    if depth is not None:
        local_flags.extend(["--depth", depth])
    if branch is not None:
        local_flags.extend(["--branch", branch])
    if origin is not None:
        local_flags.extend(["--origin", origin])
    if upload_pack is not None:
        local_flags.extend(["--upload-pack", upload_pack])
    if shallow_since is not None:
        local_flags.append(f"--shallow-since={shallow_since}")
    if shallow_exclude is not None:
        local_flags.append(f"--shallow-exclude={shallow_exclude}")
    if server_option is not None:
        local_flags.append(f"--server-option={server_option}")
    if jobs is not None:
        local_flags.extend(["--jobs", jobs])
    if keep:
        local_flags.append("--keep")
    if force:
        local_flags.append("--force")
    if multiple:
        local_flags.append("--multiple")
    if quiet:
        local_flags.append("--quiet")
    if progress:
        local_flags.append("--progress")
    if verbose:
        local_flags.append("--verbose")
    if all:
        local_flags.append("--all")
    if atomic:
        local_flags.append("--atomic")
    if unshallow:
        local_flags.append("--unshallow")
    if append:
        local_flags.append("--append")
    if update_shallow:
        local_flags.append("--update-shallow")
    if dry_run:
        local_flags.append("--dry-run")
    if no_write_fetch_head:
        local_flags.append("--no-write-fetch-head")
    if write_fetch_head:
        local_flags.append("--write-fetch-head")
    if auto_maintenance:
        local_flags.append("--auto-maintenance")
    if no_auto_maintenance:
        local_flags.append("--no-auto-maintenance")
    if write_commit_graph:
        local_flags.append("--write-commit-graph")
    if no_write_commit_graph:
        local_flags.append("--no-write-commit-graph")
    if prefetch:
        local_flags.append("--prefetch")
    if prune:
        local_flags.append("--prune")
    if prune_tags:
        local_flags.append("--prune-tags")
    if tags:
        local_flags.append("--tags")
    if no_tags:
        local_flags.append("--no-tags")
    if no_recurse_submodules:
        local_flags.append("--no-recurse-submodules")
    if set_upstream:
        local_flags.append("--set-upstream")
    if update_head_ok:
        local_flags.append("--update-head-ok")
    if show_forced_updates:
        local_flags.append("--show-forced-updates")
    if no_show_forced_updates:
        local_flags.append("--no-show-forced-updates")
    if negotiate_only:
        local_flags.append("--negotiate-only")
    return self.run(
        ["fetch", *local_flags, "--", *required_flags], check_returncode=False
    )
def rebase(
    self,
    *,
    upstream: Optional[str] = None,
    onto: Optional[str] = None,
    branch: Optional[str] = None,
    apply: Optional[bool] = None,
    merge: Optional[bool] = None,
    quiet: Optional[bool] = None,
    verbose: Optional[bool] = None,
    stat: Optional[bool] = None,
    no_stat: Optional[bool] = None,
    verify: Optional[bool] = None,
    no_verify: Optional[bool] = None,
    fork_point: Optional[bool] = None,
    no_fork_point: Optional[bool] = None,
    whitespace: Optional[str] = None,
    no_whitespace: Optional[bool] = None,
    commit_date_is_author_date: Optional[bool] = None,
    ignore_date: Optional[bool] = None,
    root: Optional[bool] = None,
    autostash: Optional[bool] = None,
    no_autostash: Optional[bool] = None,
    autosquash: Optional[bool] = None,
    no_autosquash: Optional[bool] = None,
    reschedule_failed_exec: Optional[bool] = None,
    no_reschedule_failed_exec: Optional[bool] = None,
    context: Optional[int] = None,
    rerere_autoupdate: Optional[bool] = None,
    no_rerere_autoupdate: Optional[bool] = None,
    keep_empty: Optional[bool] = None,
    no_keep_empty: Optional[bool] = None,
    reapply_cherry_picks: Optional[bool] = None,
    no_reapply_cherry_picks: Optional[bool] = None,
    allow_empty_message: Optional[bool] = None,
    signoff: Optional[bool] = None,
    keep_base: Optional[bool] = None,
    strategy: Optional[Union[str, bool]] = None,
    strategy_option: Optional[str] = None,
    exec: Optional[str] = None,
    gpg_sign: Optional[Union[str, bool]] = None,
    no_gpg_sign: Optional[bool] = None,
    empty: Optional[Union[str, Literal["drop", "keep", "ask"]]] = None,
    rebase_merges: Optional[
        Union[str, Literal["rebase-cousins", "no-rebase-cousins"]]
    ] = None,
    #
    # Interactive
    #
    interactive: Optional[bool] = None,
    edit_todo: Optional[bool] = None,
    skip: Optional[bool] = None,
    show_current_patch: Optional[bool] = None,
    abort: Optional[bool] = None,
    quit: Optional[bool] = None,
    **kwargs,
):
    """Reapply commits on top of another tip.

    Wraps `git rebase <https://git-scm.com/docs/git-rebase>`_.

    Parameters
    ----------
    continue : bool
        Accepted via kwargs (``continue`` is a Python keyword).

    Examples
    --------
    >>> git = Git(dir=git_local_clone.dir)
    >>> git_remote_repo = create_git_remote_repo()
    >>> git.rebase()
    'Current branch master is up to date.'

    Declare upstream:

    >>> git = Git(dir=git_local_clone.dir)
    >>> git_remote_repo = create_git_remote_repo()
    >>> git.rebase(upstream='origin')
    'Current branch master is up to date.'
    >>> git.dir.exists()
    True
    """
    required_flags: list[str] = []
    local_flags: list[str] = []

    # Positional order ends up as: [branch?, upstream?]
    if upstream:
        required_flags.insert(0, upstream)
    if branch:
        required_flags.insert(0, branch)
    if onto:
        local_flags.extend(["--onto", onto])
    if context:
        # git rebase takes -C<n> (attached value); "--C" is not a valid flag.
        local_flags.append(f"-C{context}")
    if exec:
        local_flags.extend(["--exec", shlex.quote(exec)])
    if reschedule_failed_exec:
        local_flags.append("--reschedule-failed-exec")
    if no_reschedule_failed_exec:
        local_flags.append("--no-reschedule-failed-exec")
    if fork_point:
        local_flags.append("--fork-point")
    if no_fork_point:
        local_flags.append("--no-fork-point")
    if root:
        local_flags.append("--root")
    if keep_base:
        local_flags.append("--keep-base")
    if autostash:
        local_flags.append("--autostash")
    if no_autostash:
        local_flags.append("--no-autostash")
    if merge:
        local_flags.append("--merge")
    if verbose:
        local_flags.append("--verbose")
    if quiet:
        local_flags.append("--quiet")
    if stat:
        local_flags.append("--stat")
    if no_stat:
        local_flags.append("--no-stat")
    if whitespace:
        # --whitespace requires its <option> value (passed through to git-apply).
        local_flags.append(f"--whitespace={whitespace}")
    if no_whitespace:
        local_flags.append("--no-whitespace")
    if rerere_autoupdate:
        local_flags.append("--rerere-autoupdate")
    if no_rerere_autoupdate:
        # Fixed typo: previously emitted "--no-rerwre-autoupdate".
        local_flags.append("--no-rerere-autoupdate")
    if reapply_cherry_picks:
        local_flags.append("--reapply-cherry-picks")
    if no_reapply_cherry_picks:
        local_flags.append("--no-reapply-cherry-picks")
    if keep_empty:
        local_flags.append("--keep-empty")
    if no_keep_empty:
        local_flags.append("--no-keep-empty")
    if verify:
        local_flags.append("--verify")
    if no_verify:
        local_flags.append("--no-verify")
    if ignore_date:
        local_flags.append("--ignore-date")
    if commit_date_is_author_date:
        local_flags.append("--commit-date-is-author-date")
    if empty is not None:
        if isinstance(empty, str):
            local_flags.append(f"--empty={empty}")
        else:
            local_flags.append("--empty")
    if rebase_merges is not None:
        if isinstance(rebase_merges, str):
            local_flags.append(f"--rebase-merges={rebase_merges}")
        else:
            local_flags.append("--rebase-merges")
    if gpg_sign is not None:
        if isinstance(gpg_sign, str):
            local_flags.append(f"--gpg-sign={gpg_sign}")
        else:
            local_flags.append("--gpg-sign")
    if no_gpg_sign:
        local_flags.append("--no-gpg-sign")
    if signoff:
        local_flags.append("--signoff")

    #
    # Interactive
    #
    if interactive:
        local_flags.append("--interactive")
    if kwargs.get("continue"):
        local_flags.append("--continue")
    if abort:
        local_flags.append("--abort")
    if edit_todo:
        local_flags.append("--edit-todo")
    if show_current_patch:
        local_flags.append("--show-current-patch")
    if quit:
        local_flags.append("--quit")
    return self.run(
        ["rebase", *local_flags, *required_flags], check_returncode=False
    )
def pull(
    self,
    *,
    reftag: Optional[Any] = None,
    repository: Optional[str] = None,
    deepen: Optional[str] = None,
    depth: Optional[str] = None,
    branch: Optional[str] = None,
    origin: Optional[str] = None,
    upload_pack: Optional[str] = None,
    shallow_since: Optional[str] = None,
    shallow_exclude: Optional[str] = None,
    negotiation_tip: Optional[str] = None,
    jobs: Optional[str] = None,
    server_option: Optional[str] = None,
    recurse_submodules: Optional[
        Union[bool, Literal["yes", "on-demand", "no"]]
    ] = None,
    recurse_submodules_default: Optional[
        Union[bool, Literal["yes", "on-demand"]]
    ] = None,
    submodule_prefix: Optional[StrOrBytesPath] = None,
    #
    # Pull specific flags
    #
    # Options related to git pull
    # https://git-scm.com/docs/git-pull#_options_related_to_pull
    #
    cleanup: Optional[str] = None,
    rebase: Optional[Union[str, bool]] = None,
    no_rebase: Optional[bool] = None,
    strategy: Optional[Union[str, bool]] = None,
    strategy_option: Optional[str] = None,
    gpg_sign: Optional[Union[str, bool]] = None,
    no_gpg_sign: Optional[bool] = None,
    commit: Optional[bool] = None,
    no_commit: Optional[bool] = None,
    edit: Optional[bool] = None,
    no_edit: Optional[bool] = None,
    fast_forward_only: Optional[bool] = None,
    fast_forward: Optional[bool] = None,
    no_fast_forward: Optional[bool] = None,
    sign_off: Optional[bool] = None,
    no_sign_off: Optional[bool] = None,
    stat: Optional[bool] = None,
    no_stat: Optional[bool] = None,
    squash: Optional[bool] = None,
    no_squash: Optional[bool] = None,
    verify: Optional[bool] = None,
    no_verify: Optional[bool] = None,
    verify_signatures: Optional[bool] = None,
    no_verify_signatures: Optional[bool] = None,
    summary: Optional[bool] = None,
    no_summary: Optional[bool] = None,
    autostash: Optional[bool] = None,
    no_autostash: Optional[bool] = None,
    allow_unrelated_histories: Optional[bool] = None,
    #
    # Options related to git fetch
    # https://git-scm.com/docs/git-pull#_options_related_to_fetching
    #
    fetch: Optional[bool] = None,
    no_fetch: Optional[bool] = None,
    all: Optional[bool] = None,
    force: Optional[bool] = None,
    keep: Optional[bool] = None,
    multiple: Optional[bool] = None,
    dry_run: Optional[bool] = None,
    append: Optional[bool] = None,
    atomic: Optional[bool] = None,
    ipv4: Optional[bool] = None,
    ipv6: Optional[bool] = None,
    progress: Optional[bool] = None,
    quiet: Optional[bool] = None,
    verbose: Optional[bool] = None,
    unshallow: Optional[bool] = None,
    update_shallow: Optional[bool] = None,
    negotiate_tip: Optional[bool] = None,
    no_write_fetch_head: Optional[bool] = None,
    write_fetch_head: Optional[bool] = None,
    no_auto_maintenance: Optional[bool] = None,
    auto_maintenance: Optional[bool] = None,
    no_write_commit_graph: Optional[bool] = None,
    write_commit_graph: Optional[bool] = None,
    prefetch: Optional[bool] = None,
    prune: Optional[bool] = None,
    prune_tags: Optional[bool] = None,
    no_tags: Optional[bool] = None,
    tags: Optional[bool] = None,
    no_recurse_submodules: Optional[bool] = None,
    set_upstream: Optional[bool] = None,
    update_head_ok: Optional[bool] = None,
    show_forced_updates: Optional[bool] = None,
    no_show_forced_updates: Optional[bool] = None,
    negotiate_only: Optional[bool] = None,
    **kwargs,
):
    """Download from repo. Wraps `git pull <https://git-scm.com/docs/git-pull>`_.

    Examples
    --------
    >>> git = Git(dir=git_local_clone.dir)
    >>> git_remote_repo = create_git_remote_repo()
    >>> git.pull()
    'Already up to date.'

    Fetch via ref:

    >>> git = Git(dir=tmp_path)
    >>> git.run(['init'])
    'Initialized ...'
    >>> git_remote_repo = create_git_remote_repo()
    >>> git.pull(reftag=f'file://{git_remote_repo}')
    ''
    >>> git.dir.exists()
    True
    """
    required_flags: list[str] = []
    # Positional order ends up as: [reftag?, repository?]
    if repository:
        required_flags.insert(0, repository)
    if reftag:
        required_flags.insert(0, reftag)
    local_flags: list[str] = []

    #
    # Pull-related arguments
    #
    if rebase is not None:
        if isinstance(rebase, str):
            local_flags.append(f"--rebase={rebase}")
        else:
            local_flags.append("--rebase")
    if no_rebase:
        local_flags.append("--no-rebase")
    if strategy is not None:
        if isinstance(strategy, str):
            local_flags.append(f"--strategy={strategy}")
        else:
            local_flags.append("--strategy")
    if strategy_option is not None:
        local_flags.append(f"--strategy-option={strategy_option}")
    if gpg_sign is not None:
        if isinstance(gpg_sign, str):
            local_flags.append(f"--gpg-sign={gpg_sign}")
        else:
            local_flags.append("--gpg-sign")
    if no_gpg_sign:
        local_flags.append("--no-gpg-sign")
    if cleanup:
        # --cleanup requires its <mode> value.
        local_flags.append(f"--cleanup={cleanup}")
    if commit:
        local_flags.append("--commit")
    if no_commit:
        local_flags.append("--no-commit")
    # git pull spells these --ff / --ff-only / --no-ff;
    # "--fast-forward" is not a valid git option.
    if fast_forward:
        local_flags.append("--ff")
    if fast_forward_only:
        local_flags.append("--ff-only")
    if no_fast_forward:
        local_flags.append("--no-ff")
    if edit:
        local_flags.append("--edit")
    if no_edit:
        local_flags.append("--no-edit")
    if sign_off:
        # git uses --signoff (no underscore).
        local_flags.append("--signoff")
    if no_sign_off:
        local_flags.append("--no-signoff")
    if stat:
        local_flags.append("--stat")
    if no_stat:
        local_flags.append("--no-stat")
    if squash:
        local_flags.append("--squash")
    if no_squash:
        local_flags.append("--no-squash")
    if verify:
        local_flags.append("--verify")
    if no_verify:
        local_flags.append("--no-verify")
    if verify_signatures:
        local_flags.append("--verify-signatures")
    if no_verify_signatures:
        local_flags.append("--no-verify-signatures")
    if summary:
        local_flags.append("--summary")
    if no_summary:
        local_flags.append("--no-summary")
    if autostash:
        local_flags.append("--autostash")
    if no_autostash:
        local_flags.append("--no-autostash")
    if allow_unrelated_histories:
        local_flags.append("--allow-unrelated-histories")

    #
    # Fetch-related arguments
    #
    if submodule_prefix is not None:
        # Plain interpolation, not !r: repr() would embed quotes into the
        # argument git receives.
        local_flags.append(f"--submodule-prefix={submodule_prefix}")
    # ``filter`` shadows the builtin, so it arrives via **kwargs.
    if (filter := kwargs.pop("filter", None)) is not None:
        local_flags.append(f"--filter={filter}")
    if depth is not None:
        local_flags.extend(["--depth", depth])
    if branch is not None:
        local_flags.extend(["--branch", branch])
    if origin is not None:
        local_flags.extend(["--origin", origin])
    if upload_pack is not None:
        local_flags.extend(["--upload-pack", upload_pack])
    if shallow_since is not None:
        local_flags.append(f"--shallow-since={shallow_since}")
    if shallow_exclude is not None:
        local_flags.append(f"--shallow-exclude={shallow_exclude}")
    if server_option is not None:
        local_flags.append(f"--server-option={server_option}")
    if jobs is not None:
        local_flags.extend(["--jobs", jobs])
    if keep:
        local_flags.append("--keep")
    if force:
        local_flags.append("--force")
    if multiple:
        local_flags.append("--multiple")
    if quiet:
        local_flags.append("--quiet")
    if progress:
        local_flags.append("--progress")
    if verbose:
        local_flags.append("--verbose")
    if all:
        local_flags.append("--all")
    if atomic:
        local_flags.append("--atomic")
    if unshallow:
        local_flags.append("--unshallow")
    if append:
        local_flags.append("--append")
    if update_shallow:
        local_flags.append("--update-shallow")
    if dry_run:
        local_flags.append("--dry-run")
    if no_write_fetch_head:
        local_flags.append("--no-write-fetch-head")
    if write_fetch_head:
        local_flags.append("--write-fetch-head")
    if auto_maintenance:
        local_flags.append("--auto-maintenance")
    if no_auto_maintenance:
        local_flags.append("--no-auto-maintenance")
    if write_commit_graph:
        local_flags.append("--write-commit-graph")
    if no_write_commit_graph:
        local_flags.append("--no-write-commit-graph")
    if prefetch:
        local_flags.append("--prefetch")
    if prune:
        local_flags.append("--prune")
    if prune_tags:
        local_flags.append("--prune-tags")
    if tags:
        local_flags.append("--tags")
    if no_tags:
        local_flags.append("--no-tags")
    if no_recurse_submodules:
        local_flags.append("--no-recurse-submodules")
    if set_upstream:
        local_flags.append("--set-upstream")
    if update_head_ok:
        local_flags.append("--update-head-ok")
    if show_forced_updates:
        local_flags.append("--show-forced-updates")
    if no_show_forced_updates:
        local_flags.append("--no-show-forced-updates")
    if negotiate_only:
        local_flags.append("--negotiate-only")
    return self.run(
        ["pull", *local_flags, "--", *required_flags], check_returncode=False
    )
def init(
    self,
    *,
    template: Optional[str] = None,
    separate_git_dir: Optional[StrOrBytesPath] = None,
    object_format: Optional[Literal["sha1", "sha256"]] = None,
    branch: Optional[str] = None,
    initial_branch: Optional[str] = None,
    shared: Optional[bool] = None,
    quiet: Optional[bool] = None,
    bare: Optional[bool] = None,
    **kwargs,
):
    """Create empty repo. Wraps `git init <https://git-scm.com/docs/git-init>`_.

    Parameters
    ----------
    quiet : bool
        ``--quiet``
    bare : bool
        ``--bare``
    object_format :
        Hash algorithm used for objects. SHA-256 is still experimental as of
        git 2.36.0.

    Examples
    --------
    >>> new_repo = tmp_path / 'example'
    >>> new_repo.mkdir()
    >>> git = Git(dir=new_repo)
    >>> git.init()
    'Initialized empty Git repository in ...'
    >>> pathlib.Path(new_repo / 'test').write_text('foo', 'utf-8')
    3
    >>> git.run(['add', '.'])
    ''

    Bare:

    >>> new_repo = tmp_path / 'example1'
    >>> new_repo.mkdir()
    >>> git = Git(dir=new_repo)
    >>> git.init(bare=True)
    'Initialized empty Git repository in ...'
    >>> pathlib.Path(new_repo / 'HEAD').exists()
    True

    Existing repo:

    >>> git = Git(dir=new_repo)
    >>> git = Git(dir=git_local_clone.dir)
    >>> git_remote_repo = create_git_remote_repo()
    >>> git.init()
    'Reinitialized existing Git repository in ...'
    """
    required_flags: list[str] = [str(self.dir)]
    local_flags: list[str] = []

    if template is not None:
        local_flags.append(f"--template={template}")
    if separate_git_dir is not None:
        # Plain interpolation, not !r: repr() would embed quotes into the
        # argument git receives.
        local_flags.append(f"--separate-git-dir={separate_git_dir}")
    if object_format is not None:
        local_flags.append(f"--object-format={object_format}")
    if branch is not None:
        # NOTE(review): git init has no "--branch"; the documented option is
        # --initial-branch / -b. Kept as-is to avoid silently remapping —
        # confirm intended behavior.
        local_flags.extend(["--branch", branch])
    if initial_branch is not None:
        local_flags.extend(["--initial-branch", initial_branch])
    if shared is True:
        local_flags.append("--shared")
    if quiet is True:
        local_flags.append("--quiet")
    if bare is True:
        local_flags.append("--bare")
    return self.run(
        ["init", *local_flags, "--", *required_flags], check_returncode=False
    )
def help(
    self,
    *,
    all: Optional[bool] = None,
    verbose: Optional[bool] = None,
    no_external_commands: Optional[bool] = None,
    no_aliases: Optional[bool] = None,
    config: Optional[bool] = None,
    guides: Optional[bool] = None,
    info: Optional[bool] = None,
    man: Optional[bool] = None,
    web: Optional[bool] = None,
    **kwargs,
):
    """Help info. Wraps `git help <https://git-scm.com/docs/git-help>`_.

    Parameters
    ----------
    all : bool
        Prints everything.
    no_external_commands : bool
        For use with ``all``, excludes external commands.
    no_aliases : bool
        For use with ``all``, excludes aliases.
    verbose : bool
        For use with ``all``, on by default.
    config : bool
        List all config vars.
    guides : bool
        List concept guides.
    info : bool
        Display man page in info format.
    man : bool
        Man page.
    web : bool
        Man page in HTML.

    Examples
    --------
    >>> git = Git(dir=tmp_path)
    >>> git.help()
    "usage: git [--version] [--help] [-C <path>]..."
    >>> git.help(all=True)
    "See 'git help <command>' to read about a specific subcommand..."
    >>> git.help(info=True)
    "usage: git [--version] [--help] [-C <path>] [-c <name>=<value>]..."
    >>> git.help(man=True)
    "usage: git [--version] [--help] [-C <path>] [-c <name>=<value>]..."
    """
    # These params were previously annotated Optional[str], but the body and
    # docstring treat them as booleans (`is True`); annotations corrected.
    local_flags: list[str] = []

    if verbose is True:
        local_flags.append("--verbose")
    if all is True:
        local_flags.append("--all")
    if no_external_commands is True:
        local_flags.append("--no-external-commands")
    if no_aliases is True:
        local_flags.append("--no-aliases")
    if config is True:
        local_flags.append("--config")
    if guides is True:
        local_flags.append("--guides")
    if info is True:
        local_flags.append("--info")
    if man is True:
        local_flags.append("--man")
    if web is True:
        local_flags.append("--web")
    return self.run(["help", *local_flags], check_returncode=False)
def reset(
    self,
    *,
    quiet: Optional[bool] = None,
    refresh: Optional[bool] = None,
    no_refresh: Optional[bool] = None,
    pathspec_from_file: Optional[StrOrBytesPath] = None,
    pathspec: Optional[Union[StrOrBytesPath, list[StrOrBytesPath]]] = None,
    soft: Optional[bool] = None,
    mixed: Optional[bool] = None,
    hard: Optional[bool] = None,
    merge: Optional[bool] = None,
    keep: Optional[bool] = None,
    commit: Optional[str] = None,
    recurse_submodules: Optional[bool] = None,
    no_recurse_submodules: Optional[bool] = None,
    **kwargs,
):
    """Reset HEAD. Wraps `git reset <https://git-scm.com/docs/git-reset>`_.

    Parameters
    ----------
    quiet : bool
    no_refresh : bool
    refresh : bool
    pathspec_from_file : :attr:`libvcs._internal.types.StrOrBytesPath`
    pathspec : :attr:`libvcs._internal.types.StrOrBytesPath` or list
        :attr:`libvcs._internal.types.StrOrBytesPath`
    soft : bool
    mixed : bool
    hard : bool
    merge : bool
    keep : bool
    commit : str
        Commit-ish to reset to, appended positionally (e.g. ``'HEAD~1'``).

    Examples
    --------
    >>> git = Git(dir=git_local_clone.dir)
    >>> git.reset()
    ''
    >>> git.reset(soft=True, commit='HEAD~1')
    ''
    """
    local_flags: list[str] = []

    if quiet is True:
        local_flags.append("--quiet")
    if no_refresh is True:
        local_flags.append("--no-refresh")
    if refresh is True:
        local_flags.append("--refresh")
    if pathspec_from_file is not None:
        # git's flag uses hyphens, and repr() would leak quotes into the value.
        local_flags.append(f"--pathspec-from-file={pathspec_from_file}")

    # HEAD-to-commit form
    if soft is True:
        local_flags.append("--soft")
    if mixed is True:
        local_flags.append("--mixed")
    if hard is True:
        local_flags.append("--hard")
    if merge is True:
        local_flags.append("--merge")
    if keep is True:
        local_flags.append("--keep")
    if commit is not None:
        # ``commit`` is a str; the old ``commit is True`` guard never fired.
        local_flags.append(commit)
    if recurse_submodules:
        local_flags.append("--recurse-submodules")
    elif no_recurse_submodules:
        local_flags.append("--no-recurse-submodules")

    if pathspec is not None:
        if not isinstance(pathspec, list):
            pathspec = [pathspec]
    else:
        pathspec = []
    return self.run(
        ["reset", *local_flags, *(["--", *pathspec] if len(pathspec) else [])],
        check_returncode=False,
    )
def checkout(
    self,
    *,
    quiet: Optional[bool] = None,
    progress: Optional[bool] = None,
    no_progress: Optional[bool] = None,
    pathspec_from_file: Optional[StrOrBytesPath] = None,
    pathspec: Optional[Union[StrOrBytesPath, list[StrOrBytesPath]]] = None,
    force: Optional[bool] = None,
    ours: Optional[bool] = None,
    theirs: Optional[bool] = None,
    no_track: Optional[bool] = None,
    guess: Optional[bool] = None,
    no_guess: Optional[bool] = None,
    _list: Optional[bool] = None,
    detach: Optional[bool] = None,
    merge: Optional[bool] = None,
    ignore_skip_worktree_bits: Optional[bool] = None,
    patch: Optional[bool] = None,
    orphan: Optional[str] = None,
    conflict: Optional[str] = None,
    overwrite_ignore: Optional[bool] = None,
    no_overwrite_ignore: Optional[bool] = None,
    recurse_submodules: Optional[bool] = None,
    no_recurse_submodules: Optional[bool] = None,
    overlay: Optional[bool] = None,
    no_overlay: Optional[bool] = None,
    commit: Optional[str] = None,
    branch: Optional[str] = None,
    new_branch: Optional[str] = None,
    start_point: Optional[str] = None,
    treeish: Optional[str] = None,
    **kwargs,
):
    """Switches branches or checks out files. Wraps
    `git checkout <https://git-scm.com/docs/git-checkout>`_ (`git co`).

    Parameters
    ----------
    quiet : bool
    progress : bool
    no_progress : bool
    pathspec_from_file : :attr:`libvcs._internal.types.StrOrBytesPath`
    pathspec : :attr:`libvcs._internal.types.StrOrBytesPath` or list
        :attr:`libvcs._internal.types.StrOrBytesPath`
    force : bool
    ours : bool
    theirs : bool
    no_track : bool
    guess : bool
    no_guess : bool
    ignore_skip_worktree_bits : bool
    merge : bool
    _list : bool
    detach : bool
    patch : bool
    orphan : str
        Name of the new orphan branch (``--orphan <new-branch>``).
    conflict : str
        Conflict style (``--conflict=<style>``).
    commit : str
    branch : str
    new_branch : str
        Creates the branch via ``-b <new-branch>``.
    start_point : str
    treeish : str

    Examples
    --------
    >>> git = Git(dir=git_local_clone.dir)
    >>> git.checkout()
    "Your branch is up to date with 'origin/master'."
    >>> git.checkout(branch='origin/master', pathspec='.')
    ''
    """
    # NOTE(review): the str-valued params below were previously guarded with
    # ``is True``, which can never hold for a string — they were silently
    # dropped. Guards corrected to ``is not None``.
    local_flags: list[str] = []

    if quiet is True:
        local_flags.append("--quiet")
    if progress is True:
        local_flags.append("--progress")
    elif no_progress is True:
        local_flags.append("--no-progress")
    if force is True:
        local_flags.append("--force")
    if ours is True:
        local_flags.append("--ours")
    if theirs is True:
        local_flags.append("--theirs")
    if detach is True:
        local_flags.append("--detach")
    if orphan is not None:
        # --orphan takes the new branch name as its value.
        local_flags.extend(["--orphan", orphan])
    if conflict is not None:
        local_flags.append(f"--conflict={conflict}")
    if commit is not None:
        local_flags.append(commit)
    if branch is not None:
        local_flags.append(branch)
    if new_branch is not None:
        local_flags.extend(["-b", new_branch])
    if start_point is not None:
        local_flags.append(start_point)
    if treeish is not None:
        local_flags.append(treeish)
    if recurse_submodules:
        local_flags.append("--recurse-submodules")
    elif no_recurse_submodules:
        local_flags.append("--no-recurse-submodules")

    if pathspec is not None:
        if not isinstance(pathspec, list):
            pathspec = [pathspec]
    else:
        pathspec = []
    return self.run(
        ["checkout", *local_flags, *(["--", *pathspec] if len(pathspec) else [])],
        check_returncode=False,
    )
def status(
    self,
    *,
    verbose: Optional[bool] = None,
    long: Optional[bool] = None,
    short: Optional[bool] = None,
    branch: Optional[bool] = None,
    z: Optional[bool] = None,
    column: Optional[Union[bool, str]] = None,
    no_column: Optional[bool] = None,
    ahead_behind: Optional[bool] = None,
    no_ahead_behind: Optional[bool] = None,
    renames: Optional[bool] = None,
    no_renames: Optional[bool] = None,
    find_renames: Optional[Union[bool, str]] = None,
    porcelain: Optional[Union[bool, str]] = None,
    untracked_files: Optional[Literal["no", "normal", "all"]] = None,
    ignored: Optional[Literal["traditional", "no", "matching"]] = None,
    ignored_submodules: Optional[Literal["untracked", "dirty", "all"]] = None,
    pathspec: Optional[Union[StrOrBytesPath, list[StrOrBytesPath]]] = None,
    **kwargs,
):
    """Status of working tree. Wraps
    `git status <https://git-scm.com/docs/git-status>`_.

    `git ls-files` has similar params (e.g. `z`)

    Parameters
    ----------
    verbose : bool
    long : bool
    short : bool
    branch : bool
    z : bool
        NUL-terminate entries (``-z``).
    column : bool
    no_column : bool
    ahead_behind : bool
    no_ahead_behind : bool
    find_renames : bool
    no_find_renames : bool
    porcelain : str, bool
    untracked_files : "no", "normal", "all"
    ignored : "traditional", "no", "matching"
    ignored_submodules : "untracked", "dirty", "all"
    pathspec : :attr:`libvcs._internal.types.StrOrBytesPath` or list
        :attr:`libvcs._internal.types.StrOrBytesPath`

    Examples
    --------
    >>> git = Git(dir=git_local_clone.dir)
    >>> git.status()
    "On branch master..."
    >>> pathlib.Path(git_local_clone.dir / 'new_file.txt').touch()
    >>> git.status(porcelain=True)
    '?? new_file.txt'
    >>> git.status(porcelain='1')
    '?? new_file.txt'
    >>> git.status(porcelain='2')
    '? new_file.txt'
    >>> git.status(C=git_local_clone.dir / '.git', porcelain='2')
    '? new_file.txt'
    """
    # NOTE(review): ``ahead_behind``/``renames`` (and their negations) are
    # accepted but not yet mapped to git flags — confirm before relying.
    local_flags: list[str] = []

    if verbose is True:
        local_flags.append("--verbose")
    if long is True:
        local_flags.append("--long")
    if short is True:
        local_flags.append("--short")
    if branch is True:
        local_flags.append("--branch")
    if z is True:
        # git's option is the short flag -z; "--z" is not valid.
        local_flags.append("-z")
    if untracked_files is not None and isinstance(untracked_files, str):
        local_flags.append(f"--untracked-files={untracked_files}")
    if ignored is not None:
        # Was mistakenly gated on isinstance(column, str) (copy-paste bug).
        local_flags.append(f"--ignored={ignored}")
    if ignored_submodules is not None:
        # Same copy-paste bug: the check inspected ``column``, not this param.
        if isinstance(ignored_submodules, str):
            local_flags.append(f"--ignored-submodules={ignored_submodules}")
        else:
            local_flags.append("--ignored-submodules")
    if column is not None:
        if isinstance(column, str):
            local_flags.append(f"--column={column}")
        else:
            local_flags.append("--column")
    elif no_column is not None:
        local_flags.append("--no-column")
    if porcelain is not None:
        if isinstance(porcelain, str):
            local_flags.append(f"--porcelain={porcelain}")
        else:
            local_flags.append("--porcelain")
    if find_renames is not None:
        # ``is True`` made the str branch below unreachable.
        if isinstance(find_renames, str):
            local_flags.append(f"--find-renames={find_renames}")
        else:
            local_flags.append("--find-renames")

    if pathspec is not None:
        if not isinstance(pathspec, list):
            pathspec = [pathspec]
    else:
        pathspec = []
    return self.run(
        ["status", *local_flags, *(["--", *pathspec] if len(pathspec) else [])],
        check_returncode=False,
    )
def config(
    self,
    *,
    replace_all: Optional[bool] = None,
    get: Optional[str] = None,
    get_all: Optional[bool] = None,
    get_regexp: Optional[str] = None,
    get_urlmatch: Optional[tuple[str, str]] = None,
    system: Optional[bool] = None,
    local: Optional[bool] = None,
    worktree: Optional[bool] = None,
    file: Optional[StrOrBytesPath] = None,
    blob: Optional[str] = None,
    remove_section: Optional[bool] = None,
    rename_section: Optional[bool] = None,
    unset: Optional[bool] = None,
    unset_all: Optional[bool] = None,
    _list: Optional[bool] = None,
    fixed_value: Optional[bool] = None,
    no_type: Optional[bool] = None,
    null: Optional[bool] = None,
    name_only: Optional[bool] = None,
    show_origin: Optional[bool] = None,
    show_scope: Optional[bool] = None,
    get_color: Optional[Union[str, bool]] = None,
    get_colorbool: Optional[Union[str, bool]] = None,
    default: Optional[str] = None,
    _type: Optional[
        Literal["bool", "int", "bool-or-int", "path", "expiry-date", "color"]
    ] = None,
    edit: Optional[bool] = None,
    no_includes: Optional[bool] = None,
    includes: Optional[bool] = None,
    add: Optional[bool] = None,
    **kwargs,
):
    """Get and set repository or global options. Wraps
    `git config <https://git-scm.com/docs/git-config>`_.

    ``_list`` and ``_type`` carry a leading underscore because ``list`` and
    ``type`` shadow builtins.

    Parameters
    ----------
    replace_all : Optional[bool]
    get : Optional[str]
        config key to read (``--get <name>``)
    get_all : Optional[bool]
    get_regexp : Optional[str]
    get_urlmatch : Optional[tuple[str, str]]
        ``(name, url)`` pair for ``--get-urlmatch <name> <url>``
    system : Optional[bool]
    local : Optional[bool]
    worktree : Optional[bool]
    file : Optional[StrOrBytesPath]
    blob : Optional[str]
    remove_section : Optional[bool]
    rename_section : Optional[bool]
    unset : Optional[bool]
    unset_all : Optional[bool]
    _list : Optional[bool]
    fixed_value : Optional[bool]
    no_type : Optional[bool]
    null : Optional[bool]
    name_only : Optional[bool]
    show_origin : Optional[bool]
    show_scope : Optional[bool]
    get_color : Optional[Union[str, bool]]
    get_colorbool : Optional[Union[str, bool]]
    default : Optional[str]
        fallback value emitted as ``--default <value>`` (used with ``get``)
    _type : "bool", "int", "bool-or-int", "path", "expiry-date", "color"
    edit : Optional[bool]
    no_includes : Optional[bool]
    includes : Optional[bool]
    add : Optional[bool]

    Examples
    --------
    >>> git = Git(dir=git_local_clone.dir)

    >>> git.config()
    'usage: git config ...'

    >>> git.config(_list=True)
    '...user.email=...'

    >>> git.config(get='color.diff')
    'auto'
    """
    local_flags: list[str] = []

    if replace_all is True:
        local_flags.append("--replace-all")

    if get is not None and isinstance(get, str):
        local_flags.extend(["--get", get])

    if get_regexp is not None and isinstance(get_regexp, str):
        local_flags.extend(["--get-regexp", get_regexp])

    # NOTE(review): annotated Optional[bool] but only a str value is
    # forwarded -- mirrors the unset/unset_all handling below.
    if get_all is not None and isinstance(get_all, str):
        local_flags.extend(["--get-all", get_all])

    if get_urlmatch is not None and isinstance(get_urlmatch, tuple):
        # was ["--get-urlmatch=", *t]: a standalone "--get-urlmatch=" flag
        # with a stray "=" that git does not accept; name and url must be
        # separate positional arguments after the flag
        local_flags.extend(["--get-urlmatch", *get_urlmatch])

    if unset is not None and isinstance(unset, str):
        local_flags.extend(["--unset", unset])

    if unset_all is not None and isinstance(unset_all, str):
        local_flags.extend(["--unset-all", unset_all])

    if _list is True:
        local_flags.append("--list")

    if fixed_value is True:
        local_flags.append("--fixed-value")

    if no_type is True:
        local_flags.append("--no-type")

    if null is True:
        local_flags.append("--null")

    if name_only is True:
        local_flags.append("--name-only")

    if show_origin is True:
        local_flags.append("--show-origin")

    if show_scope is True:
        local_flags.append("--show-scope")

    if edit is True:
        local_flags.append("--edit")

    if system is True:
        local_flags.append("--system")

    if local is True:
        local_flags.append("--local")

    if worktree is True:
        local_flags.append("--worktree")

    if remove_section is True:
        local_flags.append("--remove-section")

    if rename_section is True:
        local_flags.append("--rename-section")

    if _type is not None and isinstance(_type, str):
        local_flags.extend(["--type", _type])

    if blob is not None and isinstance(blob, str):
        local_flags.extend(["--blob", blob])

    if file is not None:
        local_flags.extend(["--file", str(file)])

    if default is not None:
        # was ``if default is True`` which could never match the declared
        # Optional[str] values, so ``--default <value>`` was unreachable
        if isinstance(default, str):
            local_flags.extend(["--default", default])
        else:
            local_flags.append("--default")

    if includes is True:
        local_flags.append("--includes")

    if no_includes is True:
        local_flags.append("--no-includes")

    if add is True:
        local_flags.append("--add")

    if get_colorbool is not None:
        if isinstance(get_colorbool, str):
            local_flags.extend(["--get-colorbool", get_colorbool])
        else:
            local_flags.append("--get-colorbool")

    if get_color is not None:
        if isinstance(get_color, str):
            local_flags.extend(["--get-color", get_color])
        else:
            local_flags.append("--get-color")

    return self.run(
        ["config", *local_flags],
        check_returncode=False,
    )
|
from CAMOnion.database.tables import *
from CAMOnion.core.math_tools import rotate_point
from CAMOnion.engine import face, slot, drill
import os
# Maps a camo_op ``function`` name (as stored with each operation) to the
# engine callable that emits the cutting moves for that operation type.
# CodeBuilder.process_operations() dispatches through this table.
code_engines = {
    'face_rough': face.face_rough,
    'face_finish': face.face_finish,
    'slot_rough': slot.slot_rough,
    'slot_finish': slot.slot_finish,
    'drill': drill.drill,
}
class CodeBuilder:
    """
    Assembles a complete NC program for the operations of the currently
    open CAMOnion file.

    The constructor pulls the setup, machine, origin and DXF geometry off
    the controller, builds the tool-list comment block, and pre-generates
    per-operation code; call :meth:`post` to get the finished program text.

    NOTE(review): every operation is assumed to share the setup/machine of
    ``operations[0]`` -- confirm against callers.
    """

    def __init__(self, controller):
        self.controller = controller
        self.operations = self.controller.current_camo_file.operations
        self.session = self.controller.session
        self.setup = self.operations[0].part_feature.setup
        self.machine = self.session.query(Machine).filter(
            Machine.id == self.operations[0].part_feature.setup.machine_id).one()
        self.origin = self.setup.origin
        # program header comment is just the bare file name
        self.header_comment = os.path.basename(self.controller.current_camo_file.filename)
        self.program_number = self.operations[0].part_feature.setup.program_number
        self.dxf_entities = self.controller.main_window.all_dxf_entities
        self.tool_set = set()
        # holds Tool objects during construction; get_tool_list() replaces
        # it with the formatted "(Tn - name)" comment string used by post()
        self.tool_list = []
        self.code = []
        self.current_tool = None
        self.next_tool = None
        self.get_tool_list()
        self.process_operations()

    def process_operations(self):
        """Attach machine, geometry points and generated code to each op."""
        for i, op in enumerate(self.operations):
            op.machine = self.machine
            op.points = self.get_operation_points(op)
            # dispatch to the code engine registered for this op type
            op.cutting_code = code_engines[op.base_operation.camo_op.function](op)
            op.start_code = self.get_start_code(op, i)
            op.end_code = self.get_end_of_tool_code()

    def get_start_code(self, op, i):
        """Return the code emitted before an operation: a full tool change
        when the tool differs from the active one, else a short restart."""
        code = []
        spindle = get_spindle(op)
        x, y = set_start_location(op)
        if self.current_tool != op.base_operation.tool.tool_number:
            self.get_tool_start_code(code, op, spindle, x, y)
        else:
            self.get_op_start_code(code, op, spindle)
        self.set_current_tool(op, i)
        return ''.join(code)

    def set_current_tool(self, op, i):
        """Track the active tool; next_tool wraps around to the first
        operation's tool after the last operation."""
        self.current_tool = op.base_operation.tool.tool_number
        if i < len(self.operations) - 1:
            self.next_tool = self.operations[i + 1].base_operation.tool.tool_number
        if i == len(self.operations) - 1:
            self.next_tool = self.operations[0].base_operation.tool.tool_number

    def get_tool_start_code(self, code, op, spindle, x, y):
        """Append the machine's tool-change template, filled in for op."""
        code.append(self.machine.tool_start.format
                    (tool_name=f"{op.base_operation.tool.name}",
                     # wfo_num is an offset index; 54 maps it onto G54..
                     work_offset=op.part_feature.setup.origin.wfo_num + 54,
                     x=round(x, 4),
                     y=round(y, 4),
                     spindle=spindle,
                     tool_number=op.base_operation.tool.tool_number,
                     clearance=op.part_feature.setup.clearance_plane,
                     next_tool=self.next_tool,
                     coolant='M8'))

    def get_op_start_code(self, code, op, spindle):
        """Append the machine's same-tool restart template for op."""
        code.append(self.machine.op_start.format
                    (x=round(op.points[0][0], 4),
                     y=round(op.points[0][1], 4),
                     spindle=spindle,
                     clearance=op.part_feature.setup.clearance_plane))

    def get_end_of_tool_code(self):
        """Machine tool-end code when the upcoming op changes tools."""
        if self.next_tool and self.next_tool != self.current_tool:
            return self.machine.tool_end
        return ''

    def get_tool_list(self):
        """Build the "(Tn - name)" comment block for the program header.

        Note: self.tool_list is intentionally rebound from a list of Tool
        objects to the finished string (consumed by program_start.format).
        """
        self.tool_list = list(set(op.base_operation.tool for op in self.operations))
        self.tool_list.sort(key=lambda t: t.tool_number)
        string_builder = ''
        for tool in self.tool_list:
            string_builder += f'(T{tool.tool_number} - {tool.name})\n'
        self.tool_list = string_builder[:-1]

    def get_operation_points(self, op):
        """Geometry points for an op, keyed by its feature type id."""
        points = []
        if op.base_operation.feature.feature_type.id == 3:  # facing
            points = self.get_face_points()
        elif op.base_operation.feature.feature_type.id == 1:  # drilling
            points = self.get_drill_points(op)
        elif op.base_operation.feature.feature_type.id == 2:  # slotting
            points = self.get_slot_points(op)
        return points

    def post(self):
        """Assemble and return the final program as one newline-joined string."""
        self.code = []
        self.get_program_start_code()
        self.get_code_body()
        self.get_program_end_code()
        # Drop empty sections in one pass.  The original popped from the
        # list while enumerating it, which skips the element that slides
        # into each removed slot (so some empty lines survived).
        self.code = [section for section in self.code if section != '']
        return '\n'.join(self.code)

    def get_program_start_code(self):
        """Append the program header (number, comment, tool list)."""
        self.code.append(self.machine.program_start.format
                         (program_number=self.program_number,
                          program_comment=self.header_comment.upper(),
                          tool_list=self.tool_list,
                          machine_name=self.machine.name.upper()))

    def get_code_body(self):
        """Append start/cutting/end code for every operation, in order."""
        for op in self.operations:
            self.code.append(op.start_code)
            self.code.append(op.cutting_code)
            self.code.append(op.end_code)

    def get_program_end_code(self):
        """Append the machine's program terminator."""
        self.code.append(self.machine.program_end)

    def get_slot_points(self, op):
        """Slot geometry as ((x1, y1), (x2, y2)) pairs translated into the
        program's work coordinate system."""
        op_geo = [self.dxf_entities[entity] for entity in op.part_feature.geometry]
        raw_pairs = all_slot_points_from_lines(op_geo)
        translated_pairs = []
        for start, end in raw_pairs:
            translated_pairs.append(
                (self.translate_point(start), self.translate_point(end)))
        return translated_pairs

    def get_drill_points(self, op):
        """Circle centers for a drilling op, translated and zig-zag ordered."""
        op_geo = [self.dxf_entities[entity] for entity in op.part_feature.geometry]
        points = [self.translate_point((round(entity.dxf.center.x, 4),
                                        round(entity.dxf.center.y, 4)))
                  for entity in op_geo]
        return zig_zag(points)

    def get_face_points(self):
        """Bounding corners (top-left, bottom-right) of all LINE entities,
        translated into the work coordinate system."""
        all_lines = [self.dxf_entities[entity] for entity in self.dxf_entities if
                     self.dxf_entities[entity].DXFTYPE == 'LINE']
        all_points = all_points_from_lines(all_lines)
        x_left = min(map(lambda point: point[0], all_points))
        x_right = max(map(lambda point: point[0], all_points))
        y_top = max(map(lambda point: point[1], all_points))
        y_bottom = min(map(lambda point: point[1], all_points))
        return [self.translate_point((x_left, y_top)),
                self.translate_point((x_right, y_bottom))]

    def translate_point(self, point):
        """Shift a model-space point by the setup origin, then rotate it by
        the negated origin angle into program space."""
        x, y = point
        shifted_point = (x - self.origin.x, y - self.origin.y)
        return rotate_point(shifted_point, -self.origin.angle)
def all_points_from_lines(lines):
    """Collect the start and end point of every LINE entity, in order."""
    endpoints = []
    for entity in lines:
        endpoints.extend((entity.dxf.start, entity.dxf.end))
    return endpoints
def all_slot_points_from_lines(lines):
    """Return one ((x1, y1), (x2, y2)) pair per line, rounded to 4 places."""
    pairs = []
    for entity in lines:
        start = (round(entity.dxf.start[0], 4), round(entity.dxf.start[1], 4))
        end = (round(entity.dxf.end[0], 4), round(entity.dxf.end[1], 4))
        pairs.append((start, end))
    return pairs
def get_spindle(op):
    """Spindle-start code for an operation; tapping ops get none (the tap
    cycle manages the spindle itself)."""
    if op.base_operation.camo_op.op_type == 'Tap':
        return ''
    rpm = int(op.base_operation.fixed_speed(op.part_feature.setup.machine.max_rpm))
    return f"M3 S{rpm}"
def set_start_location(op):
    """First XY position for an operation, keyed off its engine function.

    Slot points are line-segment pairs, drill/tap points are bare XY tuples,
    and facing uses the op's tool path.  Unknown functions fall through to
    an implicit None.
    """
    function = op.base_operation.camo_op.function
    if 'slot' in function:
        first_segment = op.points[0]
        return first_segment[0][0], first_segment[0][1]
    if 'drill' in function or 'tap' in function:
        first_point = op.points[0]
        return first_point[0], first_point[1]
    if 'face' in function:
        # NOTE(review): facing reads op.tool_path, not op.points -- confirm
        # the engine sets tool_path before this is called
        return op.tool_path[0][0], op.tool_path[0][1]
def zig_zag(points):
    """Order points column by column (unique x values ascending), reversing
    the y direction on every other column to minimize travel."""
    columns = sorted({p[0] for p in points})
    ordered = []
    descending = False
    for x in columns:
        column = [p for p in points if p[0] == x]
        column.sort(key=lambda p: p[1], reverse=descending)
        ordered.extend(column)
        descending = not descending
    return ordered
|
<reponame>pranshu30/Azure-DevOps<filename>code/training/train.py
"""
Copyright (C) Microsoft Corporation. All rights reserved.
Microsoft Corporation (“Microsoft”) grants you a nonexclusive, perpetual,
royalty-free right to use, copy, and modify the software code provided by us
("Software Code"). You may not sublicense the Software Code or any use of it
(except to your affiliates and to vendors to perform work on your behalf)
through distribution, network access, service agreement, lease, rental, or
otherwise. This license does not purport to express any claim of ownership over
data you may have shared with Microsoft in the creation of the Software Code.
Unless applicable law gives you more rights, Microsoft reserves all other
rights not expressly granted herein, whether by implication, estoppel or
otherwise.
THE SOFTWARE CODE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
MICROSOFT OR ITS LICENSORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THE SOFTWARE CODE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import pickle
from azureml.core import Workspace
from azureml.core.run import Run
import os
from sklearn.datasets import load_diabetes
from sklearn.linear_model import Ridge
from sklearn.svm import SVR
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.externals import joblib
import numpy as np
import json
import subprocess
from typing import Tuple, List
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestRegressor
# run_history_name = 'devops-ai'
# os.makedirs('./outputs', exist_ok=True)
# #ws.get_details()
# Start recording results to AML
# run = Run.start_logging(workspace = ws, history_name = run_history_name)
# Handle to the Azure ML run this script executes inside.
# NOTE(review): Run.get_submitted_run() is a deprecated alias of
# Run.get_context() in newer azureml-core releases -- confirm SDK version.
run = Run.get_submitted_run()
# Load the diabetes regression dataset bundled with scikit-learn.
X, y = load_diabetes(return_X_y=True)
# Feature names, kept for reference; not used below.
columns = ["age", "gender", "bmi", "bp", "s1", "s2", "s3", "s4", "s5", "s6"]
# 80/20 train/test split with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
data = {"train": {"X": X_train, "y": y_train}, "test": {"X": X_test, "y": y_test}}
print("Running train.py")
def GridSearch(reg, parameters, train_X, train_y, test_X, test_y):
    """Fit ``reg`` over the given parameter grid and score on the test split.

    Returns the fitted GridSearchCV object and the test-set MSE.
    """
    searcher = GridSearchCV(reg, parameters)
    searcher.fit(train_X, train_y)
    test_error = mean_squared_error(searcher.predict(test_X), test_y)
    return searcher, test_error
# Ridge regression: grid-search the L2 regularization strength alpha.
reg_ridge = Ridge()
parameters_ridge = {'alpha':np.arange(0.01, 1.0, 0.05)}
model_ridge,mse_ridge = GridSearch(reg_ridge, parameters_ridge, data["train"]["X"], data["train"]["y"],data["test"]["X"], data["test"]["y"])
# Support-vector regression: compare linear, polynomial and RBF kernels.
reg_svr = SVR()
parameters_svr = {'kernel':['linear','poly','rbf']}
model_SVR,mse_SVR = GridSearch(reg_svr, parameters_svr,data["train"]["X"], data["train"]["y"],data["test"]["X"], data["test"]["y"])
# Random-forest regression: grid-search tree depth and split/leaf sizes.
reg_rfr = RandomForestRegressor()
parameters_rfr = {'max_depth': [10, 20, 30, 50],
                  'min_samples_leaf': [1, 2, 4],
                  'min_samples_split': [2, 5, 10]}
model_rfr,mse_rfr = GridSearch(reg_rfr, parameters_rfr,data["train"]["X"], data["train"]["y"],data["test"]["X"], data["test"]["y"])
# Pick the model with the lowest test MSE, log it, save it, and upload it
# to the run's artifact store.
models = [model_ridge, model_SVR, model_rfr]
mse = [mse_ridge, mse_SVR, mse_rfr]
best_mse = min(mse)
best_model = models[np.argmin(mse)]
run.log("mse", best_mse)

# Save model as part of the run history.
model_name = "sklearn_regression_model.pkl"
# joblib.dump opens the target path itself; the original wrapped this in a
# redundant ``with open(model_name, "wb")`` whose handle was never used
# (it only truncated the file before joblib reopened it by name).
joblib.dump(value=best_model, filename=model_name)

# Upload the model file explicitly into the run's ./outputs artifacts.
run.upload_file(name="./outputs/" + model_name, path_or_stream=model_name)
print("Uploaded the model {} to experiment {}".format(model_name, run.experiment.name))
dirpath = os.getcwd()
print(dirpath)

print("Following files are uploaded ")
print(run.get_file_names())
run.complete()
|
<filename>edl/taxon.py
import re
import logging
import numpy as np
logger = logging.getLogger(__name__)
##############
# Classes #
##############
class Taxonomy:
    """
    A container for taxonomy data: holds the id->node, synonym-name->node
    and scientific-name->node maps, plus the tree's root node.
    """

    def __init__(self, idMap, nameMap, realNameMap, path=None, rootNode=None):
        self.idMap = idMap
        self.nameMap = nameMap
        self.realNameMap = realNameMap
        self.path = path
        # When no root is supplied, derive it from any node in the id map.
        if rootNode is None:
            rootNode = next(iter(idMap.values())).getRootNode()
        self.root = rootNode

    def __str__(self):
        return f"Taxonomy with {len(self.idMap)} nodes"

    def __repr__(self):
        return f"Taxonomy({self.path})"
class TaxNode:
    """
    A node in a phylogenetic tree: one taxon with a parent link and a list
    of child nodes.

    Equality, ordering and hashing are all based on the full lineage string,
    so two nodes with the same ancestry compare equal even across trees.
    """

    # top-level domains (referenced by transmogrify()'s documented rules)
    domains = ["Bacteria", "Archaea", "Eukaryota", "Viruses", "Viroids"]
    # class-level cache for getNamedNode(): name -> TaxNode
    namedNodes = {}

    def __init__(self, taxid, parentid, rank):
        """Create an unlinked node; call setParent() to attach it."""
        self.id = taxid
        self.taxid = taxid
        self.parentid = parentid
        self.rank = rank
        self.parent = None
        self.children = []
        self.name = ""
        # lazily-filled caches
        self.translation = None      # result of transmogrify()
        self.lineage = None          # ancestor tuple from getLineage()
        self.lineage_strings = {}    # separator -> joined lineage string

    def __hash__(self):
        return hash(self.getLineageString(';'))

    def __repr__(self):
        return "TaxNode(%s,%s,%s)" % (
            repr(self.id), repr(self.parentid), repr(self.rank))

    def __key__(self):
        # not a Python protocol method: shared key for __lt__/__eq__/__hash__
        return self.getLineageString(';')

    def __lt__(self, other):
        return self.__key__() < other.__key__()

    def __eq__(self, other):
        return self.__key__() == other.__key__() if isinstance(
            other, self.__class__) else False

    def __str__(self):
        if self.name == "":
            return str(self.taxid)
        else:
            if (self.isNameGeneric()):
                # qualify generic names ("uncultured ...") with the parent
                # NOTE(review): renders "name(None)" if parent is unset
                if self is not self.parent:
                    return "%s(%s)" % (self.name, str(self.parent))
            return self.name

    def setParent(self, parent):
        """Link this node under ``parent`` (and into parent.children)."""
        self.parent = parent
        if parent is not None:
            parent.children.append(self)

    def isAncestorOf(self, node):
        """
        Return true if node contained within this node
        """
        return node is not None and self in node.getLineage()

    def getLCA(self, node):
        """
        Given another node in the same tree, find the most recent (lowest)
        common ancestor.

        Node will cache their lineages for faster retrieval
        """
        if self in node.getLineage():
            if logger.getEffectiveLevel() <= logging.DEBUG:
                lineageString = ""
                for n in node.getLineage():
                    lineageString += str(n) + ','
                logger.debug("%s found in [%s]" % (str(self), lineageString))
            return self
        else:
            # walk up from self until we land on an ancestor of node
            if self.parent is None or self.parent is self:
                return self
            return self.parent.getLCA(node)

    def transmogrify(self, rank, taxList):
        """
        Given a rank string, and a list of taxa, return a single
        taxon name by the following rules:

        1) if taxon or an ancestor matches a name in taxList, return
           matching name.
        2) if taxon or an ancestor has the indicated rank, return
           matching taxon name.
        3) if taxon or a parent matches list of domains, return the domain
        4) print a warning and return None

        The result is cached in self.translation.
        """
        if self.translation is None:
            if self.name in taxList:
                self.translation = self.name
                logger.debug("%s in tax list" % (self.name))
                return self.translation
            if self.parent is None or self.parent is self:
                # reached the root: translate to our own name
                self.translation = self.name
                logger.debug("map to self %s" % (self.name))
                return self.translation
            parentTranslation = self.parent.transmogrify(rank, taxList)
            if self.rank == rank and parentTranslation not in taxList:
                self.translation = self.name
                logger.debug("%s is rank %s" % (self.name, rank))
                return self.translation
            self.translation = parentTranslation
            logger.debug(
                "%s is using parent's translation: %s" %
                (self.name, self.translation))
        else:
            logger.debug(
                "%s already translated to %s" %
                (self.name, self.translation))
        return self.translation

    def getAncestorClosestToRank(self, rank, **kwargs):
        """Delegate to the module-level getAncestorClosestToRank()."""
        return getAncestorClosestToRank(self, rank, **kwargs)

    def getAncestorAtRank(self, rank):
        """
        return the TaxNode ancestor of this node which has the given rank.

        Return None if no suitable ancestor found.
        """
        if self.rank == rank:
            return self
        else:
            if self.parent is None:
                return None
            if self.parent is self:
                return None
            return self.parent.getAncestorAtRank(rank)

    def getRootNode(self):
        """
        go up through parents til we get to the root
        """
        logger.info("getting root node through %s" % (self.name))
        if self.parent is not None and self.parent is not self:
            return self.parent.getRootNode()
        else:
            return self

    def isNameGeneric(self):
        """True for placeholder names like 'uncultured ...' that should be
        qualified with (or deferred to) their parent taxon."""
        name = spaceRE.sub(',', self.name)
        if name[0:10] == 'uncultured':
            return True
        if name[0:13] == 'environmental':
            return True
        if metagenomeRE.search(name):
            return True
        if name[0:12] == 'endosymbiont':
            return True
        return False

    def getCollapsedCounts(self, counts, cutoff, hitTranslations):
        """
        walk the tree and fill hitTranslation hash with map from nodes
        under cutoff to node where counts should be aggregated
        """
        count = 0
        countedNodes = [self]
        if self in counts:
            # use value in counts if it's there
            count = counts[self]
            logger.debug("%s has %d hits" % (self, count))
        # add sum of children (those under cutoff)
        for child in self.children:
            if child is self:
                logger.warn(
                    "node %s(%s) is child of itself!" %
                    (repr(self), str(self)))
                continue
            (kidCount, kidsCountedNodes) = child.getCollapsedCounts(
                counts, cutoff, hitTranslations)
            if kidCount is not None:
                # was ``logging.debug`` (module call), bypassing this
                # module's logger and its configured level
                logger.debug(
                    "Adding %d to %s from %s" %
                    (kidCount, child, self))
                count += kidCount
                countedNodes.extend(kidsCountedNodes)
        # if this node has a generic name, defer to parent
        if self.isNameGeneric():
            logger.debug(
                "%s is too generic, giving %d hits to parent: %s" %
                (self.name, count, self.parent.name))
            return (count, countedNodes)
        # if this node is over cutoff, add to counts
        if count >= cutoff:
            logger.info(
                "keeping %d hits in %s (from %d nodes)" %
                (count, str(self), len(countedNodes)))
            for node in countedNodes:
                hitTranslations[node] = self
            return (None, [])
        else:
            # otherwise return count for parent to use
            logger.debug("Passing %d hits to parent from %s" % (count, self))
            return (count, countedNodes)

    def getLineageString(self, sep):
        """Return (and cache) the names from root to this node joined by sep."""
        if sep not in self.lineage_strings:
            if self.parent is None or self.parent is self:
                self.lineage_strings[sep] = self.name
            else:
                self.lineage_strings[sep] = \
                    sep.join((self.parent.getLineageString(sep),
                              self.name))
        return self.lineage_strings[sep]

    def getLineage(self):
        """
        return a list of all the nodes in this node's ancestry
        (root first, self last; cached as a tuple)
        """
        if self.lineage is None:
            if self.parent is None or self.parent is self:
                self.lineage = tuple([self, ])
            else:
                lineage = list(self.parent.getLineage())
                lineage.append(self)
                self.lineage = tuple(lineage)
        return self.lineage

    def compareRanks(self, comparisons):
        """Recursively count (ancestorRank, descendantRank) pairings for the
        whole subtree into the given map."""
        for kid in self.children:
            if kid is self or kid is None:
                continue
            if self.rank is not None and self.rank.strip() != "no rank":
                kid.compareRank(self.rank, comparisons)
            kid.compareRanks(comparisons)

    def compareRank(self, ancestorRank, comparisons):
        """Count ancestorRank above this node (and its subtree)."""
        if self.rank is not None and self.rank.strip() != "no rank":
            compKey = (ancestorRank, self.rank)
            comparisons[compKey] = comparisons.get(compKey, 0) + 1
        for kid in self.children:
            if kid is self or kid is None:
                continue
            kid.compareRank(ancestorRank, comparisons)

    def generateMemberTaxids(node):
        """Yield the taxids of every descendant (post-order), then this node's.

        The original recursed by calling the bare name
        ``generateMemberTaxids(child)``, which resolves in module scope and
        raises NameError at runtime; recurse via the bound method instead.
        """
        for child in node.children:
            for taxid in child.generateMemberTaxids():
                yield taxid
        yield node.id

    @staticmethod
    def getNamedNode(name):
        """Return the cached node for ``name``, creating it on first use."""
        if name not in TaxNode.namedNodes:
            node = TaxNode(name, None, None)
            TaxNode.namedNodes[name] = node
        return TaxNode.namedNodes[name]

    @staticmethod
    def addToTreeFromString(taxString, tree=None):
        """Parse a ';'-separated lineage string into ``tree`` (a name->node
        map) and return the leaf node.

        The original used a mutable default (``tree={}``) shared between
        calls; since the function returns only the leaf, callers could never
        retrieve that shared tree, so a fresh dict per call is safe.
        """
        if tree is None:
            tree = {}
        if 'root' not in tree:
            if len(tree) > 0:
                # was ``raise Error(...)`` -- Error is undefined (NameError)
                raise ValueError('tree must have root node!')
            root = TaxNode('root', None, None)
            root.name = root.id
            tree['root'] = root
        lineage = scRE.split(taxString)
        logger.debug("parsing %s: %s" % (taxString, str(lineage)))
        lastNode = tree['root']
        for taxon in lineage:
            taxon = taxon.strip()
            taxon = removeSpaces(taxon)
            if (taxon in tree) and (tree[taxon].parent is lastNode):
                lastNode = tree[taxon]
            else:
                newNode = TaxNode(taxon, lastNode.id, None)
                newNode.name = newNode.id
                newNode.setParent(lastNode)
                tree[taxon] = newNode
                lastNode = newNode
        return lastNode
################
# compiled REs #
################
cladeRE = re.compile(r'clade')            # 'clade' -> 'cluster' retry in getNodeFromHit()
parensRE = re.compile(r'\([^\(\)]+\)')    # innermost parenthesized chunk
lastSemicolonRE = re.compile(r'^.*;([^;]+)$')  # last element of a ';'-joined lineage
spaceRE = re.compile(r'\s')               # any single whitespace character
dotRE = re.compile(r'\.')                 # literal dots (stripped by simplifyString)
scRE = re.compile(r';+')                  # runs of semicolons (lineage separator)
metagenomeRE = re.compile(r'metagenome')  # generic 'metagenome' names
#############
# Functions #
#############
# this is a list (in order) of the ranks in the ncbi tax dump
# Ordered most specific ('forma') to most general ('superkingdom');
# ranks.index(...) is used elsewhere to compare rank depth.
ranks = [
    'forma',
    'varietas',
    'subspecies',
    'species',
    'species subgroup',
    'species group',
    'subgenus',
    'genus',
    'subtribe',
    'tribe',
    'subfamily',
    'family',
    'superfamily',
    'parvorder',
    'infraorder',
    'suborder',
    'cohort',
    'order',
    'superorder',
    'infraclass',
    'subclass',
    'class',
    'superclass',
    'subphylum',
    'phylum',
    'superphylum',
    'subkingdom',
    'kingdom',
    'superkingdom']
# The next few things were an attempt to automatically determine the order
# of the ranks from the taxonomic tree.

# comparisons[(ranka, rankb)] counts how often ranka appeared as an ancestor
# of rankb; reset and repopulated by deduceRankOrder().
comparisons = {}


def compareRanks(r1, r2):
    """Three-way comparison of two rank names using ``comparisons``.

    Returns <0 / 0 / >0 when r1 sorts before / equal to / after r2, i.e.
    ancestors sort after their descendants.  Warns when the evidence is
    ambiguous or absent.
    """
    r1anc = comparisons.get((r1, r2), 0)
    r2anc = comparisons.get((r2, r1), 0)
    if r1anc > 0 and r2anc > 0:
        logger.warn(
            "ambiguos relationshp between %s and %s: (%d,%d)" %
            (r1, r2, r1anc, r2anc))
    if r1anc == 0 and r2anc == 0:
        logger.warn(
            "no information for %s and %s: (%d, %d)" %
            (r1, r2, r1anc, r2anc))
    # cmp() was removed in Python 3; this expression is its standard
    # replacement.
    return (r1anc > r2anc) - (r1anc < r2anc)


def deduceRankOrder(taxMap):
    """Walk the whole tree behind ``taxMap`` and return the observed rank
    names sorted most-specific-first.

    Side effect: resets and repopulates the global ``comparisons`` map.
    """
    logger.info("figuring out ranks!")
    root = next(iter(taxMap.values())).getRootNode()
    # this generates a map of tuples to counts
    # comparisons[(ranka,rankb)] == 4 means ranka was an ancestor to rankb 4
    # times
    global comparisons
    comparisons = {}
    root.compareRanks(comparisons)
    logger.info(
        "There are %d entries in the comparison map!" %
        (len(comparisons)))
    # get list of all ranks (intentionally shadows the module-level
    # ``ranks`` list: the deduced order replaces the hard-coded one here)
    ranks = []
    for key in comparisons:
        ranks.extend(key)
    ranks = set(ranks)
    logger.info("%d ranks: %s" % (len(ranks), ranks))
    # sort ranks; Python 3 removed sorted()'s cmp= keyword, so adapt the
    # three-way comparator with functools.cmp_to_key
    from functools import cmp_to_key
    ranks = sorted(ranks, key=cmp_to_key(compareRanks))
    logger.info("sorted ranks: %s" % str(ranks))
    return ranks
# Cache of already-parsed taxonomies, keyed by dump directory, so repeated
# readTaxonomy() calls don't re-parse the files.
_taxonomies = {}


def readTaxonomy(taxDir, namesMap=False):
    """
    read the names.dmp and nodes.dmp files in this directory and build a tree
    return a map from taxon id to each node in the tree

    When ``namesMap`` is True, also build the synonym map (colliding
    synonyms collapse to the LCA of the nodes involved) and the
    scientific-name map.
    """
    if taxDir in _taxonomies:
        # if this taxonomy has already been parse, just re-use it
        # (only when names were parsed too, if names were asked for)
        if not namesMap or len(_taxonomies[taxDir].nameMap) > 0:
            return _taxonomies[taxDir]
    logger.info("read nodes")
    taxMap = {}        # taxid -> TaxNode
    nameMap = {}       # simplified synonym -> TaxNode (LCA on collisions)
    realNameMap = {}   # simplified scientific name -> TaxNode
    # build tree from nodes.dmp; dump columns are '|'-separated:
    # taxid | parentid | rank | ...
    # (the original leaked both file handles by never closing open())
    with open("%s/nodes.dmp" % (taxDir)) as nodesFile:
        for line in nodesFile:
            cells = line.split(r'|')
            taxid = int(cells[0].strip())
            parentid = int(cells[1].strip())
            rank = cells[2].strip()
            node = TaxNode(taxid, parentid, rank)
            taxMap[taxid] = node
    logger.info("link nodes")
    # link parents and children
    for node in taxMap.values():
        parent = None
        try:
            parent = taxMap[node.parentid]
        except KeyError:
            logger.warn(
                "Can't find parent (%s) for %s" %
                (node.parentid, node.id))
        else:
            node.setParent(parent)
    logger.info("name nodes")
    # name nodes from names.dmp
    # NOTE(review): cells[2] is preferred as the name with cells[1] as the
    # fallback/synonym -- confirm against the dump format in use.
    with open("%s/names.dmp" % (taxDir)) as namesFile:
        for line in namesFile:
            cells = line.split(r'|')
            taxid = int(cells[0].strip())
            name = cells[2].strip()
            name2 = cells[1].strip()
            if name == "":
                name = name2
                name2 = None
            quality = cells[3].strip()
            if quality == "scientific name":
                node = taxMap[taxid]
                node.name = name
                if namesMap:
                    realNameMap[simplifyString(name)] = node
            elif namesMap:
                node = taxMap[taxid]
            if namesMap:
                if name2 is None or name2 == name:
                    names = [name, ]
                else:
                    names = [name, name2]
                for name in names:
                    name = simplifyString(name)
                    mapnode = nameMap.get(name, 0)
                    if mapnode == 0:
                        # not in map, add it
                        nameMap[name] = node
                    elif mapnode is not None:
                        # already in map
                        if mapnode is not node:
                            # already in map with different taxon:
                            # collapse the ambiguous synonym to the LCA
                            lca = node.getLCA(mapnode)
                            nameMap[name] = lca
    taxonomy = Taxonomy(taxMap, nameMap, realNameMap)
    _taxonomies[taxDir] = taxonomy
    return taxonomy
def simplifyString(string):
    """Normalize a name for lookup: lowercase, no whitespace, no dots."""
    lowered_and_collapsed = removeSpaces(string.lower())
    return dotRE.sub("", lowered_and_collapsed)
def removeSpaces(string):
    """Drop every whitespace character from ``string``."""
    without_whitespace = spaceRE.sub("", string)
    return without_whitespace
# Hand-maintained corrections for organism names whose spelling in hit
# descriptions differs from the NCBI taxonomy (keys/values are already in
# simplifyString() form except for spaces kept here for readability).
nameTranslations = {'asaia lannensis': 'asaia lannaensis',
                    'uncultured haptophyte': 'haptophyta',
                    'acidisoma sibiricum': 'acidisoma sibirica'}
# Debug-only tally of which lookup strategy resolved each hit in
# getNodeFromHit(); only updated when the logger level is DEBUG.
methodCount = {
    'sp': 0,
    'par': 0,
    'map': 0,
    'raw': 0,
    'none': 0,
    'pre': 0,
    'sub': 0}
def getNodeFromHit(hit, nameMap, exhaustive=True):
    """
    Use a number of tricks to map the organism name given by 'hit' to
    a taxon object in 'nameMap'

    Strategies, in order: raw lookup, lookup after stripping parentheses,
    hard-coded translations, a 'clade'->'cluster' retry, and (when
    ``exhaustive``) prefix matching in both directions.  Returns None when
    nothing matches.
    """
    if hit is None:
        return hit
    # remove extra formatting
    hit = simplifyString(hit)
    # remove an initial '|'
    if hit[0:1] == '|':
        hit = hit[1:]
    """
    First, try a simple look up:
    nameMap contains all the synonyms in the NCBI tax dump
    the synonyms and hit are lowercase have had all spaces remove
    """
    try:
        taxNode = nameMap[hit]
        if logger.getEffectiveLevel() <= logging.DEBUG:
            methodCount['raw'] += 1
        return taxNode
    except KeyError:
        pass
    # try to remove parens
    m = parensRE.search(hit)
    if m:
        hit = parensRE.sub('', hit)
        try:
            taxNode = nameMap[hit]
            if logger.getEffectiveLevel() <= logging.DEBUG:
                methodCount['par'] += 1
            return taxNode
        except KeyError:
            pass
    # hard coded translations
    if hit in nameTranslations:
        hit = nameTranslations[hit]
        try:
            taxNode = nameMap[hit]
            if logger.getEffectiveLevel() <= logging.DEBUG:
                methodCount['map'] += 1
            return taxNode
        except KeyError:
            pass
    # replace clade with cluster
    (newHit, count) = cladeRE.subn('cluster', hit)
    if count > 0:
        try:
            taxNode = nameMap[newHit]
            return taxNode
        except KeyError:
            pass
    if exhaustive:
        # Fall back to a linear scan over every known name (O(len(nameMap))).
        startingName = None
        # look for name that starts with this complete hit
        # or name that is found is start of hit
        hitLen = len(hit)
        for name in nameMap:
            nameLen = len(name)
            if hitLen < nameLen:
                # except for cases like 'alteromonas sp.', ...
                if hit[-2:] != 'sp':
                    # check to see if hit is substring of name
                    if name[0:hitLen] == hit:
                        logger.debug("%s changed to %s" % (hit, name))
                        if logger.getEffectiveLevel() <= logging.DEBUG:
                            methodCount['pre'] += 1
                        return nameMap[name]
            else:
                if hit[0:nameLen] == name:
                    # track the longest name that is a prefix of the hit
                    if startingName is None:
                        startingName = name
                    else:
                        if nameLen > len(startingName):
                            startingName = name
        else:
            # there was no 'pre' match, so take longest 'sub' match
            # NOTE(review): this is a for/else; with no ``break`` in the
            # loop it always runs after the scan -- presumably intended as
            # "no prefix match returned above" -- confirm.
            if startingName is not None:
                logger.debug('%s changed to %s' % (hit, startingName))
                if logger.getEffectiveLevel() <= logging.DEBUG:
                    methodCount['sub'] += 1
                return nameMap[startingName]
    logger.warn("Can't translate name: %s" % (hit))
    if logger.getEffectiveLevel() <= logging.DEBUG:
        methodCount['none'] += 1
    return None
def getAncestorClosestToRank(node, rank, **kwargs):
    """
    Return the ancestor of node whose rank is closest to the requested rank.

    This is an attempt to get something close to the requested rank even
    when the organism has no ancestral taxon with that exact rank.
    The named ranks on either side of our target are found
    (eg perhaps kingdom and class if phylum is missing).
    Then, based on how many unranked items are in the lineage between these
    AND the number of ranks skipped, interpolate which ancestral taxon is
    closest to the target rank.

    Keyword arguments:
        default: the fall-back value when nothing suitable is found
                 (defaults to the starting node itself)
        useChildOfFirstRankedAncestor: if the first ranked ancestor found is
                 already beyond the target rank, return that ancestor's
                 child (ie the previous one in the lineage). If set to
                 False, just return the 'default'. Defaults to True.
    """
    # Set the fall back (aka default) to starting node unless set by caller
    default = kwargs.pop('default', node)
    useChildOfFirstRankedAncestor = kwargs.pop(
        'useChildOfFirstRankedAncestor', True)
    # This only works on TaxNode objects
    if not isinstance(node, TaxNode):
        logger.debug("Not a TaxNode (%r)" % (node))
        return default
    # Walk through the lineage and find ranked taxa as reference points
    # Get all the ancestors/parents of this Taxon
    lineage = list(node.getLineage())
    lineage.reverse()
    if rank == 'domain':
        # NCBI uses 'superkingdom' for the top rank
        rank = 'superkingdom'
    targetIndex = ranks.index(rank)
    logger.debug("looking for rank: %s (%d)" % (rank, targetIndex))
    lastIndex = -1
    lastIndexedAnc = None
    lastAnc = node
    # For each ancestor/parent in this TaxNode's lineage
    for anc in lineage:
        try:
            # If it has a rank, where is that rank in the heirarchy?
            ancRankIndex = ranks.index(anc.rank)
            logger.debug(
                "rank of %s is %s(%d)" %
                (anc, anc.rank, ancRankIndex))
        except ValueError:
            # hard coded special cases treated as rank 'family':
            # SAR11,SAR116,SAR324,SAR86,SAR406,SAR202,SUP05,SAR92,OMZ60,
            if anc.id in [
                    54526,
                    62654,
                    131190,
                    62672,
                    62680,
                    648176,
                    655184,
                    745004,
                    744996]:
                ancRankIndex = ranks.index('family')
            else:
                ancRankIndex = -1
        if ancRankIndex >= 0:
            # An exact match is easy, just return it
            # BUGFIX: use '==' instead of 'is'; identity comparison of ints
            # only happens to work inside CPython's small-int cache
            if ancRankIndex == targetIndex:
                logger.debug("MATCH!")
                return anc
            elif ancRankIndex > targetIndex:
                # if we've hit the next rank without hitting any other ranks:
                if lastIndexedAnc is None:
                    if useChildOfFirstRankedAncestor:
                        # if this is an ancestor of the node itself,
                        # return the previous (less ranked) ancestor
                        if anc is not node:
                            logger.debug("Take previous!")
                            return lastAnc
                        logger.debug("Node is already ranked. Won't do it.")
                        return default
                    # BUGFIX: previously this case fell through to the
                    # interpolation below and crashed on lineage.index(None).
                    # The documented behavior is to return the default.
                    return default
                # We've hit the next rank and there was a previous rank:
                # try to interpolate
                ancIndex = lineage.index(anc)
                lastAncIndex = lineage.index(lastIndexedAnc)
                logger.debug(
                    "Trying to interpolate between %d and %d based "
                    "on %d between %d and %d" % (ancIndex, lastAncIndex,
                                                 targetIndex, ancRankIndex,
                                                 lastIndex))
                ranksSkipped = ancRankIndex - lastIndex
                ancsSkipped = ancIndex - lastAncIndex
                rankAdjustment = ancRankIndex - targetIndex
                ancAdjustment = (
                    float(rankAdjustment) / ranksSkipped) * ancsSkipped
                logger.debug("rolling back by %s" % (str(ancAdjustment)))
                return lineage[int(np.floor(ancIndex - ancAdjustment))]
            if ancRankIndex > -1:
                lastIndex = ancRankIndex
                lastIndexedAnc = anc
        lastAnc = anc
    logger.debug("Nothing found close to %s" % rank)
    return default
############
# Tests
############
def test():
    """Run the module self-tests.

    Expects the NCBI taxdump directory as the first command-line argument;
    any second argument switches logging to DEBUG.
    """
    import sys
    global myAssertEq, myAssertIs
    from test import myAssertEq, myAssertIs

    ndir = sys.argv[1]
    loglevel = logging.DEBUG if len(sys.argv) > 2 else logging.WARN
    logging.basicConfig(stream=sys.stderr, level=loglevel)

    test_root_node()
    test_get_lineage()
    test_collapse_counts()
    ncbiTree = test_read_ncbi(ndir)
    test_get_ancestor(ncbiTree)
    test_transmogrify(ncbiTree)
def test_transmogrify(tree):
    """Check TaxNode.transmogrify() against known answers for a handful of
    taxids, two organism lists, and three target ranks.

    Mismatches are logged as warnings (the run is not aborted).
    """
    orgLists = []
    orgLists.append(['Bacteria <prokaryote>', 'Archaea',
                     'Prochlorales', 'Rickettsiales', 'Eukaryota'])
    orgLists.append(['Gammaproteobacteria',
                     'Alphaproteobacteria',
                     'Deltaproteobacteria'])
    ranks = ['phylum', 'superkingdom', 'genus']
    ids = [439493, 939841, 655186, 1046240, 333146]
    # expected translation, keyed by taxid -> rank -> str(org list)
    answers = {439493:
               {'phylum':
                {str(orgLists[0]): 'Rickettsiales',
                 str(orgLists[1]): 'Alphaproteobacteria'},
                'superkingdom':
                {str(orgLists[0]): 'Rickettsiales',
                 str(orgLists[1]): 'Alphaproteobacteria'},
                'genus':
                {str(orgLists[0]): 'Rickettsiales',
                 str(orgLists[1]): 'Alphaproteobacteria'}},
               939841:
               {'phylum':
                {str(orgLists[0]): 'Prochlorales',
                 str(orgLists[1]): 'Cyanobacteria'},
                'superkingdom':
                {str(orgLists[0]): 'Prochlorales',
                 str(orgLists[1]): 'Bacteria <prokaryote>'},
                'genus':
                {str(orgLists[0]): 'Prochlorales',
                 str(orgLists[1]): 'Prochlorococcus'}},
               655186:
               {'phylum':
                {str(orgLists[0]): 'Bacteria <prokaryote>',
                 str(orgLists[1]): 'Gammaproteobacteria'},
                'superkingdom':
                {str(orgLists[0]): 'Bacteria <prokaryote>',
                 str(orgLists[1]): 'Gammaproteobacteria'},
                'genus':
                {str(orgLists[0]): 'Bacteria <prokaryote>',
                 str(orgLists[1]): 'Gammaproteobacteria'}},
               1046240:
               {'phylum':
                {str(orgLists[0]): 'Bacteria <prokaryote>',
                 str(orgLists[1]): 'Gammaproteobacteria'},
                'superkingdom':
                {str(orgLists[0]): 'Bacteria <prokaryote>',
                 str(orgLists[1]): 'Gammaproteobacteria'},
                'genus':
                {str(orgLists[0]): 'Bacteria <prokaryote>',
                 str(orgLists[1]): 'Gammaproteobacteria'}},
               333146:
               {'phylum':
                {str(orgLists[0]): 'Archaea',
                 str(orgLists[1]): 'Euryarchaeota'},
                'superkingdom':
                {str(orgLists[0]): 'Archaea',
                 str(orgLists[1]): 'Archaea'},
                'genus':
                {str(orgLists[0]): 'Archaea',
                 str(orgLists[1]): 'Ferroplasma'}}
               }
    for orgs in orgLists:
        for rank in ranks:
            # clear any cached translation before each pass
            for node in tree.values():
                node.translation = None
            for taxid in ids:
                node = tree[taxid]
                try:
                    assert node.transmogrify(rank, orgs) == answers[
                        taxid][rank][str(orgs)]
                except AssertionError:
                    # FIX: logging.warn is a deprecated alias of warning()
                    logging.warning(
                        "%d:%s at rank %s%s goes to: %s, not %s",
                        taxid, node.name, rank, str(orgs),
                        node.transmogrify(rank, orgs),
                        answers[taxid][rank][str(orgs)])
def test_get_ancestor(tree):
    """Check getAncestorAtRank() against known (taxid, rank, ancestor id)
    triples; on failure, log every mismatch and re-raise.
    """
    answers = ((112233, 'order', 30483), (654321, 'order', 4892),
               (1032926, 'phylum', 35493))
    try:
        for data in answers:
            assert tree[data[0]].getAncestorAtRank(data[1]).id == data[2]
    except AssertionError:
        for data in answers:
            node = tree[data[0]]
            ance = node.getAncestorAtRank(data[1])
            # FIX: the original built this string as a bare, discarded
            # expression (nothing was ever logged) and had 8 format
            # specifiers for 7 arguments
            logging.warning(
                "Ancestor of %d:%s at rank %s is %d:%s, not %d:%s",
                data[0], node.name, data[1], ance.id, ance.name,
                data[2], tree[data[2]].name)
        # FIX: re-raise so the failure is not silently swallowed,
        # consistent with test_get_lineage/test_collapse_counts
        raise
def test_collapse_counts():
    """Build a tiny 3-lineage tree, count 9 hits, and check that
    getCollapsedCounts() with a 30% cutoff (2.7 of 9) collapses the
    low-count lineages up to their shared ancestor.
    """
    from hits import countHits, translateHits
    tree = {}
    node1 = TaxNode.addToTreeFromString(
        'Bacteria;Cyanobacteria;Prochlorococcus', tree)
    node2 = TaxNode.addToTreeFromString(
        'Bacteria;Gammaproteobacteria;SUP05', tree)
    node3 = TaxNode.addToTreeFromString(
        'Archaea;Thermoplasmata;Ferroplasma', tree)
    # 2 hits on node1, 1 on node2, 6 on node3
    hits = {
        '1': node1,
        '2': node1,
        '3': node2,
        '4': node3,
        '5': node3,
        '6': node3,
        '7': node3,
        '8': node3,
        '9': node3}
    (total, counts) = countHits(hits)
    collapsedHits = {}
    node1.getRootNode().getCollapsedCounts(counts, .3 * 9, collapsedHits)
    translateHits(hits, collapsedHits)
    (ctotal, collapsedCounts) = countHits(hits)
    # node3 survives on its own; node1/node2 collapse to their grandparent
    expectedCounts = {node3: 6, node1.parent.parent: 3}
    try:
        assert collapsedCounts == expectedCounts
    except AssertionError:
        print("total: %d, ctotal: %d" % (total, ctotal))
        print(str(counts))
        print(str(collapsedHits))
        print(str(collapsedCounts))
        print(str(expectedCounts))
        # FIX: bare raise preserves the original traceback instead of
        # raising a fresh AssertionError
        raise
def test_root_node():
    """All nodes added to one tree share a single 'root' ancestor whose
    parent is either itself or None."""
    tree = {}
    first = TaxNode.addToTreeFromString(
        'Bacteria;Cyanobacteria;Prochlorococcus', tree)
    TaxNode.addToTreeFromString(
        'Bacteria;Gammaproteobacteria;SUP05', tree)
    TaxNode.addToTreeFromString(
        'Archaea;Thermoplasmata;Ferroplasma', tree)

    root = first.getRootNode()
    assert root.name == 'root'
    assert root.parent is None or root.parent is root
def test_get_lineage():
    """Adding a node from a lineage string must give back 'root;' + lineage
    from getLineageString().

    FIX: the two copy-pasted checks are folded into one loop, and the
    deprecated logging.warn alias is replaced with logging.warning.
    """
    tree = {}
    for lineage in ('Bacteria;Cyanobacteria;Prochlorococcus',
                    'Bacteria;Gammaproteobacteria;SUP05'):
        node = TaxNode.addToTreeFromString(lineage, tree)
        expected = 'root;' + lineage
        actual = node.getLineageString(';')
        if actual != expected:
            logging.warning("%s is not %s", actual, expected)
            raise AssertionError
def test_read_ncbi(ndir):
    """Load the NCBI taxonomy dump from ndir and spot-check map sizes,
    name lookups, and a parent link.

    Returns the taxid -> TaxNode map for use by later tests.

    FIX: the original assigned a bool to 'taxNames' and then immediately
    shadowed it with the name map, which was confusing; the flag now has
    its own name.
    """
    # second argument asks readTaxonomy to also build the name map
    load_names = True
    taxonomy = readTaxonomy(ndir, load_names)
    taxIds = taxonomy.idMap
    taxNames = taxonomy.nameMap
    # sizes are specific to the taxdump snapshot used for testing
    myAssertEq(len(taxIds), 783145)
    myAssertEq(len(taxNames), 1101991)
    # pick some random things to check
    myAssertEq(taxIds[123456].name, 'Psammomoya choretroides')
    myAssertIs(
        taxNames[
            simplifyString('Psammomoya choretroides')],
        taxIds[123456])
    myAssertIs(taxNames[simplifyString('Psammomoya choretroides '
                                       '(F.Muell.) Diels & Loes.')],
               taxIds[123456])
    myAssertEq(taxIds[123499].parent.id, 50537)
    return taxIds
def add_taxonomy_dir_argument(parser, defaults=None):
    """Add the standard -n/--ncbiTaxDir option to an argparse parser.

    Args:
        parser: an argparse.ArgumentParser (or argument group)
        defaults: optional dict of option defaults; only the 'taxdir'
            key is read. FIX: previously a mutable default ({}); None is
            backward compatible for every caller.
    """
    if defaults is None:
        defaults = {}
    # read the default once; it appears both as the option default and in
    # the help text
    default_taxdir = defaults.get("taxdir", None)
    parser.add_argument(
        "-n",
        "--ncbiTaxDir",
        dest="taxdir",
        metavar="PATH",
        default=default_taxdir,
        help="Directory with unpacked ncbi tax dump (specifically names.dmp "
             "and nodes.dmp) and use to translate org names in desc, "
             "otherwise try to find lineage info in desc. Default is: %s" %
             (default_taxdir))
# Run the self-tests when invoked as a script (expects the NCBI taxdump
# directory as the first command-line argument).
if __name__ == '__main__':
    test()
|
# Copyright (c) 2015-2018 The Botogram Authors (see AUTHORS)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import pytest
import botogram.objects
def test_user_avatar(api, mock_req):
    """User.avatar requires an API wrapper, is fetched lazily, and is
    cached on the instance after the first access."""
    mock_req({
        "getUserProfilePhotos": {
            "ok": True,
            "result": {
                "total_count": 1,
                "photos": [[
                    {
                        "file_id": "aaaaaa",
                        "width": 50,
                        "height": 50,
                        "file_size": 128,
                    },
                    {
                        "file_id": "bbbbbb",
                        "width": 25,
                        "height": 25,
                        "file_size": 64,
                    },
                ]],
            },
        },
    })

    # Accessing the avatar without an API wrapper must fail
    offline_user = botogram.objects.User({"id": 123, "first_name": "Bob"})
    with pytest.raises(RuntimeError):
        offline_user.avatar

    # With an API wrapper the avatar is not loaded up front...
    user = botogram.objects.User({"id": 123, "first_name": "Bob"}, api)
    assert not hasattr(user, "_avatar")

    # ...it is fetched on first access...
    fetched = user.avatar
    assert fetched.file_id == "aaaaaa"

    # ...and cached afterwards
    assert hasattr(user, "_avatar")
    assert user._avatar == fetched
def test_user_avatar_with_no_photos(api, mock_req):
    """A user with zero profile photos has no avatar (None)."""
    empty_result = {"total_count": 0, "photos": []}
    mock_req({"getUserProfilePhotos": {"ok": True, "result": empty_result}})

    user = botogram.objects.User({"id": 123, "first_name": "Bob"}, api)
    assert user.avatar is None
def test_user_avatar_history(api, mock_req):
    """avatar_history() requires an API wrapper and returns one entry per
    profile photo, in the order provided by the API."""
    def photo_entry(file_id):
        # one photo available in a single 50x50 size
        return [{
            "file_id": file_id,
            "width": 50,
            "height": 50,
            "file_size": 128,
        }]

    avatar_ids = ["aaaaaa", "bbbbbb", "cccccc"]
    mock_req({
        "getUserProfilePhotos": {
            "ok": True,
            "result": {
                "total_count": 3,
                "photos": [photo_entry(one) for one in avatar_ids],
            },
        },
    })

    # Accessing the history without an API wrapper must fail
    offline_user = botogram.objects.User({"id": 123, "first_name": "Bob"})
    with pytest.raises(RuntimeError):
        offline_user.avatar_history()

    user = botogram.objects.User({"id": 123, "first_name": "Bob"}, api)
    assert [avatar.file_id for avatar in user.avatar_history()] == avatar_ids
def test_user_avatar_history_multiple_requests(api, mock_req):
    """When total_count is larger than the photos in one response, the
    history is assembled from repeated requests (simulating a user with
    more than 100 avatars)."""
    def photo_entry(file_id):
        return [{
            "file_id": file_id,
            "width": 50,
            "height": 50,
            "file_size": 128,
        }]

    mock_req({
        "getUserProfilePhotos": {
            "ok": True,
            "result": {
                # Double the number of avatars actually in this response
                "total_count": 4,
                "photos": [photo_entry("aaaaaa"), photo_entry("bbbbbb")],
            },
        },
    })

    user = botogram.objects.User({"id": 123, "first_name": "Bob"}, api)
    fetched = [avatar.file_id for avatar in user.avatar_history()]
    assert fetched == ["aaaaaa", "bbbbbb", "aaaaaa", "bbbbbb"]
def test_user_avatar_history_no_photos(api, mock_req):
    """A user with zero profile photos has an empty avatar history."""
    mock_req({
        "getUserProfilePhotos": {
            "ok": True,
            "result": {"total_count": 0, "photos": []},
        },
    })

    user = botogram.objects.User({"id": 123, "first_name": "Bob"}, api)
    assert user.avatar_history() == []
def test_photo_object():
    """Exercise the custom-made Photo object: sizes list, smallest/biggest
    helpers, serialization, set_api propagation and input validation."""
    sizes_data = [
        {"file_id": "aaaaaa", "width": 10, "height": 10, "file_size": 48},
        {"file_id": "aaaaaa", "width": 20, "height": 20, "file_size": 148},
        {"file_id": "aaaaaa", "width": 30, "height": 30, "file_size": 248},
    ]

    # A valid Photo keeps one PhotoSize per input entry, in order
    photo = botogram.objects.Photo(sizes_data)
    assert len(photo.sizes) == len(sizes_data)
    assert photo.sizes[0].file_id == sizes_data[0]["file_id"]
    assert photo.smallest.file_id == sizes_data[0]["file_id"]
    assert photo.biggest.file_id == sizes_data[-1]["file_id"]
    assert photo.file_id == photo.biggest.file_id
    assert photo.serialize() == sizes_data

    # set_api must propagate to the contained PhotoSize objects
    wrapped = botogram.objects.Photo(sizes_data, "testapi")
    assert wrapped._api == "testapi"
    assert wrapped.sizes[0]._api == "testapi"
    wrapped.set_api("anotherapi")
    assert wrapped._api == "anotherapi"
    assert wrapped.sizes[0]._api == "anotherapi"

    # Empty PhotoSize not supported, sorry
    with pytest.raises(ValueError):
        botogram.objects.Photo([])
    # The data provided must be a list
    with pytest.raises(ValueError):
        botogram.objects.Photo("I'm not a list (doh)")
    # And the items inside a list must be PhotoSize
    with pytest.raises(ValueError):
        botogram.objects.Photo([{"This": "isn't", "a": "PhotoSize"}])
def test_user_name():
    """User.name is the first name alone, or combined with the last name
    when one is set."""
    user = botogram.objects.User({"id": 123, "first_name": "John"})
    assert user.name == "John"

    user.last_name = "Doe"
    # NOTE(review): "<NAME>" is an anonymization placeholder from the
    # source dump; the original expected value was likely "John Doe"
    assert user.name == "<NAME>"
def test_chat_name():
    """Chat.name prefers the title and falls back to the person's name."""
    chat = botogram.objects.Chat({"id": 123, "type": "",
                                  "title": "Test", "first_name": "John"})
    assert chat.name == "Test"

    # Without a title the first name is used
    chat.title = None
    assert chat.name == "John"

    # With a last name as well, both are combined
    chat.last_name = "Doe"
    # NOTE(review): "<NAME>" is an anonymization placeholder from the
    # source dump; the original expected value was likely "John Doe"
    assert chat.name == "<NAME>"
|
<gh_stars>0
import os
import sys
import unittest
import numpy
from os.path import join as pjn
import QENSmodels
# resolve path to the 'reference_data' directory that sits next to this
# test module, independent of the current working directory
this_module_path = sys.modules[__name__].__file__
data_dir = pjn(os.path.dirname(this_module_path), 'reference_data')
class TestJumpTranslationalDiffusion(unittest.TestCase):
    """ Tests functions related to QENSmodels Jump Translational Diffusion
    model """

    def test_size_hwhm_jump_translational_diffusion(self):
        """ Test size of output of hwhmJumpTranslationalDiffusion
        The output should contain 3 elements (hwhm, eisf, qisf) """
        # scalar q input
        self.assertEqual(
            len(QENSmodels.hwhmJumpTranslationalDiffusion(1.)), 3)
        # vector q input
        self.assertEqual(
            len(QENSmodels.hwhmJumpTranslationalDiffusion([1., 2.])), 3)

    def test_type_size_hwhm_jump_translational_diffusion_q_nb(self):
        """ Tests type and size of outputs if input q is a number """
        hwhm, eisf, qisf = QENSmodels.hwhmJumpTranslationalDiffusion(1.)
        # all three outputs are numpy arrays even for a scalar q
        self.assertIsInstance(hwhm, numpy.ndarray)
        self.assertIsInstance(eisf, numpy.ndarray)
        self.assertIsInstance(qisf, numpy.ndarray)
        self.assertEqual(eisf, 0.)
        self.assertEqual(qisf, 1.)

    def test_type_size_hwhm_jump_translational_diffusion_q_array(self):
        """ Tests type and size of outputs if input q is an array """
        q_input = [1., 2.]
        # positional arguments after q: D=0.5, resTime=1.5
        hwhm, eisf, qisf = QENSmodels.hwhmJumpTranslationalDiffusion(q_input,
                                                                     0.5,
                                                                     1.5)
        self.assertIsInstance(hwhm, numpy.ndarray)
        self.assertIsInstance(eisf, numpy.ndarray)
        self.assertIsInstance(qisf, numpy.ndarray)
        numpy.testing.assert_array_almost_equal(hwhm, [0.2857143, 0.5])
        self.assertSequenceEqual(eisf.tolist(), numpy.zeros(2).tolist())
        self.assertSequenceEqual(qisf.tolist(), numpy.ones(2).tolist())

    def test_raised_error_negative_coeffs(self):
        """ test that an error is raised if D or resTime are negative
        """
        # D = -1, resTime = 1
        self.assertRaises(ValueError,
                          QENSmodels.hwhmJumpTranslationalDiffusion,
                          1,
                          -1, 1)
        # D = 1, resTime = -1
        self.assertRaises(ValueError,
                          QENSmodels.hwhmJumpTranslationalDiffusion,
                          1,
                          1, -1)
        # D = -1, resTime = -1
        self.assertRaises(ValueError,
                          QENSmodels.hwhmJumpTranslationalDiffusion,
                          1,
                          -1, -1)

    def test_type_sqw_jump_translational_diffusion(self):
        """ Test type of output """
        # w, q are floats
        self.assertIsInstance(QENSmodels.sqwJumpTranslationalDiffusion(1, 1),
                              numpy.ndarray)
        # w, q are vectors: result has one row per q and one column per w
        output = QENSmodels.sqwJumpTranslationalDiffusion([1, 2, 3],
                                                          [0.3, 0.4])
        self.assertIsInstance(output, numpy.ndarray)
        self.assertEqual(output.size, 6)
        self.assertEqual(output.shape, (2, 3))

    def test_raised_error_no_q_input(self):
        """ test that an error is raised if no values of q are given as input
        """
        self.assertRaises(TypeError,
                          QENSmodels.sqwJumpTranslationalDiffusion,
                          1)

    def test_reference_data(self):
        """ Test output values in comparison with reference data
        (file in 'reference data' folder) """
        # load reference data
        ref_data = numpy.loadtxt(
            pjn(data_dir, "jump_translational_diffusion_ref_data.dat"))
        # generate data from current model
        # for info: the parameters' values used for the reference data are
        # specified in the README file in the 'reference data' folder
        w = numpy.arange(-2, 2.01, 0.01)
        q = 0.7
        actual_data = numpy.column_stack(
            [w, QENSmodels.sqwJumpTranslationalDiffusion(w, q,
                                                         scale=1,
                                                         center=0,
                                                         D=0.23,
                                                         resTime=1.25)])
        # compare the 2 arrays with a tight tolerance (12 decimal places)
        numpy.testing.assert_array_almost_equal(ref_data,
                                                actual_data,
                                                decimal=12)
# Run this test module directly with the standard unittest runner
if __name__ == '__main__':
    unittest.main()
|
from django.db import models
from django.conf import settings
from easy_thumbnails.fields import ThumbnailerImageField
from easy_thumbnails.files import get_thumbnailer
# (stored value, human-readable label) choices for the individually
# configurable/cacheable parts of the base template.
# NOTE(review): 'IO_GALARY'/'galary' look like typos for 'gallery', but the
# first element is a stored value — renaming would require a data migration.
BASE_TEMPLATE_PART = [
    ('HEAD', 'Head'),
    ('BODY', 'Body'),
    ('NAVBAR', 'Navbar'),
    ('IO_PREVIEW', 'Image objects preview'),
    ('IO_VIEW', 'Image objects view'),
    ('IO_GALARY', 'Image objects galary'),
    ('POSTS_PREVIEW', 'Posts preview'),
    ('POSTS_VIEW', 'Posts view'),
    ('PRICE_VIEW', 'Price view'),
    ('PRICE_PREVIEW', 'Price preview'),
]

# Supported analytics providers (the value doubles as the label)
ANALYTICAL_SERVICES = [
    ('YANDEX_METRIKA_COUNTER_ID', 'YANDEX_METRIKA_COUNTER_ID'),
    ('GOOGLE_ANALYTICS_GTAG_PROPERTY_ID', 'GOOGLE_ANALYTICS_GTAG_PROPERTY_ID'),
]

# Webmaster verification meta-tag names (the value doubles as the label)
WEB_MASTER_SERVICES = [
    ('yandex-verification', 'yandex-verification'),
    ('google-site-verification', 'google-site-verification'),
]
class HTMLHeadLink(models.Model):
    """A markup snippet inserted into the <head> of the base template."""

    # Admin-facing description of why this snippet exists
    label = models.TextField(
        verbose_name="Label"
        , max_length=100
        , null=False
        , default="This link is needed for ..."
    )
    # Raw markup inserted verbatim into the head section
    content = models.TextField(
        verbose_name="Head link content"
        , null=False
        , default="This code snippet will be inserted into the head section of the base template"
        , help_text="This code snippet will be inserted into the head section of the base template"
        , unique=True
    )
    # Position of this snippet relative to its siblings in the template
    order = models.PositiveSmallIntegerField(
        verbose_name="Sort order"
        , null=False
        , default=1
        , help_text="Ordering in template"
    )

    def __str__(self):
        return f'{self.label}'

    class Meta:
        verbose_name = 'Head link'
        verbose_name_plural = 'Head links'
class HTMLHeadMeta(models.Model):
    """A meta-tag snippet inserted into the <head> of the base template."""

    # Admin-facing description of why this snippet exists
    label = models.TextField(
        verbose_name="Label"
        , max_length=100
        , null=False
        , default="This meta is needed for ..."
    )
    # Raw markup inserted verbatim into the head section
    content = models.TextField(
        verbose_name="Head meta content"
        , null=False
        , default="This code snippet will be inserted into the head section of the base template"
        , help_text="This code snippet will be inserted into the head section of the base template"
        , unique=True
    )
    # Position of this snippet relative to its siblings in the template
    order = models.PositiveSmallIntegerField(
        verbose_name="Sort order"
        , null=False
        , default=1
        , help_text="Ordering in template"
    )

    def __str__(self):
        return f'{self.label}'

    class Meta:
        verbose_name = 'Head meta'
        verbose_name_plural = 'Head meta'
class HTMLBodyScript(models.Model):
    """A script snippet inserted into the <body> of the base template."""

    # Admin-facing description of why this script exists
    label = models.TextField(
        verbose_name="Label"
        , max_length=100
        , null=False
        , default="This script is needed for ..."
    )
    # Raw markup inserted verbatim into the body section
    content = models.TextField(
        verbose_name="Body script content"
        , null=False
        , default="This code snippet will be inserted into the body section of the base template"
        , help_text="This code snippet will be inserted into the body section of the base template"
        , unique=True
    )
    # Position of this snippet relative to its siblings in the template
    order = models.PositiveSmallIntegerField(
        verbose_name="Sort order"
        , null=False
        , default=1
        , help_text="Ordering in template"
    )

    def __str__(self):
        return f'{self.label}'

    class Meta:
        verbose_name = 'HTML скрипт (body)'
        verbose_name_plural = 'HTML скрипты (body)'
class DjangoAnalyticalServices(models.Model):
    """Configured counter/property id for one analytics provider."""

    # Which analytics provider this row configures; one row per provider
    service = models.CharField(
        max_length=100
        , null=False
        , choices=ANALYTICAL_SERVICES
        , default='YANDEX_METRIKA_COUNTER_ID'
        , verbose_name="Service"
        , unique=True
    )
    # Counter / property id for the selected service
    # NOTE(review): the default looks like a sample counter id — confirm
    value = models.TextField(
        max_length=100
        , null=False
        , default='80821312'
    )

    def __str__(self):
        return f'{self.service} {self.value}'

    class Meta:
        verbose_name = 'Сервис аналитики'
        verbose_name_plural = 'Сервисы аналитики'
class Favicon(models.Model):
    """Site favicon image plus related msapplication/manifest metadata."""

    # Source image; sized thumbnails are generated on demand
    favicon = ThumbnailerImageField(upload_to='favicon', blank=True)
    # Admin-facing description of this favicon entry
    label = models.TextField(
        verbose_name="Label"
        , max_length=100
        , null=False
        , default="This favicon is needed for ..."
    )
    tile_color = models.CharField(
        verbose_name="TileColor for msapplication and macos"
        , null=False
        , default="#2b5797"
        , max_length=100
    )
    app_name = models.CharField(
        verbose_name="Application name"
        , null=False
        , default="My application"
        , max_length=100
    )
    theme_color = models.CharField(
        verbose_name="Theme color"
        , null=False
        , default="#ffffff"
        , max_length=100
    )

    def get_absolute_url_size(self, size):
        """Return the URL of a size x size cropped thumbnail of the favicon."""
        options = {'size': (size, size), 'crop': True}
        thumb_url = get_thumbnailer(self.favicon).get_thumbnail(options).url
        return thumb_url

    def __str__(self):
        return f'{self.label} {self.favicon}'

    class Meta:
        verbose_name = 'Значок сайта'
        verbose_name_plural = 'Значки сайта'
class CSS(models.Model):
    """An uploaded CSS file to be linked from rendered pages."""

    # Admin-facing description of this stylesheet
    label = models.TextField(
        verbose_name="Label"
        , max_length=100
        , null=False
        , default="This css is needed for ..."
    )
    content = models.FileField(
        upload_to='css'
        , verbose_name="file css"
    )

    def __str__(self):
        return f'{self.label} {self.content}'

    @property
    def get_absolute_url_property(self):
        # NOTE(review): same value as get_absolute_url(); both forms are
        # kept since templates may rely on either
        return f'{settings.MEDIA_URL}{self.content}'

    def get_absolute_url(self):
        """Return the MEDIA_URL-based URL of the uploaded file."""
        return f'{settings.MEDIA_URL}{self.content}'

    class Meta:
        verbose_name = 'CSS файл'
        verbose_name_plural = 'CSS файлы'
class Verifications(models.Model):
    """Webmaster site-verification token for one service."""

    # Which verification meta-tag this row provides; one row per service
    service = models.CharField(
        max_length=100
        , null=False
        , choices=WEB_MASTER_SERVICES
        , default='yandex-verification'
        , verbose_name="Service"
        , unique=True
    )
    # Verification token value
    # NOTE(review): the default looks like a sample token — confirm
    value = models.TextField(
        max_length=300
        , null=False
        , default='01a933e67426668b'
    )

    def __str__(self):
        return f'{self.service} {self.value}'

    class Meta:
        verbose_name = 'Подтверждение сайта'
        verbose_name_plural = 'Подтверждения сайта'
class BaseTemplateCacheTime(models.Model):
    """Cache lifetime (in seconds) for one part of the base template."""

    # Which template part this cache time applies to; one row per part
    base_template_part = models.CharField(
        max_length=100
        , null=False
        , choices=BASE_TEMPLATE_PART
        , default='HEAD'
        , verbose_name="Base template part (unique)"
        , unique=True
        , db_index=True
    )
    # Cache lifetime in seconds
    seconds = models.IntegerField(
        verbose_name="Value of seconds cache"
    )

    def __str__(self):
        return f'{self.base_template_part} {self.seconds} seconds'

    class Meta:
        verbose_name = 'CACHE-time'
        verbose_name_plural = 'CACHE-time'
class Font(models.Model):
    """An uploaded font file to be referenced from rendered pages."""

    # Admin-facing description of this font
    label = models.TextField(
        verbose_name="Label"
        , max_length=100
        , null=False
        , default="This font is ..."
    )
    content = models.FileField(
        upload_to='fonts'
        , verbose_name="file font"
    )

    def __str__(self):
        return f'{self.label} {self.content}'

    @property
    def get_absolute_url_property(self):
        # NOTE(review): same value as get_absolute_url(); both forms are
        # kept since templates may rely on either
        return f'{settings.MEDIA_URL}{self.content}'

    def get_absolute_url(self):
        """Return the MEDIA_URL-based URL of the uploaded file."""
        return f'{settings.MEDIA_URL}{self.content}'

    class Meta:
        verbose_name = 'Шрифт'
        verbose_name_plural = 'Шрифты'
|
# #####################################################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES #
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions #
# and limitations under the License. #
# #####################################################################################################################
import json
from boto3.session import Session
import os
import zipfile
import tempfile
import botocore
from shared.logger import get_logger
from shared.helper import get_client
logger = get_logger(__name__)
# Get the AWS Organization's setup of "CallAs".
# If a delegated admin account is used, CALL_AS = "DELEGATED_ADMIN"; otherwise CALL_AS = "SELF".
call_as = os.environ.get("CALL_AS", "SELF")
def find_artifact(artifacts, name):
    """Return the artifact called ``name`` from ``artifacts``.

    Args:
        artifacts: the list of artifact dicts available to the function
        name: the artifact name we wish to use

    Returns:
        The matching artifact dictionary

    Raises:
        Exception: if no artifact with that name is present
    """
    match = next(
        (candidate for candidate in artifacts if candidate["name"] == name),
        None,
    )
    if match is None:
        raise Exception(f"Input artifact named {name} not found in lambda's event")
    return match
def get_template(s3_client, artifact, template_file_in_zip, params_file_in_zip):
    """Download a zipped artifact from S3 and extract template + parameters.

    The artifact is downloaded from the S3 artifact store into a temporary
    file, then two members are read out of the zip archive.

    Args:
        s3_client: boto3 configured S3 client (only download_file is used)
        artifact: the artifact descriptor to download
        template_file_in_zip: zip member holding the CloudFormation template
        params_file_in_zip: zip member holding the template parameters

    Returns:
        Tuple of (template text, parameters text)

    Raises:
        Exception: anything thrown while downloading or unzipping
    """
    location = artifact["location"]["s3Location"]
    with tempfile.NamedTemporaryFile() as downloaded:
        s3_client.download_file(
            location["bucketName"], location["objectKey"], downloaded.name)
        with zipfile.ZipFile(downloaded.name, "r") as archive:
            template_text = archive.read(template_file_in_zip).decode()
            params_text = archive.read(params_file_in_zip).decode()
    return (template_text, params_text)
def update_stackset(stackset_name, template, parameters, org_ids, regions, cf_client):
    """Start a CloudFormation StackSet update.

    Args:
        stackset_name: name of the StackSet to update
        template: the template body to apply
        parameters: template parameters
        org_ids: list of target organizational unit ids
        regions: list of target regions
        cf_client: boto3 CloudFormation client

    Returns:
        True when the update request was accepted.

    Raises:
        botocore.exceptions.ClientError: re-raised after logging
    """
    update_request = dict(
        StackSetName=stackset_name,
        TemplateBody=template,
        Parameters=parameters,
        Capabilities=["CAPABILITY_NAMED_IAM"],
        PermissionModel="SERVICE_MANAGED",
        # SERVICE_MANAGED deployments target OrganizationalUnitIds;
        # SELF_MANAGED deployments would target AccountIds instead
        DeploymentTargets={"OrganizationalUnitIds": org_ids},
        AutoDeployment={"Enabled": False},
        Regions=regions,
        CallAs=call_as,
    )
    try:
        cf_client.update_stack_set(**update_request)
        return True
    except botocore.exceptions.ClientError as err:
        logger.error(f"Error updating CloudFormation StackSet {stackset_name}. Error message: {str(err)}")
        raise err
def stackset_exists(stackset_name, cf_client):
    """Return whether the named CloudFormation StackSet exists.

    Args:
        stackset_name: the StackSet name to check
        cf_client: boto3 CloudFormation client

    Returns:
        True when describe_stack_set succeeds; False when the raised
        error's message says the StackSet is missing.

    Raises:
        Anything else raised by describe_stack_set().
    """
    try:
        logger.info(f"Checking if StackSet {stackset_name} exits.")
        cf_client.describe_stack_set(StackSetName=stackset_name, CallAs=call_as)
        return True
    except Exception as err:
        message = str(err)
        missing = (f"{stackset_name} not found" in message
                   or f"{stackset_name} does not exist" in message)
        if not missing:
            raise err
        logger.info(f"StackSet {stackset_name} does not exist.")
        return False
def create_stackset_and_instances(stackset_name, template, parameteres, org_ids, regions, cf_client):
    """Create a new CloudFormation StackSet and its stack instances.

    Args:
        stackset_name: the StackSet to be created
        template: the template for the StackSet to be created with
        parameteres: template parameters (misspelled name kept for
            interface compatibility)
        org_ids: list of target organizational unit ids
        regions: list of target regions
        cf_client: boto3 CloudFormation client

    Raises:
        botocore.exceptions.ClientError: from create_stack_set() or
            create_stack_instances(), re-raised after logging
    """
    try:
        logger.info(f"creating stackset {stackset_name}")
        # create StackSet first
        cf_client.create_stack_set(
            StackSetName=stackset_name,
            TemplateBody=template,
            Parameters=parameteres,
            Capabilities=["CAPABILITY_NAMED_IAM"],
            PermissionModel="SERVICE_MANAGED",
            AutoDeployment={"Enabled": False},
            CallAs=call_as,
        )
        # Then create StackSet instances
        # FIX: corrected log-message typos ("StckSet" -> "StackSet",
        # "inatances" -> "instances")
        logger.info(f"creating instances for {stackset_name} StackSet")
        cf_client.create_stack_instances(
            StackSetName=stackset_name,
            DeploymentTargets={"OrganizationalUnitIds": org_ids},
            Regions=regions,
            CallAs=call_as,
        )
    except botocore.exceptions.ClientError as e:
        logger.error(f"Error creating StackSet {stackset_name} and its instances")
        raise e
def get_stackset_instance_status(stackset_name, stack_instance_account_id, region, cf_client):
    """Return the detailed status of one StackSet instance.

    Args:
        stackset_name: the name of the StackSet to check
        stack_instance_account_id: account id where the instance is deployed
        region: the region of the StackSet's instance
        cf_client: boto3 CloudFormation client

    Returns:
        The CloudFormation detailed status string, one of
        'PENDING'|'RUNNING'|'SUCCEEDED'|'FAILED'|'CANCELLED'|'INOPERABLE'

    Raises:
        botocore.exceptions.ClientError: re-raised after logging
    """
    try:
        logger.info(f"Checking the status of {stackset_name} instance")
        description = cf_client.describe_stack_instance(
            StackSetName=stackset_name,
            StackInstanceAccount=stack_instance_account_id,
            StackInstanceRegion=region,
            CallAs=call_as,
        )
    except botocore.exceptions.ClientError as err:
        logger.error(
            f"Error describing StackSet {stackset_name} instance in {region} for account {stack_instance_account_id}"
        )
        raise err
    instance = description["StackInstance"]
    return instance["StackInstanceStatus"]["DetailedStatus"]
def put_job_success(job_id, message, cp_client):
    """Report a successful job back to CodePipeline.

    Args:
        job_id: the CodePipeline job ID
        message: text logged alongside the status update (not sent to
            CodePipeline)
        cp_client: boto3 CodePipeline client

    Raises:
        Anything raised by put_job_success_result().
    """
    logger.info(f"Putting job success for jobId: {job_id} with message: {message}")
    cp_client.put_job_success_result(jobId=job_id)
def put_job_failure(job_id, message, cp_client):
    """Report a failed job back to CodePipeline.

    Args:
        job_id: the CodePipeline job ID
        message: failure detail sent to CodePipeline and logged
        cp_client: boto3 CodePipeline client

    Raises:
        Anything raised by put_job_failure_result().
    """
    logger.info(f"Putting job failure for jobId: {job_id} with message: {message}")
    cp_client.put_job_failure_result(jobId=job_id, failureDetails={"message": message, "type": "JobFailed"})
def put_job_continuation(job_id, message, cp_client):
    """Notify CodePipeline of a continuing job
    This will cause CodePipeline to invoke the function again with the
    continuation token generated below (it embeds the current job id).
    Args:
        job_id: The CodePipeline job ID
        message: A message to be logged relating to the job status
        cp_client: Boto3 CodePipeline client
    Raises:
        Exception: Any exception thrown by .put_job_success_result()
    """
    logger.info(f"Putting continuation token for jobId: {job_id} with message: {message}")
    # This data will be available when a new job is scheduled to continue the current execution
    continuation_token = json.dumps({"previous_job_id": job_id})
    cp_client.put_job_success_result(jobId=job_id, continuationToken=continuation_token)
def start_stackset_update_or_create(
    job_id,  # NOSONAR:S107 this function is designed to take many arguments
    stackset_name,
    template,
    parameteres,  # NOTE(review): misspelling kept -- callers may pass it by keyword
    stack_instance_account_ids,
    org_ids,
    regions,
    cf_client,
    cp_client,
):
    """Starts the stackset update or create process
    If the stackset exists then update, otherwise create. In every case the
    CodePipeline job is completed, failed, or continued accordingly.
    Args:
        job_id: The ID of the CodePipeline job
        stackset_name: The stackset to create or update
        template: The template to create/update the stackset with
        parameteres: template parameters
        stack_instance_account_ids: list of target account ids
        org_ids: list of target org_ids
        regions: list of target regions
        cf_client: Boto3 CloudFormation client
        cp_client: Boto3 CodePipeline client
    """
    if not stackset_exists(stackset_name, cf_client):
        # The StackSet doesn't exist yet: create it and its instances, then
        # continue the job so the pipeline waits for the creation to finish.
        # FIX: "Creatiation" typos in the log/continuation messages corrected.
        create_stackset_and_instances(stackset_name, template, parameteres, org_ids, regions, cf_client)
        logger.info(f"Creation of {stackset_name} StackSet and its instances started")
        put_job_continuation(job_id, "StackSet and its instances creation started", cp_client)
        return

    logger.info(f"Stackset {stackset_name} exists")
    # Only the first account/region pair is probed; it is taken as
    # representative of the whole instance fleet.
    status = get_stackset_instance_status(stackset_name, stack_instance_account_ids[0], regions[0], cf_client)
    # If the CloudFormation stackset instance is not in a 'SUCCEEDED' state, it can not be updated
    if status != "SUCCEEDED":
        # FIX: "faild" typo in the user-facing failure message corrected.
        put_job_failure(
            job_id,
            (
                f"StackSet cannot be updated when status is: {status}. Delete the failed stackset/instance,"
                " fix the issue, and retry."
            ),
            cp_client,
        )
        return

    # Update the StackSet and its instances
    were_updates = update_stackset(stackset_name, template, parameteres, org_ids, regions, cf_client)
    if were_updates:
        # If there were updates then continue the job so it can monitor
        # the progress of the update.
        logger.info(f"Starting update for {stackset_name} StackSet")
        put_job_continuation(job_id, "StackSet update started", cp_client)
    else:
        # If there were no updates then succeed the job immediately
        logger.info(f"No updates for {stackset_name} StackSet")
        put_job_success(job_id, "There were no StackSet updates", cp_client)
def check_stackset_update_status(job_id, stackset_name, stack_instance_account_id, region, cf_client, cp_client):
    """Monitor an already-running CloudFormation StackSet and its instance update/create
    Succeeds, fails or continues the CodePipeline job depending on the
    instance status reported by CloudFormation.
    Args:
        job_id: The CodePipeline job ID
        stackset_name: The stackset to monitor
        stack_instance_account_id: the account id
        region: The region, where the StackSet's instance is deployed
        cf_client: Boto3 CloudFormation client
        cp_client: Boto3 CodePipeline client
    """
    status = get_stackset_instance_status(stackset_name, stack_instance_account_id, region, cf_client)
    if status == "SUCCEEDED":
        # Finished successfully: complete the job, no continuation needed.
        put_job_success(job_id, "StackSet and its instance update complete", cp_client)
        return
    if status in ("RUNNING", "PENDING"):
        # Still in flight: hand back a continuation token so the pipeline polls again.
        put_job_continuation(job_id, "StackSet update still in progress", cp_client)
        return
    # Any other status means the update/create has failed.
    put_job_failure(job_id, f"Update failed: {status}", cp_client)
def validate_user_params(decoded_params, list_of_required_params):
    """Validate user-provided parameters passed via the CodePipeline event.
    Raise an exception for the first required parameter that is missing.
    Args:
        decoded_params: dict of user parameters decoded from CodePipeline's event
        list_of_required_params: list of required parameter names
    Raises:
        Exception: "Your UserParameters JSON must include <missing parameter's name>"
    """
    missing = [p for p in list_of_required_params if p not in decoded_params]
    if missing:
        raise Exception(f"Your UserParameters JSON must include {missing[0]}")
def get_user_params(job_data):
    """Decodes the JSON user parameters passed by codepipeline's event.
    Args:
        job_data: The job data structure containing the UserParameters string which should be a valid JSON structure
    Returns:
        The JSON parameters decoded as a dictionary.
    Raises:
        Exception: The JSON can't be decoded, or a required parameter is missing.
    """
    # Parameters every pipeline invocation must supply.
    required_params = (
        "stackset_name",
        "artifact",
        "template_file",
        "stage_params_file",
        "accound_ids",  # NOTE(review): misspelling is part of the wire contract; do not "fix" silently
        "org_ids",
        "regions",
    )
    try:
        # UserParameters is encoded as JSON so multiple values can be passed.
        user_parameters = job_data["actionConfiguration"]["configuration"]["UserParameters"]
        decoded_parameters = json.loads(user_parameters)
    except Exception as e:
        # If the JSON can't be decoded then fail the job with a helpful message.
        raise Exception("UserParameters could not be decoded as JSON", e)
    # Validate required params were provided
    validate_user_params(decoded_parameters, required_params)
    return decoded_parameters
def setup_s3_client(job_data):
    """Creates an S3 client
    Uses the credentials passed in the event by CodePipeline. These
    credentials can be used to access the artifact bucket.
    Args:
        job_data: The job data structure
    Returns:
        An S3 client with the appropriate credentials
    """
    creds = job_data["artifactCredentials"]
    session = Session(
        aws_access_key_id=creds["accessKeyId"],
        aws_secret_access_key=creds["secretAccessKey"],
        aws_session_token=creds["sessionToken"],
    )
    # s3v4 signing is required for buckets with KMS-encrypted artifacts.
    return session.client("s3", config=botocore.client.Config(signature_version="s3v4"))
|
#!/usr/bin/python3
__author__ = 'ziyan.yin'
import asyncio
import logging
from typing import List
from nado_utils import cryptutils
import os
import pickle
from ._publisher import Publisher
BUF_SIZE = 1024  # NOTE(review): not referenced in this module's visible code -- confirm before removing
MAX_SIZE = 2**20 * 5  # hard cap (5 MiB) on an inbound message body
HOST = ''  # NOTE(review): unused here; the server below binds '' directly
PORT = 11210  # default TCP port for the MQ server
# Template for the response payload sent back to clients; copied per request.
data_format = {
    'success': True,
    'data': '',
    'message': '',
    'code': 0,
}
logger = logging.getLogger('MQ')
class ParamsError(Exception):
    """Raised when an inbound command is malformed or fails its signature check."""

    def __str__(self):
        # Protocol error code understood by MQ clients.
        return '[10004]'
class OutOfBoundError(Exception):
    """Raised when an inbound message exceeds the configured size cap."""

    def __str__(self):
        # Protocol error code understood by MQ clients.
        return '[10005]out of bounds'
class MessageQueue:
def __init__(self, cwd='', maxsize=-1):
self._cursor = 0
self._maxsize = maxsize if maxsize > 0 else 0
self._cache = asyncio.Queue(maxsize=self._maxsize)
self._cwd = cwd
def __repr__(self):
return 'message_queue'
async def put(self, channel, data):
await self._cache.put((channel, data))
async def get(self):
channel, data = await self._cache.get()
return channel, data
async def next(self):
self._cursor += 1
def task_done(self):
return self._cache.task_done()
async def reload(self):
self._cursor = 0
def empty(self):
return self._cache.empty()
def qsize(self):
return self._cache.qsize()
class MessageWorker:
    """Producer-side facade over a single MessageQueue."""

    __slots__ = ['queue']

    def __init__(self, cwd='', maxsize=-1):
        self.queue = MessageQueue(cwd, maxsize=maxsize)

    async def initial(self):
        # Reset the queue's cursor before serving.
        await self.queue.reload()

    async def produce(self, channel, data):
        # Enqueue first so the log line only appears for accepted messages.
        await self.queue.put(channel, data)
        logger.info(f'{channel} input')
class MessageConsumer:
    """Fans queued messages out to every publisher registered for the channel."""

    __slots__ = ['publishers', 'queue']

    def __init__(self, queue: MessageQueue, publishers: List[Publisher]):
        self.queue = queue
        self.publishers = publishers

    def register(self, publisher: Publisher):
        """Add another publisher to the fan-out list."""
        self.publishers.append(publisher)

    async def consume(self):
        """Run forever, dispatching each queued (channel, data) pair.

        Publishing is fire-and-forget: each matching publisher runs in its
        own task so one slow or broken publisher cannot stall the loop.
        """
        async def _publish(task, channel, data):
            # BUG FIX: channel/data are now passed in explicitly. The original
            # closed over the loop variables, so a task scheduled with
            # ensure_future could observe values from a *later* iteration of
            # the while-loop by the time it actually ran.
            try:
                await task.publish(data)
                logger.info(f'{channel} publish to {task.__class__.__name__}')
            except Exception as ex:
                logger.error(
                    f'{channel}:'
                    f'[{ex.__class__.__name__}] {task.__class__.__name__} broken {ex}'
                )

        while True:
            channel, data = await self.queue.get()
            for publisher in self.publishers:
                if publisher.contains_channel(channel):
                    asyncio.ensure_future(_publish(publisher, channel, data))
            await self.queue.next()
def __create_task(message):
    """Unpickle an inbound command, verify its signature, return (channel, data).

    NOTE(review): pickle.loads on network-supplied bytes can execute arbitrary
    code; the signature is only checked *after* unpickling, so this trusts the
    transport. Flagged for security review.
    """
    command = pickle.loads(message)
    if 'signature' not in command:
        raise ParamsError()
    signature = command.pop('signature')
    # The signature covers the command dict's repr (minus the signature itself).
    if signature != cryptutils.sha256(str(command) + 'NadoUnit'):
        raise ParamsError()
    if 'channel' not in command or 'data' not in command:
        raise ParamsError()
    return command['channel'], command['data']
def __struct(data):
    """Serialize *data* with pickle protocol 5 and expose it as a zero-copy view."""
    payload = pickle.dumps(data, protocol=5)
    return memoryview(payload)
def setup(
    work_dir: str = '',
    port: int = PORT,
    publishers: List[Publisher] = None,
    maxsize: int = -1,
    consumers: int = -1
):
    """Bootstrap the MQ server: queue, consumer tasks and the TCP listener.

    Blocks in ``loop.run_forever()`` until interrupted; KeyboardInterrupt /
    SystemExit are re-raised after the server is closed.
    NOTE(review): ``publishers`` defaults to None and is handed straight to
    MessageConsumer (whose register() appends to it) -- confirm callers
    always pass a list.
    """
    loop = asyncio.get_event_loop()
    worker = MessageWorker(work_dir, maxsize)
    loop.run_until_complete(worker.queue.reload())
    # Default consumer count mirrors ThreadPoolExecutor's worker heuristic.
    consumers = consumers if consumers > 0 else min(32, (os.cpu_count() or 1) + 4)
    for i in range(consumers):
        logger.info(f'Consumer {i + 1} started')
        consumer = MessageConsumer(worker.queue, publishers)
        asyncio.ensure_future(consumer.consume())

    async def handle(reader, writer):
        # Wire protocol: a 16-byte length prefix ending in CRLFCRLF, followed
        # by a pickled command body of that many bytes.
        try:
            content_length = int((await reader.read(16)).removesuffix(b'\r\n\r\n'))
            if content_length > MAX_SIZE:
                raise OutOfBoundError()
            # NOTE(review): reader.read(n) may return fewer than n bytes on a
            # slow connection -- readexactly() would be stricter; confirm intent.
            message = await reader.read(content_length)
            channel, data = __create_task(message)
            await worker.produce(channel, data)
            res = data_format.copy()
            res['success'] = True
            writer.write(__struct(res))
        except (ParamsError, OutOfBoundError) as ex:
            # Protocol-level failures are reported back to the client; the
            # error code is carried in str(ex).
            res = data_format.copy()
            res['success'] = False
            res['message'] = str(ex)
            writer.write(__struct(res))
        finally:
            await writer.drain()
            writer.close()

    coro = asyncio.start_server(handle, '', port)
    server = loop.run_until_complete(coro)
    # Serve requests until Ctrl+C is pressed
    logger.info('Serving on {}'.format(server.sockets[0].getsockname()))
    try:
        loop.run_forever()
    except (KeyboardInterrupt, SystemExit):
        # Close the server
        server.close()
        loop.run_until_complete(server.wait_closed())
        loop.close()
        raise
|
# MIT License
#
# Copyright (c) 2020 <NAME> <tony[dot]wu(at)nyu[dot]edu>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import csv
import logging
import os
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Callable, Set
import simplejson as json
from ..utils import JSONDict
class MappingExporter(ABC):
    """Routes JSON items to files whose names are derived from the item itself.

    ``filename`` is a %-style template (e.g. ``'%(user)s.txt'``) expanded with
    each item; opened handles are cached in ``self.files``.
    """

    def __init__(self, output: Path, filename: str, escape: Callable[[str], str] = None):
        self.output = output  # base directory for all exported files
        self.filename = filename  # %-style template expanded per item
        self.ext = ''.join(Path(filename).suffixes)
        self.escape = escape or (lambda s: s)  # optional sanitizer for expanded names
        self.files = {}  # path -> open file handle cache
        self.logger = logging.getLogger('exporter')
        self.opened = 0  # handles opened since the last bulk close

    @abstractmethod
    def format(self, item: JSONDict):
        """Render one item to whatever gets written; subclasses override."""
        return item

    def get_file(self, item: JSONDict):
        """Resolve (handle, path, is_new_file) for *item*, opening on demand."""
        # Cap the number of simultaneously open handles; closed handles are
        # lazily reopened in append mode by open_file when touched again.
        if self.opened > 200:
            for f in self.files.values():
                f.close()
            self.opened = 0
        filename = self.escape(self.filename % item)
        # NOTE(review): raises IndexError if the expanded name is empty -- confirm.
        if filename[-1] == '/':
            # NOTE(review): '(unknown)' below looks like a mangled placeholder
            # (a directory-style name probably meant to gain an index file
            # under it) -- verify against upstream before relying on it.
            filename = f'(unknown)index{self.ext}'
        if filename == '.':
            filename = '-.'
        if filename == '..':
            filename = '-..'
        path = self.output / filename
        f, new = self.open_file(path)
        return f, path, new

    def open_file(self, path):
        """Return (handle, is_new_file), (re)opening *path* in append mode."""
        out = self.files.get(path)
        is_newfile = out is None  # first time this path is seen (not merely closed)
        if not out or out.closed:
            os.makedirs(path.parent, exist_ok=True)
            if is_newfile:
                self.logger.info(f'New file {path}')
            self.files[path] = out = open(path, 'a+')
            self.opened += 1
        return out, is_newfile

    def write(self, item: JSONDict):
        """Append the formatted item, newline-terminated, to its target file."""
        out, _, _ = self.get_file(item)
        out.write(f'{self.format(item)}\n')

    def close(self):
        """Close every cached handle; warn if nothing was ever written."""
        if not self.files:
            self.logger.warning('Exported nothing!')
        for f in self.files.values():
            f.close()

    def __enter__(self):
        return self

    def __exit__(self, typ, val=None, tb=None):
        # Always close, then manually re-raise whatever exception was in
        # flight. NOTE(review): re-raising here (instead of returning False
        # to propagate) rebuilds the exception object -- confirm intentional.
        self.close()
        if not typ:
            return True
        if val is None:
            if tb is None:
                raise typ
            val = typ()
        if tb is not None:
            val = val.with_traceback(tb)
        raise val
class MappingJSONExporter(MappingExporter):
    """Collects items per target file keyed by *key*; JSON is dumped on close."""

    def __init__(self, key: str, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.key = key      # field whose value becomes the JSON object key
        self.storage = {}   # path -> {item[key]: item}

    def format(self, item: JSONDict):
        return super().format(item)

    def write(self, item: JSONDict):
        # Buffer only; nothing hits the disk until close().
        _, path, _ = self.get_file(item)
        self.storage.setdefault(path, {})[item[self.key]] = item

    def close(self):
        for path in self.files:
            handle, _ = self.open_file(path)
            json.dump(self.storage[path], handle)
        return super().close()
class MappingLineExporter(MappingExporter):
    """Writes the value of a single field, one line per item."""

    def __init__(self, key: str, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.key = key

    def format(self, item):
        # The parent's write() appends the trailing newline.
        return item[self.key]
class MappingCSVExporter(MappingExporter):
    """CSV flavour: one csv.DictWriter per output file, header written once."""

    def __init__(self, fieldnames: Set[str], *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.writers = {}  # path -> csv.DictWriter (or False to force a rebuild)
        self.fieldnames = fieldnames  # empty/None: locked in from the first item

    def format(self, item: JSONDict):
        return super().format(item)

    def get_file(self, item: JSONDict):
        """Return the DictWriter for *item*'s target file, creating as needed."""
        # Parallel cap to the parent's open-handle cap: invalidate cached
        # writers so they get rebuilt against (possibly reopened) handles.
        if len(self.writers) > 200:
            for k in self.writers:
                self.writers[k] = False
        f, fn, new = super().get_file(item)
        if not self.fieldnames:
            # Lazily lock in the column set from the first item seen.
            self.fieldnames = tuple(item.keys())
        writer = self.writers.get(fn)
        # Rebuild unless this is the file's very first use: `not new` re-binds
        # the writer because the parent may have silently reopened the
        # underlying handle after a bulk close.
        if not writer or not new:
            writer = self.writers[fn] = csv.DictWriter(f, self.fieldnames, extrasaction='ignore')
        if new:
            writer.writeheader()
        return writer

    def write(self, item: JSONDict):
        self.get_file(item).writerow({**item})
|
from candidate import Candidate
from sqlalchemy import ForeignKey, Column, Integer, String
from sqlalchemy.orm import relationship, backref
from sqlalchemy.dialects import mysql
from sqlalchemy.schema import FetchedValue
from base import Base
import json
import datetime
from datetime import timedelta
import calendar
class Interview(Base):
    """ORM model for a single interview slot.

    The *_ts columns are maintained by MySQL triggers and are effectively
    read-only from model code.
    """

    __tablename__ = 'interview'

    id = Column(Integer, primary_key = True)
    interviewer_email = Column(String(50), ForeignKey('interviewer.email'))
    start_time = Column(mysql.TIMESTAMP, nullable = False)
    end_time = Column(mysql.TIMESTAMP, nullable = False)
    candidate_name = Column(String(50), ForeignKey('candidate.candidate_name'), nullable = False)
    phone_number_to_use = Column(String(10), nullable = False)
    room = Column(String(50), nullable = False)
    technical_score = Column(mysql.FLOAT(), nullable = True)
    cultural_score = Column(mysql.FLOAT(), nullable = True)
    notes = Column(mysql.TEXT, nullable = True)
    number_of_pings = Column(mysql.TINYINT, nullable = False, default = 0)
    # -1 encodes "no hire decision recorded yet"
    hire = Column(mysql.TINYINT, nullable = False, default = -1)
    # The following timestamp columns are only updated by triggers. They are effectively readonly for the model code.
    # An update to these columns will be caught by a MySQL trigger and will not be performed.
    technical_score_ts = Column(mysql.TIMESTAMP, server_default = FetchedValue(), server_onupdate = FetchedValue(for_update=True))
    cultural_score_ts = Column(mysql.TIMESTAMP, server_default = FetchedValue(), server_onupdate = FetchedValue(for_update=True))
    notes_ts = Column(mysql.TIMESTAMP, server_default = FetchedValue(), server_onupdate = FetchedValue(for_update=True))

    def __init__(self, email, start_time, end_time, candidate_name, room):
        self.interviewer_email = email
        self.start_time = start_time
        self.end_time = end_time
        self.candidate_name = candidate_name
        self.room = room
        self.number_of_pings = 0
        self.hire = -1

    def __repr__(self):
        return "<Interview<'%s'>" % self.candidate_name

    def is_coffee_break(self):
        """True when the slot is short enough to count as a coffee break."""
        return (self.end_time - self.start_time).total_seconds() <= self.candidate.department.maximum_coffee_break_length

    @staticmethod
    def datetime_to_string(ts):
        """Render *ts* as ISO-8601 with the host's current UTC offset appended."""
        if ts is None:
            return None
        # Derive the local UTC offset, rounded to the nearest minute.
        dtnow = datetime.datetime.now()
        dtutcnow = datetime.datetime.utcnow()
        delta = dtnow - dtutcnow
        hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60, 60)
        return "%s%+03d:%02d" % (ts.isoformat(), hh, mm)

    def dict_representation(self, show_scores = False):
        """Serializable dict of this interview; score fields only when requested."""
        interviewer = self.interviewer.dict_representation()
        if self.interviewer.avatar_url is not None:
            interviewer['avatar_url'] = self.interviewer.avatar_url
        interview_dict = {
            'id' : self.id,
            'interviewer' : interviewer,
            'start_time' : Interview.datetime_to_string(self.start_time),
            'end_time' : Interview.datetime_to_string(self.end_time),
            'candidate_name' : self.candidate_name,
            'room' : self.room,
            'number_of_pings' : self.number_of_pings,
            'is_coffee_break' : self.is_coffee_break(),
            'technical_score_ts' : Interview.datetime_to_string(self.technical_score_ts),
            'cultural_score_ts' : Interview.datetime_to_string(self.cultural_score_ts),
            'notes_ts' : Interview.datetime_to_string(self.notes_ts)
        }
        if show_scores is True:
            interview_dict['technical_score'] = self.technical_score
            interview_dict['cultural_score'] = self.cultural_score
            interview_dict['hire'] = self.hire
            interview_dict['notes'] = self.notes
        return interview_dict

    def time_to_feedback(self):
        """How long after the 5-minute grace period the cultural feedback arrived.

        Returns timedelta.max when no cultural score has been recorded yet;
        early feedback is clamped to a zero timedelta.
        """
        time_to_feedback = timedelta.max
        if self.cultural_score_ts is not None:
            time_to_feedback = self.cultural_score_ts - (self.end_time + timedelta(minutes = 5))
            if time_to_feedback < timedelta(seconds = 0):
                # BUG FIX: was the bare int 0, which made the return type
                # inconsistent (int vs timedelta) and breaks timedelta
                # comparisons in callers.
                time_to_feedback = timedelta(seconds = 0)
        return time_to_feedback
|
<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#########################################################################
# Copyright/License Notice (Modified BSD License) #
#########################################################################
#########################################################################
# Copyright (c) 2008-2012, 2014, <NAME> - 2E0DPK/M6DPK #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: - #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the author nor the names of its contributors #
# may be used to endorse or promote products derived from this #
# software without specific prior written permission. #
# #
# * This Software is not to be used for safety purposes. #
# #
# * You agree and abide the Disclaimer for your Boltek products. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR #
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT #
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, #
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT #
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, #
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY #
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT #
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE #
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #
#########################################################################
###################################################
# StormForce XR (XMLRPC) Server #
###################################################
# Version: v0.5.0 #
###################################################
from twisted.web import xmlrpc
###########
# Classes #
###########
class Database():
def __init__(self, server, database, username, password, debug_mode):
from danlog import DanLog
from datetime import datetime
import psycopg2
self.datetime = datetime
self.log = DanLog("Database")
self.psycopg2 = psycopg2
self.DEBUG_MODE = debug_mode
self.POSTGRESQL_DATABASE = database
self.POSTGRESQL_PASSWORD = password
self.POSTGRESQL_SERVER = server
self.POSTGRESQL_USERNAME = username
def addColumnSQLString(self, table, column_name, column_type):
if self.DEBUG_MODE:
self.log.info("Starting...")
return """
DO $$
BEGIN
BEGIN
ALTER TABLE %s ADD COLUMN %s %s;
EXCEPTION WHEN duplicate_column THEN
-- Nothing
END;
END
$$
""" % (table, column_name, column_type)
def createTableSQLString(self, table):
if self.DEBUG_MODE:
self.log.info("Starting...")
return """
DO $$
BEGIN
BEGIN
CREATE TABLE %s(ID bigserial PRIMARY KEY);
EXCEPTION WHEN duplicate_table THEN
-- Nothing
END;
END
$$
""" % table
def createIndexSQLString(self, name, table, columns, conn = []):
if self.DEBUG_MODE:
self.log.info("Starting...")
return """
DO $$
BEGIN
IF NOT EXISTS (
SELECT c.relname
FROM pg_class c
INNER JOIN pg_namespace n ON n.oid = c.relnamespace
WHERE c.relname = lower('%s') AND n.nspname = 'public' AND c.relkind = 'i'
) THEN
CREATE INDEX %s ON %s (%s);
END IF;
END
$$
""" % (name, name, table, columns)
def connectToDatabase(self, conn = []):
if self.DEBUG_MODE:
self.log.info("Starting...")
newconn = self.psycopg2.connect(database = self.POSTGRESQL_DATABASE, host = self.POSTGRESQL_SERVER, user = self.POSTGRESQL_USERNAME, password = self.POSTGRESQL_PASSWORD)
newconn.autocommit = True
newconn.set_isolation_level(self.psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
if len(conn) > 0:
for item in conn:
item = None
del conn
conn.append(newconn)
def danLookup(self, strfield, strtable, strwhere, parameters = (), conn = []):
if self.DEBUG_MODE:
self.log.info("Starting...")
if len(conn) >= 1:
strsql = ""
if strwhere == "":
strsql = "SELECT %s FROM %s LIMIT 1" % (strfield, strtable)
else:
strsql = "SELECT %s FROM %s WHERE %s LIMIT 1" % (strfield, strtable, strwhere)
strsql = strsql.replace("?", """%s""") # "?" doesn't seem to work, work around it
try:
cur = conn[0].cursor()
cur.execute(strsql, parameters)
row = cur.fetchone()
if row is not None:
row = row[0]
else:
row = None
cur.close()
return row
except Exception, ex:
if self.DEBUG_MODE:
self.log.error(str(ex))
return None
else:
if self.DEBUG_MODE:
self.log.warn("Connection has not been passed.")
return None
def disconnectFromDatabase(self, conn = []):
if self.DEBUG_MODE:
self.log.info("Starting...")
if len(conn) == 1:
conn[0].close()
conn[0] = None
def executeSQLCommand(self, strsql, parameters = (), conn = []):
if self.DEBUG_MODE:
self.log.info("Starting...")
if len(conn) >= 1:
try:
strsql = strsql.replace("?", """%s""") # "?" doesn't seem to work, work around it
cur = conn[0].cursor()
cur.execute(strsql, parameters)
cur.close()
return True
except Exception, ex:
if self.DEBUG_MODE:
self.log.error(str(ex))
self.log.error(str(strsql))
return False
else:
if self.DEBUG_MODE:
self.log.warn("Connection has not been passed.")
return None
def executeSQLQuery(self, strsql, parameters = (), conn = []):
if self.DEBUG_MODE:
self.log.info("Starting...")
if len(conn) >= 1:
try:
strsql = strsql.replace("?", """%s""") # "?" doesn't seem to work, work around it
cur = conn[0].cursor()
cur.execute(strsql, parameters)
rows = cur.fetchall()
cur.close()
return rows
except Exception, ex:
if self.DEBUG_MODE:
self.log.error(str(ex))
return None
else:
if self.DEBUG_MODE:
self.log.warn("Connection has not been passed.")
return None
def sqlDateTime(self):
if self.DEBUG_MODE:
self.log.info("Starting...")
t = self.datetime.now()
return str(t.strftime("%Y/%m/%d %H:%M:%S"))
class EFM100():
    """Driver for the Boltek EFM-100 field mill on a serial port (Python 2).

    Spawns a daemon thread that accumulates serial bytes and fires
    ``trigger_sub`` once per complete '$'-prefixed, newline-terminated
    sentence.
    """

    def __init__(self, port, speed, bits, parity, stopbits, trigger_sub = None, debug_mode = False):
        # Imports deferred so the module can load without the hardware stack.
        from danlog import DanLog
        import threading
        import time
        self.log = DanLog("EFM100")
        self.serial = None
        self.thread = None
        self.thread_alive = False
        self.threading = threading
        self.time = time
        self.trigger = trigger_sub
        self.DEBUG_MODE = debug_mode
        # Sentence framing: polarity prefixes and '$'..'\n' delimiters.
        self.EFM_NEGATIVE = "$-"
        self.EFM_POSITIVE = "$+"
        self.SENTENCE_END = "\n"
        self.SENTENCE_START = "$"
        # Setup everything we need
        self.log.info("Initialising EFM-100...")
        self.setupUnit(port, speed, bits, parity, stopbits)
        self.start()

    def dispose(self):
        """Stop the RX thread loop and release the serial port."""
        if self.DEBUG_MODE:
            self.log.info("Running...")
        self.thread_alive = False
        if self.serial is not None:
            self.serial.close()
            self.serial = None

    def rxThread(self):
        """Poll the serial port, extract complete sentences, fire the trigger."""
        if self.DEBUG_MODE:
            self.log.info("Running...")
        buffer = bytearray()
        while self.thread_alive:
            extracted = None
            bytes = self.serial.inWaiting()  # NOTE: shadows the builtin 'bytes'
            if bytes > 0:
                if self.DEBUG_MODE:
                    self.log.info("%d bytes are waiting in the serial buffer." % bytes)
                # Ensure we're thread-safe
                # NOTE(review): this Lock is created fresh each iteration, so
                # it cannot actually exclude another thread -- confirm intent.
                lock = self.threading.Lock()
                with lock:
                    try:
                        buffer.extend(self.serial.read(bytes))
                    except Exception, ex:
                        if self.DEBUG_MODE:
                            self.log.error(str(ex))
                x = buffer.find(self.SENTENCE_START)
                if x <> -1:
                    y = buffer.find(self.SENTENCE_END, x)
                    if y <> -1:
                        if self.DEBUG_MODE:
                            self.log.info("A sentence has been found in the buffer.")
                        y += len(self.SENTENCE_END)
                        # There appears to be complete sentence in there, extract it
                        extracted = str(buffer[x:y])
            if extracted is not None:
                # Remove it from the buffer first
                newbuf = str(buffer).replace(extracted, "", 1)
                buffer = bytearray()
                buffer.extend(newbuf)
                # Now trigger any events
                if self.trigger is not None:
                    if self.DEBUG_MODE:
                        self.log.info("Triggering sentence subroutine...")
                    self.trigger(extracted)
                else:
                    self.log.warn("Trigger subroutine not defined, cannot raise sentence event.")
            self.time.sleep(0.01)

    def setupUnit(self, port, speed, bits, parity, stopbits):
        """Open and flush the serial port with the given line settings."""
        if self.DEBUG_MODE:
            self.log.info("Running...")
        import serial
        self.serial = serial.Serial()
        self.serial.baudrate = speed
        self.serial.bytesize = bits
        self.serial.parity = parity
        self.serial.port = port
        self.serial.stopbits = stopbits
        self.serial.timeout = 10.
        self.serial.writeTimeout = None
        self.serial.xonxoff = False
        self.serial.open()
        self.serial.flushInput()
        self.serial.flushOutput()

    def start(self):
        """Launch rxThread as a daemon thread."""
        if self.DEBUG_MODE:
            self.log.info("Running...")
        self.thread_alive = True
        self.thread = self.threading.Thread(target = self.rxThread)
        self.thread.setDaemon(1)
        self.thread.start()
class LD250(EFM100):
    # Driver for the Boltek LD-250 lightning detector; inherits the RX
    # thread and sentence framing from EFM100, adding squelch setup.
    #
    # LD sentence key:
    #
    # <bbb.b> = bearing to strike 0-359.9 degrees
    # <ccc> = close strike rate 0-999 strikes/minute
    # <ca> = close alarm status (0 = inactive, 1 = active)
    # <cs> = checksum
    # <ddd> = corrected strike distance (0-300 miles)
    # <hhh.h> = current heading from GPS/compass
    # <sa> = severe alarm status (0 = inactive, 1 = active)
    # <sss> = total strike rate 0-999 strikes/minute
    # <uuu> = uncorrected strike distance (0-300 miles)
    def __init__(self, port, speed, bits, parity, stopbits, squelch = 0, trigger_sub = None, debug_mode = False):
        from danlog import DanLog
        import threading
        import time
        self.log = DanLog("LD250")
        self.serial = None
        self.squelch = int(squelch)
        self.thread = None
        self.thread_alive = False
        self.threading = threading
        self.time = time
        self.trigger = trigger_sub
        self.DEBUG_MODE = debug_mode
        # NMEA-style sentence prefixes emitted by the unit.
        self.LD_NOISE = "$WIMLN" # $WIMLN*<cs>
        self.LD_STATUS = "$WIMST" # $WIMST,<ccc>,<sss>,<ca>,<sa>,<hhh.h>*<cs>
        self.LD_STRIKE = "$WIMLI" # $WIMLI,<ddd>,<uuu>,<bbb.b>*<cs>
        self.SENTENCE_END = "\n"
        self.SENTENCE_START = "$"
        # Setup everything we need
        self.log.info("Initialising LD-250...")
        self.setupUnit(port, speed, bits, parity, stopbits)
        self.start()

    def setupUnit(self, port, speed, bits, parity, stopbits):
        """Open the serial port, then try up to 3 times to set the squelch."""
        if self.DEBUG_MODE:
            self.log.info("Running...")
        import serial
        self.serial = serial.Serial()
        self.serial.baudrate = speed
        self.serial.bytesize = bits
        self.serial.parity = parity
        self.serial.port = port
        self.serial.stopbits = stopbits
        self.serial.timeout = 10.
        self.serial.writeTimeout = None
        # NOTE(review): parent EFM100 sets xonxoff = False; None here relies
        # on pyserial treating it as falsy -- confirm intentional.
        self.serial.xonxoff = None
        self.serial.open()
        self.serial.flushInput()
        self.serial.flushOutput()
        # Attempt to set the squelch
        self.log.info("Setting squelch...")
        ok = False
        for x in xrange(0, 3):
            self.serial.write("SQ%d\r" % self.squelch)
            self.serial.flush()
            o = self.serial.readline().replace("\r", "").replace("\n", "")
            # The unit echoes the accepted squelch value back.
            if o.startswith(":SQUELCH %d (0-15)" % self.squelch):
                ok = True
                break
        if not ok:
            if self.DEBUG_MODE:
                self.log.warn("The squelch doesn't appear to have been set.")
class LD350(EFM100):
    # Driver for the Boltek LD-350 lightning detector; like LD250 but with a
    # different status sentence and a wider (0-100) squelch range.
    #
    # LD sentence key:
    #
    # <bbb.b> = bearing to strike 0-359.9 degrees
    # <ccc> = close strike rate 0-999 strikes/minute
    # <ca> = close alarm status (0 = inactive, 1 = active)
    # <cs> = checksum
    # <ddd> = corrected strike distance (0-300 miles)
    # <hhh.h> = current heading from GPS/compass
    # <sa> = severe alarm status (0 = inactive, 1 = active)
    # <ldns1> = lightning network 1 connection state
    # <ldns2> = lightning network 2 connection state
    # <sss> = total strike rate 0-999 strikes/minute
    # <uuu> = uncorrected strike distance (0-300 miles)
    def __init__(self, port, speed, bits, parity, stopbits, squelch = 0, trigger_sub = None, debug_mode = False):
        from danlog import DanLog
        import threading
        import time
        self.log = DanLog("LD350")
        self.serial = None
        self.squelch = int(squelch)
        self.thread = None
        self.thread_alive = False
        self.threading = threading
        self.time = time
        self.trigger = trigger_sub
        self.DEBUG_MODE = debug_mode
        # NMEA-style sentence prefixes emitted by the unit.
        self.LD_NOISE = "$WIMLN" # $WIMLN*<cs>
        self.LD_STATUS = "$WIMSU" # $WIMSU,<ccc>,<sss>,<ca>,<sa>,<ldns1>,<ldns2>,<hhh.h>*<cs>
        self.LD_STRIKE = "$WIMLI" # $WIMLI,<ddd>,<uuu>,<bbb.b>*<cs>
        self.SENTENCE_END = "\n"
        self.SENTENCE_START = "$"
        # Setup everything we need
        self.log.info("Initialising LD-350...")
        self.setupUnit(port, speed, bits, parity, stopbits)
        self.start()

    def setupUnit(self, port, speed, bits, parity, stopbits):
        """Open the serial port, then try up to 3 times to set the squelch."""
        if self.DEBUG_MODE:
            self.log.info("Running...")
        import serial
        self.serial = serial.Serial()
        self.serial.baudrate = speed
        self.serial.bytesize = bits
        self.serial.parity = parity
        self.serial.port = port
        self.serial.stopbits = stopbits
        self.serial.timeout = 10.
        self.serial.writeTimeout = None
        # NOTE(review): parent EFM100 sets xonxoff = False; None here relies
        # on pyserial treating it as falsy -- confirm intentional.
        self.serial.xonxoff = None
        self.serial.open()
        self.serial.flushInput()
        self.serial.flushOutput()
        # Attempt to set the squelch
        self.log.info("Setting squelch...")
        ok = False
        for x in xrange(0, 3):
            self.serial.write("SQ%d\r" % self.squelch)
            self.serial.flush()
            o = self.serial.readline().replace("\r", "").replace("\n", "")
            # The unit echoes the accepted squelch value back.
            if o.startswith(":SQUELCH %d (0-100)" % self.squelch):
                ok = True
                break
        if not ok:
            if self.DEBUG_MODE:
                self.log.warn("The squelch doesn't appear to have been set.")
class SXRServer():
    def __init__(self):
        """Load modules, capture library references, and set all defaults.

        Values set here are defaults only; XML_SETTINGS_FILE presumably
        overrides them later -- TODO confirm (the settings loader is not
        visible in this chunk).
        """
        from danlog import DanLog
        from datetime import datetime
        from twisted.internet import defer, reactor
        from twisted.web import resource, server
        from xml.dom import minidom
        import math
        import os
        import sys
        import threading
        import time
        # Runtime state
        self.db = None # Initialised in main()
        self.cron_alive = False
        self.cron_thread = None
        self.datetime = datetime
        self.efmunit = None
        self.ldunit = None
        self.log = DanLog("SXR")
        # Module references kept on the instance so methods avoid re-imports.
        self.math = math
        self.minidom = minidom
        self.os = os
        self.sys = sys
        self.threading = threading
        self.time = time
        self.twisted_internet_defer = defer
        self.twisted_internet_reactor = reactor
        self.twisted_web_resource = resource
        self.twisted_web_server = server
        # Detection / database defaults
        self.CLOSE_DISTANCE = 15
        self.DB_VERSION = 1008
        self.DEBUG_MODE = False
        # Boltek EFM-100 serial defaults
        self.EFM100_BITS = 8
        self.EFM100_NAME = "Boltek EFM-100"
        self.EFM100_PARITY = "N"
        self.EFM100_PORT = ""
        self.EFM100_SPEED = 9600
        self.EFM100_STOPBITS = 1
        # Boltek LD-250 serial defaults
        self.LD250_BITS = 8
        self.LD250_NAME = "Boltek LD-250"
        self.LD250_PARITY = "N"
        self.LD250_PORT = "/dev/ttyu0"
        self.LD250_SQUELCH = 0
        self.LD250_SPEED = 9600
        self.LD250_STOPBITS = 1
        self.LD250_USE_UNCORRECTED_STRIKES = False
        # Strike map geometry (pixels)
        self.MAP_MATRIX_CENTRE = (300, 300)
        self.MAP_MATRIX_SIZE = (600, 600)
        # PostgreSQL connection defaults
        self.POSTGRESQL_DATABASE = "stormforce_xr"
        self.POSTGRESQL_PASSWORD = ""
        self.POSTGRESQL_SERVER = "localhost"
        self.POSTGRESQL_USERNAME = "stormforce"
        # Server identity
        self.SERVER_COPYRIGHT = "(c)2008-2012, 2014 - <NAME>"
        self.SERVER_NAME = "StormForce XR"
        self.SERVER_PORT = 7397
        self.SERVER_VERSION = "0.5.0"
        self.STRIKE_COPYRIGHT = "Lightning Data (c) %d - <NAME>" % self.datetime.now().year
        # TRAC (storm tracking) tunables
        self.TRAC_DETECTION_METHOD = 0
        self.TRAC_SENSITIVITY = 10
        self.TRAC_STORM_WIDTH = 30 # miles
        self.TRAC_UPDATE_TIME = 2 # minutes
        self.TRAC_VERSION = "0.4.0"
        self.XML_SETTINGS_FILE = "sxrserver-settings.xml"
def cBool(self, value):
if self.DEBUG_MODE:
self.log.info("Starting...")
if str(value).lower() == "false" or str(value) == "0":
return False
elif str(value).lower() == "true" or str(value) == "1":
return True
else:
return False
def cron(self):
	"""Housekeeping loop, run on a daemon thread (started from main()).

	Polls the clock once per second.  At the top of every minute it
	resets the per-minute counters and clamps any counters that have
	exceeded their display maxima; at midnight it additionally resets
	the daily grand totals.  Every TRAC_UPDATE_TIME minutes it spawns a
	TRAC storm-detection pass on its own thread.  Runs until
	self.cron_alive is cleared (see exitProgram()).
	"""
	if self.DEBUG_MODE:
		self.log.info("Starting...")
	while self.cron_alive:
		t = self.datetime.now()
		if t.second == 0:
			myconn = []
			self.db.connectToDatabase(myconn)
			if (t.hour == 0 and t.minute == 0):
				# New day, reset grand counters
				if self.DEBUG_MODE:
					self.log.info("New day has started, resetting grand counters...")
				# (log text below is a copy/paste leftover -- it covers all grand totals, not just noise)
				if not self.db.executeSQLCommand("UPDATE tblStrikeCounter SET CloseTotal = %(N)s, NoiseTotal = %(N)s, StrikesTotal = %(N)s, StrikesOutOfRange = %(N)s", {"N": 0}, myconn):
					if self.DEBUG_MODE:
						self.log.warn("Unable to write the zero noise total into the database.")
			# Reset the per minute counters
			if self.DEBUG_MODE:
				self.log.info("New minute has started, resetting minute counters...")
			if not self.db.executeSQLCommand("UPDATE tblStrikeCounter SET CloseMinute = %(N)s, NoiseMinute = %(N)s, StrikesMinute = %(N)s", {"N": 0}, myconn):
				if self.DEBUG_MODE:
					self.log.warn("Unable to write the zero noise minute into the database.")
			# Reset counters if excessive (clamp to the displayable maxima:
			# 999 per minute, 999999 per day)
			if self.DEBUG_MODE:
				self.log.info("New minute has started, resetting counters if excessive...")
			if not self.db.executeSQLCommand("UPDATE tblStrikeCounter SET StrikesMinute = %(StrikesMinute)s WHERE StrikesMinute > %(MaxStrikes)s", {"StrikesMinute": 0, "MaxStrikes": 999}, myconn):
				if self.DEBUG_MODE:
					self.log.warn("Unable to write the zero excessive strike minute into the database.")
			if not self.db.executeSQLCommand("UPDATE tblStrikeCounter SET NoiseMinute = %(NoiseMinute)s WHERE NoiseMinute > %(MaxNoise)s", {"NoiseMinute": 0, "MaxNoise": 999}, myconn):
				if self.DEBUG_MODE:
					self.log.warn("Unable to write the zero excessive noise minute into the database.")
			if not self.db.executeSQLCommand("UPDATE tblStrikeCounter SET StrikesOutOfRange = %(StrikesOutOfRange)s WHERE StrikesOutOfRange > %(MaxOOR)s", {"StrikesOutOfRange": 0, "MaxOOR": 999}, myconn):
				if self.DEBUG_MODE:
					self.log.warn("Unable to write the zero excessive strike out of range into the database.")
			if not self.db.executeSQLCommand("UPDATE tblStrikeCounter SET CloseTotal = %(CloseTotal)s WHERE CloseTotal > %(MaxClose)s", {"CloseTotal": 0, "MaxClose": 999999}, myconn):
				if self.DEBUG_MODE:
					self.log.warn("Unable to write the zero excessive close total into the database.")
			if not self.db.executeSQLCommand("UPDATE tblStrikeCounter SET NoiseTotal = %(NoiseTotal)s WHERE NoiseTotal > %(MaxNoise)s", {"NoiseTotal": 0, "MaxNoise": 999999}, myconn):
				if self.DEBUG_MODE:
					self.log.warn("Unable to write the zero excessive noise total into the database.")
			if not self.db.executeSQLCommand("UPDATE tblStrikeCounter SET StrikesTotal = %(StrikesTotal)s WHERE StrikesTotal > %(MaxStrikes)s", {"StrikesTotal": 0, "MaxStrikes": 999999}, myconn):
				if self.DEBUG_MODE:
					self.log.warn("Unable to write the zero excessive strike total into the database.")
			self.db.disconnectFromDatabase(myconn)
			if t.minute % self.TRAC_UPDATE_TIME == 0:
				# See if TRAC finds any thunderstorms
				r = self.threading.Thread(target = self.trac)
				r.setDaemon(1)
				r.start()
		self.time.sleep(1.)
def exitProgram(self):
if self.DEBUG_MODE:
self.log.info("Starting...")
# Cron
self.cron_alive = False
# EFM-100
if self.efmunit is not None:
self.efmunit.dispose()
self.efmunit = None
# LD-250
if self.ldunit is not None:
self.ldunit.dispose()
self.ldunit = None
self.sys.exit(0)
def ifNoneReturnZero(self, strinput):
if self.DEBUG_MODE:
self.log.info("Starting...")
if strinput is None:
return 0
else:
return strinput
def iif(self, testval, trueval, falseval):
if self.DEBUG_MODE:
self.log.info("Starting...")
if testval:
return trueval
else:
return falseval
def main(self):
if self.DEBUG_MODE:
self.log.info("Starting...")
print """
#########################################################################
# Copyright/License Notice (Modified BSD License) #
#########################################################################
#########################################################################
# Copyright (c) 2008-2012, 2014, <NAME> - 2E0DPK/M6DPK #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: - #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the author nor the names of its contributors #
# may be used to endorse or promote products derived from this #
# software without specific prior written permission. #
# #
# * This Software is not to be used for safety purposes. #
# #
# * You agree and abide the Disclaimer for your Boltek products. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR #
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT #
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, #
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT #
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, #
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY #
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT #
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE #
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #
#########################################################################
"""
self.log.info("")
self.log.info("StormForce XR - Server")
self.log.info("======================")
self.log.info("Checking settings...")
if not self.os.path.exists(self.XML_SETTINGS_FILE):
self.log.warn("The XML settings file doesn't exist, create one...")
self.xmlXRSettingsWrite()
self.log.info("The XML settings file has been created using the default settings. Please edit it and restart the SXR server once you're happy with the settings.")
exitProgram()
else:
self.log.info("Reading XML settings...")
self.xmlXRSettingsRead()
# This will ensure it will have any new settings in
if self.os.path.exists(self.XML_SETTINGS_FILE + ".bak"):
self.os.unlink(self.XML_SETTINGS_FILE + ".bak")
self.os.rename(self.XML_SETTINGS_FILE, self.XML_SETTINGS_FILE + ".bak")
self.xmlXRSettingsWrite()
self.log.info("Setting up database...")
self.db = Database(self.POSTGRESQL_SERVER, self.POSTGRESQL_DATABASE, self.POSTGRESQL_USERNAME, self.POSTGRESQL_PASSWORD, self.DEBUG_MODE)
self.updateDatabase()
self.log.info("Connecting to LD-250...")
self.ldunit = LD250(self.LD250_PORT, self.LD250_SPEED, self.LD250_BITS, self.LD250_PARITY, self.LD250_STOPBITS, self.LD250_SQUELCH, self.sentenceRX, self.DEBUG_MODE)
if self.EFM100_PORT <> "":
self.log.info("Connecting to EFM-100...")
self.efmunit = EFM100(self.EFM100_PORT, self.EFM100_SPEED, self.EFM100_BITS, self.EFM100_PARITY, self.EFM100_STOPBITS, self.sentenceRX, self.DEBUG_MODE)
self.log.info("Starting cron...")
self.cron_alive = True
cron_thread = self.threading.Thread(target = self.cron)
cron_thread.setDaemon(1)
cron_thread.start()
self.log.info("Configuring server...")
f = XRXMLRPCFunctions(self.POSTGRESQL_SERVER, self.POSTGRESQL_DATABASE, self.POSTGRESQL_USERNAME, self.POSTGRESQL_PASSWORD, self.DEBUG_MODE)
xmlrpc.addIntrospection(f)
s = self.twisted_web_resource.Resource()
s.putChild("xmlrpc", f)
self.log.info("Starting server...")
try:
self.twisted_internet_reactor.listenTCP(self.SERVER_PORT, self.twisted_web_server.Site(s))
self.twisted_internet_reactor.run()
except KeyboardInterrupt:
pass
except Exception, ex:
self.log.error(str(ex))
self.log.info("Exiting...")
self.exitProgram()
def sentenceRX(self, sentence):
	"""Callback invoked for every sentence received from the serial reader threads.

	Dispatches on the sentence prefix -- $WIMLN (noise), $WIMSU (status),
	$WIMLI (strike) -- and otherwise tries the EFM-100 field-level
	sentences.  Each result is written straight into the database.
	"""
	if self.DEBUG_MODE:
		self.log.info("Starting...")
	myconn = []
	self.db.connectToDatabase(myconn)
	# Strip line endings; "*" separates the payload from the trailing checksum
	sentence = sentence.replace("\r", "").replace("\n", "")
	star_split = sentence.split("*")
	if sentence.startswith(self.ldunit.LD_NOISE):
		# Noise
		if not self.db.executeSQLCommand("UPDATE tblStrikeCounter SET NoiseMinute = NoiseMinute + %(N)s, NoiseTotal = NoiseTotal + %(N)s", {"N": 1}, myconn):
			if self.DEBUG_MODE:
				self.log.warn("Unable to write the noise minute into the database.")
	elif sentence.startswith(self.ldunit.LD_STATUS):
		# Status update
		if len(star_split) == 2:
			# data_split[0] is the "$WIMSU" prefix; data starts at [1]
			data_split = star_split[0].split(",")
			# NOTE(review): 6 fields matches a "$WIMSU,<ccc>,<sss>,<ca>,<sa>,<hhh.h>"
			# layout; the header comment elsewhere lists 8 fields (with
			# <ldns1>/<ldns2>) -- confirm against the unit in use.
			if len(data_split) == 6:
				close_strikes = int(data_split[1])     # currently unused
				total_strikes = int(data_split[2])     # currently unused
				close_alarm = self.cBool(data_split[3])
				severe_alarm = self.cBool(data_split[4])
				gps_heading = float(data_split[5])     # currently unused
				# Update the alarm status
				if not self.db.executeSQLCommand("UPDATE tblUnitStatus SET CloseAlarm = %(CloseAlarm)s, SevereAlarm = %(SevereAlarm)s, ReceiverLastDetected = LOCALTIMESTAMP WHERE Hardware = %(Hardware)s", {"CloseAlarm": close_alarm, "SevereAlarm": severe_alarm, "Hardware": self.LD250_NAME}, myconn):
					if self.DEBUG_MODE:
						self.log.warn("Unable to update the database with the unit status.")
	elif sentence.startswith(self.ldunit.LD_STRIKE):
		# Strike
		if len(star_split) == 2:
			data_split = star_split[0].split(",")
			if len(data_split) == 4:
				strike_distance_corrected = int(data_split[1])
				strike_distance = int(data_split[2])
				strike_angle = float(data_split[3])
				strike_type = "CG"       # currently unused -- the INSERT below uses literals
				strike_polarity = ""     # currently unused -- the INSERT below uses literals
				# Use a bit of trigonometry to get the X,Y co-ords
				#
				#     ^
				#    /|
				#   / |
				# H /  | O
				#  /   |
				# /    |
				# / )X |
				# /-------
				# A
				new_distance = 0.
				if self.LD250_USE_UNCORRECTED_STRIKES:
					new_distance = strike_distance
				else:
					new_distance = strike_distance_corrected
				# Polar -> Cartesian around the map centre: sin for X,
				# -cos for Y (screen Y grows downwards)
				o = self.math.sin(self.math.radians(strike_angle)) * float(new_distance)
				a = self.math.cos(self.math.radians(strike_angle)) * float(new_distance)
				if not self.db.executeSQLCommand("INSERT INTO tblStrikes(X, Y, DateTimeOfStrike, CorrectedStrikeDistance, UncorrectedStrikeDistance, StrikeAngle, StrikeType, StrikePolarity) VALUES(%(X)s, %(Y)s, LOCALTIMESTAMP, %(CorrectedStrikeDistance)s, %(UncorrectedStrikeDistance)s, %(StrikeAngle)s, %(StrikeType)s, %(StrikePolarity)s)", {"X": int(self.MAP_MATRIX_CENTRE[0] + o), "Y": int(self.MAP_MATRIX_CENTRE[1] + -a), "CorrectedStrikeDistance": strike_distance_corrected, "UncorrectedStrikeDistance": strike_distance, "StrikeAngle": strike_angle, "StrikeType": "CG", "StrikePolarity": ""}, myconn):
					if self.DEBUG_MODE:
						self.log.warn("Unable to write the strike into the database.")
				# Only strikes inside the 300 mile range count towards the totals
				if new_distance <= 300.:
					if not self.db.executeSQLCommand("UPDATE tblStrikeCounter SET StrikesMinute = StrikesMinute + %(N)s, StrikesTotal = StrikesTotal + %(N)s", {"N": 1}, myconn):
						if self.DEBUG_MODE:
							self.log.warn("Unable to write the strike into the database.")
					if new_distance <= self.CLOSE_DISTANCE:
						if not self.db.executeSQLCommand("UPDATE tblStrikeCounter SET CloseMinute = CloseMinute + %(N)s, CloseTotal = CloseTotal + %(N)s", {"N": 1}, myconn):
							if self.DEBUG_MODE:
								self.log.warn("Unable to write the close strike into the database.")
				else:
					if not self.db.executeSQLCommand("UPDATE tblStrikeCounter SET StrikesOutOfRange = StrikesOutOfRange + %(N)s", {"N": 1}, myconn):
						if self.DEBUG_MODE:
							self.log.warn("Unable to write the out of range strike into the database.")
	else:
		# Not an LD sentence -- try the EFM-100 field mill, if attached
		if self.efmunit is not None:
			if sentence.startswith(self.efmunit.EFM_POSITIVE) or sentence.startswith(self.efmunit.EFM_NEGATIVE):
				data_split = star_split[0].split(",")
				if len(data_split) == 2:
					electric_field_level = data_split[0]
					fault_present = self.cBool(data_split[1])
					# Field level arrives with a leading "$" (e.g. "$+01.23") -- strip it
					efl = float(electric_field_level.replace("$", ""))
					if not self.db.executeSQLCommand("INSERT INTO tblElectricFieldStrength(DateTimeOfMeasurement, kVm) VALUES(LOCALTIMESTAMP, %(kVm)s)", {"kVm": efl}, myconn):
						if self.DEBUG_MODE:
							self.log.warn("Failed to write out the field strength to the database.")
					if not self.db.executeSQLCommand("UPDATE tblUnitStatus SET SevereAlarm = %(SevereAlarm)s, ReceiverLastDetected = LOCALTIMESTAMP WHERE Hardware = %(Hardware)s", {"SevereAlarm": fault_present, "Hardware": self.EFM100_NAME}, myconn):
						if self.DEBUG_MODE:
							self.log.warn("Unable to update the database with the unit status.")
	self.db.disconnectFromDatabase(myconn)
def trac(self):
if self.DEBUG_MODE:
self.log.info("Starting...")
# Moved to new class
try:
t = TRAC(self.POSTGRESQL_SERVER, self.POSTGRESQL_DATABASE, self.POSTGRESQL_USERNAME, self.POSTGRESQL_PASSWORD, self.TRAC_DETECTION_METHOD, self.DEBUG_MODE)
t.run()
t = None
except Exception, ex:
self.log.error("An error occurred while running TRAC.")
self.log.error(ex)
finally:
if self.DEBUG_MODE:
self.log.info("Completed.")
def updateDatabase(self):
if self.DEBUG_MODE:
self.log.info("Starting...")
myconn = []
self.db.connectToDatabase(myconn)
##########
# Tables #
##########
if self.DEBUG_MODE:
self.log.info("Creating tables...")
# tblElectricFieldStrength
self.log.info("TABLE: tblElectricFieldStrength")
self.db.executeSQLCommand(self.db.createTableSQLString("tblElectricFieldStrength"), conn = myconn)
self.db.executeSQLCommand(self.db.addColumnSQLString("tblElectricFieldStrength", "DateTimeOfMeasurement", "timestamp"), conn = myconn)
self.db.executeSQLCommand(self.db.addColumnSQLString("tblElectricFieldStrength", "kVm", "decimal(4,2)"), conn = myconn)
# tblServerDetails
self.log.info("TABLE: tblServerDetails")
self.db.executeSQLCommand("DROP TABLE IF EXISTS tblServerDetails CASCADE", conn = myconn)
self.db.executeSQLCommand("CREATE TABLE tblServerDetails(ID bigserial PRIMARY KEY)", conn = myconn) # MEMORY
self.db.executeSQLCommand("ALTER TABLE tblServerDetails ADD COLUMN ServerStarted timestamp", conn = myconn)
self.db.executeSQLCommand("ALTER TABLE tblServerDetails ADD COLUMN ServerApplication varchar(20)", conn = myconn)
self.db.executeSQLCommand("ALTER TABLE tblServerDetails ADD COLUMN ServerCopyright varchar(100)", conn = myconn)
self.db.executeSQLCommand("ALTER TABLE tblServerDetails ADD COLUMN ServerVersion varchar(8)", conn = myconn)
self.db.executeSQLCommand("ALTER TABLE tblServerDetails ADD COLUMN StrikeCopyright varchar(100)", conn = myconn)
self.db.executeSQLCommand("INSERT INTO tblServerDetails(ServerStarted, ServerApplication, ServerCopyright, ServerVersion, StrikeCopyright) VALUES(LOCALTIMESTAMP, %(ServerApplication)s, %(ServerCopyright)s, %(ServerVersion)s, %(StrikeCopyright)s)", {"ServerApplication": self.SERVER_NAME, "ServerCopyright": self.SERVER_COPYRIGHT, "ServerVersion": self.SERVER_VERSION, "StrikeCopyright": self.STRIKE_COPYRIGHT}, myconn)
# tblStrikeCounter
self.log.info("TABLE: tblStrikeCounter")
self.db.executeSQLCommand("DROP TABLE IF EXISTS tblStrikeCounter CASCADE", conn = myconn)
self.db.executeSQLCommand("CREATE TABLE tblStrikeCounter(ID bigserial PRIMARY KEY)", conn = myconn) # MEMORY
self.db.executeSQLCommand("ALTER TABLE tblStrikeCounter ADD COLUMN CloseMinute int", conn = myconn)
self.db.executeSQLCommand("ALTER TABLE tblStrikeCounter ADD COLUMN CloseTotal int", conn = myconn)
self.db.executeSQLCommand("ALTER TABLE tblStrikeCounter ADD COLUMN NoiseMinute int", conn = myconn)
self.db.executeSQLCommand("ALTER TABLE tblStrikeCounter ADD COLUMN NoiseTotal int", conn = myconn)
self.db.executeSQLCommand("ALTER TABLE tblStrikeCounter ADD COLUMN StrikesMinute int", conn = myconn)
self.db.executeSQLCommand("ALTER TABLE tblStrikeCounter ADD COLUMN StrikesTotal int", conn = myconn)
self.db.executeSQLCommand("ALTER TABLE tblStrikeCounter ADD COLUMN StrikesOutOfRange int", conn = myconn)
self.db.executeSQLCommand("INSERT INTO tblStrikeCounter(CloseMinute, CloseTotal, NoiseMinute, NoiseTotal, StrikesMinute, StrikesTotal, StrikesOutOfRange) VALUES(%(N)s, %(N)s, %(N)s, %(N)s, %(N)s, %(N)s, %(N)s)", {"N": 0}, myconn)
# tblStrikes
self.log.info("TABLE: tblStrikes")
self.db.executeSQLCommand(self.db.createTableSQLString("tblStrikes"), conn = myconn)
self.db.executeSQLCommand(self.db.addColumnSQLString("tblStrikes", "X", "smallint"), conn = myconn)
self.db.executeSQLCommand(self.db.addColumnSQLString("tblStrikes", "Y", "smallint"), conn = myconn)
self.db.executeSQLCommand(self.db.addColumnSQLString("tblStrikes", "DateTimeOfStrike", "timestamp"), conn = myconn)
self.db.executeSQLCommand(self.db.addColumnSQLString("tblStrikes", "CorrectedStrikeDistance", "decimal(6,3)"), conn = myconn)
self.db.executeSQLCommand(self.db.addColumnSQLString("tblStrikes", "UncorrectedStrikeDistance", "decimal(6,3)"), conn = myconn)
self.db.executeSQLCommand(self.db.addColumnSQLString("tblStrikes", "StrikeType", "varchar(2)"), conn = myconn)
self.db.executeSQLCommand(self.db.addColumnSQLString("tblStrikes", "StrikePolarity", "varchar(1)"), conn = myconn)
self.db.executeSQLCommand(self.db.addColumnSQLString("tblStrikes", "StrikeAngle", "decimal(4,1)"), conn = myconn)
# tblSystem
self.log.info("TABLE: tblSystem")
self.db.executeSQLCommand(self.db.createTableSQLString("tblSystem"), conn = myconn)
self.db.executeSQLCommand(self.db.addColumnSQLString("tblSystem", "DatabaseVersion", "int"), conn = myconn)
rowcount = int(self.ifNoneReturnZero(self.db.danLookup("COUNT(ID)", "tblSystem", "", conn = myconn)))
if rowcount == 0:
self.db.executeSQLCommand("INSERT INTO tblSystem(DatabaseVersion) VALUES(%(DatabaseVersion)s)", {"DatabaseVersion": 0}, myconn)
# tblTRACDetails
self.log.info("TABLE: tblTRACDetails")
self.db.executeSQLCommand(self.db.createTableSQLString("tblTRACDetails"), conn = myconn)
self.db.executeSQLCommand(self.db.addColumnSQLString("tblTRACDetails", "HeaderID", "bigint"), conn = myconn)
self.db.executeSQLCommand(self.db.addColumnSQLString("tblTRACDetails", "DateTimeOfReading", "timestamp"), conn = myconn)
self.db.executeSQLCommand(self.db.addColumnSQLString("tblTRACDetails", "DateTimeOfLastStrike", "timestamp"), conn = myconn)
self.db.executeSQLCommand(self.db.addColumnSQLString("tblTRACDetails", "CurrentStrikeRate", "int"), conn = myconn)
self.db.executeSQLCommand(self.db.addColumnSQLString("tblTRACDetails", "TotalStrikes", "int"), conn = myconn)
self.db.executeSQLCommand(self.db.addColumnSQLString("tblTRACDetails", "Intensity", "varchar(12)"), conn = myconn)
# tblTRACGrid
self.log.info("TABLE: tblTRACGrid")
self.db.executeSQLCommand(self.db.createTableSQLString("tblTRACGrid"), conn = myconn)
self.db.executeSQLCommand(self.db.addColumnSQLString("tblTRACGrid", "X", "smallint"), conn = myconn)
self.db.executeSQLCommand(self.db.addColumnSQLString("tblTRACGrid", "Y", "smallint"), conn = myconn)
self.db.executeSQLCommand(self.db.addColumnSQLString("tblTRACGrid", "Counter", "int"), conn = myconn)
rowcount = int(self.ifNoneReturnZero(self.db.danLookup("COUNT(ID)", "tblTRACGrid", "", conn = myconn)))
if rowcount < 360000:
self.log.warn("The TRAC grid hasn't been populated (or is invalid), this may take a while to build (%d)..." % rowcount)
self.db.executeSQLCommand("""
DO $$
BEGIN
DELETE FROM tblTRACGrid;
FOR y IN 0..%d LOOP
FOR x IN 0..%d LOOP
INSERT INTO tblTRACGrid(X, Y, Counter) VALUES(x, y, 0);
END LOOP;
END LOOP;
END
$$
""" % (self.MAP_MATRIX_SIZE[1] - 1, self.MAP_MATRIX_SIZE[0] - 1), conn = myconn)
else:
self.db.executeSQLCommand("UPDATE tblTRACGrid SET Counter = 0 WHERE Counter <> 0", conn = myconn)
# tblTRACHeader
self.log.info("TABLE: tblTRACHeader")
self.db.executeSQLCommand(self.db.createTableSQLString("tblTRACHeader"), conn = myconn)
self.db.executeSQLCommand(self.db.addColumnSQLString("tblTRACHeader", "GID", "varchar(40)"), conn = myconn)
self.db.executeSQLCommand(self.db.addColumnSQLString("tblTRACHeader", "CRC32", "varchar(8)"), conn = myconn)
self.db.executeSQLCommand(self.db.addColumnSQLString("tblTRACHeader", "DateTimeOfDiscovery", "timestamp"), conn = myconn)
self.db.executeSQLCommand(self.db.addColumnSQLString("tblTRACHeader", "Bearing", "decimal(10,5)"), conn = myconn)
self.db.executeSQLCommand(self.db.addColumnSQLString("tblTRACHeader", "Distance", "decimal(10,5)"), conn = myconn)
self.db.executeSQLCommand(self.db.addColumnSQLString("tblTRACHeader", "DetectionMethod", "smallint"), conn = myconn)
# tblTRACStatus
self.log.info("TABLE: tblTRACStatus")
self.db.executeSQLCommand("DROP TABLE IF EXISTS tblTRACStatus", conn = myconn)
self.db.executeSQLCommand("CREATE TABLE tblTRACStatus(ID bigserial PRIMARY KEY)", conn = myconn) # MEMORY
self.db.executeSQLCommand("ALTER TABLE tblTRACStatus ADD COLUMN Version varchar(6)", conn = myconn)
self.db.executeSQLCommand("ALTER TABLE tblTRACStatus ADD COLUMN DetectionMethod smallint", conn = myconn)
self.db.executeSQLCommand("ALTER TABLE tblTRACStatus ADD COLUMN Active boolean", conn = myconn)
self.db.executeSQLCommand("ALTER TABLE tblTRACStatus ADD COLUMN NoOfStorms smallint", conn = myconn)
self.db.executeSQLCommand("ALTER TABLE tblTRACStatus ADD COLUMN MostActive varchar(14)", conn = myconn)
self.db.executeSQLCommand("ALTER TABLE tblTRACStatus ADD COLUMN MostActiveDistance decimal(10,5)", conn = myconn)
self.db.executeSQLCommand("ALTER TABLE tblTRACStatus ADD COLUMN Closest varchar(14)", conn = myconn)
self.db.executeSQLCommand("ALTER TABLE tblTRACStatus ADD COLUMN ClosestDistance decimal(10,5)", conn = myconn)
self.db.executeSQLCommand("ALTER TABLE tblTRACStatus ADD COLUMN Width smallint", conn = myconn)
self.db.executeSQLCommand("INSERT INTO tblTRACStatus(Version, DetectionMethod, Active, NoOfStorms, MostActive, MostActiveDistance, Closest, ClosestDistance, Width) VALUES(%(Version)s, %(DetectionMethod)s, %(Active)s, %(NoOfStorms)s, %(MostActive)s, %(MostActiveDistance)s, %(Closest)s, %(ClosestDistance)s, %(Width)s)", {"Version": self.TRAC_VERSION, "DetectionMethod": self.TRAC_DETECTION_METHOD, "Active": False, "NoOfStorms": 0, "MostActive": "", "MostActiveDistance": 0, "Closest": "", "ClosestDistance": 0, "Width": self.TRAC_STORM_WIDTH}, myconn)
# tblTRACStorms
self.log.info("TABLE: tblTRACStorms")
self.db.executeSQLCommand("DROP TABLE IF EXISTS tblTRACStorms CASCADE", conn = myconn)
self.db.executeSQLCommand("CREATE TABLE tblTRACStorms(ID bigserial PRIMARY KEY)", conn = myconn) # MEMORY
self.db.executeSQLCommand("ALTER TABLE tblTRACStorms ADD COLUMN X smallint", conn = myconn)
self.db.executeSQLCommand("ALTER TABLE tblTRACStorms ADD COLUMN Y smallint", conn = myconn)
self.db.executeSQLCommand("ALTER TABLE tblTRACStorms ADD COLUMN XOffset smallint", conn = myconn)
self.db.executeSQLCommand("ALTER TABLE tblTRACStorms ADD COLUMN YOffset smallint", conn = myconn)
self.db.executeSQLCommand("ALTER TABLE tblTRACStorms ADD COLUMN Name varchar(14)", conn = myconn)
self.db.executeSQLCommand("ALTER TABLE tblTRACStorms ADD COLUMN Intensity smallint", conn = myconn)
self.db.executeSQLCommand("ALTER TABLE tblTRACStorms ADD COLUMN Distance decimal(10,5)", conn = myconn)
# tblUnitStatus
self.log.info("TABLE: tblUnitStatus")
self.db.executeSQLCommand("DROP TABLE IF EXISTS tblUnitStatus CASCADE", conn = myconn)
self.db.executeSQLCommand("CREATE TABLE tblUnitStatus(ID bigserial PRIMARY KEY)", conn = myconn) # MEMORY
self.db.executeSQLCommand("ALTER TABLE tblUnitStatus ADD COLUMN Hardware varchar(20)", conn = myconn)
self.db.executeSQLCommand("ALTER TABLE tblUnitStatus ADD COLUMN SquelchLevel smallint", conn = myconn)
self.db.executeSQLCommand("ALTER TABLE tblUnitStatus ADD COLUMN UseUncorrectedStrikes boolean", conn = myconn)
self.db.executeSQLCommand("ALTER TABLE tblUnitStatus ADD COLUMN CloseAlarm boolean", conn = myconn)
self.db.executeSQLCommand("ALTER TABLE tblUnitStatus ADD COLUMN SevereAlarm boolean", conn = myconn)
self.db.executeSQLCommand("ALTER TABLE tblUnitStatus ADD COLUMN ReceiverLastDetected timestamp", conn = myconn)
self.db.executeSQLCommand("INSERT INTO tblUnitStatus(Hardware, SquelchLevel, UseUncorrectedStrikes, CloseAlarm, SevereAlarm, ReceiverLastDetected) VALUES(%(Hardware)s, %(SquelchLevel)s, %(UseUncorrectedStrikes)s, %(CloseAlarm)s, %(SevereAlarm)s, NULL)", {"Hardware": self.LD250_NAME, "SquelchLevel": self.LD250_SQUELCH, "UseUncorrectedStrikes": self.LD250_USE_UNCORRECTED_STRIKES, "CloseAlarm": False, "SevereAlarm": False}, myconn)
if self.EFM100_PORT <> "":
self.db.executeSQLCommand("INSERT INTO tblUnitStatus(Hardware, SquelchLevel, UseUncorrectedStrikes, CloseAlarm, SevereAlarm, ReceiverLastDetected) VALUES(%(Hardware)s, %(SquelchLevel)s, %(UseUncorrectedStrikes)s, %(CloseAlarm)s, %(SevereAlarm)s, NULL)", {"Hardware": self.EFM100_NAME, "SquelchLevel": 0, "UseUncorrectedStrikes": False, "CloseAlarm": False, "SevereAlarm": False}, myconn)
#########
# Views #
#########
if self.DEBUG_MODE:
self.log.info("Creating views...")
self.db.executeSQLCommand("DROP VIEW IF EXISTS vwStrikesPersistence CASCADE", conn = myconn)
self.db.executeSQLCommand("DROP VIEW IF EXISTS vwStrikesSummaryByMinute CASCADE", conn = myconn)
self.db.executeSQLCommand("DROP VIEW IF EXISTS vwTRACPersistence CASCADE", conn = myconn)
self.db.executeSQLCommand("DROP VIEW IF EXISTS vwTRACStrikesPeak CASCADE", conn = myconn)
self.db.executeSQLCommand("DROP VIEW IF EXISTS vwUnitStatus CASCADE", conn = myconn)
self.log.info("VIEW: vwStrikesPersistence")
self.db.executeSQLCommand("""CREATE VIEW vwStrikesPersistence AS
SELECT ID, X, Y, DateTimeOfStrike, CAST(EXTRACT(epoch from (LOCALTIMESTAMP - DateTimeOfStrike)) AS smallint) AS StrikeAge
FROM tblStrikes
WHERE DateTimeOfStrike >= LOCALTIMESTAMP - INTERVAL '1 HOUR' AND DateTimeOfStrike >= (SELECT ServerStarted FROM tblServerDetails LIMIT 1)""", conn = myconn)
self.log.info("VIEW: vwStrikesSummaryByMinute")
self.db.executeSQLCommand("""CREATE VIEW vwStrikesSummaryByMinute AS
SELECT CAST(to_char(DateTimeOfStrike, 'YYYY/MM/DD HH24:MI:00') AS timestamp) AS Minute, ((CAST(EXTRACT(epoch from (CAST(to_char(LOCALTIMESTAMP, 'YYYY/MM/DD HH24:MI:00') AS timestamp) - CAST(to_char(DateTimeOfStrike, 'YYYY/MM/DD HH24:MI:00') AS timestamp))) AS smallint)) / 60) AS StrikeAge, COUNT(ID) AS NumberOfStrikes
FROM vwStrikesPersistence
GROUP BY CAST(to_char(DateTimeOfStrike, 'YYYY/MM/DD HH24:MI:00') AS timestamp), ((CAST(EXTRACT(epoch from (CAST(to_char(CAST(to_char(LOCALTIMESTAMP, 'YYYY/MM/DD HH24:MI:00') AS timestamp), 'YYYY/MM/DD HH24:MI:00') AS timestamp) - CAST(to_char(DateTimeOfStrike, 'YYYY/MM/DD HH24:MI:00') AS timestamp))) AS smallint)) / 60)""", conn = myconn)
self.log.info("VIEW: vwTRACPersistence")
self.db.executeSQLCommand("""CREATE VIEW vwTRACPersistence AS
SELECT ID, X, Y, DateTimeOfStrike, EXTRACT(epoch from (LOCALTIMESTAMP - DateTimeOfStrike)) AS StrikeAge
FROM tblStrikes
WHERE DateTimeOfStrike >= LOCALTIMESTAMP - INTERVAL '30 MINUTES' AND DateTimeOfStrike >= (SELECT ServerStarted FROM tblServerDetails LIMIT 1)""", conn = myconn)
self.log.info("VIEW: vwTRACStrikesPeak")
self.db.executeSQLCommand("""CREATE VIEW vwTRACStrikesPeak AS
SELECT COUNT(ID) AS StrikeCount, CAST(to_char(DateTimeOfStrike, 'YYYY/MM/DD HH24:MI:00') AS timestamp) AS PeakTime, MIN(X) AS MinX, MIN(Y) AS MinY
FROM vwTRACPersistence
GROUP BY CAST(to_char(DateTimeOfStrike, 'YYYY/MM/DD HH24:MI:00') AS timestamp)""", conn = myconn)
self.log.info("VIEW: vwUnitStatus")
self.db.executeSQLCommand("""CREATE VIEW vwUnitStatus AS
SELECT ID, Hardware, SquelchLevel, UseUncorrectedStrikes, CloseAlarm, SevereAlarm, ReceiverLastDetected, (CASE WHEN ReceiverLastDetected IS NULL THEN TRUE ELSE (CASE WHEN EXTRACT(epoch from (LOCALTIMESTAMP - ReceiverLastDetected)) >= 5 THEN TRUE ELSE FALSE END) END) AS ReceiverLost
FROM tblUnitStatus""", conn = myconn)
###########
# Indices #
###########
if self.DEBUG_MODE:
self.log.info("Indices...")
self.log.info("INDEX: tblElectricFieldStrength_DateTimeOfMeasurement")
self.db.executeSQLCommand(self.db.createIndexSQLString("tblElectricFieldStrength_DateTimeOfMeasurement", "tblElectricFieldStrength", "DateTimeOfMeasurement"), conn = myconn)
self.log.info("INDEX: tblStrikes_X_Y")
self.db.executeSQLCommand(self.db.createIndexSQLString("tblStrikes_X_Y", "tblStrikes", "X, Y"), conn = myconn)
self.log.info("INDEX: tblStrikes_DateTimeOfStrike")
self.db.executeSQLCommand(self.db.createIndexSQLString("tblStrikes_DateTimeOfStrike", "tblStrikes", "DateTimeOfStrike"), conn = myconn)
self.log.info("INDEX: tblTRACDetails_HeaderID")
self.db.executeSQLCommand(self.db.createIndexSQLString("tblTRACDetails_HeaderID", "tblTRACDetails", "HeaderID"), conn = myconn)
self.log.info("INDEX: tblTRACGrid_X_Y")
self.db.executeSQLCommand(self.db.createIndexSQLString("tblTRACGrid_X_Y", "tblTRACGrid", "X, Y"), conn = myconn)
#############
# Functions #
#############
if self.DEBUG_MODE:
self.log.info("Functions...")
self.log.info("FUNCTION: fnTRAC")
s = """
CREATE OR REPLACE FUNCTION fnTRAC(detectionmethod INT) RETURNS INT AS $$
DECLARE
strikes_header RECORD;
strikes_details RECORD;
trend RECORD;
TRAC_FULL SMALLINT;
TRAC_HALF SMALLINT;
TRAC_QUARTER SMALLINT;
TRAC_SENSITIVITY SMALLINT;
TRAC_UPDATE_TIME SMALLINT;
x_offset INT;
y_offset INT;
offset_x INT;
offset_y INT;
top_left BIGINT;
top_right BIGINT;
bottom_left BIGINT;
bottom_right BIGINT;
tl INT;
tr INT;
bl INT;
br INT;
new_x INT;
new_y INT;
o INT;
a INT;
deg_offset DECIMAL(10,5);
degx DECIMAL(10,5);
deg DECIMAL(10,5);
distance DECIMAL(10,5);
abs_distance DECIMAL(10,5);
total_count BIGINT;
first_recorded_activity TIMESTAMP;
last_recorded_activity TIMESTAMP;
current_strike_rate BIGINT;
peak_strike_rate BIGINT;
guid VARCHAR;
guid_ss INT;
crc32 VARCHAR;
intensity_class VARCHAR;
intensity_trend VARCHAR;
intensity_trend_symbol VARCHAR;
rises INT;
falls INT;
average_strike_count DECIMAL(10,5);
diff DECIMAL(10,5);
amount DECIMAL(10,5);
current_name VARCHAR;
tracid BIGINT;
trac_most_active VARCHAR;
trac_most_active_distance DECIMAL(10,5);
trac_closest VARCHAR;
trac_closest_distance DECIMAL(10,5);
corrected_strikes_in_sector BIGINT;
strikes_in_sector BIGINT;
storms_found INT;
BEGIN
-- Populate the variables
x_offset := 0;
y_offset := 0;
storms_found := 0;
trac_closest_distance := 300.;
trac_most_active_distance := 0.;
-- Populate the "constants"
TRAC_FULL := (SELECT Width FROM tblTRACStatus LIMIT 1);
IF TRAC_FULL %% 2 > 0 THEN
TRAC_FULL := TRAC_FULL - 1;
END IF;
TRAC_HALF := TRAC_FULL / 2;
TRAC_QUARTER := TRAC_HALF / 2;
TRAC_SENSITIVITY := 5;
TRAC_UPDATE_TIME := 1;
RAISE NOTICE 'TRAC detection method is %%', detectionmethod;
RAISE NOTICE 'TRAC_FULL is %%', TRAC_FULL;
RAISE NOTICE 'TRAC_HALF is %%', TRAC_HALF;
RAISE NOTICE 'TRAC_QUARTER is %%', TRAC_QUARTER;
RAISE NOTICE 'TRAC_SENSITIVITY is %%', TRAC_SENSITIVITY;
-- Reset any tables
UPDATE tblTRACGrid SET Counter = 0 WHERE Counter <> 0;
UPDATE tblTRACStatus SET Active = FALSE, NoOfStorms = 0, MostActive = '', MostActiveDistance = 0, Closest = '', ClosestDistance = 0;
DELETE FROM tblTRACStorms;
-- Get the unique areas where the strikes are
DROP TABLE IF EXISTS tmpStrikesHeader;
IF detectionmethod = 0 THEN
-- Fixed-grid
CREATE TEMPORARY TABLE tmpStrikesHeader AS
SELECT div(X, TRAC_FULL) * TRAC_FULL AS X, div(Y, TRAC_FULL) * TRAC_FULL AS Y
FROM vwTRACPersistence
GROUP BY div(X, TRAC_FULL) * TRAC_FULL, div(Y, TRAC_FULL) * TRAC_FULL
HAVING COUNT(ID) >= TRAC_SENSITIVITY
;
ELSIF detectionmethod = 1 THEN
-- Freestyle-grid
CREATE TEMPORARY TABLE tmpStrikesHeader AS
SELECT DISTINCT X, Y
FROM vwTRACPersistence
GROUP BY X, Y
;
ELSE
RAISE EXCEPTION 'Unknown TRAC detection method %%.', detectionmethod;
END IF;
FOR strikes_header IN SELECT X, Y
FROM tmpStrikesHeader
ORDER BY X, Y
LOOP
IF detectionmethod = 0 THEN
strikes_in_sector = COALESCE(( SELECT COUNT(ID) - (SELECT SUM(Counter) FROM tblTRACGrid WHERE (X >= strikes_header.X AND X < strikes_header.X + TRAC_FULL) AND (Y >= strikes_header.Y AND Y < strikes_header.Y + TRAC_FULL)) AS NoOfStrikes
FROM vwTRACPersistence
WHERE (X >= strikes_header.X AND X < strikes_header.X + TRAC_FULL) AND (Y >= strikes_header.Y AND Y < strikes_header.Y + TRAC_FULL)
), 0);
ELSIF detectionmethod = 1 THEN
strikes_in_sector = COALESCE(( SELECT COUNT(ID) - (SELECT SUM(Counter) FROM tblTRACGrid WHERE (X >= strikes_header.X - TRAC_HALF AND X < strikes_header.X + TRAC_HALF) AND (Y >= strikes_header.Y - TRAC_HALF AND Y < strikes_header.Y + TRAC_HALF)) AS NoOfStrikes
FROM vwTRACPersistence
WHERE (X >= strikes_header.X - TRAC_HALF AND X < strikes_header.X + TRAC_HALF) AND (Y >= strikes_header.Y - TRAC_HALF AND Y < strikes_header.Y + TRAC_HALF)
), 0);
END IF;
IF strikes_in_sector = 0 THEN
RAISE NOTICE 'WARN: Zero strikes where found in the sector.';
END IF;
corrected_strikes_in_sector := strikes_in_sector;
RAISE NOTICE 'INFO: %% strikes were found within the vicinity of (%%, %%).', strikes_in_sector, strikes_header.X, strikes_header.Y;
IF strikes_in_sector >= TRAC_SENSITIVITY THEN
-- This "sector" may have a storm in it, dig deeper...
DROP TABLE IF EXISTS tmpStrikesDetails;
IF detectionmethod = 0 THEN
CREATE TEMPORARY TABLE tmpStrikesDetails AS
SELECT COUNT(ID) AS NoOfStrikes, (SELECT Counter FROM tblTRACGrid WHERE X = vwTRACPersistence.X AND Y = vwTRACPersistence.Y) AS TrackedStrikes, X, Y
FROM vwTRACPersistence
WHERE (X >= strikes_header.X AND X < strikes_header.X + TRAC_FULL) AND (Y >= strikes_header.Y AND Y < strikes_header.Y)
GROUP BY X, Y
;
ELSIF detectionmethod = 1 THEN
CREATE TEMPORARY TABLE tmpStrikesDetails AS
SELECT COUNT(ID) AS NoOfStrikes, (SELECT Counter FROM tblTRACGrid WHERE X = vwTRACPersistence.X AND Y = vwTRACPersistence.Y) AS TrackedStrikes, X, Y
FROM vwTRACPersistence
WHERE (X >= strikes_header.X - TRAC_HALF AND X < strikes_header.X + TRAC_HALF) AND (Y >= strikes_header.Y - TRAC_HALF AND Y < strikes_header.Y + TRAC_HALF)
GROUP BY X, Y
;
END IF;
FOR strikes_details IN SELECT NoOfStrikes, TrackedStrikes, X, Y
FROM tmpStrikesDetails
ORDER BY X, Y
LOOP
corrected_strikes_in_sector := corrected_strikes_in_sector - strikes_details.TrackedStrikes;
IF corrected_strikes_in_sector >= TRAC_SENSITIVITY THEN
UPDATE tblTRACGrid SET Counter = Counter + (strikes_details.NoOfStrikes - strikes_details.TrackedStrikes) WHERE X = strikes_details.X AND Y = strikes_details.Y;
END IF;
END LOOP;
DROP TABLE IF EXISTS tmpStrikesDetails;
IF corrected_strikes_in_sector >= TRAC_SENSITIVITY THEN
RAISE NOTICE 'INFO: Deep scan found a storm in (%%, %%).', strikes_header.X, strikes_header.Y;
x_offset := 0;
y_offset := 0;
storms_found := storms_found + 1;
-- Prepare to register the storm
IF detectionmethod = 0 THEN
-- No offset required
offset_x := strikes_header.X;
offset_y := strikes_header.Y;
ELSIF detectionmethod = 1 THEN
-- Apply the offset since we search *around* the strike
offset_x := strikes_header.X - TRAC_HALF;
offset_y := strikes_header.Y - TRAC_HALF;
END IF;
top_left := ( SELECT COUNT(ID) AS OffsetCount
FROM vwTRACPersistence
WHERE (X >= offset_x AND X < offset_x + TRAC_HALF) AND (Y >= offset_y AND Y < offset_y + TRAC_HALF)
);
top_right := ( SELECT COUNT(ID) AS OffsetCount
FROM vwTRACPersistence
WHERE (X >= offset_x + TRAC_HALF AND X < offset_x + TRAC_FULL) AND (Y >= offset_y AND Y < offset_y + TRAC_HALF)
);
bottom_left := ( SELECT COUNT(ID) AS OffsetCount
FROM vwTRACPersistence
WHERE (X >= offset_x AND X < offset_x + TRAC_HALF) AND (Y >= offset_y + TRAC_HALF AND Y < offset_y + TRAC_FULL)
);
bottom_right := ( SELECT COUNT(ID) AS OffsetCount
FROM vwTRACPersistence
WHERE (X >= offset_x + TRAC_HALF AND X < offset_x + TRAC_FULL) AND (Y >= offset_y + TRAC_HALF AND Y < offset_y + TRAC_FULL)
);
total_count := top_left + top_right + bottom_left + bottom_right;
IF total_count <> strikes_in_sector THEN
RAISE NOTICE 'WARN: The total strike count doesn''t appear match the count in the sector (%%, %%)', total_count, strikes_in_sector;
END IF;
RAISE NOTICE 'DEBUG: Offset 1 - %% %% %% %%', top_left, top_right, bottom_left, bottom_right;
tl := CAST((top_left / total_count) * CAST(TRAC_QUARTER AS DECIMAL) AS INT);
tr := CAST((top_right / total_count) * CAST(TRAC_QUARTER AS DECIMAL) AS INT);
bl := CAST((bottom_left / total_count) * CAST(TRAC_QUARTER AS DECIMAL) AS INT);
br := CAST((bottom_right / total_count) * CAST(TRAC_QUARTER AS DECIMAL) AS INT);
RAISE NOTICE 'DEBUG: Offset 2 - %% %% %% %%', tl, tr, bl, br;
-- The greater percentage will make the centre offset to the corner
x_offset := x_offset + -tl;
y_offset := y_offset + -tl;
x_offset := x_offset + tr;
y_offset := y_offset + -tr;
x_offset := x_offset + -bl;
y_offset := y_offset + bl;
x_offset := x_offset + br;
y_offset := y_offset + br;
-- Apply the offset since we search *around* the strike
IF detectionmethod = 1 THEN
x_offset := x_offset + -TRAC_HALF;
y_offset := y_offset + -TRAC_HALF;
END IF;
RAISE NOTICE 'DEBUG: Offset 3 - %% %%', x_offset, y_offset;
------------------------
-- Register the storm --
------------------------
UPDATE tblTRACStatus SET Active = TRUE, NoOfStorms = NoOfStorms + 1;
-- Calculate the degrees and miles from the X and Y points
new_x := strikes_header.X + x_offset;
new_y := strikes_header.Y + y_offset;
o := 0;
a := 0;
deg_offset := 0;
IF (new_x >= 0 and new_x < 300) and (new_y >= 0 and new_y < 300) THEN
-- Top left
o := 300 - new_x;
a := 300 - new_y;
deg_offset := 270;
ELSIF (new_x >= 300 and new_x < 600) and (new_y >= 0 and new_y < 300) THEN
-- Top right
o := new_x - 300;
a := 300 - new_y;
deg_offset := 0;
ELSIF (new_x >= 0 and new_x < 300) and (new_y >= 300 and new_y < 600) THEN
-- Bottom left
o := 300 - new_x;
a := new_y - 300;
deg_offset := 180;
ELSE
-- Bottom right
o := new_x - 300;
a := new_y - 300;
deg_offset := 90;
END IF;
-- Numbers will be zero based, so add one
o := o + 1;
a := a + 1;
RAISE NOTICE 'DEBUG: O = %%, A = %%', o, a;
-- Time for a bit of trigonometry
degx := degrees(atan(o / a));
deg := degx + deg_offset;
distance := sqrt(power(o, 2) + power(a, 2));
abs_distance := abs(distance);
RAISE NOTICE 'DEBUG: Degrees = %%, X = %%, H = %%', deg, degx, distance;
-- Gather some stats
IF detectionmethod = 0 THEN
first_recorded_activity := (SELECT MIN(DateTimeOfStrike) AS FirstRecordedActivity FROM vwTRACPersistence WHERE (X >= strikes_header.X AND X < strikes_header.X + TRAC_FULL) AND (Y >= strikes_header.Y AND Y < strikes_header.Y + TRAC_FULL));
last_recorded_activity := (SELECT MAX(DateTimeOfStrike) AS LastRecordedActivity FROM vwTRACPersistence WHERE (X >= strikes_header.X AND X < strikes_header.X + TRAC_FULL) AND (Y >= strikes_header.Y AND Y < strikes_header.Y + TRAC_FULL));
current_strike_rate := (SELECT COUNT(ID) FROM vwTRACPersistence WHERE DateTimeOfStrike >= LOCALTIMESTAMP - (TRAC_UPDATE_TIME || ' MINUTES')::INTERVAL AND (X >= strikes_header.X AND X < strikes_header.X + TRAC_FULL) AND (Y >= strikes_header.Y AND Y < strikes_header.Y + TRAC_FULL));
peak_strike_rate := (SELECT MAX(StrikeCount) FROM vwTRACStrikesPeak WHERE (MinX >= strikes_header.X AND MinX < strikes_header.X + TRAC_FULL) AND (MinY >= strikes_header.Y AND MinY < strikes_header.Y + TRAC_FULL));
ELSIF detectionmethod = 1 THEN
first_recorded_activity := (SELECT MIN(DateTimeOfStrike) AS FirstRecordedActivity FROM vwTRACPersistence WHERE (X >= strikes_header.X - TRAC_HALF AND X < strikes_header.X + TRAC_HALF) AND (Y >= strikes_header.Y - TRAC_HALF AND Y < strikes_header.Y + TRAC_HALF));
last_recorded_activity := (SELECT MAX(DateTimeOfStrike) AS LastRecordedActivity FROM vwTRACPersistence WHERE (X >= strikes_header.X - TRAC_HALF AND X < strikes_header.X + TRAC_HALF) AND (Y >= strikes_header.Y - TRAC_HALF AND Y < strikes_header.Y + TRAC_HALF));
current_strike_rate := (SELECT COUNT(ID) FROM vwTRACPersistence WHERE DateTimeOfStrike >= LOCALTIMESTAMP - (TRAC_UPDATE_TIME || ' MINUTES')::INTERVAL AND (X >= strikes_header.X - TRAC_HALF AND X < strikes_header.X + TRAC_HALF) AND (Y >= strikes_header.Y - TRAC_HALF AND Y < strikes_header.Y + TRAC_HALF));
peak_strike_rate := (SELECT MAX(StrikeCount) FROM vwTRACStrikesPeak WHERE (MinX >= strikes_header.X - TRAC_HALF AND MinX < strikes_header.X + TRAC_HALF) AND (MinY >= strikes_header.Y - TRAC_HALF AND MinY < strikes_header.Y + TRAC_HALF));
END IF;
IF peak_strike_rate = 0 THEN
peak_strike_rate := current_strike_rate;
END IF;
guid := encode(digest(concat(strikes_header.X, strikes_header.X + TRAC_FULL, strikes_header.Y, strikes_header.Y + TRAC_FULL, first_recorded_activity), 'sha1'), 'hex');
-- Pick the middle eight characters since we don't have CRC32, we just need it unique for the session
guid_ss := (length(guid) / 2) - 4;
crc32 := substring(guid from guid_ss for 8);
RAISE NOTICE 'DEBUG: guid = %%, guid_ss = %%, crc32 = %%', guid, guid_ss, crc32;
-- Since we have the strike rate we can determine the classification of the storm
intensity_class := 'N/A';
intensity_trend := 'N/A';
intensity_trend_symbol := '';
If current_strike_rate < 10 THEN
intensity_class := 'Very Weak';
ELSIF current_strike_rate < 20 THEN
intensity_class := 'Weak';
ELSIF current_strike_rate < 40 THEN
intensity_class := 'Moderate';
ELSIF current_strike_rate < 50 THEN
intensity_class := 'Strong';
ELSIF current_strike_rate < 60 THEN
intensity_class := 'Very Strong';
ELSE
intensity_class := 'Severe';
END IF;
-- Calculate the trend by counting the rises and the falls based on the average strike rate, not the best way but can be improved later
rises := 0;
falls := 0;
IF detectionmethod = 0 THEN
average_strike_count := (SELECT SUM(StrikeCount) / COUNT(*) FROM vwTRACStrikesPeak WHERE (MinX >= strikes_header.X AND MinX < strikes_header.X + TRAC_FULL) AND (MinY >= strikes_header.Y AND MinY < strikes_header.Y + TRAC_FULL));
ELSIF detectionmethod = 1 THEN
average_strike_count := (SELECT SUM(StrikeCount) / COUNT(*) FROM vwTRACStrikesPeak WHERE (MinX >= strikes_header.X - TRAC_HALF AND MinX < strikes_header.X + TRAC_HALF) AND (MinY >= strikes_header.Y - TRAC_HALF AND MinY < strikes_header.Y + TRAC_HALF));
END IF;
DROP TABLE IF EXISTS tmpStrikesTrend;
IF detectionmethod = 0 THEN
CREATE TEMPORARY TABLE tmpStrikesTrend AS
SELECT StrikeCount, PeakTime
FROM vwTRACStrikesPeak
WHERE (MinX >= strikes_header.X AND MinX < strikes_header.X + TRAC_FULL) AND (MinY >= strikes_header.Y AND MinY < strikes_header.Y + TRAC_FULL)
;
ELSIF detectionmethod = 1 THEN
CREATE TEMPORARY TABLE tmpStrikesTrend AS
SELECT StrikeCount, PeakTime
FROM vwTRACStrikesPeak
WHERE (MinX >= strikes_header.X - TRAC_HALF AND MinX < strikes_header.X + TRAC_HALF) AND (MinY >= strikes_header.Y - TRAC_HALF AND MinY < strikes_header.Y + TRAC_HALF)
;
END IF;
FOR trend IN SELECT StrikeCount
FROM tmpStrikesTrend
ORDER BY PeakTime
LOOP
diff := trend.StrikeCount - average_strike_count;
IF diff > 0 THEN
rises := rises + 1;
ELSIF diff < 0 THEN
falls := falls + 1;
END IF;
END LOOP;
DROP TABLE IF EXISTS tmpStrikesTrend;
RAISE NOTICE 'DEBUG: Rises = %%, falls = %%', rises, falls;
IF rises > falls THEN
intensity_trend := 'Intensifying';
intensity_trend_symbol := '^';
ELSIF falls > rises THEN
intensity_trend := 'Weakening';
intensity_trend_symbol := '.';
ELSE
intensity_trend := 'No Change';
intensity_trend_symbol := '-';
END IF;
-- Strike rate amount (mainly for the progress bar)
amount := 0.;
IF current_strike_rate > 50 THEN
amount := 1.;
ELSE
amount := current_strike_rate / 50.;
END IF;
current_name := crc32 || intensity_trend_symbol || current_strike_rate;
RAISE NOTICE 'INFO: Storm name is %%', current_name;
-- Make log of the storm in the database
tracid := COALESCE((SELECT ID FROM tblTRACHeader WHERE GID = guid LIMIT 1), 0);
IF tracid = 0 THEN
-- Storm not found in database, add new entry
RAISE NOTICE 'INFO: Storm GUID %% not found in header, creating entry...', guid;
INSERT INTO tblTRACHeader(GID, CRC32, DateTimeOfDiscovery, Bearing, Distance, DetectionMethod)
VALUES(guid, crc32, first_recorded_activity, deg, abs_distance, 1);
tracid := COALESCE((SELECT ID FROM tblTRACHeader WHERE GID = guid LIMIT 1));
IF tracid = 0 THEN
RAISE NOTICE 'WARN: Failed to locate the newly created record for storm ID %%', guid;
END IF;
END IF;
-- Double-check
IF tracid > 0 THEN
INSERT INTO tblTRACDetails(HeaderID, DateTimeOfReading, DateTimeOfLastStrike, CurrentStrikeRate, TotalStrikes, Intensity)
VALUES(tracid, LOCALTIMESTAMP, last_recorded_activity, current_strike_rate, total_count, intensity_trend);
END IF;
RAISE NOTICE 'DEBUG: total_count = %%, trac_most_active_distance = %%', total_count, trac_most_active_distance;
IF total_count > trac_most_active_distance THEN
trac_most_active := current_name;
trac_most_active_distance := abs_distance;
UPDATE tblTRACStatus SET MostActive = trac_most_active, MostActiveDistance = trac_most_active_distance;
END IF;
RAISE NOTICE 'DEBUG: abs_distance = %%, trac_closest_distance = %%', abs_distance, trac_closest_distance;
IF abs_distance < trac_closest_distance THEN
trac_closest := current_name;
trac_closest_distance := abs_distance;
UPDATE tblTRACStatus SET Closest = trac_closest, ClosestDistance = trac_closest_distance;
END IF;
-- Now for client purposes
INSERT INTO tblTRACStorms(X, Y, XOffset, YOffset, Name, Intensity, Distance)
VALUES(strikes_header.X, strikes_header.Y, x_offset, y_offset, current_name, amount, abs_distance);
END IF;
END IF;
END LOOP;
-- Clean up
DROP TABLE IF EXISTS tmpStrikesHeader;
DROP TABLE IF EXISTS tmpStrikesDetails;
DROP TABLE IF EXISTS tmpStrikesTrend;
-- Return
RAISE NOTICE 'TRAC has found %% storms.', storms_found;
RETURN storms_found;
END
$$ LANGUAGE plpgsql;
"""
self.db.executeSQLCommand(s, conn = myconn)
###########
# Updates #
###########
if self.DEBUG_MODE:
self.log.info("Updating data...")
curr_db_version = int(self.ifNoneReturnZero(self.db.danLookup("DatabaseVersion", "tblSystem", "", conn = myconn)))
if curr_db_version < self.DB_VERSION:
# Update needed
self.db.executeSQLCommand("ALTER TABLE tblElectricFieldStrength ALTER COLUMN kVm TYPE decimal(4,2)", conn = myconn)
self.db.executeSQLCommand("DROP INDEX IF EXISTS StrikeGridRef", conn = myconn)
self.db.executeSQLCommand("DROP INDEX IF EXISTS GridRef0", conn = myconn)
self.db.executeSQLCommand("DROP INDEX IF EXISTS GridRef1", conn = myconn)
self.db.executeSQLCommand("DROP INDEX IF EXISTS GridRef2", conn = myconn)
self.db.executeSQLCommand("DROP INDEX IF EXISTS GridRef3", conn = myconn)
self.db.executeSQLCommand("DROP INDEX IF EXISTS GridRef4", conn = myconn)
self.db.executeSQLCommand("DROP INDEX IF EXISTS GridRef5", conn = myconn)
self.db.executeSQLCommand("DROP TABLE IF EXISTS tblStrikesPersistence0 CASCADE", conn = myconn)
self.db.executeSQLCommand("DROP TABLE IF EXISTS tblStrikesPersistence1 CASCADE", conn = myconn)
self.db.executeSQLCommand("DROP TABLE IF EXISTS tblStrikesPersistence2 CASCADE", conn = myconn)
self.db.executeSQLCommand("DROP TABLE IF EXISTS tblStrikesPersistence3 CASCADE", conn = myconn)
self.db.executeSQLCommand("DROP TABLE IF EXISTS tblStrikesPersistence4 CASCADE", conn = myconn)
self.db.executeSQLCommand("DROP TABLE IF EXISTS tblStrikesPersistence5 CASCADE", conn = myconn)
self.db.executeSQLCommand("DROP VIEW IF EXISTS vwStrikesPeak CASCADE", conn = myconn)
# Finally, update the db version
self.db.executeSQLCommand("UPDATE tblSystem SET DatabaseVersion = %(DatabaseVersion)s", {"DatabaseVersion": self.DB_VERSION}, myconn)
self.db.disconnectFromDatabase(myconn)
def xmlXRSettingsRead(self):
    """Load the server settings from the XML settings file, if it exists.

    Each <Setting/> element carries one attribute; the attribute name
    selects which instance variable is populated and how the string
    value is converted (int, str, or boolean via self.cBool).  Unknown
    attributes are logged and ignored.
    """
    if self.DEBUG_MODE:
        self.log.info("Starting...")

    if not self.os.path.exists(self.XML_SETTINGS_FILE):
        return

    # Attribute-name -> instance-variable maps, grouped by how the raw
    # string value must be converted.
    int_settings = {
        "ServerPort": "SERVER_PORT",
        "LD250Bits": "LD250_BITS",
        "LD250Squelch": "LD250_SQUELCH",
        "LD250Speed": "LD250_SPEED",
        "LD250StopBits": "LD250_STOPBITS",
        "EFM100Bits": "EFM100_BITS",
        "EFM100Speed": "EFM100_SPEED",
        "EFM100StopBits": "EFM100_STOPBITS",
        "CloseDistance": "CLOSE_DISTANCE",
        "TRACDetectionMethod": "TRAC_DETECTION_METHOD",
        "TRACSensitivity": "TRAC_SENSITIVITY",
        "TRACStormWidth": "TRAC_STORM_WIDTH",
        "TRACUpdateTime": "TRAC_UPDATE_TIME",
    }
    str_settings = {
        "LD250Parity": "LD250_PARITY",
        "LD250Port": "LD250_PORT",
        "EFM100Parity": "EFM100_PARITY",
        "EFM100Port": "EFM100_PORT",
        "PostgreSQLDatabase": "POSTGRESQL_DATABASE",
        "PostgreSQLPassword": "POSTGRESQL_PASSWORD",
        "PostgreSQLServer": "POSTGRESQL_SERVER",
        "PostgreSQLUsername": "POSTGRESQL_USERNAME",
        "StrikeCopyright": "STRIKE_COPYRIGHT",
    }
    bool_settings = {
        "LD250UseUncorrectedStrikes": "LD250_USE_UNCORRECTED_STRIKES",
        "DebugMode": "DEBUG_MODE",
    }

    xmldoc = self.minidom.parse(self.XML_SETTINGS_FILE)

    for node in xmldoc.getElementsByTagName("Setting"):
        for key in node.attributes.keys():
            val = str(node.attributes[key].value)

            if key in int_settings:
                setattr(self, int_settings[key], int(val))
            elif key in str_settings:
                setattr(self, str_settings[key], val)
            elif key in bool_settings:
                setattr(self, bool_settings[key], self.cBool(val))
            else:
                self.log.warn("XML setting attribute \"%s\" isn't known. Ignoring..." % key)
def xmlXRSettingsWrite(self):
    """Write the current server settings out to the XML settings file.

    Only runs when the file does not already exist (first start-up);
    an existing settings file is never overwritten.  One <Setting/>
    element is written per option so the file is easy to hand-edit.
    """
    if self.DEBUG_MODE:
        self.log.info("Starting...")

    if self.os.path.exists(self.XML_SETTINGS_FILE):
        return

    xmldoc = self.minidom.Document()

    settings = xmldoc.createElement("SXRServer")
    xmldoc.appendChild(settings)

    # (attribute name, current value) in the order the file has always
    # been written — keeps the output identical to previous releases.
    pairs = [
        ("ServerPort", self.SERVER_PORT),
        ("LD250Port", self.LD250_PORT),
        ("LD250Speed", self.LD250_SPEED),
        ("LD250Bits", self.LD250_BITS),
        ("LD250Parity", self.LD250_PARITY),
        ("LD250StopBits", self.LD250_STOPBITS),
        ("LD250Squelch", self.LD250_SQUELCH),
        ("LD250UseUncorrectedStrikes", self.LD250_USE_UNCORRECTED_STRIKES),
        ("EFM100Port", self.EFM100_PORT),
        ("EFM100Speed", self.EFM100_SPEED),
        ("EFM100Bits", self.EFM100_BITS),
        ("EFM100Parity", self.EFM100_PARITY),
        ("EFM100StopBits", self.EFM100_STOPBITS),
        ("PostgreSQLServer", self.POSTGRESQL_SERVER),
        ("PostgreSQLDatabase", self.POSTGRESQL_DATABASE),
        ("PostgreSQLUsername", self.POSTGRESQL_USERNAME),
        ("PostgreSQLPassword", self.POSTGRESQL_PASSWORD),
        ("CloseDistance", self.CLOSE_DISTANCE),
        ("TRACDetectionMethod", self.TRAC_DETECTION_METHOD),
        ("TRACSensitivity", self.TRAC_SENSITIVITY),
        ("TRACStormWidth", self.TRAC_STORM_WIDTH),
        ("TRACUpdateTime", self.TRAC_UPDATE_TIME),
        ("StrikeCopyright", self.STRIKE_COPYRIGHT),
        ("DebugMode", self.DEBUG_MODE),
    ]

    for name, value in pairs:
        var = xmldoc.createElement("Setting")
        var.setAttribute(name, str(value))
        settings.appendChild(var)

    # open() instead of the Python-2-only file() builtin; the handle is
    # now opened only once the document is ready and is closed even if
    # the write fails.
    xmloutput = open(self.XML_SETTINGS_FILE, "w")

    try:
        xmloutput.write(xmldoc.toprettyxml())
    finally:
        xmloutput.close()
class TRAC():
    """Thin driver for the database-side TRAC storm-tracking engine.

    The tracking logic itself lives in the fnTRAC() PL/pgSQL function;
    this class merely invokes it and logs the reported storm count.
    """
    def __init__(self, database_server, database_database, database_username, database_password, trac_detection_method, debug_mode):
        self.DEBUG_MODE = debug_mode
        self.TRAC_DETECTION_METHOD = trac_detection_method

        self.db = Database(database_server, database_database, database_username, database_password, debug_mode)
        self.log = DanLog("TRAC")

    def run(self):
        """Call fnTRAC() with the configured detection method and log the result."""
        if self.DEBUG_MODE:
            self.log.info("Starting...")

        conn = []
        self.db.connectToDatabase(conn)

        result = self.db.executeSQLQuery("SELECT fnTRAC(%(A)s)", {"A": self.TRAC_DETECTION_METHOD}, conn)

        if result is None:
            # A None result set indicates the query itself failed.
            if self.DEBUG_MODE:
                self.log.warn("TRAC failed to run, review any SQL errors.")
        else:
            for row in result:
                self.log.info("TRAC has detected %d storms." % int(row[0]))
                break

        self.db.disconnectFromDatabase(conn)
class XRXMLRPCFunctions(xmlrpc.XMLRPC):
    """XML-RPC interface exposed to StormForce XR clients.

    Every xmlrpc_* method defers its database work to a Twisted worker
    thread and returns the result set as an "SXRDataSet" XML document,
    gzip-compressed and wrapped in an xmlrpclib Binary.
    """
    def __init__(self, database_server, database_database, database_username, database_password, debug_mode):
        xmlrpc.XMLRPC.__init__(self)

        from danlog import DanLog
        from twisted.internet import threads
        from xml.dom import minidom
        from StringIO import StringIO

        import gzip
        import xmlrpclib

        # Module references are held on the instance, matching the
        # convention used by the rest of this file.
        self.DEBUG_MODE = debug_mode
        self.db = Database(database_server, database_database, database_username, database_password, debug_mode)
        self.gzip = gzip
        self.log = DanLog("XRXMLRPCFunctions")
        self.minidom = minidom
        self.stringio = StringIO
        self.twisted_internet_threads = threads
        self.xmlrpclib = xmlrpclib

    def compressData(self, data):
        """Gzip *data* at maximum compression and return it as an XML-RPC Binary."""
        sink = self.stringio()

        packer = self.gzip.GzipFile(fileobj = sink, mode = "wb", compresslevel = 9)
        packer.write(data)
        packer.close()

        return self.xmlrpclib.Binary(sink.getvalue())

    def _queryToCompressedXml(self, sql, columns, single_row = False):
        """Run *sql* and serialise the rows as a compressed SXRDataSet.

        Each record becomes one <Row/> element whose attributes are
        named by *columns* (in result-set order).  When *single_row* is
        set only the first record is serialised.
        """
        myconn = []
        self.db.connectToDatabase(myconn)
        rows = self.db.executeSQLQuery(sql, conn = myconn)
        self.db.disconnectFromDatabase(myconn)

        xmldoc = self.minidom.Document()
        sxrdataset = xmldoc.createElement("SXRDataSet")
        xmldoc.appendChild(sxrdataset)

        for row in rows:
            element = xmldoc.createElement("Row")

            for position, column in enumerate(columns):
                element.setAttribute(column, str(row[position]))

            sxrdataset.appendChild(element)

            if single_row:
                break

        return self.compressData(xmldoc.toprettyxml())

    def xmlrpc_fieldCounter(self):
        self.log.info("Starting...")

        def cb():
            return self._queryToCompressedXml("SELECT kVm FROM tblElectricFieldStrength ORDER BY ID DESC LIMIT 1", ["kVm"], single_row = True)

        return self.twisted_internet_threads.deferToThread(cb)

    xmlrpc_fieldCounter.help = "Returns the electric field strength from the Boltek EFM-100."
    xmlrpc_fieldCounter.signature = [["SXRDataSet[kVm]", "none"]]

    def xmlrpc_lastHourOfStrikesByMinute(self):
        self.log.info("Starting...")

        def cb():
            return self._queryToCompressedXml("SELECT Minute, StrikeAge, NumberOfStrikes FROM vwStrikesSummaryByMinute ORDER BY Minute", ["Minute", "StrikeAge", "NumberOfStrikes"])

        return self.twisted_internet_threads.deferToThread(cb)

    xmlrpc_lastHourOfStrikesByMinute.help = "Returns the number of strikes in the last hour grouped per minute, the strike age is represented in minutes."
    xmlrpc_lastHourOfStrikesByMinute.signature = [["SXRDataSet[Minute, StrikeAge, NumberOfStrikes]", "none"]]

    def xmlrpc_serverDetails(self):
        self.log.info("Starting...")

        def cb():
            # Written out long-hand (rather than via _queryToCompressedXml)
            # to keep the per-row "Row..." log message.
            myconn = []
            self.db.connectToDatabase(myconn)
            rows = self.db.executeSQLQuery("SELECT ServerStarted, ServerApplication, ServerCopyright, ServerVersion, StrikeCopyright FROM tblServerDetails LIMIT 1", conn = myconn)
            self.db.disconnectFromDatabase(myconn)

            xmldoc = self.minidom.Document()
            sxrdataset = xmldoc.createElement("SXRDataSet")
            xmldoc.appendChild(sxrdataset)

            for row in rows:
                self.log.info("Row...")

                element = xmldoc.createElement("Row")

                for position, column in enumerate(["ServerStarted", "ServerApplication", "ServerCopyright", "ServerVersion", "StrikeCopyright"]):
                    element.setAttribute(column, str(row[position]))

                sxrdataset.appendChild(element)
                break

            return self.compressData(xmldoc.toprettyxml())

        return self.twisted_internet_threads.deferToThread(cb)

    xmlrpc_serverDetails.help = "Returns specific details about the server StormForce XR is running on."
    xmlrpc_serverDetails.signature = [["SXRDataSet[ServerStarted, ServerApplication, ServerCopyright, ServerVersion, StrikeCopyright]", "none"]]

    def xmlrpc_serverUptime(self):
        self.log.info("Starting...")

        def cb():
            return self._queryToCompressedXml("SELECT DATE_PART('epoch', ServerStarted) AS ServerStartedUT FROM tblServerDetails LIMIT 1", ["ServerStartedUT"], single_row = True)

        return self.twisted_internet_threads.deferToThread(cb)

    xmlrpc_serverUptime.help = "Returns the server started date in UNIX timestamp format."
    xmlrpc_serverUptime.signature = [["SXRDataSet[ServerStartedUT]", "none"]]

    def xmlrpc_strikeCounter(self):
        self.log.info("Starting...")

        def cb():
            return self._queryToCompressedXml("SELECT CloseMinute, CloseTotal, NoiseMinute, NoiseTotal, StrikesMinute, StrikesTotal, StrikesOutOfRange FROM tblStrikeCounter LIMIT 1", ["CloseMinute", "CloseTotal", "NoiseMinute", "NoiseTotal", "StrikesMinute", "StrikesTotal", "StrikesOutOfRange"], single_row = True)

        return self.twisted_internet_threads.deferToThread(cb)

    xmlrpc_strikeCounter.help = "Returns the strike counters."
    xmlrpc_strikeCounter.signature = [["SXRDataSet[CloseMinute, CloseTotal, NoiseMinute, NoiseTotal, StrikesMinute, StrikesTotal, StrikesOutOfRange]", "none"]]

    def xmlrpc_strikePersistence(self):
        self.log.info("Starting...")

        def cb():
            return self._queryToCompressedXml("SELECT DISTINCT StrikeAge, X, Y, X - 300 AS RelativeX, Y - 300 AS RelativeY, DateTimeOfStrike FROM vwStrikesPersistence ORDER BY DateTimeOfStrike ASC", ["StrikeAge", "X", "Y", "RelativeX", "RelativeY", "DateTimeOfStrike"])

        return self.twisted_internet_threads.deferToThread(cb)

    xmlrpc_strikePersistence.help = "Returns the persistence data based on the current time minus one hour, remember that depending on the server settings the X,Y co-ords maybe using uncorrected strike factors (default is to use corrected strike factors). The relative values are based on the centre of the map and the strike age is represented in seconds."
    xmlrpc_strikePersistence.signature = [["SXRDataSet[StrikeAge, X, Y, RelativeX, RelativeY, DateTimeOfStrike]", "none"]]

    def xmlrpc_tracStatus(self):
        self.log.info("Starting...")

        def cb():
            return self._queryToCompressedXml("SELECT Version, Active, NoOfStorms, MostActive, MostActiveDistance, Closest, ClosestDistance, Width FROM tblTRACStatus LIMIT 1", ["Version", "Active", "NoOfStorms", "MostActive", "MostActiveDistance", "Closest", "ClosestDistance", "Width"], single_row = True)

        return self.twisted_internet_threads.deferToThread(cb)

    xmlrpc_tracStatus.help = "Returns the status of the TRAC engine."
    xmlrpc_tracStatus.signature = [["SXRDataSet[Version, Active, NoOfStorms, MostActive, MostActiveDistance, Closest, ClosestDistance, Width]", "none"]]

    def xmlrpc_tracStorms(self):
        self.log.info("Starting...")

        def cb():
            return self._queryToCompressedXml("SELECT X, Y, XOffset, YOffset, Name, Intensity, Distance FROM tblTRACStorms ORDER BY ID", ["X", "Y", "XOffset", "YOffset", "Name", "Intensity", "Distance"])

        return self.twisted_internet_threads.deferToThread(cb)

    xmlrpc_tracStorms.help = "Returns the storms TRAC is monitoring for drawing on-screen."
    xmlrpc_tracStorms.signature = [["SXRDataSet[X, Y, XOffset, YOffset, Name, Intensity, Distance]", "none"]]

    def xmlrpc_unitStatus(self):
        self.log.info("Starting...")

        def cb():
            return self._queryToCompressedXml("SELECT Hardware, SquelchLevel, UseUncorrectedStrikes, CloseAlarm, SevereAlarm, ReceiverLastDetected, ReceiverLost FROM vwUnitStatus ORDER BY Hardware", ["Hardware", "SquelchLevel", "UseUncorrectedStrikes", "CloseAlarm", "SevereAlarm", "ReceiverLastDetected", "ReceiverLost"])

        return self.twisted_internet_threads.deferToThread(cb)

    xmlrpc_unitStatus.help = "Returns information about the Boltek LD-250 and Boltek EFM-100."
    xmlrpc_unitStatus.signature = [["SXRDataSet[Hardware, SquelchLevel, UseUncorrectedStrikes, CloseAlarm, SevereAlarm, ReceiverLastDetected, ReceiverLost]", "none"]]
########
# Main #
########
# Script entry point (Python 2 syntax: comma `except` clause and `print`
# statement). Starts the SXR server and routes any startup failure to the
# DanLog logger when it came up, falling back to stdout otherwise.
if __name__ == "__main__":
    l = None

    try:
        from danlog import DanLog

        l = DanLog("Main")
        l.info("Preparing...")

        # SXRServer is defined earlier in this file; main() blocks until shutdown.
        sxr = SXRServer()
        sxr.main()
        sxr = None

    except Exception, ex:
        # Logger may not exist yet if the failure happened during import/setup.
        if l is not None:
            l.fatal(str(ex))
        else:
            print "Exception: %s" % str(ex)
|
import numpy as np
from collections import deque
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
import gym
# #############################################################################
# PARAMETERS
# #############################################################################
tensorboard_log_path = 'data/board/img'  # TensorBoard log directory (unused below)
net_name = 'eye2'
num_epochs = 1
batch_size = 1
train_size = 50000  # number of episodes to roll out
# train_size = 100
# #############################################################################
# 'ND-v0' is a custom environment; it must be registered with gym before
# this module is imported — TODO confirm registration location.
env = gym.make('ND-v0')
output_size = env.action_space.n  # number of actions
image_width = 28  # observations are treated as 28x28 images
input_size = image_width * image_width
def convertToOneHot(vector, num_classes=None):
    """Convert a 1-D array of non-negative integer labels to a one-hot matrix.

    :param vector: non-empty 1-D ``np.ndarray`` of integer class labels.
    :param num_classes: number of columns of the result; defaults to
        ``max(vector) + 1``.
    :return: int array of shape ``(len(vector), num_classes)`` with a single
        1 per row.
    """
    assert isinstance(vector, np.ndarray)
    assert len(vector) > 0

    if num_classes is None:
        num_classes = np.max(vector) + 1
    else:
        assert num_classes > 0
        # BUG FIX: must be strictly greater — labels index columns
        # 0..num_classes-1, so a label equal to num_classes would raise
        # IndexError below.
        assert num_classes > np.max(vector)

    result = np.zeros(shape=(len(vector), num_classes))
    result[np.arange(len(vector)), vector] = 1
    return result.astype(int)
def get_result_figure(input_data, actions):
    """Draw the observation image with a colored border indicating the action.

    Action 0 draws all four edges red, actions 1-4 draw the single
    corresponding edge red, action 5 (expand) all edges green, anything
    else all edges blue. Returns the matplotlib figure.
    """
    margin = 0
    line_thickness = 20
    lo, hi = margin, image_width - 1 - margin

    # Endpoints of the four border edges: right, bottom, left, top.
    x_int = [[hi, hi], [lo, hi], [lo, lo], [lo, hi]]
    y_int = [[lo, hi], [hi, hi], [lo, hi], [lo, lo]]

    # Flat input vector becomes a square image.
    image = input_data.reshape(image_width, image_width)

    fig = plt.figure(num=0)
    fig.clf()
    plt.imshow(image)
    ax = plt.gca()

    def draw_edge(edge, color):
        ax.add_line(mlines.Line2D(x_int[edge], y_int[edge],
                                  color=color, linewidth=line_thickness))

    cur_action = np.argmax(actions)
    if cur_action == 0:
        for edge in range(4):
            draw_edge(edge, 'r')
    elif cur_action < 5:
        draw_edge(cur_action - 1, 'r')
    elif cur_action == 5:  # Expand
        for edge in range(4):
            draw_edge(edge, 'g')
    else:
        for edge in range(4):
            draw_edge(edge, 'b')
    return fig
def fig2rgb_array(fig, expand=True):
    """Render a matplotlib figure into an RGB uint8 array.

    :param fig: matplotlib figure to rasterize.
    :param expand: if True, prepend a batch dimension of size 1.
    :return: array of shape ``(h, w, 3)`` or ``(1, h, w, 3)``.
    """
    fig.canvas.draw()
    buf = fig.canvas.tostring_rgb()
    ncols, nrows = fig.canvas.get_width_height()
    shape = (nrows, ncols, 3) if not expand else (1, nrows, ncols, 3)
    # BUG FIX: np.fromstring is deprecated (and removed for binary input in
    # modern NumPy); np.frombuffer is the zero-copy replacement.
    return np.frombuffer(buf, dtype=np.uint8).reshape(shape)
def make_labels(training_batch, hit_range=1):
    """Derive a one-hot supervision action for the first buffered step.

    Actions (by index): 0 stop, 1 right, 2 down, 3 left, 4 up, 5 shrink/seek,
    6 expand — presumably matching env.step's expectation; TODO confirm.
    """
    # Get stored information from the buffer
    state, cur_x, cur_y, gt_x, gt_y, scale, whole_img = training_batch[0]
    # NOTE(review): cur_class is computed below but never returned or used.
    cur_class = [0, 0]
    cur_action = [0, 0, 0, 0, 0, 0, 0]
    # Offset from the current window center to the ground-truth point.
    d_x, d_y = gt_x-(cur_x+0.5*scale), gt_y-(cur_y+0.5*scale)
    # Ground-truth box is a fixed 28x28 window centered on (gt_x, gt_y).
    gt_box = [(gt_x-0.5*28), (gt_y-0.5*28), (gt_x-0.5*28)+28, (gt_y-0.5*28)+28]
    pt_box = [cur_x, cur_y, cur_x+scale, cur_y+scale]
    cover = env.get_cover(pt_box, gt_box)

    # class label: hit if squared center distance is within hit_range^2.
    distance = d_x * d_x + d_y * d_y
    not_hit = 0 if hit_range * hit_range > distance else 1
    cur_class[not_hit] = 1
    # print(cover)
    # print(scale)

    # action label
    if cover > 0.5:
        if 1 == cover:
            # Fully covered: stop when the window already spans the image,
            # otherwise expand.
            if image_width == scale:
                action_label = 0
            else:
                action_label = 6
        else:
            # Partially covered: move randomly along one axis toward the target.
            if np.random.rand(1) > 0.5:
                if d_x > 0:
                    action_label = 1  # right
                else:
                    action_label = 3  # left
            else:
                if d_y > 0:
                    action_label = 2  # down
                else:
                    action_label = 4  # up
    else:
        action_label = 5

    # stacking label data
    cur_action[action_label] = 1
    return cur_action
def main():
    """Roll out train_size episodes, saving debug images and state samples."""
    training_batch = []
    # =================================================================
    # image visualize
    # =================================================================
    # prepare the plot
    fig = get_result_figure(np.zeros((1, input_size)), [1, 0, 0, 0, 0, 0, 0])
    # =================================================================
    # Training loop
    # =================================================================
    count_iter = 0  # NOTE(review): never used
    buffer = deque()
    sample_buffer = deque()
    for episode in range(train_size):
        done = 0
        step_count = 0
        state, cur_x, cur_y, gt_x, gt_y, scale, whole_image = env.reset()
        # plt.imshow(whole_image)
        # plt.plot(gt_x,gt_y, 'r+')
        # plt.savefig('img/'+str(episode)+'_'+'0'+'.png')
        while not done:
            buffer.append((state, cur_x, cur_y, gt_x, gt_y, scale, whole_image))
            sample_buffer.append((state, cur_x, cur_y, gt_x, gt_y, scale))
            # Supervision label comes from the just-appended step.
            action = make_labels(buffer)
            new_state, _, done, new_x, new_y, new_scale = env.step(action)
            state = new_state
            cur_x = new_x
            cur_y = new_y
            scale = new_scale
            step_count += 1
            if step_count > 100:
                # Episode running long: dump a debug image and box diagnostics.
                fig = get_result_figure(state, action)
                name = 'img/'+str(episode)+'_'+str(step_count)+'.png'
                fig.savefig(name)
                gt_box = [(gt_x - 0.5 * 28), (gt_y - 0.5 * 28), (gt_x - 0.5 * 28) + 28, (gt_y - 0.5 * 28) + 28]
                pt_box = [cur_x, cur_y, cur_x + scale, cur_y + scale]
                cover = env.get_cover(pt_box, gt_box)
                print(gt_box)
                print(pt_box)
            # make_labels only reads buffer[0] — clear each step so it sees
            # the latest state; TODO confirm intended placement.
            buffer.clear()
            # if 1 == done:
            #     break
            #
            # if step_count>100:
            #     break
        print(episode)
        if 0 == episode % 1000:
            # Periodic checkpoint of collected samples.
            sample = np.asarray(sample_buffer)
            name = 'data/' + str(episode) + '.npy'
            np.save(name, sample)
            # save point
# Script entry point.
if __name__ == "__main__":
    main()
|
<gh_stars>0
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_ALPHA, GraphFst
from nemo_text_processing.text_normalization.en.utils import get_abs_path, load_labels
# pynini is an optional dependency; record availability so callers can skip
# FST construction when it is missing.
try:
    import pynini
    from pynini.lib import pynutil

    PYNINI_AVAILABLE = True
except (ModuleNotFoundError, ImportError):
    PYNINI_AVAILABLE = False
class RomanFst(GraphFst):
    """
    Finite state transducer for classifying roman numbers:
        e.g. "IV" -> tokens { roman { integer: "four" } }

    Args:
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
        lm: if True, skip the multi-digit cardinal/ordinal extensions
            (language-model mode) — TODO confirm against upstream NeMo.
    """

    def __init__(self, deterministic: bool = True, lm: bool = False):
        super().__init__(name="roman", kind="classify", deterministic=deterministic)

        # roman numeral -> spoken-form pairs, in file order.
        roman_dict = load_labels(get_abs_path("data/roman/roman_to_spoken.tsv"))
        default_graph = pynini.string_map(roman_dict).optimize()
        default_graph = pynutil.insert("integer: \"") + default_graph + pynutil.insert("\"")
        # First 19 entries — presumably numerals I..XIX; TODO confirm tsv order.
        graph_teens = pynini.string_map([x[0] for x in roman_dict[:19]]).optimize()

        # up to five digit roman numerals with a preceding name are converted to ordinal form
        names = get_names()
        graph = (
            pynutil.insert("key_the_ordinal: \"")
            + names
            + pynutil.insert("\"")
            + pynini.accep(" ")
            + graph_teens @ default_graph
        ).optimize()

        # single symbol roman numerals with preceding key words are converted to cardinal form
        key_words = pynini.string_map(load_labels(get_abs_path("data/roman/key_word.tsv"))).optimize()
        graph |= (
            pynutil.insert("key_cardinal: \"") + key_words + pynutil.insert("\"") + pynini.accep(" ") + default_graph
        ).optimize()

        if deterministic:
            # two digit roman numerals up to 49
            # NOTE(review): in this branch roman_to_cardinal appears unused —
            # confirm branch scoping against upstream NeMo.
            roman_to_cardinal = pynini.compose(
                pynini.closure(NEMO_ALPHA, 2),
                (
                    pynutil.insert("default_cardinal: \"default\" ")
                    + (pynini.string_map([x[0] for x in roman_dict[:50]]).optimize()) @ default_graph
                ),
            )
        elif not lm:
            # two or more digit roman numerals
            roman_to_cardinal = pynini.compose(
                pynini.closure(NEMO_ALPHA, 2),
                (
                    pynutil.insert("default_cardinal: \"default\" ")
                    + (pynini.string_map([x[0] for x in roman_dict[:50]]).optimize()) @ default_graph
                ),
            )
            # convert three digit roman or up with suffix to ordinal
            roman_to_ordinal = pynini.compose(
                pynini.closure(NEMO_ALPHA, 3),
                (pynutil.insert("default_ordinal: \"default\" ") + graph_teens @ default_graph + pynutil.delete("th")),
            )
            graph |= roman_to_cardinal | roman_to_ordinal

        # # add a higher weight when roman number consists of a single symbol
        # graph = pynini.compose(pynini.closure(NEMO_CHAR, 2), graph) | pynutil.add_weight(
        #     pynini.compose(NEMO_CHAR, graph), 101
        # )
        # graph = graph.optimize() + pynini.closure(pynutil.delete("."), 0, 1)
        # graph = pynutil.insert("integer: \"") + graph + pynutil.insert("\"")
        graph = self.add_tokens(graph)
        self.fst = graph.optimize()
def get_names():
    """
    Returns the graph that matches common male and female names,
    in both the file's original casing and fully upper-cased form.
    """
    graphs = []
    for tsv in ("data/roman/male.tsv", "data/roman/female.tsv"):
        labels = load_labels(get_abs_path(tsv))
        # Also accept the fully upper-cased variant of every name.
        labels.extend([[entry[0].upper()] for entry in labels])
        graphs.append(pynini.string_map(labels).optimize())
    return graphs[0] | graphs[1]
|
'Volume generation and augmentation'
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# MIT License
import keras
import os.path
import numpy as np
from tqdm import tqdm
from sklearn.neighbors import KDTree
from sklearn.decomposition import PCA
from enzynet.PDB import PDB_backbone
# Precomputed coordinate/weight files and raw PDB files live next to the
# package, resolved relative to this module's location.
current_directory = os.path.dirname(os.path.abspath(__file__))
precomputed_path = os.path.join(current_directory, '../files/precomputed/')
PDB_path = os.path.join(current_directory, '../files/PDB/')
class VolumeDataGenerator(keras.utils.Sequence):
    """
    Generates batches of volumes containing 3D structure of enzymes as well
    as their associated class labels on the fly.

    To be passed as argument in the fit_generator function of Keras.

    Parameters
    ----------
    v_size : int (optional, default is 32)
        Size of each side of the output volumes.
    flips : tuple of floats (optional, default is (0.2, 0.2, 0.2))
        Probabilities that the volumes are flipped respectively with respect
        to x, y, and z.
    batch_size : int (optional, default is 32)
        Number of samples in output array of each iteration of the 'generate'
        method.
    directory_precomputed : string (optional, default points to 'files/precomputed')
        Path of the precomputed files.
    directory_pdb : string (optional, default points to 'files/PDB')
        Path of the PDB files.
    labels : dict
        Dictionary linking PDB IDs to their labels.
    list_enzymes : list of strings
        List of enzymes to generate.
    shuffle : boolean (optional, default is True)
        If True, shuffles order of exploration.
    p : int (optional, default is 5)
        Interpolation of enzymes with p added coordinates between each pair
        of consecutive atoms.
    max_radius : float (optional, default is 40)
        Maximum radius of sphere that will completely fit into the volume.
    noise_treatment : boolean (optional, default is False)
        If True, voxels with no direct neighbor will be deleted.
    weights : list of strings (optional, default is [])
        List of weights (among the values ['hydropathy', 'charge']) to consider
        as additional channels.
    scaling_weights : boolean (optional, default is True)
        If True, divides all weights by the weight that is maximum in absolute
        value.

    Example
    -------
    >>> from enzynet.volume import VolumeDataGenerator
    >>> from enzynet.tools import read_dict
    >>> labels = read_dict('../datasets/dataset_single.csv')
    >>> partition_red = read_dict('../../datasets/partition_single_red.csv')
    >>> exec("partition_red['train'] = " + partition_red['train'])
    >>> generator = VolumeDataGenerator(partition_red['train'], labels,
                                        v_size=64, flips=(0.2, 0.2, 0.2),
                                        batch_size=32, shuffle=True, p=5,
                                        max_radius=40, noise_treatment=False,
                                        weights=[], scaling_weights=True)
    """
    def __init__(self, list_enzymes, labels, v_size=32, flips=(0.2, 0.2, 0.2), batch_size=32,
                 directory_precomputed=precomputed_path, directory_pdb=PDB_path,
                 shuffle=True, p=5, max_radius=40, noise_treatment=False,
                 weights=None, scaling_weights=True):
        'Initialization'
        # BUG FIX: a `weights=[]` default is a shared mutable object; default
        # to None and create a fresh list per instance (weights=[] callers
        # behave exactly as before).
        weights = [] if weights is None else weights
        self.batch_size = batch_size
        self.directory_precomputed = directory_precomputed
        self.directory_pdb = directory_pdb
        self.flips = flips
        self.labels = labels
        self.list_enzymes = list_enzymes
        self.max_radius = max_radius
        self.noise_treatment = noise_treatment
        # At least one channel (binary occupancy) even without extra weights.
        self.n_channels = max(1, len(weights))
        self.p = p
        self.scaling_weights = scaling_weights
        self.shuffle = shuffle
        self.v_size = v_size
        self.weights = weights
        self.on_epoch_end()

    def check_precomputed(self):
        'Checks if all coordinates and weights have been precomputed, and precomputes them otherwise'
        # Initialization
        list_enzymes = list(self.labels)
        counter = 0

        # Loop over all enzymes
        for pdb_id in tqdm(list_enzymes):
            # Expected precomputed file paths: one coords file plus one file
            # per requested weight channel.
            names = [precomputed_name(pdb_id, self.directory_precomputed, 'coords', self.p)] + \
                    [precomputed_name(pdb_id, self.directory_precomputed, 'weights', self.p,
                                      weight, self.scaling_weights)
                     for weight in self.weights]

            # Precompute only when at least one file is missing (generator
            # form avoids materializing the boolean list).
            if not all(os.path.isfile(name) for name in names):
                save_coords_weights(pdb_id, self.weights, self.p, self.scaling_weights,
                                    self.directory_pdb, self.directory_precomputed)
                counter += 1
        print("Had to compute files of {0} enzymes.".format(counter))

    def on_epoch_end(self):
        'Updates indexes after each epoch'
        self.indexes = np.arange(len(self.list_enzymes))
        if self.shuffle:  # idiomatic truth test instead of `== True`
            np.random.shuffle(self.indexes)

    def __len__(self):
        'Denotes the number of batches per epoch'
        return int(np.floor(len(self.list_enzymes) / self.batch_size))

    def __getitem__(self, index):
        'Generate one batch of data'
        # Generate indexes of the batch
        indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]

        # Find list of IDs
        list_enzymes_temp = [self.list_enzymes[k] for k in indexes]

        # Generate data
        X, y = self.__data_augmentation(list_enzymes_temp)
        return X, y

    def __data_augmentation(self, list_enzymes_temp):
        'Returns augmented data with batch_size enzymes'  # X : (n_samples, v_size, v_size, v_size, n_channels)
        # Initialization
        X = np.empty((self.batch_size,  # n_enzymes
                      self.v_size,      # dimension w.r.t. x
                      self.v_size,      # dimension w.r.t. y
                      self.v_size,      # dimension w.r.t. z
                      self.n_channels)) # n_channels
        y = np.empty((self.batch_size), dtype=int)

        # Computations
        for i in range(self.batch_size):
            # Store class
            y[i] = self.labels[list_enzymes_temp[i]]

            # Load precomputed coordinates
            coords = load_coords(list_enzymes_temp[i], self.p, self.directory_precomputed)
            coords = coords_center_to_zero(coords)
            coords = adjust_size(coords, v_size=self.v_size, max_radius=self.max_radius)

            # Get weights
            local_weights = []
            for weight in self.weights:
                local_weight = load_weights(list_enzymes_temp[i], weight, self.p,
                                            self.scaling_weights, self.directory_precomputed)  # Compute extended weights
                local_weights += [local_weight]  # Store

            # PCA aligns the principal axes of the enzyme with the volume axes.
            coords = PCA(n_components=3).fit_transform(coords)

            # Do flip
            coords_temp = flip_around_axis(coords, axis=self.flips)

            if len(self.weights) == 0:
                # Convert to volume and store
                X[i, :, :, :, 0] = coords_to_volume(coords_temp, self.v_size,
                                                    noise_treatment=self.noise_treatment)
            else:
                # Compute to weights of volume and store
                for k in range(self.n_channels):
                    X[i, :, :, :, k] = weights_to_volume(coords_temp, local_weights[k],
                                                         self.v_size, noise_treatment=self.noise_treatment)

        return X, sparsify(y)
def coords_to_volume(coords, v_size, noise_treatment=False):
    """Convert centered coordinates to a binary voxel volume."""
    # A unit weight per atom yields plain 0/1 occupancy.
    return weights_to_volume(coords, 1, v_size, noise_treatment=noise_treatment)
def weights_to_volume(coords, weights, v_size, noise_treatment=False):
    """Convert centered coordinates to a voxel volume carrying weights.

    :param coords: Nx3 array of coordinates centered on [0, 0, 0].
    :param weights: scalar applied to every voxel, or a length-N sequence of
        per-point weights.
    :param v_size: side length of the cubic output volume.
    :param noise_treatment: if True, remove isolated voxels afterwards.
    :return: (v_size, v_size, v_size) array; duplicate points overwrite.
    """
    # Initialization
    volume = np.zeros((v_size, v_size, v_size))

    # Translate center to the middle of the grid
    coords = coords + np.full((coords.shape[0], 3), (v_size-1)/2)

    # Round components
    coords = coords.astype(int)

    # Filter rows with values that are out of the grid
    mask = ((coords >= 0) & (coords < v_size)).all(axis=1)

    # BUG FIX: the original `type(weights) != int` test broke for scalar
    # floats / NumPy scalars (indexed a scalar) and for plain lists (no
    # boolean-mask indexing); test scalarity and coerce sequences to arrays.
    if np.isscalar(weights):
        volume[tuple(coords[mask].T)] = weights
    else:
        volume[tuple(coords[mask].T)] = np.asarray(weights)[mask]

    # Remove noise
    if noise_treatment:
        volume = remove_noise(coords, volume)

    return volume
def coords_center_to_zero(coords):
    """Translate coordinates so their barycenter sits at the origin."""
    # Subtracting the column-wise mean broadcasts over every row.
    return coords - coords.mean(axis=0)
def adjust_size(coords, v_size=32, max_radius=40):
    """Scale coordinates so a sphere of max_radius fits inside the volume."""
    scale = (v_size/2 - 1) / max_radius
    return coords * scale
def sparsify(y):
    """One-hot encode 1-based class labels into a binary NumPy array."""
    n_classes = 6
    # Broadcast (n, 1) labels against the (n_classes,) class values 1..6.
    classes = np.arange(1, n_classes + 1)
    return (y[:, None] == classes).astype(int)
def flip_around_axis(coords, axis=(0.2, 0.2, 0.2)):
    """Randomly negate each coordinate axis with its given probability.

    Mutates `coords` in place and also returns it.
    """
    for axis_index, probability in enumerate(axis):
        # One Bernoulli draw per axis decides whether that axis is mirrored.
        if np.random.binomial(1, probability):
            coords[:, axis_index] *= -1
    return coords
def get_barycenter(coords):
    """Return the barycenter of an Nx3 matrix as a 1x3 array."""
    return coords.mean(axis=0, keepdims=True)
def remove_noise(coords, volume):
    """Zero out voxels whose 3x3x3 neighborhood holds no other occupied voxel.

    :param coords: Nx3 int array of voxel positions (already translated into
        grid coordinates).
    :param volume: cubic volume to clean, modified in place and returned.
    """
    # Parameters
    v_size = volume.shape[0]

    # Computations
    for i in range(coords.shape[0]):
        x, y, z = coords[i, :]
        # Only interior points have a complete 3x3x3 neighborhood.
        if 0 < x < v_size-1 and 0 < y < v_size-1 and 0 < z < v_size-1:
            neighborhood = volume[x-1:x+2, y-1:y+2, z-1:z+2]
            # A 3x3x3 block that is zero everywhere except (possibly) its
            # center marks an isolated voxel.
            isolated = np.pad(np.array([[[1]]]), 1, 'constant') * volume[x, y, z]
            # (dropped the redundant `== True` on array_equal's bool result)
            if np.array_equal(neighborhood, isolated):
                volume[x-1:x+2, y-1:y+2, z-1:z+2] = np.zeros((3, 3, 3))
    return volume
def precomputed_name(pdb_id, path, type_file, desired_p, weights_name=None, scaling=True):
    """Return the path of a precomputed .npy file.

    :param pdb_id: PDB identifier (lower-cased in the filename).
    :param path: directory holding the precomputed files.
    :param type_file: 'coords' or 'weights'.
    :param desired_p: interpolation parameter encoded in the filename.
    :param weights_name: weight channel name (required for 'weights').
    :param scaling: whether weights were scaled (encoded in the filename).
    :raises ValueError: on an unknown type_file (previously returned None
        silently, deferring the failure to a confusing later crash).
    """
    if type_file == 'coords':
        return os.path.join(path, pdb_id.lower() + '_coords_p' + str(desired_p) + '.npy')
    if type_file == 'weights':
        return os.path.join(path, pdb_id.lower() + '_' + weights_name + '_p' + str(desired_p) + '_scaling' + str(scaling) + '.npy')
    raise ValueError("type_file must be 'coords' or 'weights', got %r" % type_file)
def save_coords_weights(pdb_id, list_weights, desired_p, scaling_weights,
                        source_path, dest_path):
    'Computes coordinates and weights and saves them into .npy files'
    # Initialize local PDB — parses the backbone from the raw PDB file.
    local_PDB = PDB_backbone(pdb_id=pdb_id, path=source_path)

    # Coordinates, interpolated with desired_p extra points per atom pair.
    local_PDB.get_coords_extended(p=desired_p)  # Compute
    coords = local_PDB.backbone_coords_ext  # Store
    np.save(precomputed_name(pdb_id, dest_path, 'coords', desired_p), coords)  # Save

    # Weights — one .npy file per requested channel (e.g. 'hydropathy').
    for weights_name in list_weights:
        local_PDB.get_weights_extended(desired_p, weights=weights_name,
                                       scaling=scaling_weights)  # Compute
        weights = local_PDB.backbone_weights_ext  # Store
        np.save(precomputed_name(pdb_id, dest_path, 'weights', desired_p, weights_name, scaling_weights),
                weights)  # Save
def load_coords(pdb_id, desired_p, source_path):
    """Load precomputed, interpolated backbone coordinates from disk."""
    target = precomputed_name(pdb_id, source_path, 'coords', desired_p)
    return np.load(target)
def load_weights(pdb_id, weights_name, desired_p, scaling, source_path):
    """Load precomputed per-point weights for one channel from disk."""
    target = precomputed_name(pdb_id, source_path, 'weights', desired_p, weights_name, scaling)
    return np.load(target)
|
import audio
import graphic
import os
import output
import scipy.io.wavfile as wav
from util import *
def read_wav_dirty(f):
    """Read a WAV file, stripping only the extension from the name.

    Returns (name, signal, samplerate).
    """
    samplerate, signal = wav.read(f)
    name = filename.truncate_extension(f)
    return (name, signal, samplerate)
def read_wav(f):
    """Read a WAV file, cleaning the name and stripping its extension.

    Returns (name, signal, samplerate). NOTE: multi-channel signals are
    returned as-is (no mono conversion).
    """
    samplerate, signal = wav.read(f)
    name = filename.clean(f)
    name = filename.truncate_extension(name)
    return (name, signal, samplerate)
def apply_melfilter(f, signal, samplerate):
    """Compute 39-band log mel-filterbank energies for the signal."""
    filterbank_energies = audio.melfilterbank.logfilter(
        samplerate,
        signal,
        winlen=0.00833,
        winstep=0.00833,
        nfilt=39,
        lowfreq=0,
        preemph=1.0,
    )
    return (f, filterbank_energies)
def generate_spectrograms(f, signal, samplerate):
    """Compute a cutoff spectrogram for the signal."""
    Sxx = audio.spectrogram.spectrogram_cutoff(
        samplerate, signal, winlen=0.00833, winstep=0.00833)
    return (f, Sxx)
def sliding_audio(f, signal, samplerate):
    """Yield (window_name, window_signal, samplerate) for each audio window."""
    windows = audio.windowing.sliding_with_filename(f, signal, samplerate, 5, 5, 0.6)
    for window_name, window in windows:
        yield (window_name, window, samplerate)
def downsample(f, signal, samplerate):
    """Resample the signal down to 16 kHz; returns (f, signal, samplerate)."""
    target = 16000
    new_signal, new_rate = audio.resample.downsample(signal, samplerate, target)
    return (f, new_signal, new_rate)
def wav_to_images(sound_file, output_path):
    '''Converts a WAV file input several images and writes them to disk.

    The file is cut into sliding windows; each window is downsampled, turned
    into a mel-filterbank image and a spectrogram image, and saved as PNG.
    Returns {"spectros": [...paths...], "melfilter": [...paths...]}.
    '''
    if not os.path.isdir(output_path):
        os.mkdir(output_path)

    # filenames of the generated images
    image_files = {
        "spectros" : [],
        "melfilter" : []
    }

    window_size = 600  # MFCC sliding window

    def _to_image(raw, size):
        # Shared grayscale -> equalize -> clamp -> fixed-width pipeline
        # (previously duplicated for mel and spectrogram images).
        img = graphic.colormapping.to_grayscale(raw, bytes=True)
        img = graphic.histeq.histeq(img)
        img = graphic.histeq.clamp_and_equalize(img)
        return graphic.windowing.cut_or_pad_window(img, size)

    f, signal, samplerate = read_wav_dirty(sound_file)

    # BUG FIX: the loop variable was named `filename`, shadowing the
    # `filename` helper module imported via `util`; renamed. The spectrogram
    # call also now receives the window name like the mel call does (the
    # returned name was discarded, so output is unchanged).
    for (window_name, window_signal, window_rate) in sliding_audio(f, signal, samplerate):
        _, window_signal, window_rate = downsample(window_name, window_signal, window_rate)

        _, mel_image = apply_melfilter(window_name, window_signal, window_rate)
        _, spectro_image = generate_spectrograms(window_name, window_signal, window_rate)

        mel_image = _to_image(mel_image, window_size)
        spectro_image = _to_image(spectro_image, window_size)

        mel_filename = "melfilter_%s" % os.path.basename(window_name)
        spectro_filename = "spectrogram_%s" % os.path.basename(window_name)

        output.image.save(mel_filename, mel_image, output_path)
        output.image.save(spectro_filename, spectro_image, output_path)

        image_files["melfilter"].append(os.path.join(output_path, mel_filename + ".png"))
        image_files["spectros"].append(os.path.join(output_path, spectro_filename + ".png"))

    return image_files
import os
import shutil
import zipfile
import pathlib
import gitlab
import click
from atacac._utils import log, tower_send
# CLI entry: all options may come from the listed environment variables.
@click.command()
@click.option('--gitlab-url', envvar='GITLAB_URL')
@click.option('--gitlab-token', envvar='GITLAB_TOKEN')
@click.option('--gitlab-project', envvar='GITLAB_PROJECT')
@click.option('--artifacts-job', envvar='ARTIFACTS_JOB', default='backup')
@click.argument('pipeline_id')
def main(gitlab_url, gitlab_token, gitlab_project, artifacts_job, pipeline_id):
    """
    Restore backup made by 'backup' command.
    \t
    Downloads job artifacts archive (backup of assets), extracts the archive
    locally, uploads the content to Tower
    \b
    Folloving arguments can be passed via environment variables:
    * GITLAB_URL
    * GITLAB_TOKEN
    * GITLAB_PROJECT
    * ARTIFACTS_JOB
    """
    # NOTE: the docstring above doubles as click's help text (including the
    # \t and \b layout markers), so it is left untouched here.
    # downloaded archive file path (artifacts.zip)
    dest_file_path = os.path.join(os.getcwd(), 'artifacts.zip')
    # extracted archive destination directory path
    dest_dir_path = os.path.join(os.getcwd(), 'artifacts')

    # Download -> extract -> upload, each step fatally logging on failure.
    gitlab_download(
        gitlab_url,
        gitlab_token,
        gitlab_project,
        artifacts_job,
        pipeline_id,
        dest_file_path
    )
    extract(dest_file_path, dest_dir_path)
    upload(dest_dir_path)
def gitlab_download(url, token, project, artifacts_job, pipeline_id, file_path):
    """Download the artifacts.zip of `artifacts_job` in `pipeline_id`.

    Validates that both the pipeline and the job finished successfully and
    that an 'artifacts.zip' artifact exists, then streams it to file_path.
    All failures go through log(..., fatal=True).
    """
    gl = gitlab.Gitlab(url, token)
    gl.auth()

    project = gl.projects.get(project)
    pipeline = project.pipelines.get(pipeline_id)
    log('INFO', (f"Pipeline: {pipeline.id}\n"
                 f" Status: {pipeline.attributes['status']}\n"
                 f" Commit: {pipeline.attributes['sha']}\n"
                 f" URL: {pipeline.attributes['web_url']}"))
    if pipeline.attributes['status'] != 'success':
        log('ERROR', "Pipeline's status is not 'success'!", fatal=True)

    # BUG FIX: a bare next() raised StopIteration when the job was absent;
    # use a default and report the problem through the normal fatal path.
    job_backup = next((job for job in pipeline.jobs.list()
                       if job.name == artifacts_job), None)
    if job_backup is None:
        log('ERROR', f"Job '{artifacts_job}' not found in the pipeline!", fatal=True)
    log('INFO', (f" Job: {artifacts_job}\n"
                 f" Status: {job_backup.attributes['status']}\n"
                 f" URL: {job_backup.attributes['web_url']}"))
    if job_backup.attributes['status'] != 'success':
        log('ERROR', "Job's status is not 'success'!", fatal=True)

    # Confirm the expected archive is among the job's artifacts.
    for artifact in job_backup.artifacts:
        if artifact['filename'] == 'artifacts.zip':
            log('INFO', (f" Artifact: {artifact['filename']}\n"
                         f" Format: {artifact['file_format']}\n"
                         f" Size: {artifact['size']}"))
            break
    else:
        log('ERROR', "Invalid artifact!", fatal=True)

    try:
        if os.path.isfile(file_path):
            log('WARNING', f"Rewriting file: '{file_path}'")
        with open(file_path, "wb") as f:
            # Stream the archive straight into the file.
            project.jobs.get(job_backup.id).artifacts(streamed=True, action=f.write)
    except EnvironmentError:
        log('ERROR', f"Failed to write to the file! Path: {file_path}", fatal=True)

    # BUG FIX: this message was missing its f-prefix and printed the literal
    # text '{file_path}'.
    log('INFO', f"File path (artifacts - archive): '{file_path}'")
    log('INFO', "Successfully downloaded")
def extract(src_archive, dest_dir):
    """Extract the zip archive into dest_dir, replacing any existing copy."""
    if os.path.isdir(dest_dir):
        log('WARNING', f"Rewriting directory: '{dest_dir}'")
        shutil.rmtree(dest_dir, ignore_errors=True)

    with zipfile.ZipFile(src_archive, 'r') as archive:
        archive.extractall(dest_dir)

    log('INFO', f"Directory path (artifacts - extracted): '{dest_dir}'")
    log('INFO', "Successfully extracted")
def upload(src_dir):
    """Send every extracted *.yml asset under src_dir to Tower."""
    for path in pathlib.Path(src_dir).rglob('*.yml'):
        # path.stem is the filename without its extension; underscores stood
        # in for spaces when the backup was written. (Replaces a redundant
        # basename(splitext(...)) chain that carried a stale `noqa: F841` —
        # the variable IS used in the log message below.)
        asset_name = path.stem.replace('_', ' ')
        log('INFO', f"Sending '{asset_name}' from '{path}'")
        tower_send(path)
        log('INFO', "Successfully sent")
# Script entry point; click injects the option values at call time.
if __name__ == "__main__":
    # pylint: disable=unexpected-keyword-arg,no-value-for-parameter
    main()
|
# -*- coding: utf-8 -*-
"""Pathway analysis methods."""
import logging
from collections import Counter, defaultdict
from typing import Dict, Iterable, List, Mapping
import numpy as np
import pandas as pd
from networkx import DiGraph
from scipy.stats import fisher_exact
from statsmodels.stats.multitest import multipletests
from .constants import KEGG_GENESETS, REACTOME_GENESETS, WIKIPATHWAYS_GENESETS
logger = logging.getLogger(__name__)

# Identifier prefix each database uses for its pathway IDs; used to map an
# enriched pathway ID back to its source database.
PATHWAY_PREFIXES = {
    'kegg': 'hsa',
    'reactome': 'R-HSA',
    'wikipathways': 'WP',
}
def _prepare_json(paths):
"""Prepare json."""
return {
index: [
cosa
for cosa in path
]
for index, path in enumerate(paths)
}
def analyze_paths(
    reduced_graph: DiGraph,
    paths: List[List[int]],
    id2node: Mapping[int, str],
    genesets: Mapping[str, Iterable[str]],
    min_count: int = 0,
    min_proportion: int = 0,
):
    """Analyze paths: per-position node counts, edge-annotated paths, enrichment.

    :param reduced_graph: graph whose edges carry a 'polarity' attribute (1/-1)
    :param paths: paths as lists of node ids
    :param id2node: mapping between ids and node names
    :param genesets: pathway genesets
    :param min_count: minimum number of times at a given lmax
    :param min_proportion: minimum proportion of that node based on the total
    :return: (per-position DataFrame, json-ready paths, enrichment DataFrame)
    """
    # Count how often each node name appears at each position (lmax) in a path.
    results = defaultdict(Counter)

    # Iter over all paths
    for path in paths:
        # Iterate through each node in the path while tracking its position in the path
        for index, node in enumerate(path):
            results[index][id2node[node]] += 1

    # Edge polarity rendered as an arrow between consecutive nodes.
    polarity_dict = {1: '->', -1: '-|'}

    final_paths = set()
    for path in paths:
        reconstructed_path = []
        for index, node in enumerate(path):
            # Avoid crashing (last node has no outgoing edge in the path)
            if index + 1 == len(path):
                continue

            polarity = polarity_dict[reduced_graph[node][path[index + 1]]['polarity']]

            # First step emits both endpoints; later steps only the arrow+target.
            if index == 0:
                reconstructed_path.append(node)
                reconstructed_path.append(polarity)
                reconstructed_path.append(path[index + 1])
            else:
                reconstructed_path.append(polarity)
                reconstructed_path.append(path[index + 1])

        # Translate ids to names; arrows pass through unchanged.
        final_paths.add(
            tuple(
                id2node[cosa] if cosa in id2node else cosa
                for cosa in reconstructed_path
            )
        )

    final_paths = _prepare_json(final_paths)

    df_dict = {}
    for lmax, counter in results.items():
        # Total number of nodes (incl. duplicates) on that path position
        total = sum(counter.values())

        # Zip list of tuples into two lists keeping the same order.
        # NOTE(review): if the threshold filters out every node at this
        # position, zip(*[]) raises — confirm inputs guarantee survivors.
        sorted_most_common_nodes, sorted_count = map(list, zip(*[
            (node, count)
            for node, count in counter.most_common()
            if count > min_count and (count * 100) / total > min_proportion
            # Threshold on absolute count and proportion
        ]))

        df_dict[lmax] = sorted_most_common_nodes
        df_dict[f'count_{lmax}'] = sorted_count

    # Convert dict to pandas dataframe (columns padded with NaN -> '')
    df = pd.DataFrame({
        key: pd.Series(list(values))
        for key, values in df_dict.items()
    })
    df.fillna('', inplace=True)

    enrichment_results = pathway_enrichment(df, genesets)

    return df, final_paths, enrichment_results
def analyze_paths_with_intermediates(
    reduced_graph: DiGraph,
    paths: List[List[int]],
    id2node: Mapping[int, str],
    intermediate_nodes: List[str],
):
    """Analyze paths, keeping only those crossing an intermediate node.

    :param reduced_graph: graph whose edges carry a 'polarity' attribute (1/-1)
    :param paths: paths as lists of node ids
    :param id2node: mapping between ids and node names
    :param intermediate_nodes: nodes that must be present in the paths
    :return: (json-ready paths, set of intermediates seen) or (None, None)
        when no path qualifies
    """
    # NOTE(review): `results` is filled below but never used in the return —
    # looks like a leftover from analyze_paths; confirm before removing.
    results = defaultdict(Counter)
    nodes_present = set()

    # Iter over all paths
    for path in paths:
        # Iterate through each node in the path while tracking its position in the path
        for index, node in enumerate(path):
            results[index][id2node[node]] += 1

    polarity_dict = {1: '->', -1: '-|'}

    final_paths = set()
    for path in paths:
        reconstructed_path = []
        for index, node in enumerate(path):
            # Avoid crashing (last node has no outgoing edge in the path)
            if index + 1 == len(path):
                continue

            polarity = polarity_dict[reduced_graph[node][path[index + 1]]['polarity']]

            if index == 0:
                reconstructed_path.append(node)
                reconstructed_path.append(polarity)
                reconstructed_path.append(path[index + 1])
            else:
                reconstructed_path.append(polarity)
                reconstructed_path.append(path[index + 1])

        """New snippet different to 'analyze_paths'"""
        # From ids to real node names
        path_reconstructed = tuple(
            id2node[cosa] if cosa in id2node else cosa
            for cosa in reconstructed_path
        )

        # Discard paths that touch none of the required intermediates.
        if not any([
            True if i in path_reconstructed else False
            for i in intermediate_nodes
        ]):
            continue

        # Record which intermediates actually occur.
        for node in path_reconstructed:
            if node in intermediate_nodes:
                nodes_present.add(node)

        final_paths.add(path_reconstructed)

    final_paths = _prepare_json(final_paths)

    if not final_paths:
        return None, None

    return final_paths, nodes_present
def pathway_enrichment(df: pd.DataFrame, geneset, prefix: str = 'ncbigene:') -> pd.DataFrame:
    """Run pathway enrichment for every lmax column of *df*.

    Columns named 'count_*' are skipped; for each remaining column the node
    entries are stripped of *prefix* and tested against *geneset*.
    """
    enrichment_df = pd.DataFrame()

    for column in df:
        # Count columns accompany each lmax column and hold no gene names.
        if str(column).startswith('count_'):
            continue

        genes = {
            entry.replace(prefix, '')
            for entry in df[column]
            if pd.notna(entry)
        }

        result = perform_hypergeometric_test(
            genes_to_test=genes,
            pathway_dict=geneset,
        )

        # Skip if no pathways are enriched
        if result.empty:
            continue

        enrichment_df[f'database_{column}'] = result['database']
        enrichment_df[f'pathway_id_{column}'] = result['pathway_id']
        enrichment_df[f'q_values_{column}'] = result['qval']

    return enrichment_df
def get_genesets():
    """Get gene sets as dicts, one per database (KEGG, Reactome, WikiPathways)."""
    sources = (KEGG_GENESETS, REACTOME_GENESETS, WIKIPATHWAYS_GENESETS)
    return tuple(parse_gmt_file(source) for source in sources)
def parse_gmt_file(gmt_file: str, min_size=3, max_size=3000) -> Dict[str, List]:
    """Parse a GMT file into {geneset_name: gene_list}, bounded by set size.

    GMT format is tab-separated: name, description, then one gene per field.
    Sets smaller than min_size or larger than max_size are dropped.
    """
    geneset_dict = {}
    with open(gmt_file, 'r') as file:
        # Iterate the file directly (no readlines()) and split each line
        # once instead of twice as before.
        for line in file:
            fields = line.strip().split("\t")
            # fields[1] is the description column and is deliberately skipped.
            geneset_dict[fields[0]] = fields[2:]

    return {
        name: genes
        for name, genes in geneset_dict.items()
        if min_size <= len(genes) <= max_size
    }
def _prepare_hypergeometric_test(
query_gene_set,
pathway_gene_set,
gene_universe,
):
"""Prepare the matrix for hypergeometric test calculations.
:param query_gene_set: gene set to test against pathway
:param pathway_gene_set: pathway gene set
:param gene_universe: number of HGNC symbols
:return: 2x2 matrix
"""
# Cast lists to sets
if not isinstance(query_gene_set, set):
query_gene_set = set(query_gene_set)
if not isinstance(pathway_gene_set, set):
pathway_gene_set = set(pathway_gene_set)
# Return matrix to test hyper-geometric test
return np.array([
[
len(query_gene_set.intersection(pathway_gene_set)),
len(query_gene_set.difference(pathway_gene_set)),
],
[
len(pathway_gene_set.difference(query_gene_set)),
gene_universe - len(pathway_gene_set.union(query_gene_set)),
],
])
def perform_hypergeometric_test(
    genes_to_test,
    pathway_dict,
    gene_universe: int = 41714,
    apply_threshold=True,
    threshold=0.05,
):
    """Perform hypergeometric tests.

    :param genes_to_test: gene set to test against pathway
    :param pathway_dict: pathway name to gene set
    :param gene_universe: number of HGNC symbols
    :param apply_threshold: return only significant pathways
    :param threshold: significance threshold (by default 0.05)
    """
    def _database_for(pathway_id):
        # First database whose CURIE prefix matches, else 'unknown'.
        for db_name, db_prefix in PATHWAY_PREFIXES.items():
            if pathway_id.startswith(db_prefix):
                return db_name
        return 'unknown'

    # One (database, pathway, Fisher greater-tail p-value) row per pathway.
    rows = [
        (
            _database_for(pathway_id),
            pathway_id,
            fisher_exact(
                _prepare_hypergeometric_test(genes_to_test, gene_set, gene_universe),
                alternative='greater',
            )[1],
        )
        for pathway_id, gene_set in pathway_dict.items()
    ]
    df = pd.DataFrame(rows, columns=['database', 'pathway_id', 'pval'])
    # Benjamini-Hochberg FDR correction over all raw p-values.
    df['qval'] = multipletests(df.pval, method='fdr_bh')[1]
    if apply_threshold:
        logger.debug(f'Filtering out pathways with q-values > {threshold} according to fdr_bh')
        df = df[df['qval'] < threshold]
    # Sort by q value (descending, as before) and reset index.
    df.sort_values(by=['qval'], ascending=False, inplace=True)
    df.reset_index(inplace=True)
    return df
|
#!/usr/bin/env python3
#<NAME> (<EMAIL>)
from Bio.SeqIO.FastaIO import SimpleFastaParser
from Bio.Seq import reverse_complement
from Bio.SeqUtils import GC
import os,sys,argparse,regex
from xopen import xopen
from tqdm import tqdm
# CLI: scan a fasta file for assembly artefacts (full palindromes, direct
# repeats, potential circular sequences) and report them as fasta + TSV.
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--in_fasta', action = 'store', dest = 'in_fasta', required = True, help = 'Fasta file containing sequences/scaffolds/contigs/assemblies')
parser.add_argument('-o', '--out_fasta', action = 'store', dest = 'out_fasta', required = True, help = 'Create a fasta with error sequences')
parser.add_argument('-t', '--table_out', action = 'store', dest = 'out_table', required = True, help = 'Output table destination/path')
parser.add_argument('--force', action = 'store_true', dest = 'force', required = False, help = 'Overwrite existing file(s)')
parser.add_argument('-c', '--circular_only', action = 'store_true', dest = 'circular_only', required = False)
# NOTE(review): --min_bp is parsed but never used anywhere below -- confirm intent.
parser.add_argument('-m', '--min_bp', action = 'store', required = False)
args = parser.parse_args()
fasta = os.path.expanduser(args.in_fasta)
out_fasta = os.path.expanduser(args.out_fasta)
out_table = os.path.expanduser(args.out_table)
# Refuse to clobber existing outputs unless --force was given; with --force,
# remove any stale outputs up front.
if not args.force:
    if os.path.exists(out_fasta) and os.path.exists(out_table):
        print('Output fasta and table exist')
        sys.exit()
    elif os.path.exists(out_fasta):
        print('Output fasta exists')
        sys.exit()
    elif os.path.exists(out_table):
        print('Output table exists')
        sys.exit()
else:
    if os.path.exists(out_fasta):
        os.remove(out_fasta)
    if os.path.exists(out_table):
        os.remove(out_table)
# Number of leading bases used when searching for a circular overlap.
overlap_length = 50
def write_fasta():
    # Write the current record to the error fasta. Relies on module-level
    # globals set by the main loop: `header`, `seq`, and the open file handle
    # `out_fasta` (which shadows the path string of the same name).
    fa = '>' + header + '\n' + seq
    out_fasta.write(fa + '\n')
def write_table():
    # Append one TSV row for the current record. Relies on module-level
    # globals set by the main loop: `id_`, `bp`, `gc`, `error_type`,
    # `repeat_length`, and the open file handle `out_table`.
    out_line = '\t'.join([id_, str(bp), str(gc), error_type, str(repeat_length)])
    out_table.write(out_line + '\n')
def check_direct_repeat(sequence):
    """Check whether the first 20% of *sequence* recurs later in it.

    Uses fuzzy matching (up to the module-level `regex_subs` substitutions).

    :param sequence: nucleotide string to scan
    :return: length of the repeated fragment, or False if not repeated
    """
    # BUGFIX: the original ignored the `sequence` parameter and read the
    # globals `seq` and `bp` instead; at the single call site they were equal,
    # but the function was broken for any other input.
    fraction_bp = int(0.2 * len(sequence))
    fraction_seq = sequence[:fraction_bp]
    rx = f'({fraction_seq})' + regex_subs
    matches = regex.findall(rx, sequence)
    # The fragment trivially matches itself once; >= 2 means a true repeat.
    if len(matches) >= 2:
        return len(fraction_seq)
    return False
def check_circular(sequence, overlap_length):
    """Detect a terminal direct repeat suggesting a circular sequence.

    Looks for the first `overlap_length` bases of *sequence* recurring at the
    end of the sequence (searched within its second half).

    :param sequence: nucleotide string to scan
    :param overlap_length: number of leading bases used as the probe
    :return: length of the matching terminal repeat, or False
    """
    seq_length = len(sequence)
    half_seq_length = seq_length // 2
    # BUGFIX: probe length was hard-coded to 50, ignoring the parameter.
    beg_seq = sequence[:overlap_length]
    end_seq = sequence[-half_seq_length:]
    beg_seq_in_end_seq_index = end_seq.rfind(beg_seq)
    if beg_seq_in_end_seq_index == -1:
        # Explicit False (was an implicit None fall-through).
        return False
    end_match = end_seq[beg_seq_in_end_seq_index:]
    len_end_match = len(end_match)
    # BUGFIX: compare against the argument, not the global `seq`.
    beg_match = sequence[:len_end_match]
    if beg_match == end_match:
        return len_end_match
    return False
def check_palindrome(sequence):
    """Return True if *sequence* equals its own reverse complement.

    The original returned an implicit None for non-palindromes; callers only
    test truthiness, so returning False is backward-compatible and explicit.
    """
    return sequence == reverse_complement(sequence)
# Allow up to 2 substitutions in fuzzy repeat matching (`regex` module fuzzy syntax).
substitutions = '2'
regex_subs = '{s<=' + substitutions + '}'
# NOTE: from here on, `out_fasta`/`out_table` (path strings above) are
# shadowed by the open file handles of the same names.
with xopen(out_fasta, 'w') as out_fasta, xopen(out_table, 'w') as out_table:
    out_header = ['sequence_id', 'bp', 'gc', 'repeat_type', 'repeat_length']
    out_header = '\t'.join(out_header)
    out_table.write(out_header + '\n')
    for header,seq in tqdm(SimpleFastaParser(xopen(fasta)), unit=' Sequences processed'):
        header,seq = header.strip(),seq.strip()
        seq = seq.upper()
        # Sequence id is the first whitespace-separated token of the header.
        id_ = header.split(' ')[0]
        bp,gc = len(seq),round(GC(seq),2)
        if args.circular_only:
            # Only test for circularity; skip everything else.
            circular_repeat_length = check_circular(seq,overlap_length)
            if circular_repeat_length:
                error_type = 'potential_circular'
                repeat_length = circular_repeat_length
            else:
                continue
        else:
            try:
                # NOTE(review): seq_rc is computed but never used -- it looks
                # like a validity probe (non-IUPAC characters raise) -- confirm.
                seq_rc = reverse_complement(seq)
            except ValueError:
                continue
            # Classify from most to least specific repeat type.
            if check_palindrome(seq):
                error_type = 'full_palindrome'
                repeat_length = bp
            else:
                direct_repeat_length = check_direct_repeat(seq)
                if direct_repeat_length:
                    error_type = 'direct_repeat'
                    repeat_length = direct_repeat_length
                else:
                    circular_repeat_length = check_circular(seq,overlap_length)
                    if circular_repeat_length:
                        error_type = 'potential_circular'
                        repeat_length = circular_repeat_length
                    else:
                        continue
        # Record the flagged sequence in both outputs (reads the globals
        # assigned above).
        write_fasta()
        write_table()
|
<filename>ansible_bender/api.py
import logging
import os
import datetime
import sys
from ansible_bender.builder import get_builder
from ansible_bender.builders.base import BuildState
from ansible_bender.constants import OUT_LOGGER, OUT_LOGGER_FORMAT
from ansible_bender.core import AnsibleRunner
from ansible_bender.db import Database
from ansible_bender.exceptions import AbBuildUnsuccesful
from ansible_bender.utils import set_logging
# Module-level loggers: `logger` for internal diagnostics, `out_logger` for
# user-facing output (configured in Application.set_logging).
logger = logging.getLogger(__name__)
out_logger = logging.getLogger(OUT_LOGGER)
class Application:
    """High-level ansible-bender API: orchestrates builds, logging and the DB."""

    def __init__(self, debug=False, db_path=None, verbose=False, init_logging=True):
        """
        :param debug: bool, provide debug output if True
        :param db_path: str, path to json file where the database stores the data persistently
        :param verbose: bool, print verbose output
        :param init_logging: bool, set up logging if True
        """
        if init_logging:
            self.set_logging(debug=debug, verbose=verbose)
        self.verbose = verbose
        self.debug = debug
        self.db = Database(db_path=db_path)
        self.db_path = self.db.db_root_path

    @staticmethod
    def set_logging(debug=False, verbose=False):
        """ configure logging """
        if debug:
            # NOTE(review): unlike the other branches, debug mode does not
            # configure OUT_LOGGER -- confirm this is intentional.
            set_logging(level=logging.DEBUG)
        elif verbose:
            set_logging(level=logging.INFO)
            set_logging(logger_name=OUT_LOGGER, level=logging.INFO, format=OUT_LOGGER_FORMAT,
                        handler_kwargs={"stream": sys.stdout})
        else:
            set_logging(level=logging.WARNING)
            set_logging(logger_name=OUT_LOGGER, level=logging.INFO, format=OUT_LOGGER_FORMAT,
                        handler_kwargs={"stream": sys.stdout})

    def build(self, build):
        """
        build container image

        :param build: instance of Build
        :raises RuntimeError: if the playbook path does not exist
        """
        if not os.path.isfile(build.playbook_path):
            raise RuntimeError("No such file or directory: %s" % build.playbook_path)
        build.validate()
        build.metadata.validate()
        build.debug = self.debug
        build.verbose = self.verbose
        # we have to record as soon as possible
        self.db.record_build(build)
        try:
            builder = self.get_builder(build)
            builder.sanity_check()
            # before we start messing with the base image, we need to check for its presence first
            if not builder.is_base_image_present():
                builder.pull()
                build.pulled = True
            builder.check_container_creation()
            # let's record base image as a first layer
            base_image_id = builder.get_image_id(build.base_image)
            build.record_layer(None, base_image_id, None, cached=True)
            a_runner = AnsibleRunner(build.playbook_path, builder, build, debug=self.debug)
            # we are about to perform the build
            build.build_start_time = datetime.datetime.now()
            self.db.record_build(build, build_state=BuildState.IN_PROGRESS)
            if not build.python_interpreter:
                build.python_interpreter = builder.find_python_interpreter()
            builder.create()
        except Exception:
            # any failure during setup marks the build FAILED before re-raising
            self.db.record_build(
                None,
                build_id=build.build_id,
                build_state=BuildState.FAILED,
                set_finish_time=True
            )
            raise
        try:
            try:
                output = a_runner.build(self.db_path)
            except AbBuildUnsuccesful as ex:
                # record the failure, but keep the partial progress as an image
                b = self.db.record_build(None, build_id=build.build_id,
                                         build_state=BuildState.FAILED,
                                         set_finish_time=True)
                b.log_lines = ex.output.split("\n")
                self.db.record_build(b)
                # TODO: since this overwrites previous runs, we should likely add timestamp here
                image_name = build.target_image + "-failed"
                b.target_image = image_name
                image_id = builder.commit(image_name)
                b.final_layer_id = image_id
                self.record_progress(b, None, image_id)
                out_logger.info("Image build failed /o\\")
                out_logger.info("The progress is saved into image '%s'", image_name)
                raise
            b = self.db.record_build(None, build_id=build.build_id, build_state=BuildState.DONE,
                                     set_finish_time=True)
            b.log_lines = output
            # commit the final image and apply all metadata
            b.final_layer_id = builder.commit(build.target_image)
            if not b.is_layering_on():
                self.record_progress(b, None, b.final_layer_id)
            else:
                self.db.record_build(b)
            out_logger.info("Image '%s' was built successfully \\o/", build.target_image)
        finally:
            builder.clean()

    def get_build(self, build_id=None):
        """
        get selected build or latest build if build_id is None

        :param build_id: str or None
        :return: build
        """
        if build_id is None:
            return self.db.get_latest_build()
        return self.db.get_build(build_id)

    def get_logs(self, build_id=None):
        """
        get logs for a specific build, if build_id is not, select the latest build

        :param build_id: str or None
        :return: list of str
        """
        build = self.get_build(build_id=build_id)
        return build.log_lines

    def list_builds(self):
        """Return all builds recorded in the database."""
        return self.db.load_builds()

    def inspect(self, build_id=None):
        """
        provide detailed information about the selected build

        :param build_id: str or None
        :return: dict
        """
        build = self.get_build(build_id=build_id)
        di = build.to_dict()
        del di["log_lines"]  # we have a dedicated command for that
        del di["layer_index"]  # internal info
        return di

    def push(self, target, build_id=None, force=False):
        """
        push built image into a remote location, this method raises an exception when:
        * the push failed or the image can't be found
        * the build haven't finished yet

        :param target: str, transport:details
        :param build_id: id of the build or None
        :param force: bool, bypass checks if True
        :return: None
        """
        build = self.get_build(build_id=build_id)
        builder = self.get_builder(build)
        builder.push(build, target, force=force)

    def get_builder(self, build):
        """Instantiate the builder backend configured for this build."""
        return get_builder(build.builder_name)(build, debug=self.debug)

    def maybe_load_from_cache(self, content, build_id):
        """Return a cached layer id for *content*, loading it into the working
        container, or None-ish when caching is disabled or there is no hit."""
        build = self.db.get_build(build_id)
        builder = self.get_builder(build)
        if not build.cache_tasks:
            return
        base_image_id, layer_id = self.record_progress(build, content, None)
        builder.swap_working_container()
        return layer_id

    def get_layer(self, content, base_image_id):
        """
        provide a layer for given content and base_image_id; if there
        is such layer in cache store, return its layer_id

        :param content:
        :param base_image_id:
        :return:
        """
        return self.db.get_cached_layer(content, base_image_id)

    def record_progress(self, build, content, layer_id, build_id=None):
        """
        record build progress to the database

        :param build:
        :param content: str or None
        :param layer_id:
        :param build_id:
        :return: (base_image_id, layer_id) or (None, None) on a cache miss
        """
        if build_id:
            build = self.db.get_build(build_id)
        base_image_id = build.get_top_layer_id()
        was_cached = False
        if not layer_id:
            # skipped task, it was cached
            if content:
                layer_id = self.get_layer(content, base_image_id)
                builder = self.get_builder(build)
                # a cached layer whose image was removed cannot be reused
                if not builder.is_image_present(layer_id):
                    logger.info("layer %s for content %s does not exist", layer_id, content)
                    layer_id = None
            if not layer_id:
                return None, None
            was_cached = True
        build.record_layer(content, layer_id, base_image_id, cached=was_cached)
        self.db.record_build(build)
        return base_image_id, layer_id

    def create_new_layer(self, content, build):
        """Commit the working container as a new uniquely-named layer."""
        builder = self.get_builder(build)
        # BUGFIX: format was "%Y%M%d-%H%M%S" -- %M is minutes, %m is month,
        # so layer names carried the minute where the month should be.
        timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
        image_name = "%s-%s" % (build.target_image, timestamp)
        # buildah doesn't accept upper case
        image_name = image_name.lower()
        layer_id = builder.commit(image_name, print_output=False)
        base_image_id, _ = self.record_progress(build, content, layer_id)
        return image_name, layer_id, base_image_id

    def cache_task_result(self, content, build):
        """ snapshot the container after a task was executed """
        image_name, layer_id, base_image_id = self.create_new_layer(content, build)
        if not build.cache_tasks:  # actually we could still cache results
            return
        self.db.save_layer(layer_id, base_image_id, content)
        return image_name

    def clean(self):
        """Release the database lock/resources."""
        self.db.release()
|
<reponame>internetarchive/pdf_trio
import os
import json
import pytest
import responses
import pdf_trio
from fixtures import flask_client
def test_api_misc_routes(flask_client):
    """All basic informational routes respond with HTTP 200."""
    for route in ("/", "/api/list"):
        assert flask_client.get(route).status_code == 200
def test_api_classify_url(flask_client):
    """
    Test Classify By URL
    """
    urls = [
        "https://arxiv.org/pdf/1607.01759.pdf",
        "https://example.com/maps/foo.pdf",
    ]
    payload = json.dumps({"urls": urls})
    json_response = flask_client.post(
        "/classify/research-pub/url",
        data=payload,
        headers={"content-type": "application/json"},
    )
    assert json_response.status_code == 200
    # expecting json like: { "url1": 0.88, "url2": 0.23 }
    print("verbatim response=%s" % (json_response.data))
    predictions = json_response.get_json()["predictions"]
    for url in predictions:
        print("%.2f : %s" % (predictions[url], url))
    # scores must be real floats and not the "don't know" default of 0.5
    assert type(predictions[urls[0]]) == float
    assert predictions[urls[0]] != 0.5
    assert predictions[urls[1]] != 0.5
@responses.activate
def test_api_classify_pdf(flask_client):
    # End-to-end classify test with the two TF-serving backends mocked out via
    # the `responses` library; registration order/payloads below are
    # load-bearing for the mocked HTTP layer.
    test_pdf_path = 'tests/files/research/submission_363.pdf'
    # canned model outputs: [negative, positive] score pairs
    tf_bert_json = {'outputs': [[0.000686553773, 0.999313474]]}
    tf_image_json = {'predictions': [[0.999999881, 1.45352288e-07]]}
    # these are the version fetches
    tf_bert_model_version = "asdf1234"
    tf_image_model_version = "qwert9866"
    responses.add(responses.GET, 'http://localhost:8501/v1/models/bert_model',
                  status=200, json={
                      "model_version_status": [
                          {
                              "state": "AVAILABLE",
                              "version": tf_bert_model_version,
                          }
                      ]
                  })
    responses.add(responses.GET, 'http://localhost:8501/v1/models/image_model',
                  status=200, json={
                      "model_version_status": [
                          {
                              "state": "AVAILABLE",
                              "version": tf_image_model_version,
                          }
                      ]
                  })
    # these are the actual classify calls
    responses.add(responses.POST, 'http://localhost:8501/v1/models/bert_model:predict',
                  json=tf_bert_json, status=200)
    responses.add(responses.POST, 'http://localhost:8501/v1/models/image_model:predict',
                  json=tf_image_json, status=200)
    # exercise both classification modes with the same fixture PDF
    for mode in ('all', 'auto'):
        with open(test_pdf_path, 'rb') as f:
            form_data = {
                "pdf_content": (test_pdf_path, f, "application/octet-stream")
            }
            response = flask_client.post(
                "/classify/research-pub/" + mode,
                data=form_data,
            )
        assert response.status_code == 200
        assert response.json['status'] == "success"
        # check that the responses aren't default values
        assert response.json['ensemble_score'] != 0.5
        assert response.json['linear_score'] != 0.5
        assert response.json['versions']['git_rev']
        assert response.json['versions']['pdftrio_version'] == pdf_trio.__version__
        assert response.json['versions']['image_model'] == tf_image_model_version
        assert response.json['versions']['bert_model'] == tf_bert_model_version
        assert response.json['versions']['linear_model']  # from environ
        assert response.json['versions']['models_date']  # from environ
    # 2 version fetches + 2 predict calls per... NOTE(review): 4 total calls
    # recorded even across two modes -- presumably version lookups are cached
    # and 'auto' short-circuits; confirm against the app implementation.
    assert len(responses.calls) == 4
|
<filename>xgboost_ray/util.py
from typing import Dict, Optional, List
import asyncio
import ray
from ray.util.annotations import DeveloperAPI
from ray.util.queue import Queue as RayQueue, Empty, Full
@DeveloperAPI
class Unavailable:
    """Placeholder type; no object should ever be an instance of this class."""

    def __init__(self):
        # Constructing this type is always a programming error, so fail loudly.
        raise RuntimeError("This class should never be instantiated.")
class _EventActor:
def __init__(self):
self._event = asyncio.Event()
def set(self):
self._event.set()
def clear(self):
self._event.clear()
def is_set(self):
return self._event.is_set()
@DeveloperAPI
class Event:
    """Distributed event flag backed by a remote _EventActor."""

    def __init__(self, actor_options: Optional[Dict] = None):
        opts = actor_options if actor_options else {}
        self.actor = ray.remote(_EventActor).options(**opts).remote()

    def set(self):
        # fire-and-forget: no need to wait for the remote call
        self.actor.set.remote()

    def clear(self):
        self.actor.clear.remote()

    def is_set(self):
        # synchronous read of the remote flag
        return ray.get(self.actor.is_set.remote())

    def shutdown(self):
        """Kill the backing actor and drop the reference."""
        if self.actor:
            ray.kill(self.actor)
        self.actor = None
# Remove after Ray 1.2 release.
if getattr(RayQueue, "shutdown", None) is not None:
    # Newer Ray already ships a queue actor with shutdown support; reuse it.
    from ray.util.queue import _QueueActor
else:
    # Have to copy the class here so that we can subclass this for mocking.
    # If we have the @ray.remote decorator, then we can't subclass it.
    class _QueueActor:
        # Bounded asyncio-queue actor exposing the interface ray.util.queue expects.
        def __init__(self, maxsize):
            self.maxsize = maxsize
            self.queue = asyncio.Queue(self.maxsize)

        def qsize(self):
            return self.queue.qsize()

        def empty(self):
            return self.queue.empty()

        def full(self):
            return self.queue.full()

        async def put(self, item, timeout=None):
            # Translate asyncio timeouts into the queue module's Full error.
            try:
                await asyncio.wait_for(self.queue.put(item), timeout)
            except asyncio.TimeoutError:
                raise Full

        async def get(self, timeout=None):
            # Translate asyncio timeouts into the queue module's Empty error.
            try:
                return await asyncio.wait_for(self.queue.get(), timeout)
            except asyncio.TimeoutError:
                raise Empty

        def put_nowait(self, item):
            self.queue.put_nowait(item)

        def put_nowait_batch(self, items):
            # If maxsize is 0, queue is unbounded, so no need to check size.
            if self.maxsize > 0 and len(items) + self.qsize() > self.maxsize:
                raise Full(f"Cannot add {len(items)} items to queue of size "
                           f"{self.qsize()} and maxsize {self.maxsize}.")
            for item in items:
                self.queue.put_nowait(item)

        def get_nowait(self):
            return self.queue.get_nowait()

        def get_nowait_batch(self, num_items):
            # All-or-nothing batch read.
            if num_items > self.qsize():
                raise Empty(f"Cannot get {num_items} items from queue of size "
                            f"{self.qsize()}.")
            return [self.queue.get_nowait() for _ in range(num_items)]
# Remove after Ray 1.2 release.
@DeveloperAPI
class Queue(RayQueue):
    """Compatibility Queue that guarantees a shutdown() method on old Ray."""

    def __init__(self, maxsize: int = 0,
                 actor_options: Optional[Dict] = None) -> None:
        opts = actor_options or {}
        self.maxsize = maxsize
        self.actor = ray.remote(_QueueActor).options(**opts).remote(self.maxsize)

    def shutdown(self):
        # Ray >= 1.2 provides shutdown on the base class; delegate to it.
        if getattr(RayQueue, "shutdown", None) is not None:
            super(Queue, self).shutdown()
            return
        # Fallback for older Ray: kill the backing actor ourselves.
        if self.actor:
            ray.kill(self.actor)
        self.actor = None
@DeveloperAPI
class MultiActorTask:
    """Utility class to hold multiple futures.

    The `is_ready()` method will return True once all futures are ready.

    Args:
        pending_futures (list): List of object references (futures)
            that should be tracked.
    """

    def __init__(self, pending_futures: Optional[List[ray.ObjectRef]] = None):
        # Keep the caller's list object (it is mutated as futures complete).
        self._pending_futures = pending_futures or []
        self._ready_futures = []

    def is_ready(self):
        """Poll pending futures without blocking; True when none remain."""
        if not self._pending_futures:
            return True
        while True:
            done, _ = ray.wait(self._pending_futures, timeout=0)
            if not done:
                break
            # Move everything that just completed into the ready bucket.
            for ref in done:
                self._pending_futures.remove(ref)
                self._ready_futures.append(ref)
        return not bool(self._pending_futures)
@DeveloperAPI
def get_current_node_resource_key() -> str:
    """Get the Ray resource key for current node.

    It can be used for actor placement.
    If using Ray Client, this will return the resource key for the node that
    is running the client server.

    :return: the "node:<ip>" resource key of the current node
    :raises ValueError: if the current node cannot be located
    """
    current_node_id = ray.get_runtime_context().node_id.hex()
    for node in ray.nodes():
        if node["NodeID"] == current_node_id:
            # Found the node; its "node:<ip>" resource is the placement key.
            for key in node["Resources"].keys():
                if key.startswith("node:"):
                    return key
    # BUGFIX (message grammar): was "Cannot found ...". The explicit
    # fall-through raise replaces the for/else and also covers a matching
    # node without any "node:" resource.
    raise ValueError("Cannot find the node dictionary for current node.")
@DeveloperAPI
def force_on_current_node(task_or_actor):
    """Given a task or actor, place it on the current node.

    If the task or actor that is passed in already has custom resource
    requirements, then they will be overridden.
    If using Ray Client, the current node is the client server node.
    """
    key = get_current_node_resource_key()
    # A tiny fractional demand pins placement without reserving real capacity.
    return task_or_actor.options(resources={key: 0.01})
|
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
import pandas as pd
from ..common.toolbox import embedding,correlation,decide_dim
from ..common.surrogate import twin_surrogate
from ..common.distance import calic_dist_l2
import tensorflow as tf
# Module-wide defaults for the CCM routines; mutate via set_params().
CCM_PARAM = {
    "save_path": "./",  # directory/prefix where plots are saved
    "emb_dim": 5,       # default embedding dimension
    "discard": 10,      # minimum library size / initial points to discard
}
def set_params(**kwargs):
    """Update the module-wide CCM_PARAM defaults.

    Recognized keys: ``save`` (plot output path), ``discard`` (minimum library
    size) and ``emb_dim`` (default embedding dimension).
    """
    if "save" in kwargs:
        CCM_PARAM["save_path"] = kwargs["save"]
    if "discard" in kwargs:
        CCM_PARAM["discard"] = kwargs["discard"]
    if "emb_dim" in kwargs:
        # Previously there was no way to change the default embedding
        # dimension even though CCM_PARAM defines it.
        CCM_PARAM["emb_dim"] = kwargs["emb_dim"]
def xmap(x, k_dist, k_idx, emb_dim, tau, eps=1e-5):
    """Cross-map estimate of x using neighbour distances/indices from y's embedding.

    :param x: source time series being reconstructed
    :param k_dist: (n, k) distances to each point's k nearest neighbours
    :param k_idx: (n, k) indices of those neighbours
    :param emb_dim: embedding dimension (sets the index offset)
    :param tau: embedding delay
    :param eps: small constant guarding against division by zero
    :return: array of n reconstructed values
    """
    n_points = k_dist.shape[0]
    offset = (emb_dim - 1) * tau
    x_tilde = np.empty((n_points))
    for row in range(n_points):
        # exponential weighting relative to the nearest neighbour's distance
        raw = np.exp(-k_dist[row, :] / (k_dist[row, 0] + eps))
        weights = raw / np.sum(raw)
        x_tilde[row] = np.sum(weights * x[k_idx[row, :] + offset])
    return x_tilde
def estimate(x, y, length=None, emb_dim=None, tau=1, plot=False):
    """
    Estimate the time series x from the information contained in y.

    Note: discard some of the initial time series (via set_params) when using
    dynamic systems such as the logistic map.

    :return: (rho, estimated x)
    """
    x_arr = np.array(x)
    y_arr = np.array(y)
    dim = emb_dim if emb_dim else CCM_PARAM["emb_dim"]
    emb = embedding(y_arr, dim, tau=tau)
    n = length if length else emb.shape[0]
    emb = emb[:n]
    # x needs (dim-1)*tau extra points to cover the embedding offset
    return estimate_from_emb(x_arr[:n + (dim - 1) * tau], emb, tau, plot=plot)
def estimate_from_emb(x, emb, tau=1, plot=False):
    """Estimate x from a precomputed embedding of y; return (rho, x_tilde)."""
    n_points, dim = emb.shape[0], emb.shape[1]
    dist_arr, dist_idx = calic_all_dist(emb)
    k_dist, k_idx = k_nearest(dist_arr, dist_idx, n_points, dim + 1)
    x_tilde = xmap(x, k_dist, k_idx, dim, tau)
    offset = (dim - 1) * tau
    if plot:
        plt.scatter(x[offset:], x_tilde)
        plt.show()
    rho = correlation(x[offset:], x_tilde)
    return rho, x_tilde
def estimate_using_bootstrap(x, y, length="auto", emb_dim=5, tau=1):
    """
    estimate x from y to judge x->y cause

    :param x:
    :param y:
    :param length: library size, or "auto" for the full embedding length
    :param emb_dim:
    :param tau:
    :return: (rho, x_tilde)
    """
    emb_y = embedding(y, emb_dim, tau)
    n_max = len(emb_y)
    n = n_max if length == "auto" else length
    return estimate_from_emb_random(x, emb_y, n, n_max, emb_dim, tau)
def estimate_from_emb_random(x, emb_y, length, max_length, emb_dim, tau):
    """Estimate from a random subsample (bootstrap library) of the embedding."""
    offset = (emb_dim - 1) * tau
    chosen = np.random.choice(np.arange(max_length), length, replace=False)
    emb_sample = emb_y[chosen]
    x_sample = x[chosen + offset]
    # The np.empty padding is never read downstream (xmap/correlation skip the
    # first `offset` entries) -- kept to preserve the original behaviour.
    x_padded = np.concatenate([np.empty(offset), x_sample])
    return estimate_from_emb(x_padded, emb_sample, tau)
def convergent(x, y, start=0, length=None, emb_dim=None, tau=1, min_length=None, estimation_freq=1, option="linear"):
    """
    See whether rho increases with more of the time series, using x[start:start+length].

    :param x:
    :param y:
    :param start:
    :param length:
    :param emb_dim:
    :param min_length:
    :param estimation_freq: step between successive library sizes L
    :param option: "linear" (first L points) or "random" (random library)
    :return: (L_array, rho_array)
    :raises ValueError: for an unknown option
    """
    # BUGFIX: an unknown option previously fell through both branches and
    # reused a stale (or undefined) rho; validate up front instead.
    if option not in ("linear", "random"):
        raise ValueError(f"unknown option: {option!r}")
    x = np.array(x)
    y = np.array(y)
    x = x[start:]
    y = y[start:]
    if not emb_dim:
        emb_dim = CCM_PARAM["emb_dim"]
    if not min_length:
        min_length = CCM_PARAM["discard"]
    emb = embedding(y, emb_dim, tau)
    dist_arr, dist_idx = calic_all_dist(emb)
    length = emb.shape[0] if not length else length
    # it is meaningless to estimate x(i) with small i
    L_array = np.arange(min_length, length, estimation_freq)
    rho_array = np.empty(L_array.shape[0])
    for i, L in tqdm(enumerate(L_array)):
        if option == "linear":
            k_dist, k_idx = k_nearest(dist_arr, dist_idx, L, emb_dim + 1, option=option)
            x_tilde = xmap(x, k_dist, k_idx, emb_dim, tau)
            rho = correlation(x_tilde, x[(emb_dim - 1) * tau:(emb_dim - 1) * tau + L])
        else:  # option == "random"
            # k_idx only contains indices within the random library, so xmap
            # reconstructs exactly x[random_cand + offset].
            k_dist, k_idx, random_cand = k_nearest(dist_arr, dist_idx, L, emb_dim + 1, option=option)
            x_tilde = xmap(x, k_dist, k_idx, emb_dim, tau)
            rho = correlation(x_tilde, x[random_cand + (emb_dim - 1) * tau])
        rho_array[i] = rho
    return L_array, rho_array
def convergent_random(x, y, start=0, length=None, emb_dim=None, tau=1, min_length=None, estimation_freq=1, num=10):
    """Average `num` random-library convergence curves; return (L, mean rhos)."""
    collected = []
    for _ in range(num):
        L, rho = convergent(x, y, start=start, length=length, emb_dim=emb_dim, tau=tau,
                            min_length=min_length, estimation_freq=estimation_freq,
                            option="random")
        collected.append(rho)
    return L, np.array(collected).mean(axis=0)
def convergent_emb(x, emb_y, length=None, min_length=None, estimation_freq=1, tau=1, option="linear"):
    """Convergence curve computed directly from a precomputed embedding of y.

    :param x: time series to reconstruct
    :param emb_y: (n, emb_dim) embedding of y
    :param length: maximum library size (defaults to the embedding length)
    :param min_length: smallest library size (defaults to 2*(emb_dim+1))
    :param estimation_freq: step between successive library sizes
    :param tau: embedding delay
    :param option: "linear" or "random"
    :return: (L_array, rho_array)
    """
    emb_dim = emb_y.shape[1]
    # BUGFIX: the original condition was inverted
    # (`min_length if not min_length else (emb_dim+1)*2`), which kept None
    # (crashing np.arange) and discarded any caller-supplied value.
    if min_length is None:
        min_length = (emb_dim + 1) * 2
    length = emb_y.shape[0] if not length else length
    dist_arr, dist_idx = calic_all_dist(emb_y)
    L_array = np.arange(min_length, length, estimation_freq)
    rho_array = np.empty(L_array.shape[0])
    for i, L in enumerate(L_array):
        if option == "linear":
            k_dist, k_idx = k_nearest(dist_arr, dist_idx, L, emb_dim + 1)
            x_tilde = xmap(x, k_dist, k_idx, emb_dim, tau)
            rho = correlation(x_tilde, x[(emb_dim - 1) * tau:(emb_dim - 1) * tau + L])
        elif option == "random":
            k_dist, k_idx, random_cand = k_nearest(dist_arr, dist_idx, L, emb_dim + 1, option=option)
            x_tilde = xmap(x, k_dist, k_idx, emb_dim, tau)
            rho = correlation(x_tilde, x[random_cand + (emb_dim - 1) * tau])
        else:
            # previously an unknown option silently reused a stale rho
            raise ValueError(f"unknown option: {option!r}")
        rho_array[i] = rho
    return L_array, rho_array
def convergent_random_emb(x, emb_y, length=None, emb_dim=None, tau=1, min_length=None, estimation_freq=1, num=10):
    """Average `num` random-library curves from a precomputed embedding.

    Note: `emb_dim` is accepted for interface compatibility but unused -- the
    dimension is taken from emb_y inside convergent_emb.
    """
    runs = []
    for _ in range(num):
        L, rho = convergent_emb(x, emb_y, length=length, tau=tau, min_length=min_length,
                                estimation_freq=estimation_freq, option="random")
        runs.append(rho)
    return L, np.array(runs).mean(axis=0)
def convergence_plot(x, y, start=0, length=100, emb_dim=None, discard=None, save=False, sfx_xtoy="XtoY", sfx_ytox="YtoX", estimation_freq=1, tau=1):
    """Plot the rho-vs-L convergence curve for both causal directions."""
    # X->Y first, then Y->X, each with its own filename suffix.
    for source, target, suffix in ((x, y, sfx_xtoy), (y, x, sfx_ytox)):
        L_arr, rho_arr = convergent(source, target, start, length, emb_dim=emb_dim,
                                    min_length=discard, estimation_freq=estimation_freq, tau=tau)
        _plot_rho(L_arr, rho_arr, save, suffix)
def _plot_scatter(x, x_tilde):
    """Scatter the true values against the cross-map predictions."""
    plt.scatter(x, x_tilde)
    plt.xlabel("real")
    plt.ylabel("predict")
    plt.show()
def _plot_rho(L_array, rho_array, save, savesuffix):
    """Plot rho as a function of library size L and optionally save a PNG.

    :param L_array: library sizes
    :param rho_array: correlation values per library size
    :param save: save the figure if truthy (and savesuffix is non-empty)
    :param savesuffix: filename suffix under CCM_PARAM["save_path"]
    """
    y = pd.Series(rho_array)
    # BUGFIX: fillna returns a new Series; the original discarded the result,
    # so NaN gaps were never actually forward-filled.
    y = y.ffill(limit=1)
    plt.plot(L_array, y)
    plt.xlabel("L")
    plt.ylabel(r"$\rho$")
    if save and savesuffix:
        plt.savefig(CCM_PARAM["save_path"] + savesuffix + ".png")
    plt.show()
def calic_all_dist(emb):
    """
    Calculate all pairwise distances and the row-wise ordering indices.

    :param emb: (n, d) embedding matrix
    :return: (dist_arr, dist_idx) where dist_idx[i] sorts row i ascending
    """
    dist_arr = calic_dist_l2(emb, emb)
    # Vectorized row-wise argsort (replaces a per-row Python loop).
    dist_idx = np.argsort(dist_arr, axis=1)
    return dist_arr, dist_idx
def k_nearest(dist_arr, dist_idx, L, k, option="linear"):
    """Return the k nearest neighbours for each point in a library of size L.

    :param dist_arr: (n, n) pairwise distance matrix
    :param dist_idx: (n, n) row-wise argsort of dist_arr
    :param L: library size (number of points considered)
    :param k: number of neighbours per point
    :param option: "linear" (first L points) or "random" (random library)
    :return: (k_dist, k_idx) -- plus random_cand for option="random"
    :raises ValueError: for an unknown option
    """
    k_idx = np.empty((L, k), dtype=int)
    k_dist = np.empty((L, k), dtype=float)
    L_max = dist_arr.shape[0]
    # L == k would force the k-nearest set to include the query point itself.
    assert L > k
    if option == "linear":
        for i in range(L):
            # neighbours restricted to the first L points, excluding i itself
            # (d(i, i) = 0 would otherwise always be "nearest")
            idx = dist_idx[i, (dist_idx[i, :] < L) & (dist_idx[i, :] != i)]
            idx = idx[0:k]
            k_idx[i, :] = idx
            k_dist[i, :] = dist_arr[i, idx]
        return k_dist, k_idx
    elif option == "random":
        random_cand = np.sort(np.random.choice(L_max, L, replace=False))
        for i in range(L):
            idx_cand = dist_idx[random_cand[i], random_cand]
            idx = idx_cand[idx_cand != random_cand[i]][:k]
            k_idx[i, :] = idx
            # NOTE(review): distances are read from row `i`, not row
            # `random_cand[i]` as idx_cand suggests -- confirm this is intended.
            k_dist[i, :] = dist_arr[i, idx]
        return k_dist, k_idx, random_cand
    else:
        # The unreachable `return None` that followed this raise was removed.
        raise ValueError
def surrogate_test(x, y, emb_dim, tau=1, num=20, p=0.05, seed=None):
    """Twin-surrogate significance test of the y->x cross-map skill.

    :return: dict with keys "p" (p-value), "judge" (significant?),
        "rhos" (sorted surrogate rhos) and "true" (rho of the real data)
    """
    if seed:
        np.random.seed(seed)
    surrogates = twin_surrogate(y, emb_dim, delta="auto", num=num)
    rhos = np.empty(len(surrogates))
    for i, emb_y in tqdm(enumerate(surrogates)):
        rhos[i], _ = estimate_from_emb(x, emb_y)
    rho_true, _ = estimate(x, y, emb_dim=emb_dim)
    rhos.sort()
    # fraction of surrogates at least as good as the real rho
    pvalue = 1 - len(rhos[rhos < rho_true]) / len(rhos)
    return {
        "p": pvalue,
        "judge": pvalue < p,
        "rhos": rhos,
        "true": rho_true
    }
def dual_surrogate_test(x, y, emb_dim, tau=1, num=20, p=0.05, seed=None):
    """Run the twin-surrogate test in both causal directions."""
    if seed:
        np.random.seed(seed)
    # Dict literals evaluate in order: X->Y first, then Y->X (as before).
    return {
        "X->Y": surrogate_test(x, y, emb_dim, tau=tau, num=num, p=p, seed=None),
        "Y->X": surrogate_test(y, x, emb_dim, tau=tau, num=num, p=p, seed=None),
    }
def regression_model(x_data, y_data, iter=3001, plot=True, verbose=0):
    """Fit y = a + b * exp(-c * x) to (x_data, y_data) with Adam (TF1 graph mode).

    :param x_data: 1-D array of x values (reshaped to a column)
    :param y_data: 1-D array of y values (reshaped to a column)
    :param iter: number of gradient steps
    :param plot: show fit and loss plots if True
    :param verbose: print progress every 500 steps if truthy
    :return: (loss_log array, fitted [a, b, c] parameter values)
    """
    x_train = x_data.reshape(-1, 1)
    y_train = y_data.reshape(-1, 1)
    # graph construction (TF1 placeholders/session API)
    xt = tf.placeholder(tf.float32, [None, 1])
    yt = tf.placeholder(tf.float32, [None, 1])
    # NOTE(review): the second positional arg of tf.Variable is `trainable`;
    # passing the string "other_variable" makes it truthy -- confirm intent.
    params = tf.Variable([np.max(y_train), 0.0, 0.0], "other_variable", dtype=tf.float32)
    y = params[0] + tf.multiply(params[1], tf.exp(tf.multiply(tf.multiply(-1.0, params[2]), xt)))
    loss = tf.reduce_sum(tf.square(y - yt))
    train = tf.train.AdamOptimizer().minimize(loss)
    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)
    loss_log = []
    # NOTE(review): prev_loss is written but never read.
    prev_loss = np.inf
    for step in range(iter):
        loss_log.append(sess.run(loss, feed_dict={xt: x_train, yt: y_train}))
        # refresh params_val every 500 steps (step 0 included, so it is
        # always bound before being used below)
        if step % 500 == 0:
            loss_val = sess.run(loss, feed_dict={xt: x_train, yt: y_train})
            params_val = sess.run(params)
            if verbose:
                print('Step: {}, Loss: {}, params: {}'.format(step, loss_val, params_val))
            prev_loss = loss_val
        sess.run(train, feed_dict={xt: x_train, yt: y_train})
    loss_log = np.array(loss_log)
    # evaluate the fitted exponential on the training x values
    func = lambda a, b, c, x: a + b * np.exp(-c * x).reshape(-1, 1)
    y_ret = func(*params_val, x_train)
    if plot:
        plt.scatter(x_train, y_ret)
        plt.scatter(x_train, y_train)
        plt.show()
        plt.plot(loss_log)
        plt.show()
    return loss_log, params_val
def ccm_test(x, y, emb_dim="auto", l_0="auto", l_1="auto", tau=1, n=10, mean_num=10, max_dim=10):
    """
    estimate x from y to judge x->y cause

    :param x:
    :param y:
    :param l_0: small library size ("auto" = 10% of usable points)
    :param l_1: large library size ("auto" = 90% of usable points)
    :param emb_dim: embedding dimension ("auto" = decide_dim)
    :param tau:
    :param n: number of twin surrogates
    :param mean_num: bootstrap repetitions averaged per surrogate
    :return: dict with "p_value", "rhos" and "raw_rhos"
    """
    if emb_dim == "auto":
        emb_dim = decide_dim(x, y)
    if l_0 == "auto":
        l_0 = int(np.ceil((len(x) - emb_dim + 1) * 0.1))
    if l_1 == "auto":
        l_1 = int(np.ceil((len(x) - emb_dim + 1) * 0.9))
    surrogates = twin_surrogate(y, emb_dim, num=n)
    max_length = len(surrogates[0])
    raw_rhos = []
    deltas = []
    for i in tqdm(range(n)):
        acc = 0
        for _ in range(mean_num):
            # real-data skill at both library sizes
            rho_0, _ = estimate_using_bootstrap(x, y, length=l_0, emb_dim=emb_dim, tau=tau)
            rho_1, _ = estimate_using_bootstrap(x, y, length=l_1, emb_dim=emb_dim, tau=tau)
            # surrogate skill at both library sizes
            rho_s_0, _ = estimate_from_emb_random(x, surrogates[i], length=l_0, emb_dim=emb_dim, tau=tau, max_length=max_length)
            rho_s_1, _ = estimate_from_emb_random(x, surrogates[i], length=l_1, emb_dim=emb_dim, tau=tau, max_length=max_length)
            raw_rhos.append([rho_0, rho_1, rho_s_0, rho_s_1])
            # convergence of the real data minus convergence of the surrogate
            acc += rho_1 - rho_0 - (rho_s_1 - rho_s_0)
        deltas.append(acc / mean_num)
    deltas = np.array(deltas)
    p = 1 - (len(deltas[deltas > 0]) / n)
    return {
        "p_value": p,
        "rhos": deltas,
        "raw_rhos": raw_rhos
    }
def ccm_regression_test(x, y, start=0, length=1000, surrogate_num=10, emb_dim=2, estimation_freq=1):
    """Compare the exponential-fit loss of the true convergence curve against surrogates."""
    x = x[start:start + length]
    y = y[start:start + length]
    L_array, rho_array = convergent(x, y, emb_dim=emb_dim)
    loss_true, param_true = regression_model(L_array, rho_array, plot=False)
    loss_last = loss_true[-1]
    surrogates = twin_surrogate(y, emb_dim=emb_dim, num=surrogate_num)
    loss_surs = np.empty(surrogate_num)
    param_data = []
    print("Caliculate surrogate's loss")
    for i, emb_y in tqdm(enumerate(surrogates)):
        L_s, rho_s = convergent_emb(x, emb_y, length=len(emb_y), tau=1, min_length=10,
                                    estimation_freq=estimation_freq)
        losses, params = regression_model(L_s, rho_s, plot=False)
        loss_surs[i] = losses[-1]
        param_data.append(params)
    # fraction of surrogates whose fit is worse than the real curve's
    p = 1 - (len(loss_surs[loss_surs > loss_last]) / surrogate_num)
    return p, loss_last, param_true, loss_surs
def ccm_random_regression_test(x, y, start=0, length=1000, num=10, surrogate_num=10, emb_dim=2, estimation_freq=1):
    """
    CCM significance test using randomized library sampling plus regression fit.

    Like ccm_regression_test, but the convergence curves are estimated with
    `num` random library draws per length, and the fitted regression parameters
    themselves are also tested against the surrogate distribution.

    :param x: candidate cause time series
    :param y: candidate effect time series
    :param start: index of the first sample of the analysis window
    :param length: analysis window length
    :param num: number of random library draws per library length
    :param surrogate_num: number of twin surrogates
    :param emb_dim: embedding dimension
    :param estimation_freq: library-length step used for the curves
    :return: dict with p-values, losses, fitted parameters and the real curve
    """
    x = x[start:start + length]
    y = y[start:start + length]

    L_array_true, rho_array_true = convergent_random(x, y, emb_dim=emb_dim, num=num,
                                                     estimation_freq=estimation_freq)
    loss_true, param_true = regression_model(L_array_true, rho_array_true, plot=False)
    loss_last = loss_true[-1]  # final loss of the fit on the real data

    ys = twin_surrogate(y, emb_dim=emb_dim, num=surrogate_num)
    loss_surs = np.empty(surrogate_num)
    param_data = []
    print("Calculate surrogate's loss")  # typo fixed ("Caliculate")
    # total= lets tqdm render a proper progress bar for the enumerate iterator
    for i, emb_y in tqdm(enumerate(ys), total=surrogate_num):
        L_array, rho_array = convergent_random_emb(x, emb_y, tau=1, min_length=10,
                                                   estimation_freq=estimation_freq, num=num)
        loss_log, params = regression_model(L_array, rho_array, plot=False)
        loss_surs[i] = loss_log[-1]
        param_data.append(params)
    param_data = np.array(param_data)
    # NOTE(review): assumes regression_model's params are ordered with the
    # convergence parameter at index 0 and gamma at index 2 — confirm upstream.
    gammas = param_data[:, 2]
    convs = param_data[:, 0]
    gamma_test = 1 - (len(gammas[gammas < param_true[2]]) / surrogate_num)
    conv_test = 1 - (len(convs[convs < param_true[0]]) / surrogate_num)
    # one-sided p-value: fraction of surrogates that fit no better than the real data
    p = 1 - (len(loss_surs[loss_surs > loss_last]) / surrogate_num)
    return {
        "p_value": p,
        "loss_last": loss_last,
        "loss_surs": loss_surs,
        "param_true": param_true,
        "param_data": param_data,
        "L_array": L_array_true,
        "rho_array": rho_array_true,
        "gamma_test": gamma_test,
        "conv_test": conv_test,
    }
|
<filename>rabbitmq_asynqp/rabbitmq_consumer.py<gh_stars>0
import asyncio
import asynqp
class EventConsumer:
    """Callable message handler: forwards each message's JSON body to a
    user-supplied callback and acknowledges the message afterwards."""

    def __init__(self, callback_fn, queue):
        # Keep the callback and the queue name (the latter only for log output).
        self.callback_fn = callback_fn
        self.queue = queue

    def __call__(self, msg):
        """
        Whenever call is called same message content is processed and ack is sent to rabbit mq
        :param msg: Rabbitmq message object
        :return: None
        """
        payload = msg.json()
        self.callback_fn(payload)
        msg.ack()

    def on_error(self, exc):
        """Report a consumption error for this queue."""
        message = "While consuming QUEUE:{queue} following error occurred: {exc}"
        print(message.format(queue=self.queue, exc=exc))

    def on_cancel(self):
        """Report that this queue's consumer is shutting down."""
        print("Stopping consumer for QUEUE:{queue}".format(queue=self.queue))
async def connect_and_consume(rabbitmq_config, callback_fn, queue):
    """
    Creates a new connection and starts Consumer. Do not use this directly.

    :param rabbitmq_config: Rabbit mq config dict for connection
        {'host': , 'port': , 'username': , 'password'}
    :param callback_fn: callable invoked with each message's decoded JSON body
    :param queue: rabbit mq QUEUE name to be consumed (str)
    :return: Rabbitmq connection object, or None if connecting or setting up
        the consumer failed
    """
    try:
        connection = await asynqp.connect(rabbitmq_config['host'], rabbitmq_config['port'],
                                          rabbitmq_config['username'], rabbitmq_config['password'])
    except Exception as e:
        # Broad catch: any connect failure is reported and mapped to None so the
        # caller (reconnector) can retry instead of crashing.
        print(e)
        return None
    try:
        channel = await connection.open_channel()
        # Declare the queue to consume from; asynqp presumably creates it if it
        # does not already exist — TODO confirm against asynqp docs.
        amqp_queue = await channel.declare_queue(queue)
        await amqp_queue.consume(EventConsumer(callback_fn, queue))
    except asynqp.AMQPError as err:
        print("Could not consume QUEUE:{queue}".format(queue=queue))
        print(err)
        # Close the half-open connection before signalling failure.
        await connection.close()
        return None
    return connection
async def reconnector(rabbitmq_config, callback_fn, queue, queue_settings=None):
    """
    Keep a consumer alive by polling the connection and reconnecting on loss.
    Don't use this directly.

    :param rabbitmq_config: Rabbit mq config dict for connection
        {'host': , 'port': , 'username': , 'password'}
    :param callback_fn: callable invoked with each message's decoded JSON body
    :param queue: rabbit mq QUEUE name (str)
    :param queue_settings: optional dict with 'reconnect_backoff_secs' and
        'connection_check_polling_secs'; defaults are used when None
    :return: None (runs until the task is cancelled)
    """
    connection = None
    if queue_settings is None:
        queue_settings = {'reconnect_backoff_secs': 1, 'connection_check_polling_secs': 5}
    connection_failures = 0
    try:
        while True:
            if connection is None or connection.is_closed():
                try:
                    connection = await connect_and_consume(rabbitmq_config, callback_fn, queue)
                except (ConnectionError, OSError) as e:
                    connection = None
                    print(e)
                if connection is None:
                    # Count the failure here: connect_and_consume reports most
                    # errors by returning None rather than raising, so counting
                    # only in the except branch would leave the backoff at zero.
                    connection_failures += 1
                    print("Unable to establish connection and consume QUEUE:{queue}, failure count"
                          ":{failures}".format(queue=queue, failures=connection_failures))
                    # Linear backoff proportional to consecutive failures.
                    await asyncio.sleep(queue_settings['reconnect_backoff_secs'] * connection_failures)
                else:
                    # Successful (re)connect: reset so future backoff starts small.
                    connection_failures = 0
            # poll connection state check
            await asyncio.sleep(queue_settings["connection_check_polling_secs"])
    except asyncio.CancelledError:
        if connection is not None:
            await connection.close()
        print("Connection closed for consumer in QUEUE: {queue}".format(queue=queue))
def consume_message(rabbitmq_config, queue, callback_fn, queue_settings):
    """
    Consumer creation for QUEUE. Blocks the calling thread on a fresh event
    loop until the reconnector task finishes or is cancelled.

    :param rabbitmq_config: Rabbit mq config dict for connection
        {'host': , 'port': , 'username': , 'password'}
    :param queue: rabbit mq QUEUE name (str)
    :param callback_fn: consumer function invoked with each message body
    :param queue_settings: settings dict forwarded to reconnector (or None)
    :return: None
    """
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    # Start main connecting and consuming task in the background
    reconnect_task = loop.create_task(reconnector(rabbitmq_config, callback_fn, queue, queue_settings))
    try:
        loop.run_until_complete(reconnect_task)
    except asyncio.CancelledError:
        reconnect_task.cancel()
        try:
            # Waiting on the just-cancelled task re-raises CancelledError;
            # swallow it so shutdown continues and the message below prints.
            loop.run_until_complete(reconnect_task)
        except asyncio.CancelledError:
            pass
        print("Asyncio coroutine cancelled")
    finally:
        # Public API (asyncio.all_tasks) instead of the private asyncio.tasks path.
        for task in asyncio.all_tasks(loop):
            task.cancel()
        loop.close()
        print("Consumer for QUEUE:{queue} stopped.".format(queue=queue))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.