text stringlengths 957 885k |
|---|
<filename>SBDDDOTParser.py
import itertools
import os
import re
import shutil
import time
from datetime import datetime
import pexpect as pexpect
from networkx import DiGraph, disjoint_union, set_node_attributes, Graph
from pexpect import EOF, TIMEOUT
import config
from BDDParser import BDDParser
from Benchmark import Benchmark
class SBDDDOTParser(BDDParser):
    """Builds a shared BDD (SBDD) for a benchmark via ABC and parses ABC's DOT
    dump into a networkx graph."""

    def __init__(self, benchmark: Benchmark):
        """Set up file paths inside the ABC working directory and remove any
        stale DOT file left over from a previous run."""
        super(SBDDDOTParser, self).__init__(benchmark)
        # The DOT file is named after the model when one is set, otherwise
        # after the benchmark itself.
        if benchmark.model is not None:
            self.dot_file = config.abc_path.joinpath('{}.dot'.format(benchmark.model))
        else:
            self.dot_file = config.abc_path.joinpath('{}.dot'.format(benchmark.name))
        self.abc_file_path = config.abc_path.joinpath(self.benchmark.file_path.name)
        self.bdd_show_time = 0
        self.dot_bdd = None
        if self.dot_file.exists():
            os.remove(self.dot_file)

    def _write_files(self):
        """Drive ABC: read the benchmark, collapse it into a BDD and dump it as DOT.

        Returns:
            The DOT file content as a string.
        Raises:
            Exception: on ABC EOF or when construction exceeds the time limit.
        """
        print("\tStarted ABC")
        file_name = self.benchmark.file_path.name
        bdd_construct_start_time = time.time()
        process = pexpect.spawn(config.abc_cmd, cwd=str(config.abc_path))
        process.sendline('read "{}"; collapse;'.format(file_name))
        try:
            # BUG FIX: the original if/else had two byte-identical branches;
            # pexpect accepts timeout=None to wait indefinitely, so one call
            # covers both the limited and the unlimited case.
            index = process.expect(['abc 03'], timeout=config.time_limit_bdd)
            bdd_construct_stop_time = time.time()
            self.bdd_construct_time = bdd_construct_stop_time - bdd_construct_start_time
            self.log += 'BDD construct time (s): {}\n'.format(self.bdd_construct_time)
            if index == 0:
                bdd_show_start_time = time.time()
                process.sendline('show_bdd -g;')
                # Poll until ABC has written the DOT file; sleep briefly to
                # avoid the original hot busy-wait (100% CPU spin).
                while not self.dot_file.exists():
                    time.sleep(0.05)
                bdd_show_stop_time = time.time()
                self.bdd_show_time = bdd_show_stop_time - bdd_show_start_time
                # Grace period so ABC can finish flushing the file.
                time.sleep(1)
                dot_bdd = self._read_file()
                if self.dot_file.exists():
                    os.remove(self.dot_file)
                while self.dot_file.exists():
                    time.sleep(0.05)
            time.sleep(1)
            print("\tStopped ABC")
            return dot_bdd
        except EOF:
            raise Exception("\tABC EOF error.\n")
        except TIMEOUT:
            self.log += 'BDD construct time (s): {}\n'.format(config.time_limit_bdd)
            self.log += 'Timeout'
            config.log.add(self.get_log())
            raise Exception("\tABC timeout error.\n")

    def _read_file(self):
        """Return the full text of the generated DOT file.

        Reads the file in one call instead of the original quadratic
        line-by-line string concatenation.
        """
        with open(self.dot_file, 'r') as file:
            return file.read()

    def parse(self) -> Graph:
        """Parse the ABC DOT dump into a DiGraph and merge it into the
        accumulated benchmark graph, which is returned."""
        # NOTE(review): kept for backward compatibility — the original module
        # exposes the last BDD's output variables via this global.
        global output_variables
        print("Started constructing SBDDs from benchmark")
        print("\t{}".format(datetime.now()))
        self.log += 'BDD type: SBDD\n'
        # Copy the file from its current location to abc folder
        shutil.copy(self.benchmark.file_path, self.abc_file_path)
        # Generate BDD using ABC
        dot_content = self._write_files()
        print("\tStarted parsing SBDD")
        bdd = DiGraph()
        # Each "rank = same" cluster is one BDD level: first entry is the
        # input variable name, the rest are the node ids on that level.
        raw_layers = list(re.findall(r'{\s*rank\s*=\s*same;([^}]+);\n}', dot_content))
        layers = []
        for raw_layer in raw_layers:
            layers.append(raw_layer.replace(' ', '').replace('\n', '').replace('"', '').split(';'))
        # Remove function output
        layers = layers[:-1]
        # Strip the 'rank' and 'node' objects
        stripped_content = re.sub(r'{\s(?:rank|node)[^}]+}', '', dot_content)
        node_variable = dict()
        raw_output_variable_nodes = re.findall(r'"\s*([\w\d\[\]]+)\s*"\s->\s"([\w\d]+)"\s\[style\s=\ssolid\]',
                                               stripped_content)
        # Map each node id to the list of output variables rooted at it.
        raw_output_variable_nodes = [(i[1], i[0]) for i in raw_output_variable_nodes]
        output_variable_nodes = dict()
        for node, output_variable in raw_output_variable_nodes:
            output_variable_nodes.setdefault(node, []).append(output_variable)
        # Find all edges defined by NODE -> NODE with optional [STYLE] parameter and
        # leave nodes, defined by NODE [LABEL]
        raw_edges = re.findall(
            r'"([\w\d\s\[\]]+)"\s->\s"([\w\d\s]+)"(?:\s\[style\s=\s(solid|dashed)\])?|"([\w\d\s]+)"\s\[label\s=\s"(\d)"\]',
            stripped_content)
        # Build BDD (directed graph) by adding the nodes
        for layer in layers:
            input_variable = layer[0]
            for node in layer[1:]:
                node_variable[node] = input_variable
                if node in output_variable_nodes:
                    output_vars = output_variable_nodes[node]
                    bdd.add_node(node, output_variables=output_vars, variable=input_variable, terminal=False, root=True)
                else:
                    bdd.add_node(node, variable=input_variable, terminal=False, root=False)
        has_terminal_zero = list(filter(lambda tup: tup[4] == '0', raw_edges))
        has_terminal_one = list(filter(lambda tup: tup[4] == '1', raw_edges))
        # Normal BDD: both terminals present.
        if len(has_terminal_one) > 0 and len(has_terminal_zero) > 0:
            terminal_one = has_terminal_one[0][3]
            terminal_zero = has_terminal_zero[0][3]
            bdd.add_node(terminal_one, variable='1', terminal=True, root=False)
            bdd.add_node(terminal_zero, variable='0', terminal=True, root=False)
            output_variables = list(itertools.chain(*output_variable_nodes.values()))
            formatted_output_variables = [' {} '.format(output_variable) for output_variable in output_variables]
            # Add the edges of the graph; dashed = negative (low) edge,
            # anything else = positive (high) edge.
            for raw_edge in raw_edges:
                if raw_edge[0] != '' and raw_edge[0] not in formatted_output_variables:
                    variable = node_variable[raw_edge[0]]
                    bdd.add_edge(raw_edge[0], raw_edge[1], variable=variable,
                                 positive=(raw_edge[2] != 'dashed'))
            # We set the output variables for the terminal nodes
            if terminal_one in output_variable_nodes:
                set_node_attributes(bdd, {terminal_one: output_variable_nodes[terminal_one]}, "output_variables")
                set_node_attributes(bdd, {terminal_one: True}, "root")
            if terminal_zero in output_variable_nodes:
                set_node_attributes(bdd, {terminal_zero: output_variable_nodes[terminal_zero]}, "output_variables")
                set_node_attributes(bdd, {terminal_zero: True}, "root")
        # Has positive terminal, but no negative terminal: constant true
        elif len(has_terminal_one) > 0:
            terminal_one = has_terminal_one[0][3]
            output_variables = output_variable_nodes[terminal_one]
            bdd.add_node('0', variable='0', terminal=True, root=False)
            bdd.add_node(terminal_one, variable='1', terminal=True, root=True, output_variables=output_variables)
        # No positive terminal, but has a negative terminal: constant false
        elif len(has_terminal_zero) > 0:
            terminal_zero = has_terminal_zero[0][3]
            output_variables = output_variable_nodes[terminal_zero]
            bdd.add_node('1', variable='1', terminal=True, root=False)
            bdd.add_node(terminal_zero, variable='0', terminal=True, root=True, output_variables=output_variables)
        else:
            # BUG FIX: the exception was constructed but never raised.
            raise Exception("BDD must at least have a positive or a negative terminal.")
        directed_graph = bdd.copy(as_view=False)
        self.benchmark_graph.add_graph(directed_graph)
        self.directed_graph = disjoint_union(self.directed_graph, directed_graph)
        bdd_log = ''
        bdd_log += '\tOutput variables: {}\n'.format(', '.join(output_variables))
        bdd_log += '\tNodes: {}\n'.format(len(bdd.nodes))
        bdd_log += '\tEdges: {}\n'.format(len(bdd.edges))
        bdd_log += '\tBDD show time (s): {}\n'.format(self.bdd_show_time)
        self.log += bdd_log
        for line in bdd_log.splitlines():
            print("\t{}".format(line))
        print("\tStopped parsing SBDD")
        # ABC also drops PostScript renderings in its folder; clean them up.
        for file in os.listdir(config.abc_path):
            if file.endswith(".ps"):
                os.remove(os.path.join(config.abc_path, file))
        config.log.add(self.get_log())
        print("Stopped constructing SBDD from benchmark")
        print()
        return self.benchmark_graph
|
<gh_stars>1-10
import librosa
import numpy as np
import matplotlib.pyplot as plt
import acoustid
import chromaprint
from fuzzywuzzy import fuzz
from rednoise_fun import rednoise, wave2stft, stft2power, get_mean_bandwidths, get_var_bandwidths, stft2wave, savewave, get_date, matchvol, get_pitch,get_pitch2, get_pitch_mean, pitch_sqrt, sound_index, get_energy, get_energy_mean
def get_fingpr(filename):
    """Return the decoded chromaprint fingerprint of an audio file."""
    _duration, encoded = acoustid.fingerprint_file(filename)
    decoded, _version = chromaprint.decode_fingerprint(encoded)
    return decoded
def comp_fingpr(fingerprint1, fingerprint2):
    """Return the fuzzy similarity (0-100) between two fingerprints."""
    return fuzz.ratio(fingerprint1, fingerprint2)
def wave2fingerprint(wavefile, target, noise):
    """Clean a recorded mimic, match its volume to the target, and score it.

    Pipeline: STFT all three recordings, reduce background noise in the mimic
    using the noise profile, trim leading silence, match volume to the target,
    then compare acoustic fingerprints of the processed mimic and the target.

    Returns the fingerprint similarity scaled by 10, or None when no speech
    is detected in the recording.

    NOTE(review): writes intermediate WAVs under ./processed_recordings/ —
    presumably that directory must already exist; verify against callers.
    """
    # STFT / power / energy for the mimic recording.
    y_stft, y, sr = wave2stft(wavefile)
    y_power = stft2power(y_stft)
    y_energy = get_energy(y_stft)
    # Noise profile: per-band statistics used for spectral noise reduction.
    n_stft, ny, nsr = wave2stft(noise)
    n_power = stft2power(n_stft)
    n_energy = get_energy(n_stft)
    n_energy_mean = get_energy_mean(n_energy)
    # Target recording, used later for volume matching and fingerprinting.
    t_stft, ty, tsr = wave2stft(target)
    t_power = stft2power(t_stft)
    t_energy = get_energy(t_stft)
    npow_mean = get_mean_bandwidths(n_power)
    #npow_mean = get_rms(n_power)
    npow_var = get_var_bandwidths(n_power)
    # Apply noise reduction frame by frame across the mimic's STFT.
    y_stftred = np.array([rednoise(npow_mean,npow_var,y_power[i],y_stft[i]) for i in range(y_stft.shape[0])])
    # Locate the first frame where speech exceeds the noise floor.
    voice_start,voice = sound_index(y_energy,start=True,rms_mean_noise = n_energy_mean)
    if voice:
        print(voice_start)
        print(voice_start/len(y_energy))
        # Fractional position of speech onset, converted to seconds.
        start = voice_start/len(y_energy)
        start_time = (len(y)*start)/sr
        print("Start time: {} sec".format(start_time))
        # Drop everything before the detected speech onset.
        y_stftred_voice = y_stftred[voice_start:]
        voicestart_samp = stft2wave(y_stftred_voice,len(y))
        date = get_date()
        savewave('./processed_recordings/rednoise_speechstart_{}.wav'.format(date),voicestart_samp,sr)
        print('Removed silence from beginning of recording. File saved.')
    else:
        #handle no speech in recording, or too much background noise
        return None
    rednoise_samp = stft2wave(y_stftred_voice,len(y))
    date = get_date()
    savewave('./processed_recordings/rednoise_{}.wav'.format(date),rednoise_samp,sr)
    print('Background noise reduction complete. File saved.')
    print('Now matching volume to target recording.')
    # Scale the cleaned mimic so its power envelope matches the target's.
    y_stftmatched = matchvol(t_power,y_power,y_stftred_voice)
    matchvol_samp = stft2wave(y_stftmatched,len(y))
    savewave('./processed_recordings/rednoise2_{}.wav'.format(date),matchvol_samp,sr)
    print('Matched volume. File saved.')
    #compare fingerprints of processed mimic and target sound
    fp_target = get_fingpr(target)
    fp_mimic = get_fingpr('./processed_recordings/rednoise2_{}.wav'.format(date))
    fp_score = comp_fingpr(fp_mimic,fp_target)
    return fp_score*10
def get_score(mimic_sound, mimic_noise):
    """Score a mimic attempt as the scaled gap between sound and noise values.

    Each argument is a sequence whose first element is a (label, value) pair;
    only the value at index [0][1] is used.
    """
    sound_value = mimic_sound[0][1]
    noise_value = mimic_noise[0][1]
    return int((sound_value - noise_value) * 100)
|
import argparse
import getpass
import os
import sys
import crypto as Crypto
import keyfmt as KeyFormatting
import messaging as Messaging
import parsing as Parsing
'''Checking if encryption/decryption directories exist'''
# NOTE(review): captured at import time, but the helpers below each re-query
# sys.frozen via getattr themselves; this module-level value looks unused here.
frozen = getattr(sys,'frozen', None)
def check_decrypt_dir():
    '''Checking if /decryption/ directory exists'''
    if 'win32' in sys.platform or 'win64' in sys.platform:
        # Frozen (e.g. py2exe) builds live next to the executable, source runs
        # next to this module.
        if getattr(sys, 'frozen', False):
            base_dir = os.path.dirname(sys.executable)
        else:
            base_dir = os.path.dirname(__file__)
        norm_decrypt_path = os.path.normpath(base_dir + '/decrypted/')
        # EAFP: create and only re-raise when the directory truly is missing.
        try:
            os.makedirs(norm_decrypt_path)
        except OSError:
            if not os.path.isdir(norm_decrypt_path):
                raise
    else:
        if getattr(sys, 'frozen', False):
            anchor = os.path.abspath(sys.executable)
        else:
            anchor = os.path.abspath(__file__)
        norm_decrypt_path = os.path.normcase(os.path.dirname(anchor) + '/decrypted/')
        if not os.path.isdir(norm_decrypt_path):
            os.makedirs(norm_decrypt_path)
def check_encrypt_dir():
    '''Check if /encrypted/ directory exists'''
    if 'win32' in sys.platform or 'win64' in sys.platform:
        # Frozen builds resolve relative to the executable, source runs
        # relative to this module.
        if getattr(sys, 'frozen', False):
            base_dir = os.path.dirname(sys.executable)
        else:
            base_dir = os.path.dirname(__file__)
        norm_encrypt_path = os.path.normcase(base_dir + '/encrypted/')
        try:
            os.makedirs(norm_encrypt_path)
        except OSError:
            if not os.path.isdir(norm_encrypt_path):
                raise
    else:
        if getattr(sys, 'frozen', False):
            anchor = os.path.abspath(sys.executable)
        else:
            anchor = os.path.abspath(__file__)
        norm_encrypt_path = os.path.normcase(os.path.dirname(anchor) + '/encrypted/')
        if not os.path.isdir(norm_encrypt_path):
            os.makedirs(norm_encrypt_path)
def check_keyring_files():
    '''Check if /keyring/ directory and keyring files exists.

    Ensures the keyring directory exists next to the program and creates
    empty master/contact keyring files when missing.
    '''
    if 'win32' in sys.platform or 'win64' in sys.platform:
        if getattr(sys, 'frozen', False):
            localadditionpath = os.path.dirname(sys.executable)
        else:
            localadditionpath = os.path.dirname(__file__)
        keyring_path = localadditionpath + '/keyring/'
        norm_keyring_path = os.path.normcase(keyring_path)
        try:
            os.makedirs(norm_keyring_path)
        except OSError:
            if not os.path.isdir(norm_keyring_path):
                raise
        # BUG FIX: the files were opened via the CWD-relative path
        # 'keyring/...', which diverges from the computed keyring directory
        # whenever the process is started from another working directory.
        # Use the same normalized path as the POSIX branch below.
        open(os.path.join(norm_keyring_path, 'master_keyring.dat'), 'a+').close()
        open(os.path.join(norm_keyring_path, 'contact_keyring.dat'), 'a+').close()
    else:
        if getattr(sys, 'frozen', False):
            localadditionpath = os.path.abspath(sys.executable)
        else:
            localadditionpath = os.path.abspath(__file__)
        keyring_path = os.path.dirname(localadditionpath) + '/keyring/'
        norm_keyring_path = os.path.normcase(keyring_path)
        if not os.path.isdir(norm_keyring_path):
            os.makedirs(norm_keyring_path)
        open(norm_keyring_path + 'master_keyring.dat', 'a+').close()
        open(norm_keyring_path + 'contact_keyring.dat', 'a+').close()
'''Check if data folders exist'''
# Create the working directories and keyring files before any command runs.
check_decrypt_dir()
check_encrypt_dir()
check_keyring_files()
'''Simple test for PRNG'''
# Abort immediately if the crypto module's PRNG self-test fails.
if Crypto.run_test() is False:
    sys.exit()
class PathException(Exception):
    """Raised when a user-supplied file-system path does not exist.

    The offending path/message is kept on ``parameter`` for callers.
    """

    def __init__(self, value):
        self.parameter = value

    def __str__(self):
        return '%r' % (self.parameter,)
class IDException(Exception):
    """Raised when a key or contact ID is not present in the keyring.

    The offending ID/message is kept on ``parameter`` for callers.
    """

    def __init__(self, value):
        self.parameter = value

    def __str__(self):
        return '%r' % (self.parameter,)
def resolve_masterkey_pass(your_id):
    '''Resolves Master Key password using getpass.

    Prompts for the password when the key is locked; returns None when it
    is explicitly unlocked.
    '''
    if KeyFormatting.key_locked(your_id) is True:
        return getpass.getpass(prompt = 'Enter password for key {}:'.format(your_id))
    if KeyFormatting.key_locked(your_id) is False:
        return None
def encrypt_message(args):
    '''Encryption function.

    Reads the plaintext, validates the key IDs, encrypts for the given
    contacts (normal or incognito), optionally obfuscates the recipient IDs
    and MIME-encodes, then prints or writes the result.
    '''
    try:
        text_message = read_file(args.msg)
        check_masterkey_id(args.master_key)
        check_contact_id(args.id)
        # BUG FIX: the call target was mangled to "<PASSWORD>masterkey_pass";
        # resolve_masterkey_pass is the helper that resolves the key password.
        keypass = resolve_masterkey_pass(your_id = args.master_key)
        recipients = list(set(args.id))  # de-duplicate contact IDs
        if not args.incognito:
            m = Messaging.EncryptMessage(recipients,
                                         args.master_key,
                                         keypass)
            enc_msg, msg_name = m.encrypt_normal(text_message)
        elif args.incognito:
            m = Messaging.EncryptMessage(recipients,
                                         args.master_key,
                                         keypass)
            enc_msg, msg_name = m.encrypt_incognito(text_message)
        if args.hide_ids:
            # Second pass that hides the recipient IDs inside the payload.
            m = Messaging.EncryptMessage(recipients,
                                         args.master_key,
                                         keypass)
            enc_msg, msg_name = m.encrypt_obfuscated(enc_msg)
        if not args.binary:
            enc_msg = Messaging.message_encode(enc_msg)
        if not args.output:
            print_message('Message:\n' + ('--------\n\n') + enc_msg)
        elif args.output:
            write_file(args.output, enc_msg)
            print_message('Encrypted message to: ' + args.output)
    except KeyFormatting.KeyException as instance:
        print_error(instance.parameter)
    except PathException as instance:
        print_error(instance.parameter)
    except IDException as instance:
        print_error(instance.parameter)
    except IOError as instance:
        print_error('No such file or directory: {}'.format(args.msg))
def decrypt_message(args):
    '''Main decryption function.

    Reads the ciphertext (decoding unless --binary) and hands it to the
    type-dispatching decryptor; reports domain errors via print_error.
    '''
    try:
        message_text = read_file(args.msg)
        if args.binary:
            message = message_text
        else:
            message = Messaging.message_decode(message_text)
        decrypting(message)
    except Messaging.DecryptException as instance:
        print_error(instance.parameter)
    except Messaging.DecodeException as instance:
        print_error(instance.parameter)
    except KeyFormatting.KeyException as instance:
        print_error(instance.parameter)
    except Parsing.ParserException as instance:
        print_error(instance.parameter)
    except PathException as instance:
        print_error(instance.parameter)
    except IDException as instance:
        print_error(instance.parameter)
    except IOError as instance:
        print_error('No such file or directory: {}'.format(args.msg))
def decrypting(message, keypass = None):
    '''Actual decryption function.

    Dispatches on the detected message type; for obfuscated messages it tries
    every Master Key in turn and recurses on the recovered payload.

    BUG FIX: the message types were compared with "is", which tests object
    identity and only works by CPython string-interning accident — replaced
    with "==". The final branch also compared the builtin "type" instead of
    the local "msg_type".
    '''
    msg_type = Messaging.get_message_type(message)
    if msg_type == 'normal':
        id_list = Parsing.Parser().parse_rec_list(message)
        your_id = KeyFormatting.pick_any_masterkey_from_id_list(id_list)
        keypass = resolve_masterkey_pass(your_id)
        m = Messaging.DecryptMessage(your_id, keypass)
        text, status, info = m.decrypt_normal(message)
    elif msg_type == 'incognito':
        id_list = Parsing.Parser().parse_rec_list(message)
        your_id = KeyFormatting.pick_any_masterkey_from_id_list(id_list)
        keypass = resolve_masterkey_pass(your_id)
        m = Messaging.DecryptMessage(your_id, keypass)
        text, status, info = m.decrypt_incognito(message)
    elif msg_type == 'obfuscated':
        # Try every Master Key until one reveals the inner payload.
        master_key_list = KeyFormatting.retrieve_masterkey_id_list()
        for key_id in master_key_list:
            keypass = resolve_masterkey_pass(your_id = key_id)
            m = Messaging.DecryptMessage(key_id, keypass)
            decrypted_payload = m.decrypt_obfuscated(message)
            if not decrypted_payload is None:
                decrypting(decrypted_payload, keypass)
                return
            elif decrypted_payload is None:
                print_message('Failed to decrypt with {} '.format(key_id) +\
                              'Attempting to decrypt with next key...')
    elif msg_type == 'unknown':
        print_error('Not an ECP message!')
        return
    print_message(status + '\n\n' + info)
    # NOTE(review): relies on the module-level "args" set by the CLI parser.
    if args.output:
        write_file(args.output, text)
        print_message('Decrypted message to: ' + args.output)
    elif not args.output:
        print_message('Message:\n' + ('--------\n\n') + text)
def write_file(path, data):
    '''Checks if output directory exists and writes file to it'''
    parent = os.path.dirname(os.path.abspath(path))
    if not os.path.exists(parent):
        raise PathException('No such directory: {}'.format(parent))
    with open(path, 'wb') as out:
        out.write(data)
def write_signed(path, data):
    '''Checks if output directory exists and writes UTF-8 encoded file to it'''
    parent = os.path.dirname(os.path.abspath(path))
    if not os.path.exists(parent):
        raise PathException('No such directory: {}'.format(parent))
    # NOTE: relies on Python 2 str/bytes equivalence for the trailing newline.
    with open(path, 'wb') as out:
        out.write(data.encode('utf-8') + '\n')
def read_file(path):
    '''Checks if directory exists and reads file'''
    parent = os.path.dirname(os.path.abspath(path))
    if not os.path.exists(parent):
        raise PathException('No such directory: {}'.format(parent))
    with open(path, 'rb') as handle:
        return handle.read()
def read_document(path):
    '''Checks if directory exists and reads file in universal newline mode'''
    parent = os.path.dirname(os.path.abspath(path))
    if not os.path.exists(parent):
        raise PathException('No such directory: {}'.format(parent))
    # 'rU' = universal-newline text mode (Python 2 idiom).
    with open(path, 'rU') as handle:
        return handle.read()
def sign_message(args):
    '''Text signing function.

    Clearsigns a text document with the chosen Master Key, optionally with a
    timestamp, and prints or writes the signed message.
    '''
    try:
        check_masterkey_id(args.master_key)
        text_message = read_document(args.msg)
        keypass = resolve_masterkey_pass(your_id = args.master_key)
        signer = Messaging.SignData(args.master_key, keypass)
        if args.timestamp:
            signed_text = signer.sign_clearsign_t(text_message)
        else:
            signed_text = signer.sign_clearsign(text_message)
        print_message('Signed message with key: ' + args.master_key)
        if args.output:
            write_signed(args.output, signed_text)
        elif not args.output:
            print_message('Message:\n' + ('--------\n\n') + signed_text)
    except KeyFormatting.KeyException as instance:
        print_error(instance.parameter)
    except PathException as instance:
        print_error(instance.parameter)
    except IDException as instance:
        print_error(instance.parameter)
    except IOError as instance:
        print_error('No such file or directory: {}'.format(args.msg))
def sign_file(args):
    '''File signing function.

    Produces a detached signature for a file with the chosen Master Key,
    optionally timestamped, and prints or writes the signature.
    '''
    try:
        check_masterkey_id(args.master_key)
        file_data = read_file(args.file)
        if not args.timestamp:
            keypass = resolve_masterkey_pass(your_id = args.master_key)
            m = Messaging.SignData(args.master_key, keypass)
            file_sig = m.sign_detached(file_data)
        elif args.timestamp:
            keypass = resolve_masterkey_pass(your_id = args.master_key)
            m = Messaging.SignData(args.master_key, keypass)
            file_sig = m.sign_detached_t(file_data)
        print_message('Signed file with key: ' + args.master_key)
        if args.output:
            write_file(args.output, file_sig)
            # Typo fix: "sigature" -> "signature".
            print_message('Wrote signature to: ' + args.output)
        elif not args.output:
            print_message('Signature:\n' + ('----------\n\n') + file_sig)
    except KeyFormatting.KeyException as instance:
        print_error(instance.parameter)
    except PathException as instance:
        print_error(instance.parameter)
    except IDException as instance:
        print_error(instance.parameter)
    except IOError as instance:
        # BUG FIX: this sub-command has no --msg argument; report the file
        # that was actually being read.
        print_error('No such file or directory: {}'.format(args.file))
def verify_message(args):
    '''Text signature verification function.

    BUG FIX: signature types were compared with "is" (object identity, only
    working via CPython string interning); replaced with "==".
    '''
    try:
        message_text = read_file(args.msg)
        data, sig = Messaging.msg_signature_decode(message_text)
        sig_type = Messaging.get_signature_type(sig)
        if sig_type == 'clearsign':
            m = Messaging.VerifySignature()
            status, info = m.verify_clearsigned(data, sig)
            print_message(status + '\n\n' + info)
        elif sig_type == 'clearsign_t':
            m = Messaging.VerifySignature()
            status, info = m.verify_clearsigned_t(data, sig)
            print_message(status + '\n\n' + info)
        elif sig_type == 'unknown':
            print_error('Not an ECP signature!')
    except Messaging.DecryptException as instance:
        print_error(instance.parameter)
    except Messaging.DecodeException as instance:
        print_error(instance.parameter)
    except KeyFormatting.KeyException as instance:
        print_error(instance.parameter)
    except Parsing.ParserException as instance:
        print_error(instance.parameter)
    except IOError as instance:
        print_error('No such file or directory: {}'.format(args.msg))
        return
def verify_file(args):
    '''File signature verification function.

    BUG FIX: signature types were compared with "is" (object identity);
    replaced with "==". The IOError message referenced args.msg, which does
    not exist on this sub-command — report the file that failed to open.
    '''
    try:
        data = read_file(args.file)
        sig_file_data = read_file(args.sig)
        sig = Messaging.file_signature_decode(sig_file_data)
        sig_type = Messaging.get_signature_type(sig)
        if sig_type == 'detached':
            m = Messaging.VerifySignature()
            status, info = m.verify_detached(data, sig)
            print_message(status + '\n\n' + info)
        elif sig_type == 'detached_t':
            m = Messaging.VerifySignature()
            status, info = m.verify_detached_t(data, sig)
            print_message(status + '\n\n' + info)
        elif sig_type == 'unknown':
            print_error('Not an ECP signature!')
    except Messaging.DecryptException as instance:
        print_error(instance.parameter)
    except Messaging.DecodeException as instance:
        print_error(instance.parameter)
    except KeyFormatting.KeyException as instance:
        print_error(instance.parameter)
    except Parsing.ParserException as instance:
        print_error(instance.parameter)
    except IOError as instance:
        # Report whichever file actually failed (args.file or args.sig).
        print_error('No such file or directory: {}'.format(getattr(instance, 'filename', args.file)))
        return
def print_masterkeys(args):
    '''Prints users Master Keys'''
    lines = [u'Master keys:\n\n']
    for key_id in KeyFormatting.retrieve_masterkey_id_list():
        alias = KeyFormatting.retrieve_master_alias(key_id)
        pub = KeyFormatting.retrieve_master_key(key_id)
        lines.append(u'[{}]\n Public key: {}\n Alias: {}\n\n'.format(key_id, pub, alias))
    print_message(u''.join(lines))
def gen_masterkey(args):
    '''Generates new Master Key'''
    # An empty password means "no password" (None).
    keypass = getpass.getpass(prompt = 'Enter password for a new Master Key:') or None
    new_key_id = KeyFormatting.generate_new_master_key(passwd = keypass)
    print_message('Generated new key {}, edit alias for usability'.format(new_key_id))
def remove_masterkey_pass(args):
    '''Removes password protection from a Master Key'''
    try:
        check_masterkey_id(args.master_key)
        if KeyFormatting.key_locked(args.master_key) is True:
            entered_pass = getpass.getpass(prompt = 'Enter password for a key {}: '.format(args.master_key))
            KeyFormatting.remove_masterkey_pass(args.master_key, entered_pass)
        elif KeyFormatting.key_locked(args.master_key) is False:
            print_error('Cannot remove password - key is already unprotected')
            return
        print_message('Password has been removed for key: {}'.format(args.master_key))
    except IDException as instance:
        print_error(instance.parameter)
    except KeyFormatting.KeyException as instance:
        print_error(instance.parameter)
def set_masterkey_pass(args):
    '''Sets password for Master Key'''
    try:
        check_masterkey_id(args.master_key)
        if KeyFormatting.key_locked(args.master_key) is True:
            # Locked key: require the old password before changing it.
            keypass_old = getpass.getpass(prompt = 'Enter old password for a key {}: '.format(args.master_key))
            keypass_new = getpass.getpass(prompt = 'Enter new password for a key {}: '.format(args.master_key))
            KeyFormatting.change_privkey_pass(args.master_key, keypass_old, keypass_new)
        elif KeyFormatting.key_locked(args.master_key) is False:
            keypass_new = getpass.getpass(prompt = 'Enter new password for a key {}: '.format(args.master_key))
            KeyFormatting.set_masterkey_pass(args.master_key, keypass_new)
        print_message('New password has been set for key: {}'.format(args.master_key))
    except IDException as instance:
        print_error(instance.parameter)
    except KeyFormatting.KeyException as instance:
        print_error(instance.parameter)
def del_masterkey(args):
    '''Removes chosen Master Keys'''
    unique_ids = list(set(args.id))  # de-duplicate requested key IDs
    try:
        check_masterkey_id(unique_ids)
    except IDException as instance:
        print_error(instance.parameter)
        return
    KeyFormatting.delete_master_key(unique_ids)
    print_message('Key(s) {} deleted'.format(', '.join(unique_ids)))
def print_contacts(args):
    '''Prints all Contacts'''
    lines = [u'Contact keys:\n\n']
    for key_id in KeyFormatting.retrieve_contactkey_id_list():
        alias = KeyFormatting.retrieve_contact_alias(key_id)
        pub = KeyFormatting.retrieve_contact_key(key_id)
        lines.append(u'[{}]\n Public key: {}\n Alias: {}\n\n'.format(key_id, pub, alias))
    print_message(u''.join(lines))
def add_contactkey(args):
    '''Adds Contact public key to keyring.

    Validates the key, derives its ID, and stores it unless already present;
    optionally sets an alias.
    '''
    # NOTE(review): the original computed a whitespace-stripped copy of the
    # key (args.pubkey.replace(' ', '')) but never used it — every call below
    # took args.pubkey verbatim. The dead local is removed; if stripping was
    # the intent, the calls below should take the stripped value instead.
    validation = Crypto.check_pubkey(args.pubkey)
    if validation is True:
        new_key_raw = KeyFormatting.fmt_pub(args.pubkey, 'readable2raw')
        new_key_id = KeyFormatting.form_key_id(new_key_raw)
        if KeyFormatting.check_contact_identity(new_key_id) is True:
            print_message('This key is already in key ring!')
        elif KeyFormatting.check_contact_identity(new_key_id) is False:
            KeyFormatting.add_new_contact_key(new_key_id, args.pubkey)
            print_message('New contact added: {}, edit alias for usability'.format(new_key_id))
            if args.alias:
                KeyFormatting.edit_contact_alias(new_key_id, args.alias)
    elif validation is False:
        print_message('Invalid contact key!')
def del_contactkey(args):
    '''Removes chosen Contacts'''
    unique_ids = list(set(args.id))  # de-duplicate requested contact IDs
    try:
        check_contact_id(unique_ids)
    except IDException as instance:
        print_error(instance.parameter)
        return
    KeyFormatting.delete_contact_key(unique_ids)
    print_message('Key(s) {} deleted'.format(', '.join(unique_ids)))
def edit_contactalias(args):
    '''Changes contact alias'''
    try:
        check_contact_id(args.contact_id)
    except IDException as instance:
        print_error(instance.parameter)
        return
    KeyFormatting.edit_contact_alias(args.contact_id, args.alias)
    print_message('Changed alias for contact key {}'.format(args.contact_id))
def edit_masterkeyalias(args):
    '''Changes Master Key alias'''
    try:
        check_masterkey_id(args.master_key)
    except IDException as instance:
        print_error(instance.parameter)
        return
    KeyFormatting.edit_masterkey_alias(args.master_key, args.alias)
    print_message('Changed alias for Master Key {}'.format(args.master_key))
def check_contact_id(ids_to_check):
    '''Checks if chosen contacts ID is in the keyring.

    Accepts a single ID string or an iterable of IDs; raises IDException on
    the first unknown ID.
    '''
    contact_id_list = KeyFormatting.retrieve_contactkey_id_list()
    if isinstance(ids_to_check, basestring):
        candidates = [ids_to_check]
    else:
        candidates = ids_to_check
    for candidate in candidates:
        if candidate not in contact_id_list:
            raise IDException('No such key: {}'.format(candidate))
def check_masterkey_id(ids_to_check):
    '''Checks if chosen Master Keys ID is in the keyring.

    Accepts a single ID string or an iterable of IDs; raises IDException on
    the first unknown ID.
    '''
    master_id_list = KeyFormatting.retrieve_masterkey_id_list()
    if isinstance(ids_to_check, basestring):
        candidates = [ids_to_check]
    else:
        candidates = ids_to_check
    for candidate in candidates:
        if candidate not in master_id_list:
            raise IDException('No such key: {}'.format(candidate))
def print_message(msg):
    '''Prints a string with newlines
    Silent if no verbose option provided'''
    # Only the explicit False value prints; True (and anything else) is silent.
    if args.no_verbose is False:
        print('\n\n' + msg + '\n\n')
def print_error(msg):
    '''Prints error in consistent form
    Silent if no verbose option provided'''
    # Only the explicit False value prints; True (and anything else) is silent.
    if args.no_verbose is False:
        prog_name = os.path.basename(sys.argv[0])
        print('{}: error: {}\n'.format(prog_name, msg))
arg_parser = argparse.ArgumentParser(description = 'ECP cryptographic tool')
subparsers = arg_parser.add_subparsers(help = 'Sub-command help')
parser_encrypt = subparsers.add_parser('encrypt',
help = 'Encrypt message')
parser_encrypt.add_argument('--master-key',
type = str,
required = True,
help = 'Specify master key to encrypt messages with')
parser_encrypt.add_argument('--msg',
type = str,
required = True,
help = 'Specify text message file to encrypt')
parser_encrypt.add_argument('--output',
type = str,
help = 'Specify output file')
parser_encrypt.add_argument('--contact-id',
dest = 'id',
nargs = '+',
type = str,
required = True,
help = 'Specify contacts to encrypt message for')
parser_encrypt.add_argument('--incognito',
action = 'store_true',
help = 'Do not include identifiers in encrypted message')
parser_encrypt.add_argument('--hide-ids',
action = 'store_true',
help = 'Obfuscate IDs in encrypted message')
parser_encrypt.add_argument('--binary',
action = 'store_true',
help = 'Do not MIME-encode encrypted message')
parser_encrypt.add_argument('--no-verbose',
action = 'store_true',
help = 'Supress all notifications')
parser_encrypt.set_defaults(func = encrypt_message)
parser_decrypt = subparsers.add_parser('decrypt',
help = 'Decrypt message')
parser_decrypt.add_argument('--msg',
type = str,
required = True,
help = 'Specify text message file to decrypt')
parser_decrypt.add_argument('--output',
type = str,
help = 'Specify output file for decrypted message')
parser_decrypt.add_argument('--binary',
action = 'store_true',
help = 'Decrypt binary (not encoded) message')
parser_decrypt.add_argument('--no-verbose',
action = 'store_true',
help = 'Supress all notifications')
# Wire the decrypt sub-command (defined above) to its handler.
parser_decrypt.set_defaults(func = decrypt_message)

# --- sign-message: sign a text document with a master key ---
parser_signmsg = subparsers.add_parser('sign-message',
                                       help = 'Sign message')
parser_signmsg.add_argument('--master-key',
                            type = str,
                            required = True,
                            help = 'Specify master key to sign messages with')
parser_signmsg.add_argument('--msg',
                            type = str,
                            required = True,
                            help = 'Specify text document to sign')
parser_signmsg.add_argument('--output',
                            type = str,
                            help = 'Specify output file for signed message')
parser_signmsg.add_argument('--timestamp',
                            action = 'store_true',
                            help = 'Include timestamp in the file signature (reveals system clock)')
parser_signmsg.add_argument('--no-verbose',
                            action = 'store_true',
                            help = 'Suppress all notifications')
parser_signmsg.set_defaults(func = sign_message)

# --- sign-file: sign an arbitrary file with a master key ---
parser_signfile = subparsers.add_parser('sign-file',
                                        help = 'Sign file')
parser_signfile.add_argument('--master-key',
                             type = str,
                             required = True,
                             help = 'Specify master key to sign files with')
parser_signfile.add_argument('--file',
                             type = str,
                             required = True,
                             help = 'Specify file to sign')
parser_signfile.add_argument('--output',
                             type = str,
                             help = 'Specify output file for signature')
parser_signfile.add_argument('--timestamp',
                             action = 'store_true',
                             help = 'Include timestamp in the file signature (reveals system clock)')
parser_signfile.add_argument('--no-verbose',
                             action = 'store_true',
                             help = 'Suppress all notifications')
parser_signfile.set_defaults(func = sign_file)

# --- verify-message / verify-file ---
parser_verifymsg = subparsers.add_parser('verify-message',
                                         help = 'Verify signed message')
parser_verifymsg.add_argument('--msg',
                              type = str,
                              required = True,
                              help = 'Specify text message file with signed message to verify')
parser_verifymsg.add_argument('--no-verbose',
                              action = 'store_true',
                              help = 'Suppress all notifications')
parser_verifymsg.set_defaults(func = verify_message)
parser_verifyfile = subparsers.add_parser('verify-file',
                                          help = 'Verify signed file')
parser_verifyfile.add_argument('--file',
                               type = str,
                               required = True,
                               help = 'Specify file to verify')
parser_verifyfile.add_argument('--signature',
                               dest = 'sig',
                               type = str,
                               required = True,
                               help = 'Specify file signature to verify')
parser_verifyfile.add_argument('--no-verbose',
                               action = 'store_true',
                               help = 'Suppress all notifications')
parser_verifyfile.set_defaults(func = verify_file)

# --- master-key management ---
parser_genkey = subparsers.add_parser('gen-key',
                                      help = 'Generate new private key (Master key)')
parser_genkey.add_argument('--no-verbose',
                           action = 'store_true',
                           help = 'Suppress all notifications')
parser_genkey.set_defaults(func = gen_masterkey)
parser_showmasterkeys = subparsers.add_parser('master-keys',
                                              help = 'Display all private keys (Master keys)')
parser_showmasterkeys.add_argument('--no-verbose',
                                   action = 'store_true',
                                   help = 'Suppress all notifications')
parser_showmasterkeys.set_defaults(func = print_masterkeys)
parser_editmasterkey = subparsers.add_parser('set-key-alias',
                                             help = 'Set an alias for a given private key (Master key)')
parser_editmasterkey.add_argument('--master-key',
                                  type = str,
                                  required = True,
                                  help = 'Specify private key (Master keys)')
parser_editmasterkey.add_argument('--alias',
                                  type = str,
                                  required = True,
                                  help = 'Specify alias string')
parser_editmasterkey.add_argument('--no-verbose',
                                  action = 'store_true',
                                  help = 'Suppress all notifications')
parser_editmasterkey.set_defaults(func = edit_masterkeyalias)
parser_setkeypass = subparsers.add_parser('set-key-pass',
                                          help = 'Set or change password for private key (Master key)')
parser_setkeypass.add_argument('--master-key',
                               type = str,
                               required = True,
                               help = 'Specify private key (Master keys)')
parser_setkeypass.add_argument('--no-verbose',
                               action = 'store_true',
                               help = 'Suppress all notifications')
parser_setkeypass.set_defaults(func = set_masterkey_pass)
parser_remkeypass = subparsers.add_parser('del-key-pass',
                                          help = 'Remove password from private key (Master key)')
parser_remkeypass.add_argument('--master-key',
                               type = str,
                               required = True,
                               help = 'Specify private key (Master keys)')
parser_remkeypass.add_argument('--no-verbose',
                               action = 'store_true',
                               help = 'Suppress all notifications')
parser_remkeypass.set_defaults(func = remove_masterkey_pass)
parser_delkey = subparsers.add_parser('del-key',
                                      help = 'Delete one or more private keys (Master keys)')
parser_delkey.add_argument('--master-key',
                           nargs = '+',
                           dest = 'id',
                           type = str,
                           required = True,
                           help = 'Specify one or more keys to delete')
parser_delkey.add_argument('--no-verbose',
                           action = 'store_true',
                           help = 'Suppress all notifications')
parser_delkey.set_defaults(func = del_masterkey)

# --- contact-key management ---
parser_addcontact = subparsers.add_parser('add-contact',
                                          help = 'Add contact public key to the key ring')
parser_addcontact.add_argument('--public-key',
                               dest = 'pubkey',
                               required = True,
                               help = 'Public key to add to the key ring')
parser_addcontact.add_argument('--alias',
                               type = str,
                               help = 'Alias for added contact')
parser_addcontact.add_argument('--no-verbose',
                               action = 'store_true',
                               help = 'Suppress all notifications')
parser_addcontact.set_defaults(func = add_contactkey)
parser_showcontacts = subparsers.add_parser('contacts',
                                            help = 'Display all contact keys')
parser_showcontacts.add_argument('--no-verbose',
                                 action = 'store_true',
                                 help = 'Suppress all notifications')
parser_showcontacts.set_defaults(func = print_contacts)
parser_editcontact = subparsers.add_parser('set-contact-alias',
                                           help = 'Set an alias for a given contact key')
parser_editcontact.add_argument('--contact-id',
                                type = str,
                                required = True,
                                help = 'Specify contact key')
parser_editcontact.add_argument('--alias',
                                type = str,
                                required = True,
                                help = 'Specify alias string')
parser_editcontact.add_argument('--no-verbose',
                                action = 'store_true',
                                help = 'Suppress all notifications')
parser_editcontact.set_defaults(func = edit_contactalias)
parser_delcontact = subparsers.add_parser('del-contact',
                                          help = 'Delete one or more contact keys')
parser_delcontact.add_argument('--contact-id',
                               nargs = '+',
                               dest = 'id',
                               type = str,
                               required = True,
                               help = 'Specify one or more keys to delete')
parser_delcontact.add_argument('--no-verbose',
                               action = 'store_true',
                               help = 'Suppress all notifications')
parser_delcontact.set_defaults(func = del_contactkey)
if len(sys.argv) < 2:
    # No sub-command given: drop into an interactive REPL and feed each
    # entered line back through the argument parser.
    while True:
        try:
            a = raw_input('{} > '.format(os.path.basename(sys.argv[0])))
            args = arg_parser.parse_args(a.split())
            args.func(args)
        except SystemExit:
            # argparse calls sys.exit() on errors and --help; swallow it so
            # the interactive loop keeps running.
            pass
        except KeyboardInterrupt:
            sys.exit()
else:
    # Normal one-shot invocation: parse argv and dispatch to the handler
    # registered via set_defaults(func=...).
    args = arg_parser.parse_args()
    args.func(args)
|
import json
from model_mommy import mommy
from django.test.client import Client
from mock import *
from django.contrib.auth.models import User, Group
from survey.models.users import UserProfile
from survey.tests.base_test import BaseTest
from survey.forms.interviewer import InterviewerForm,\
USSDAccessForm, ODKAccessForm
from survey.models import EnumerationArea
from survey.models import LocationType, Location, Survey
from survey.models import Interviewer
from survey.models import USSDAccess
from django.forms.models import inlineformset_factory
from django.core.urlresolvers import reverse
class InterviewerViewTest(BaseTest):
    """View tests for interviewer pages (block/unblock, create, show).

    Fix: the module previously defined ``test_block_interviwer_when_no_such_
    interviewer_exist`` twice; the second definition shadowed the first, so
    the redirect/message assertions never ran. The second test (which hits
    the *unblock* URL) is renamed accordingly.
    """

    def setUp(self):
        self.client = Client()
        self.user_without_permission = User.objects.create_user(
            username='useless', email='<EMAIL>', password='<PASSWORD>')
        self.raj = self.assign_permission_to(User.objects.create_user(
            'demo5', '<EMAIL>', 'demo5'), 'can_view_interviewers')
        self.assign_permission_to(self.raj, 'can_view_interviewers')
        # 'demo5' is both the username and the password used at creation
        # above, so log in with it.
        self.client.login(username='demo5', password='demo5')
        self.ea = EnumerationArea.objects.create(name="BUBEMBE", code="11-BUBEMBE")
        self.country_type = LocationType.objects.create(name="country", slug="country")
        self.country = self.country_type
        self.district = LocationType.objects.create(name="Kampala", slug="kampala", parent=self.country_type)
        self.uganda = Location.objects.create(name="Uganda", type=self.country)
        self.kampala = Location.objects.create(name="Kampala", type=self.district, parent=self.uganda)
        self.ea.locations.add(self.kampala)
        self.survey = Survey.objects.create(name="survey A")
        self.form_data = {
            'name': 'Interviewer_1',
            'date_of_birth': '1987-08-06',
            'gender': 1,
            'ea': self.ea
        }

    def test_unblock_interviwer_details(self):
        """Unblocking an interviewer clears the is_blocked flag."""
        investigator = Interviewer.objects.create(name="Investigator6",
                                                  ea=self.ea,
                                                  gender='1', level_of_education='Primary',
                                                  language='Eglish', weights=0, date_of_birth='1987-01-01')
        response = self.client.get(reverse('unblock_interviewer_page', kwargs={'interviewer_id': investigator.id}))
        self.assertIn(response.status_code, [302, 200])
        investigator = Interviewer.objects.get(name='Investigator6')
        self.assertEquals(investigator.is_blocked, False)
        # self.assertIn("Interviewer USSD Access successfully unblocked.", response.cookies['messages'].value)
        # self.assertRedirects(response, expected_url=reverse('interviewers_page'), msg_prefix='')

    def test_block_interviewer_details(self):
        """Blocking an interviewer sets the is_blocked flag."""
        investigator = Interviewer.objects.create(name="Investigator5",
                                                  ea=self.ea,
                                                  gender='1', level_of_education='Primary',
                                                  language='Eglish', weights=0, date_of_birth='1987-01-01')
        response = self.client.get(reverse('block_interviewer_page', kwargs={'interviewer_id': investigator.id}))
        self.assertIn(response.status_code, [302, 200])
        z3 = Interviewer.objects.get(name='Investigator5')
        self.assertEquals(z3.is_blocked, True)
        # self.assertIn("Interviewer USSD Access successfully blocked.", response.cookies['messages'].value)
        # self.assertRedirects(response, expected_url=reverse('interviewers_page'))

    def test_block_interviwer_when_no_such_interviewer_exist(self):
        """Blocking a missing interviewer redirects with an error message."""
        url = reverse('block_interviewer_page', kwargs={"interviewer_id": 99999})
        response = self.client.get(url)
        self.assertRedirects(response, expected_url=reverse('interviewers_page'))
        self.assertIn("Interviewer does not exist.", response.cookies['messages'].value)

    def test_unblock_interviwer_when_no_such_interviewer_exist(self):
        """Unblocking a missing interviewer returns 404.

        Renamed from a duplicate of the block-page test above so both run.
        """
        url = reverse('unblock_interviewer_page', kwargs={"interviewer_id": 99999})
        response = self.client.get(url)
        self.assertEquals(response.status_code, 404)

    def test_get_interviewer_pages(self):
        """49, 73-75, 119, 122, 125, 139, 144-146, 154-155, 169-170, 192-193, 204-205, 216-217, 228-229
        :return:
        """
        interviewer = mommy.make(Interviewer, ea=self.ea)
        url = reverse('interviewer_completion_summary', args=(interviewer.id, ))
        response = self.client.get(url)
        self.assertEquals(response.status_code, 302)
        url = reverse("new_interviewer_page")
        response = self.client.get(url)
        self.assertEquals(response.status_code, 200)
        self.assertIn('form', response.context)
        self.assertIn('ussd_access_form', response.context)
        self.assertIn('odk_access_form', response.context)
        url = reverse("show_interviewer_page", args=(interviewer.id, ))
        response = self.client.get(url)
        self.assertEquals(response.status_code, 200)
        self.assertIn('interviewer', response.context)

    # def test_download_interviewers(self):
    #     response = self.client.get(reverse('download_interviewers'))
    #     # self.failUnlessEqual(response.status_code, 200)
    #     self.assertIn(response.status_code, [200,302])
    #     rtype = response.headers.get('content_type')
    #     self.assertIn('text/csv', rtype)
    #     res_csv = 'attachment; \
    #         filename="%s.csv"' % filename
    #     self.assertIn(response['Content-Disposition'], res_csv)
    # def test_view_interviewer_details_when_no_such_interviewer_exists(self):
    #     investigator = Interviewer.objects.create(name="Investigator10",
    #                                               ea=self.ea,
    #                                               gender='1', level_of_education='Primary',
    #                                               language='Eglish', weights=0,date_of_birth='1987-01-01')
    #     self.client.get(reverse('view_interviewer_page', kwargs={"interviewer_id":investigator.id}))
    #     self.assertIn(response.status_code, [200,302])
    #     url = reverse(
    #         'view_interviewer_page',
    #         kwargs={"interviewer_id": investigator.id})
    #     response = self.client.get(url)
    #     self.assertRedirects(response, expected_url=reverse('interviewers_page'))
    #     self.assertIn("Interviewer not found.", response.cookies['messages'].value)

    def test_restricted_permission(self):
        """The interviewers list page requires the view permission."""
        investigator = Interviewer.objects.create(name="Investigator",
                                                  ea=self.ea,
                                                  gender='1', level_of_education='Primary',
                                                  language='Eglish', weights=0, date_of_birth='1987-01-01')
        self.assert_restricted_permission_for(reverse('interviewers_page'))
        # url = reverse('view_interviewer_page', kwargs={"interviewer_id": investigator.id,"mode":'view'})
        # self.assert_restricted_permission_for(reverse(url))
        # url = reverse('block_interviewer_page', kwargs={"interviewer_id": investigator.id})
        # self.assert_restricted_permission_for(reverse(url))
        # url = reverse('unblock_interviewer_page', kwargs={"interviewer_id": investigator.id})
        # self.assert_restricted_permission_for(reverse(url))
        # url = reverse('download_interviewers')
        # self.assert_restricted_permission_for(reverse(url))
"""
DB Model for Users table
and relevant junction tables
"""
import datetime
from flask_bcrypt import check_password_hash, generate_password_hash
from flask_jwt_extended import (create_access_token, create_refresh_token, decode_token, get_jwt_identity, get_raw_jwt,
jwt_refresh_token_required, jwt_required)
from flask_login import UserMixin
from sqlalchemy.sql import and_, select
from app.main import db, login_manager
# from app.main.models.comments import Comment
from app.main.models.movies import Movie
from app.main.models.posts import Post
class User(db.Model, UserMixin):
    """
    Description of User model.
    Columns
    -----------
    :id: int [pk]
    :username: varchar(128) [not NULL]
    :password: varchar(128) [not NULL] (bcrypt hash, never plaintext)
    :first_name: varchar(255) [not NULL]
    :last_name: varchar(255)
    :dob: date
    :email: varchar(255) [not NULL]
    :fb_handle: varchar(255)
    :twitter_handle: varchar(255)
    :bio: text
    :occupation: varchar(255)
    :profile_picture: int
    :last_login: timestamp
    :creation_time: timestamp
    :is_verified: boolean
    # Relationships
    :watch_list: Relationship -> Movies (one to Many)
    :bucket_list: Relationship -> Movies (one to Many)
    """
    # Columns
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(128), unique=True, nullable=False)
    password = db.Column(db.String(128), nullable=False)
    first_name = db.Column(db.String(255), default="")
    last_name = db.Column(db.String(255), default="")
    dob = db.Column(db.DateTime)
    email = db.Column(db.String(255), nullable=False)
    fb_handle = db.Column(db.String(255))
    twitter_handle = db.Column(db.String(255))
    instagram_handle = db.Column(db.String(255))
    profile_picture = db.Column(db.Integer)
    bio = db.Column(db.Text)
    favourites = db.Column(db.JSON)
    last_login = db.Column(db.DateTime)
    # Pass the callable (not its result) so the timestamp is computed per
    # insert instead of once at import time.
    creation_time = db.Column(db.DateTime, default=datetime.datetime.now)
    is_verified = db.Column(db.Boolean, default=False)
    # Use ``dict`` as a factory so each row gets its own fresh default
    # instead of all rows sharing one mutable {} literal.
    seen_list_titles = db.Column(db.JSON, default=dict)
    seen_list_IDs = db.Column(db.JSON, default=dict)
    bucket_list_titles = db.Column(db.JSON, default=dict)
    bucket_list_IDs = db.Column(db.JSON, default=dict)
    recommend_list_titles = db.Column(db.JSON, default=dict)
    recommend_list_IDs = db.Column(db.JSON, default=dict)
    # Relationships
    movie_list = db.relationship('Movie', backref="user")
    posts = db.relationship('Post', backref="user")
    # comments = db.relationship('Comment', backref="user")

    def __init__(self, username, password, email):
        """Create and immediately persist a new unverified user."""
        self.username = username
        # Store only the bcrypt hash of the supplied plaintext password.
        self.password = generate_password_hash(password)
        self.email = email
        self.is_verified = False
        self.profile_picture = 1
        # NOTE(review): committing inside the constructor couples model
        # creation to the session; kept for backward compatibility.
        db.session.add(self)
        db.session.commit()

    @staticmethod
    @login_manager.user_loader
    def load_user(id):
        """Flask-Login hook: resolve a session user id to a User row."""
        return User.query.filter_by(id=id).first()

    @staticmethod
    @login_manager.request_loader
    def load_user_from_request(request):
        """Resolve a user from a JWT in the Authorization header, else None."""
        try:
            token = request.headers.get('Authorization')
            if token:
                user_id = decode_token(token)
                username = user_id['identity']
                user = User.query.filter_by(username=username).first()
                if user:
                    return user
        except Exception as e:
            # Invalid/expired token: fall through to anonymous.
            print(e)
        return None

    def update_col(self, key, value):
        """Set a single column by name and commit."""
        setattr(self, key, value)
        db.session.commit()

    def check_password(self, password):
        """Return True when *password* matches the stored bcrypt hash."""
        return check_password_hash(self.password, password)

    def resetPassword(self, newPassword):
        """Replace the stored hash with the hash of *newPassword*."""
        self.password = generate_password_hash(newPassword)
        db.session.commit()

    def isVerified(self):
        """Return the verification flag."""
        return self.is_verified

    def setVerified(self):
        """Mark the account as verified and commit."""
        self.is_verified = True
        db.session.commit()

    def add_to_movie_list(self, imdb_ID):
        """Append the Movie with *imdb_ID* to this user's movie list."""
        movie = Movie.query.filter_by(imdb_ID=imdb_ID).first()
        self.movie_list.append(movie)
        db.session.commit()
|
<gh_stars>0
import unittest
from resappserver.graph import *
def dummy_edge(data):
    """No-op edge action: accepts the payload and changes nothing."""
def increment_a_edge(record):
    """Edge action: bump the 'a' field of *record* by one, in place."""
    record['a'] += 1
def increment_b_edge(record):
    """Edge action: bump the 'b' field of *record* by one, in place."""
    record['b'] += 1
def decrement_a_edge(record):
    """Edge action: lower the 'a' field of *record* by one, in place."""
    record['a'] -= 1
def dummy_predicate(data):
    """Predicate that always allows the transition, ignoring *data*."""
    return True
def nonzero_predicate(data):
    """Return True when ``data['a']`` is nonzero.

    The comparison already yields a bool, so the redundant
    ``True if ... else False`` wrapper is dropped.
    """
    return data['a'] != 0
def positiveness_predicate(data):
    """Return True when ``data['a']`` is strictly positive.

    Simplified from the redundant ``True if ... else False`` form.
    """
    return data['a'] > 0
def nonpositiveness_predicate(data):
    """Return True when ``data['a']`` is zero or negative.

    Simplified from the redundant ``True if ... else False`` form; it is
    the exact complement of ``positiveness_predicate``.
    """
    return data['a'] <= 0
def print_exception(exc_data, data):
    """Print the exception payload followed by the data state at failure."""
    for label, value in (('exception data', exc_data),
                         ('current state of data', data)):
        print('{}: {}'.format(label, value))
class GraphGoodCheck(unittest.TestCase):
    """Exercise small hand-built state graphs: serial, parallel, cycled,
    and a composition of all three.

    Each ``_get_trivial_*`` helper builds a graph, mutates the supplied
    condition dicts in place to produce the expected outputs, and returns
    (initial_state, terminal_state, expected_outputs).
    """

    # Scalar starting values used to seed the 'a' (and sometimes 'b') fields.
    initial_conditions = range(-10, 10)

    def test_trivial_serial_graph(self):
        # a -> a+2 via two nonzero-guarded increments; a in {-1, 0} hits the
        # guard and must raise inside the graph run.
        initial_state, term_state, correct_outputs = self._get_trivial_serial_graph([{'a': ic} for ic in self.initial_conditions])
        self._run_graph(initial_state, ('a',), (-1, 0), correct_outputs)

    def test_trivial_parallel_graph(self):
        # Two parallel branches increment 'a' and 'b' independently.
        initial_state, term_state, correct_outputs = self._get_trivial_parallel_graph([{'a': ic, 'b': ic} for ic in self.initial_conditions])
        self._run_graph(initial_state, ('a', 'b'), (-1, 0), correct_outputs)

    def test_trivial_cycled_graph(self):
        # The cycle keeps incrementing until 'a' becomes positive, so no
        # initial condition is invalid here.
        initial_state, term_state, correct_outputs = self._get_trivial_cycled_graph([{'a': ic} for ic in self.initial_conditions])
        self._run_graph(initial_state, ('a',), (), correct_outputs)

    def test_complex_graph_made_from_trivial_ones(self):
        '''
        serial graph + parallel graph + cycled graph
        '''
        # The expected outputs are threaded through the helpers, which
        # mutate the same dicts in place at each stage.
        s_1, s_2, correct_outputs = self._get_trivial_serial_graph([{'a': ic, 'b': ic} for ic in self.initial_conditions])
        s_3, s_4, correct_outputs = self._get_trivial_parallel_graph(correct_outputs)
        s_5, s_6, correct_outputs = self._get_trivial_cycled_graph(correct_outputs)
        s_2.connect_to(s_3, edge=Edge(dummy_predicate, dummy_edge))
        s_4.connect_to(s_5, edge=Edge(dummy_predicate, dummy_edge))
        self._run_graph(s_1, ('a', 'b'), (-3, -2, -1, 0), correct_outputs)

    def _get_trivial_serial_graph(self, initial_conditions):
        '''
        s_1 -> s_2 -> s_3,
        p_12 = p_23 := a not 0
        f_12 = f_23 := a + 1
        '''
        spp = SerialParallelizationPolicy()
        oosp = OnlyOneSelectionPolicy()
        s_1 = State('serial_s_1', parallelization_policy=spp,
                    parallel_branches_selection_policy=oosp)
        s_2 = State('serial_s_2', parallelization_policy=spp,
                    parallel_branches_selection_policy=oosp)
        s_3 = State('serial_s_3', parallelization_policy=spp,
                    parallel_branches_selection_policy=oosp)
        s_1.connect_to(s_2, edge=Edge(nonzero_predicate, increment_a_edge))
        s_2.connect_to(s_3, edge=Edge(nonzero_predicate, increment_a_edge))
        #correct_outputs = [{'a': ic + 2} for ic in initial_conditions]
        # NOTE: the input dicts are mutated in place on purpose — the complex
        # test reuses them as the next stage's expected outputs.
        correct_outputs = []
        for ic in initial_conditions:
            ic['a'] += 2
            correct_outputs.append(ic)
        return s_1, s_3, correct_outputs

    def _get_trivial_parallel_graph(self, initial_conditions):
        '''
        s_1 -> s_2 -> s_4
            -> s_3 ->
        p_12 = p_24 = p_13 = p_34 := a not 0
        f_12 = f_24 := a + 1
        f_13 = f_34 := b + 1
        '''
        spp = SerialParallelizationPolicy()
        oosp = OnlyOneSelectionPolicy()
        asp = AllSelectionPolicy()
        s_1 = State('parallel_s_1', parallelization_policy=spp,
                    parallel_branches_selection_policy=asp)
        s_2 = State('parallel_s_2', parallelization_policy=spp,
                    parallel_branches_selection_policy=oosp)
        s_3 = State('parallel_s_3', parallelization_policy=spp,
                    parallel_branches_selection_policy=oosp)
        s_4 = State('parallel_s_4', parallelization_policy=spp,
                    parallel_branches_selection_policy=oosp)
        s_1.connect_to(s_2, edge=Edge(nonzero_predicate, increment_a_edge))
        s_2.connect_to(s_4, edge=Edge(nonzero_predicate, increment_a_edge))
        s_1.connect_to(s_3, edge=Edge(nonzero_predicate, increment_b_edge))
        s_3.connect_to(s_4, edge=Edge(nonzero_predicate, increment_b_edge))
        #correct_outputs = [{'a': ic + 2, 'b': ic + 2} for ic in self.initial_conditions]
        # In-place mutation, see note in _get_trivial_serial_graph.
        correct_outputs = []
        for ic in initial_conditions:
            ic['a'] += 2
            ic['b'] += 2
            correct_outputs.append(ic)
        return s_1, s_4, correct_outputs

    def _get_trivial_cycled_graph(self, initial_conditions):
        '''
        s_1 -> s_2 -> s_3
            <-
        p_12 := True
        p_23 := a > 0
        p_23 := a <= 0
        f_12 = f_23 = f_24 := a + 1
        '''
        spp = SerialParallelizationPolicy()
        oosp = OnlyOneSelectionPolicy()
        s_1 = State('cycled_s_1', parallelization_policy=spp,
                    parallel_branches_selection_policy=oosp)
        s_2 = State('cycled_s_2', parallelization_policy=spp,
                    parallel_branches_selection_policy=oosp)
        s_3 = State('cycled_s_3', parallelization_policy=spp,
                    parallel_branches_selection_policy=oosp)
        s_1.connect_to(s_2, edge=Edge(dummy_predicate, increment_a_edge))
        s_2.connect_to(s_3, edge=Edge(positiveness_predicate, increment_a_edge))
        s_2.connect_to(s_1, edge=Edge(nonpositiveness_predicate, increment_a_edge))
        # correct_outputs = [{'a': ic + 2} if ic >=0 else {'a': ic%2 + 2} for ic in self.initial_conditions]
        # Negative 'a' loops through s_1/s_2 until positive; the closed form
        # of that looping is a%2 + 2.
        correct_outputs = []
        for ic in initial_conditions:
            if ic['a'] >= 0:
                ic['a'] += 2
            else:
                ic['a'] = ic['a']%2 + 2
            correct_outputs.append(ic)
        return s_1, s_3, correct_outputs

    def _run_graph(self, initial_state, vars_to_initialize, invalid_ics, correct_outputs):
        """Run the graph once per scalar initial condition and compare.

        ``invalid_ics`` are the scalar seeds expected to fail (the run
        must report an '__EXCEPTION__' entry and okay == False).
        """
        graph = Graph(initial_state)
        for ic, correct_output in zip(self.initial_conditions, correct_outputs):
            print('Doing ic = {}...'.format(ic))
            gotten_output, okay = graph.run({var: ic for var in vars_to_initialize})
            if ic in invalid_ics:
                print(gotten_output['__EXCEPTION__'])
                self.assertEqual('__EXCEPTION__' in gotten_output, True)
                self.assertEqual(okay, False)
            else:
                self.assertEqual(okay, True)
                self.assertEqual(gotten_output, correct_output)
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    unittest.main()
|
"""Collection of tests around log handling."""
import logging
import pytest
from cookiecutter.log import configure_logger
def create_log_records():
    """Test function, create log entries in expected stage of test."""
    root = logging.getLogger('cookiecutter')
    foo = logging.getLogger('cookiecutter.foo')
    foobar = logging.getLogger('cookiecutter.foo.bar')
    root.info('Welcome to Cookiecutter')
    root.debug('Generating project from pytest-plugin')
    foo.info('Loading user config from home dir')
    foobar.debug("I don't know.")
    foobar.debug('I wanted to save the world.')
    foo.error('Aw, snap! Something went wrong')
    root.debug('Successfully generated project')
@pytest.fixture
def info_messages():
    """Fixture. List of test info messages."""
    expected = [
        'INFO: Welcome to Cookiecutter',
        'INFO: Loading user config from home dir',
        'ERROR: Aw, snap! Something went wrong',
    ]
    return expected
@pytest.fixture
def debug_messages():
    """Fixture. List of test debug messages."""
    expected = [
        "INFO cookiecutter: Welcome to Cookiecutter",
        "DEBUG cookiecutter: Generating project from pytest-plugin",
        "INFO cookiecutter.foo: Loading user config from home dir",
        "DEBUG cookiecutter.foo.bar: I don't know.",
        "DEBUG cookiecutter.foo.bar: I wanted to save the world.",
        "ERROR cookiecutter.foo: Aw, snap! Something went wrong",
        "DEBUG cookiecutter: Successfully generated project",
    ]
    return expected
@pytest.fixture
def info_logger():
    """Fixture. Cookiecutter logger configured at INFO stream level."""
    logger = configure_logger(stream_level='INFO')
    return logger
@pytest.fixture
def debug_logger():
    """Fixture. Cookiecutter logger configured at DEBUG stream level."""
    logger = configure_logger(stream_level='DEBUG')
    return logger
@pytest.fixture
def debug_file(tmp_path):
    """Fixture. Path of the per-test debug log file."""
    log_path = tmp_path.joinpath('pytest-plugin.log')
    return log_path
@pytest.fixture
def info_logger_with_file(debug_file):
    """Fixture. INFO-level stream logging plus a DEBUG file handler."""
    logger = configure_logger(stream_level='INFO', debug_file=str(debug_file))
    return logger
def test_info_stdout_logging(caplog, info_logger, info_messages):
    """Test that stdout logs use info format and level."""
    [stream_handler] = info_logger.handlers
    assert isinstance(stream_handler, logging.StreamHandler)
    assert stream_handler.level == logging.INFO
    create_log_records()
    formatted = []
    for record in caplog.records:
        if record.levelno >= stream_handler.level:
            formatted.append(stream_handler.format(record))
    assert formatted == info_messages
def test_debug_stdout_logging(caplog, debug_logger, debug_messages):
    """Test that stdout logs use debug format and level."""
    [stream_handler] = debug_logger.handlers
    assert isinstance(stream_handler, logging.StreamHandler)
    assert stream_handler.level == logging.DEBUG
    create_log_records()
    formatted = []
    for record in caplog.records:
        if record.levelno >= stream_handler.level:
            formatted.append(stream_handler.format(record))
    assert formatted == debug_messages
def test_debug_file_logging(caplog, info_logger_with_file, debug_file, debug_messages):
    """Stream handler stays at INFO while the file handler captures DEBUG."""
    file_handler, stream_handler = info_logger_with_file.handlers
    assert isinstance(file_handler, logging.FileHandler)
    assert isinstance(stream_handler, logging.StreamHandler)
    assert stream_handler.level == logging.INFO
    assert file_handler.level == logging.DEBUG
    create_log_records()
    assert debug_file.exists()
    # Last line in the log file is an empty line
    with debug_file.open() as log_file:
        contents = log_file.read()
    assert contents.split('\n') == debug_messages + ['']
|
<filename>nova/scheduler/filters/vcpu_model_filter.py
#
# Copyright (c) 2013-2017 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
"""Scheduler filter "VCpuModelFilter", host_passes() returns True when the host
CPU model is newer than or equal to the guest CPU model as specified in the
instance type.
The CPU models considered are currently limited to a subset of the Intel CPU
family. See nova.constants.VCPU_MODEL for the complete list.
"""
from nova import context
from nova import exception
from nova import objects
from nova.objects import fields
from nova.scheduler import filters
from oslo_log import log as logging
from oslo_serialization import jsonutils
LOG = logging.getLogger(__name__)
class VCpuModelFilter(filters.BaseHostFilter):
    """Filter hosts that support the necessary virtual cpu model.
    """

    # Instance type data does not change within a request
    run_filter_once_per_request = True

    def _is_compatible(self, host, guest):
        """Determine if the host CPU model is capable of emulating the guest CPU
        model. For this determination we are currently only interested in
        Intel based CPUs in the most recent family (from pentiumpro to
        Haswell)
        """
        try:
            host_index = fields.CPUModel.ALL.index(host)
        except ValueError:
            # The host CPU model is not in our list. This is unexpected and
            # likely indicates that support for a new processor needs to be
            # added and validated.
            return False
        try:
            guest_index = fields.CPUModel.ALL.index(guest)
        except ValueError:
            # The guest CPU model is not in our list. We can't tell whether
            # we can support it or not.
            return False
        # if guest request IBRS cpu, but host does not support 'IBRS', then
        # return false directly
        if 'IBRS' in guest and 'IBRS' not in host:
            return False
        # Models are ordered oldest-to-newest in CPUModel.ALL, so a host at
        # an equal or later index can emulate the guest model.
        return bool(guest_index <= host_index)

    def _passthrough_host_passes(self, host_state, spec_obj):
        """Check a migrating Passthrough-model guest against this host.

        The source host's CPU model and feature set must match exactly,
        since the guest sees the physical CPU directly.
        """
        hints = spec_obj.scheduler_hints
        if not hints['host'] or not hints['node']:
            LOG.info("(%(host)s, %(node)s) CANNOT SCHEDULE: "
                     "VCPU Passthrough guest migrating from unknown host",
                     {'host': host_state.host,
                      'node': host_state.nodename})
            msg = ("Passthrough cpu model migrating from unknown host")
            self.filter_reject(host_state, spec_obj, msg)
            return False
        ctxt = context.get_admin_context()
        try:
            source_node = objects.ComputeNode.get_by_host_and_nodename(ctxt,
                hints['host'][0], hints['node'][0])
        except exception.NotFound:
            LOG.info("(%(host)s, %(node)s) CANNOT SCHEDULE: "
                     "No compute node record found for source %(h)s, %(n)s)",
                     {'host': host_state.host,
                      'node': host_state.nodename,
                      'h': hints['host'][0], 'n': hints['node'][0]})
            msg = ("No compute node record found for %(host)s, %(node)s)" %
                   {'host': hints['host'][0], 'node': hints['node'][0]})
            self.filter_reject(host_state, spec_obj, msg)
            return False
        source_model = self._get_cpu_model(source_node.cpu_info)
        host_model = self._get_cpu_model(host_state.cpu_info)
        same_cpu_features = self._cpus_have_same_features(host_state.cpu_info,
                                                          source_node.cpu_info)
        key = 'vcpu_model'
        if source_model != host_model or not same_cpu_features:
            LOG.info("(%(host)s, %(node)s) CANNOT SCHEDULE: "
                     "different model or incompatible cpu features: "
                     "host %(key)s = %(host_model)s, "
                     "required = %(required)s",
                     {'host': host_state.host,
                      'node': host_state.nodename,
                      'key': key,
                      'host_model': host_model,
                      'required': source_model})
            msg = ("Different VCPU model or cpu features. "
                   "Host %(host_model)s required %(req)s" %
                   {'host_model': host_model, 'req': source_model})
            self.filter_reject(host_state, spec_obj, msg)
            return False
        LOG.info("(%(host)s, %(node)s) "
                 "PASS: host %(key)s = %(host_model)s, "
                 "required = %(required)s",
                 {'host': host_state.host,
                  'node': host_state.nodename,
                  'key': key,
                  'host_model': host_model,
                  'required': source_model})
        return True

    def _get_cpu_model(self, cpu_info):
        """Parses the driver CPU info structure to extract the host CPU model
        """
        info = jsonutils.loads(cpu_info)
        return info['model']

    def _cpus_have_same_features(self, host_cpu_info, cpu_info):
        """Return True when both CPU-info blobs report identical features.

        Uses ``==`` rather than the Python-2-only ``cmp()`` builtin
        (``cmp(a, b) == 0`` is exactly equality), so the filter also runs
        under Python 3.
        """
        info = jsonutils.loads(cpu_info)
        host_info = jsonutils.loads(host_cpu_info)
        return info['features'] == host_info['features']

    def _is_host_kvm(self, cpu_info):
        """A host exposing the 'vmx' feature is treated as a kvm host."""
        info = jsonutils.loads(cpu_info)
        if 'vmx' in info['features']:
            return True
        return False

    def host_passes(self, host_state, spec_obj):
        """If the host CPU model is newer than or equal to the guest CPU model
        that is specified in the flavor or image then this host is deemed
        capable of instantiating this instance.
        """
        flavor_model = spec_obj.flavor.extra_specs.get("hw:cpu_model")
        image_model = spec_obj.image.properties.get("hw_cpu_model")
        # Flavor and image may each specify a model, but they must agree.
        if (image_model is not None and flavor_model is not None and
                image_model != flavor_model):
            raise exception.ImageVCPUModelForbidden()
        model = flavor_model or image_model
        if not model:
            LOG.debug("(%(host)s, %(node)s) PASS: no required vCPU model",
                      {'host': host_state.host,
                       'node': host_state.nodename})
            return True
        if model == 'Passthrough' and \
                not self._is_host_kvm(host_state.cpu_info):
            LOG.info("(%(host)s, %(node)s) CANNOT SCHEDULE: "
                     "Passthrough VCPU Model only available on 'kvm' hosts",
                     {'host': host_state.host,
                      'node': host_state.nodename})
            msg = "Passthrough VCPU Model only available on 'kvm' hosts"
            self.filter_reject(host_state, spec_obj, msg)
            return False
        task_state = spec_obj.scheduler_hints.get('task_state')
        if model == 'Passthrough':
            # Migrations (any non-'scheduling' task state) must match the
            # source host exactly.
            if task_state and ('scheduling' not in task_state):
                return self._passthrough_host_passes(host_state, spec_obj)
        key = 'vcpu_model'
        host_model = self._get_cpu_model(host_state.cpu_info)
        if self._is_compatible(host_model, model):
            LOG.info("(%(host)s, %(node)s) "
                     "PASS: host %(key)s = %(host_model)s, "
                     "required = %(required)s",
                     {'host': host_state.host,
                      'node': host_state.nodename,
                      'key': key,
                      'host_model': host_model,
                      'required': model})
            return True
        else:
            LOG.info("(%(host)s, %(node)s) CANNOT SCHEDULE: "
                     "host %(key)s = %(host_model)s, "
                     "required = %(required)s",
                     {'host': host_state.host,
                      'node': host_state.nodename,
                      'key': key,
                      'host_model': host_model,
                      'required': model})
            msg = ("Host VCPU model %(host_model)s required %(required)s" %
                   {'host_model': host_model, 'required': model})
            self.filter_reject(host_state, spec_obj, msg)
            return False
|
<gh_stars>1-10
#!/usr/bin/env python
#
# system imports
#
import cPickle, Image, glob
from scipy.misc import fromimage
import numpy as np
#
# user defined imports
#
from DefinitionsAndUtils import *
from ImageProcessing import thresholdNDArray
from GraphAndHistogramUtilities import timeToIdx, toProbs, countQuantiles
from CurrentLM import ileNames, iles, applyCurrentLM
removeNonresponders = True
removeSaturated = True
useLogscaleCounts = True
applyPredThresholds = True
outputPath = "/home/mfenner/scipy_prep/data/output/pickle-probs/"
outputFile = outputPath + "predThresh-trimmedEnds-log-probs-by-OrgTime.pck"
#
# take allExpCnds and the file hierarchy as canonical
#
storedProbs = {}
#########################################
# forward to next big comment is necessary to get joined repetitions
#########################################
for cnd in allNonSerExpCnds:
expArray = np.empty((0,3), np.uint8)
for ser in series:
cnd["Series"] = ser
#
# determine the number of slices in this
# stack by "ls'ing" the directory
#
cnd["Slice"] = "*"
globpath = imageDataPath + fileNameFormat[cnd["Organelle"]] % cnd
# ls -l blah/*/blah.tif (brittle FIXME)
matchingFiles = glob.glob(globpath)
sliceValues = makeSliceValues(len(matchingFiles))
for sli in sliceValues:
cnd["Slice"] = sli
exampleFilename = fileNameFormat[cnd["Organelle"]] % cnd
# print exampleFilename,
if exampleFilename.rsplit("/",1)[1] in \
{"15m60xendser301.TIF",
"15m60xendser301.TIF",
"15m60xendser301.TIF",
"120m60xac17ser24.TIF",
"120m60xac17ser27.TIF"}:
# print ".....skipping:"
continue
currentImage = Image.open(imageDataPath+exampleFilename)
pixels = fromimage(currentImage).reshape((numImagePoints,3))
if applyPredThresholds:
# zero removal for quantile computation
pixels = pixels[np.any(pixels, 1)]
counts = {c:np.bincount(pixels[:,c],
minlength=256) for c in colors}
qs = {c:countQuantiles(counts[c], iles) for c in colors}
# clunky:
qDict = {"R8D": qs[R][0], "R9D": qs[R][1],
"G8D": qs[G][0], "G9D": qs[G][1],
"B8D": qs[B][0], "B9D": qs[B][1]}
predThreshes = dict((c, applyCurrentLM(qDict,
c)) for c in colorNames)
# print ",".join("%5.4f" % t for t in predThresholds.values())
thresholdNDArray(pixels, predThreshes, dropSaturated=True)
expArray = np.concatenate((expArray, pixels))
#########################################
# all to here is necessary to get joined series
#########################################
org = simplifyOrgStain(cnd["Organelle"], cnd["Stain"])
t = timeToIdx(cnd["Time"])
print org, t
# convert image stack to counts and add to histograms
for c1, c2 in colorPairs:
probs = toProbs(expArray[:,c1], expArray[:,c2],
removeNonresponders = removeNonresponders,
removeSaturated = removeSaturated,
useLogscaleCounts = useLogscaleCounts)
cnd = (org, c1, c2, t)
storedProbs[cnd] = probs
outputFile = open(outputFile, "wb")
cPickle.dump(storedProbs, outputFile, -1)
outputFile.close()
|
<filename>Train/eval_functions.py
import sys
import os
import keras
import tensorflow as tf
from keras.losses import kullback_leibler_divergence, categorical_crossentropy
from keras.models import load_model, Model
from argparse import ArgumentParser
from keras import backend as K
from Losses import * #needed!
from Metrics import * #needed!
import os
import numpy as np
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc
from root_numpy import array2root
import pandas as pd
import h5py
from Losses import NBINS
#sess = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=True))
sess = tf.InteractiveSession()
def loadModel(inputDir,trainData,model,LoadModel,sampleDatasets=None,removedVars=None,adv=False):
    """Return a keras model ready for evaluation.

    Either load the complete checkpointed model (LoadModel=True), or
    rebuild the architecture with the `model` factory function and then
    restore only the checkpointed weights.
    """
    from DeepJetCore.DataCollection import DataCollection
    checkpoint = '%s/KERAS_check_best_model.h5'%inputDir
    traind = DataCollection()
    traind.readFromFile(trainData)
    # Regression targets are the NBINS mass bins (see Losses.NBINS).
    traind.dataclass.regressiontargetclasses = range(0, NBINS)
    print(traind.getNRegressionTargets())
    shapes = traind.getInputShapes()
    if LoadModel:
        # Checkpoint contains the full architecture plus weights.
        evalModel = load_model(checkpoint, custom_objects=global_loss_list)
    else:
        # Rebuild the architecture from the factory, then load weights.
        train_inputs = [keras.layers.Input(shape=s) for s in shapes]
        modelargs = {}
        if adv:
            modelargs.update({'nRegTargets': NBINS,
                              'discTrainable': True,
                              'advTrainable': True})
        evalModel = model(train_inputs, traind.getNClassificationTargets(),
                          traind.getNRegressionTargets(), sampleDatasets,
                          removedVars, **modelargs)
        evalModel.load_weights(checkpoint)
    return evalModel
def evaluate(testd, trainData, model, outputDir, storeInputs=False, adv=False):
NENT = 1 # Can skip some events
filelist=[]
i=0
for s in testd.samples:
#for s in testd.samples[0:1]:
spath = testd.getSamplePath(s)
filelist.append(spath)
h5File = h5py.File(spath)
f = h5File
#features_val = [h5File['x%i_shape'%j][()] for j in range(0, h5File['x_listlength'][()][0])]
features_val = [h5File['x%i'%j][()] for j in range(0, h5File['x_listlength'][()][0])]
#features_val=testd.getAllFeatures()
predict_test_i = model.predict(features_val)
labels_val_i = h5File['y0'][()][::NENT,:]
spectators_val_i = h5File['z0'][()][::NENT,0,:]
if storeInputs: raw_features_val_i = h5File['z1'][()][::NENT,0,:]
if i==0:
predict_test = predict_test_i
labels_val = labels_val_i
spectators_val = spectators_val_i
if storeInputs: raw_features_val = raw_features_val_i
else:
predict_test = np.concatenate((predict_test,predict_test_i))
labels_val = np.concatenate((labels_val, labels_val_i))
spectators_val = np.concatenate((spectators_val, spectators_val_i))
if storeInputs: raw_features_val = np.concatenate((raw_features_val, raw_features_val_i))
i+=1
# Value
#labels_val=testd.getAllLabels()[0][::NENT,:]
#features_val=testd.getAllFeatures()[0][::NENT,0,:]
#spectators_val = testd.getAllSpectators()[0][::NENT,0,:]
#if storeInputs: raw_features_val = testd.getAllSpectators()[-1][::NENT,0,:]
# Labels
print testd.dataclass.branches
feature_names = testd.dataclass.branches[1]
spectator_names = testd.dataclass.branches[0]
#truthnames = testd.getUsedTruth()
from DeepJetCore.DataCollection import DataCollection
traind=DataCollection()
traind.readFromFile(trainData)
truthnames = traind.getUsedTruth()
# Store features
print "Coulmns", spectator_names
df = pd.DataFrame(spectators_val, columns = spectator_names)
if storeInputs:
for i, tname in enumerate(feature_names):
df[tname] = raw_features_val[:,i]
# Add predictions
print truthnames
print predict_test.shape
for i, tname in enumerate(truthnames):
df['truth'+tname] = labels_val[:,i]
#print "Mean 0th label predict predict of ", tname, np.mean(predict_test[:,0]), ", Stats:", np.sum(labels_val[:,i]), "/", len(labels_val[:,i])
if adv:
df['predict'+tname] = predict_test[:,NBINS+i]
for j in range(NBINS):
df['predict_massbin%i'%j] = predict_test[:,j+i]
else:
df['predict'+tname] = predict_test[:,i]
print "Testing prediction:"
print "Total: ", len(predict_test[:,0])
for lab in truthnames:
print lab, ":", sum(df['truth'+lab].values)
df.to_pickle(outputDir+'/output.pkl') #to save the dataframe, df to 123.pkl
return df
print "Finished storing dataframe"
|
<gh_stars>0
from os import getenv
from traceback import TracebackException
import discord
from discord.ext import commands
from mogirin import (
TicketAlreadyCollected,
TicketCollector,
TicketNumberNotFound,
find_ticket_number,
)
# Channel where ticket-collection messages are handled (see on_message).
MOGIRI_CHANNEL_ID = int(getenv("MOGIRI_CHANNEL_ID"))
# Role granted to attendees once their ticket is accepted.
ATTENDEE_ROLE_ID = int(getenv("ATTENDEE_ROLE_ID"))
# Commands are invoked with a leading "/" (e.g. /ping).
bot = commands.Bot(command_prefix="/")
# Ticket collector backed by the participants spreadsheet.
collector = TicketCollector(getenv("SPREADSHEET_ID"))
class NeedToNotifyMonitors(Exception):
    """Raised when a ticket error must also be escalated to staff."""
    def __init__(self, message):
        # Text relayed both to the user and to the staff channel.
        self.message = message
@bot.event
async def on_command_error(ctx, error):
    """Reply to the invoking channel with the full traceback of the error."""
    # Command errors wrap the real exception in ``error.original``.
    cause = getattr(error, "original", error)
    traceback_text = "".join(TracebackException.from_exception(cause).format())
    await ctx.send(traceback_text)
async def greet(channel_id=int(getenv("LOGGING_CHANNEL_ID"))):
    """Post a startup notice to the logging channel."""
    logging_channel = bot.get_channel(channel_id)
    await logging_channel.send("[INFO] もぎりん、起動しました")
async def notify(
    message,
    channel_id=int(getenv("LOGGING_CHANNEL_ID")),
    guild_id=int(getenv("GUILD_ID")),
    role_id=int(getenv("STAFF_ROLE_ID")),
):
    """Send *message* to the logging channel, mentioning the staff role."""
    staff_role = bot.get_guild(guild_id).get_role(role_id)
    logging_channel = bot.get_channel(channel_id)
    await logging_channel.send(f"{staff_role.mention} {message}")
@bot.event
async def on_ready():
    # Announce startup in the logging channel once connected.
    await greet()
@bot.command()
async def ping(ctx):
    # Health-check command: `/ping` -> "pong".
    await ctx.send("pong")
async def collect_ticket(
    ticket_number: str, member: discord.Member, role: discord.Role
) -> str:
    """Collect the ticket and return the reply text for the user.

    Raises NeedToNotifyMonitors when staff must be alerted
    (i.e. the ticket was already used).
    """
    try:
        await collector.collect(ticket_number, member, role)
    except TicketNumberNotFound:
        return (
            f"LookupError: Couldn't find your number {ticket_number!r}.\n"
            "If the number is correct, try again in a few hours "
            "(Sorry, there is a lag in synchronizing participant information)."
        )
    except TicketAlreadyCollected:
        raise NeedToNotifyMonitors(
            f"RuntimeError: the ticket {ticket_number!r} is already used.\n"
            "If the number is correct, please contact staff "
            "with `@2021-staff` mention."
        )
    return "Accepted! Welcome to PyCon JP 2021 venue!"
@bot.event
async def on_message(message):
    """Handle ticket-collection mentions posted in the mogiri channel."""
    # Ignore: anything outside the dedicated channel; messages from bots
    # (Issue #8); messages that do not mention this bot; and users who
    # already carry the attendee role.
    if message.channel.id != MOGIRI_CHANNEL_ID:
        return
    if message.author.bot:
        return
    if bot.user not in message.mentions:
        return
    attendee_role = message.guild.get_role(ATTENDEE_ROLE_ID)
    if attendee_role in message.author.roles:
        return
    ticket_number = find_ticket_number(message.clean_content)
    needs_to_notify = False
    if ticket_number is None:
        reply_message = (
            "ValueError: ticket number is not included in your message.\n"
            "Please input numeric ticket number like `@mogirin 1234567`."
        )
    else:
        try:
            reply_message = await collect_ticket(
                ticket_number, message.author, attendee_role
            )
        except NeedToNotifyMonitors as ex:
            needs_to_notify = True
            reply_message = ex.message
    await message.channel.send(f"{message.author.mention} {reply_message}")
    if needs_to_notify:
        monitor_message = (
            f"「{message.author.display_name}」氏のもぎりに以下のエラーを送出しました\n"
            f"{reply_message}"
        )
        await notify(monitor_message)
# Entry point: read the bot token from the environment and start
# the (blocking) discord event loop.
token = getenv("DISCORD_BOT_TOKEN")
bot.run(token)
|
#!/usr/bin/env python3
# Creates a csv file named se_corpus-yyyy-mm-dd.csv in working directory
import csv
import requests
import re
import sys
# GitHub API endpoint listing Standard Ebooks repos (paginated).
REPO_BASE_URL = "https://api.github.com/users/standardebooks/repos?per_page=100&page="
# Raw-file host; repo full_name and a PATH_TO_* suffix are appended.
RAW_FILE_URL_STEM = "https://raw.github.com/"
PATH_TO_CONTENT_OPF = "/master/src/epub/content.opf"
PATH_TO_COLOPHON = "/master/src/epub/text/colophon.xhtml"
# Strings used as search patterns in content.opf and colophon.xhtml:
FINDS_TITLE = 'meta property="file-as" refines="#title'
FINDS_SE_SUBJECT = 'meta property="se:subject"'
FINDS_DESCRIPTION = 'dc:description id="description"'
FINDS_AUTHOR = 'meta property="file-as" refines="#author"'
FINDS_ARTIST = 'dc:contributor id="artist"'
FINDS_PRODUCER = 'meta property="file-as" refines="#producer-1"'
FINDS_PUB_DATE = "<dc:date>"
FINDS_REV_DATE = 'meta property="dcterms:modified"'
FINDS_PAINTING = 'epub:type="se:name.visual-art.painting"'
# Dictionary keys (also used as the csv column headers):
KEY_TITLE = "Title"
KEY_SE_SUBJECT = "SE subject"
KEY_DESCRIPTION = "Description"
KEY_AUTHOR = "Author"
KEY_ARTIST = "Artist"
KEY_PRODUCER = "Producer"
KEY_PUB_DATE = "Pub date"
KEY_REV_DATE = "Rev date"
KEY_PAINTING = "Painting"
# Launch flags and args (set by parse_args at startup):
FLAG_VERBOSE = False
FLAG_VERY_VERBOSE = False
FLAG_QUIET = False
ARG_FILEPATH = ""
# Firing order: functions fire in this order:
# parse_args()
# save_csv_to_path
# complete_corpus_list()
# get_repo_urls()
# dict_from_repo_url()
# content_opf_from_repo_url()
# dict_from_content_opf_string()
# strip_and_remove_tags()
# reformat_time_string()
# colophon_from_repo_url()
# painting_from_colophon_string()
# write_csv_from_list_to_path()
# Returns a list of repo urls
def get_repo_urls():
    """Return a list of raw-file base urls, one per Standard Ebooks repo.

    Walks the paginated GitHub API until an empty page is returned.
    Parses the response as JSON instead of the previous comma-splitting
    of the raw body, which broke on any field containing a comma and
    relied on a fragile ``len(text) < 100`` last-page heuristic.
    """
    output_info_for_type("Looking for repos...", "", False)
    page = 0
    rv = []
    while True:
        page += 1
        full_url = REPO_BASE_URL + str(page)
        output_info_for_type("Checking " + full_url, "v", False)
        resp = requests.get(full_url)
        try:
            repos = resp.json()
        except ValueError:
            # Non-JSON response (e.g. error page): treat as last page.
            repos = []
        if not repos:
            break
        for repo in repos:
            full_name = repo.get("full_name", "")
            if full_name:
                repo_url = RAW_FILE_URL_STEM + full_name
                rv.append(repo_url)
                output_info_for_type("Found repo at " + repo_url, "vv", False)
    output_info_for_type("Repos found: " + str(len(rv)), "v", False)
    return rv
# Return text of content.opf page
def content_opf_from_repo_url(repo_url):
    """Fetch and return the text of a repo's content.opf, or "" on failure."""
    content_opf_url = repo_url + PATH_TO_CONTENT_OPF
    response = requests.get(content_opf_url)
    if not response.ok:
        output_info_for_type("No content.opf found for " + repo_url, "", False)
        return ""
    output_info_for_type("Found content.opf for " + repo_url, "vv", False)
    return response.text
# Returns text of colophon.xhtml
def colophon_from_repo_url(repo_url):
    """Fetch and return the text of a repo's colophon.xhtml, or "" on failure."""
    colophon_url = repo_url + PATH_TO_COLOPHON
    response = requests.get(colophon_url)
    if not response.ok:
        output_info_for_type("No colophon.xhtml found for " + repo_url, "", False)
        return ""
    output_info_for_type("Found colophon.xhtml for " + repo_url, "vv", False)
    return response.text
# Returns a dict that has all data except name of painting
def dict_from_repo_url(repo_url):
    """Return the metadata dict for a repo (everything except the painting)."""
    return dict_from_content_opf_string(content_opf_from_repo_url(repo_url))
def strip_and_remove_tags(s):
    """Trim surrounding whitespace and delete all XML/HTML tags from *s*."""
    return re.sub('<[^<]+?>', '', s.strip())
def reformat_time_string(time_string):
    """Return only the date portion of an ISO timestamp.

    Timestamps arrive like ``2021-05-12T22:13:51Z``; the first ten
    characters are the ``YYYY-MM-DD`` date.
    """
    date_part = time_string[:10]
    return date_part
def dict_from_content_opf_string(s):
    """Build the metadata dict for one book from its content.opf text."""
    rv = {KEY_TITLE:'', KEY_SE_SUBJECT:'', KEY_DESCRIPTION:'', KEY_AUTHOR:'', KEY_ARTIST:'', KEY_PRODUCER:'', KEY_PUB_DATE:'', KEY_REV_DATE:''}
    # Pattern -> key pairs whose value is just the stripped line text.
    text_fields = (
        (FINDS_TITLE, KEY_TITLE),
        (FINDS_SE_SUBJECT, KEY_SE_SUBJECT),
        (FINDS_DESCRIPTION, KEY_DESCRIPTION),
        (FINDS_AUTHOR, KEY_AUTHOR),
        (FINDS_ARTIST, KEY_ARTIST),
        (FINDS_PRODUCER, KEY_PRODUCER),
    )
    # Date fields additionally get trimmed to YYYY-MM-DD.
    date_fields = (
        (FINDS_PUB_DATE, KEY_PUB_DATE),
        (FINDS_REV_DATE, KEY_REV_DATE),
    )
    for line in s.split("\n"):
        matched = False
        for pattern, key in text_fields:
            if pattern in line:
                rv[key] = strip_and_remove_tags(line)
                matched = True
                break
        if matched:
            continue
        for pattern, key in date_fields:
            if pattern in line:
                rv[key] = reformat_time_string(strip_and_remove_tags(line))
                break
    return rv
def painting_from_colophon_string(s):
    """Extract the cover painting's name from the colophon.xhtml text."""
    painting = ""
    for line in s.split("\n"):
        if FINDS_PAINTING not in line:
            continue
        # Strip tags, then drop the trailing period.
        painting = strip_and_remove_tags(line)[:-1]
        output_info_for_type("Found painting in colophon.xhtml", "vv", False)
        break
    if painting == "":
        output_info_for_type("No painting found in colophon", "v", False)
    return painting
def complete_corpus_list():
    """Return a list of metadata dicts, one per Standard Ebooks repo."""
    corpus = []
    for repo_url in get_repo_urls():
        # Metadata from content.opf, plus the painting from the colophon.
        repo_dict = dict_from_repo_url(repo_url)
        colophon = colophon_from_repo_url(repo_url)
        repo_dict[KEY_PAINTING] = painting_from_colophon_string(colophon)
        corpus.append(repo_dict)
        output_info_for_type("Got metadata for " + repo_url, "vv", False)
    return corpus
def write_csv_from_list_to_path(corpus_list, filepath):
    """Write the corpus metadata dicts to a csv file at *filepath*.

    Unlike the previous version this does not mutate the caller's list
    (it used to insert a hand-built header dict at index 0), uses
    ``DictWriter.writeheader()`` for the header row, and opens the file
    with ``newline=''`` as the csv module requires.
    """
    # This array sets the column order:
    field_names = [KEY_TITLE, KEY_AUTHOR, KEY_SE_SUBJECT, KEY_ARTIST, KEY_PAINTING, KEY_PRODUCER, KEY_PUB_DATE, KEY_REV_DATE, KEY_DESCRIPTION]
    output_info_for_type("Writing csv file.", "vv", True)
    with open(filepath, 'w', newline='') as csv_file:
        writer = csv.DictWriter(csv_file, fieldnames=field_names)
        writer.writeheader()
        for row in corpus_list:
            writer.writerow(row)
            output_info_for_type(".", "vv", True)
    output_info_for_type("", "vv", False)
def save_csv_to_path(filepath):
    """Fetch metadata for the whole corpus and write it to *filepath*."""
    write_csv_from_list_to_path(complete_corpus_list(), filepath)
def print_usage():
    """Print the usage text, then terminate the program."""
    usage_lines = (
        "Usage: python3 se_corpus.py filepath",
        "Optional flags: -v (verbose); -vv (very verbose); -q (quiet); -h or --help: usage",
    )
    for line in usage_lines:
        print(line)
    sys.exit()
def output_info_for_type(s, verbosity, suppress_newline):
    """Print *s* if the launch flags allow it.

    verbosity: "" (always print), "v" (only if FLAG_VERBOSE), or
    "vv" (only if FLAG_VERY_VERBOSE). FLAG_QUIET suppresses everything.
    suppress_newline: True prints without a trailing newline.

    Rewritten with guard clauses; the old ``global`` declarations were
    unnecessary (the flags are only read here, never assigned) and the
    ``== True`` comparisons were redundant.
    """
    if FLAG_QUIET:
        return
    if verbosity == "vv" and not FLAG_VERY_VERBOSE:
        return
    if verbosity == "v" and not FLAG_VERBOSE:
        return
    print(s, end='' if suppress_newline else '\n')
def parse_args():
    """Parse command-line flags and the output filepath into the globals.

    Recognized flags: -v (verbose), -vv (very verbose), -q (quiet),
    -h / --help (print usage and exit). The first non-flag argument is
    taken as the output filepath; exits via print_usage() if none given.

    Removed a leftover debug ``print(arg)`` that echoed every argument,
    and replaced the index loop with direct iteration.
    """
    global ARG_FILEPATH
    global FLAG_QUIET
    global FLAG_VERBOSE
    global FLAG_VERY_VERBOSE
    for arg in sys.argv[1:]:
        if arg == "--help":
            print_usage()
        elif arg.startswith("-"):
            # NOTE: substring tests, so a combined flag like "-vq" only
            # sets the first matching flag ("vv" is checked before "v").
            if "vv" in arg:
                FLAG_VERY_VERBOSE = True
            elif "v" in arg:
                FLAG_VERBOSE = True
            elif "q" in arg:
                FLAG_QUIET = True
            elif "h" in arg:
                print_usage()
        else:
            ARG_FILEPATH = arg
    if len(ARG_FILEPATH) < 1:
        print_usage()
###########################
# Script entry point: parse flags/filepath, then build and save the csv.
if __name__ == "__main__":
    parse_args()
    save_csv_to_path(ARG_FILEPATH)
|
<filename>mapie/classification.py
from __future__ import annotations
from typing import Optional, Union, Tuple, Iterable
import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import BaseCrossValidator
from sklearn.pipeline import Pipeline
from sklearn.utils import check_X_y, check_array, check_random_state
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.validation import check_is_fitted
from sklearn.preprocessing import label_binarize
from ._typing import ArrayLike
from ._machine_precision import EPSILON
from .utils import (
check_null_weight,
check_n_features_in,
check_alpha,
check_alpha_and_n_samples,
check_n_jobs,
check_verbose,
check_input_is_image
)
class MapieClassifier(BaseEstimator, ClassifierMixin): # type: ignore
"""
Prediction sets for classification.
This class implements several conformal prediction strategies for
estimating prediction sets for classification. Instead of giving a
single predicted label, the idea is to give a set of predicted labels
(or prediction sets) which come with mathematically guaranteed coverages.
Parameters
----------
estimator : Optional[ClassifierMixin]
Any classifier with scikit-learn API
(i.e. with fit, predict, and predict_proba methods), by default None.
If ``None``, estimator defaults to a ``LogisticRegression`` instance.
method: Optional[str]
Method to choose for prediction interval estimates.
Choose among:
- "naive", sum of the probabilities until the 1-alpha thresold.
- "score", based on the the scores
(i.e. 1 minus the softmax score of the true label)
on the calibration set.
- "cumulated_score", based on the sum of the softmax outputs of the
labels until the true label is reached, on the calibration set.
- "top_k", based on the sorted index of the probability of the true
label in the softmax outputs, on the calibration set. In case two
probabilities are equal, both are taken, thus, the size of some p
prediction sets may be different from the others.
By default "score".
cv: Optional[str]
The cross-validation strategy for computing scores :
- ``"prefit"``, assumes that ``estimator`` has been fitted already.
All data provided in the ``fit`` method is then used
to calibrate the predictions through the score computation.
At prediction time, quantiles of these scores are used to estimate
prediction sets.
By default ``prefit``.
n_jobs: Optional[int]
Number of jobs for parallel processing using joblib
via the "locky" backend.
At this moment, parallel processing is disabled.
If ``-1`` all CPUs are used.
If ``1`` is given, no parallel computing code is used at all,
which is useful for debugging.
For n_jobs below ``-1``, ``(n_cpus + 1 + n_jobs)`` are used.
None is a marker for ‘unset’ that will be interpreted as ``n_jobs=1``
(sequential execution).
By default ``None``.
random_state: Optional[Union[int, RandomState]]
Pseudo random number generator state used for random uniform sampling
for evaluation quantiles and prediction sets in cumulated_score.
Pass an int for reproducible output across multiple function calls.
    By default ``0``.
verbose : int, optional
The verbosity level, used with joblib for multiprocessing.
At this moment, parallel processing is disabled.
The frequency of the messages increases with the verbosity level.
If it more than ``10``, all iterations are reported.
Above ``50``, the output is sent to stdout.
By default ``0``.
Attributes
----------
valid_methods: List[str]
List of all valid methods.
single_estimator_ : sklearn.ClassifierMixin
Estimator fitted on the whole training set.
n_features_in_: int
Number of features passed to the fit method.
n_samples_val_: Union[int, List[int]]
Number of samples passed to the fit method.
conformity_scores_ : np.ndarray of shape (n_samples_train)
The conformity scores used to calibrate the prediction sets.
quantiles_ : np.ndarray of shape (n_alpha)
The quantiles estimated from ``conformity_scores_`` and alpha values.
References
----------
<NAME>, <NAME>, and <NAME>.
"Least Ambiguous Set-Valued Classifiers with Bounded Error Levels",
Journal of the American Statistical Association, 114, 2019.
<NAME>, <NAME> and <NAME>.
"Classification with Valid and Adaptive Coverage."
    NeurIPS 2020 (spotlight).
<NAME>, <NAME>, <NAME>
and <NAME>.
"Uncertainty Sets for Image Classifiers using Conformal Prediction."
International Conference on Learning Representations 2021.
Examples
--------
>>> import numpy as np
>>> from sklearn.naive_bayes import GaussianNB
>>> from mapie.classification import MapieClassifier
>>> X_toy = np.arange(9).reshape(-1, 1)
>>> y_toy = np.stack([0, 0, 1, 0, 1, 2, 1, 2, 2])
>>> clf = GaussianNB().fit(X_toy, y_toy)
>>> mapie = MapieClassifier(estimator=clf, cv="prefit").fit(X_toy, y_toy)
>>> _, y_pi_mapie = mapie.predict(X_toy, alpha=0.2)
>>> print(y_pi_mapie[:, :, 0])
[[ True False False]
[ True False False]
[ True False False]
[ True True False]
[False True False]
[False True True]
[False False True]
[False False True]
[False False True]]
"""
valid_methods_ = ["naive", "score", "cumulated_score", "top_k"]
    def __init__(
        self,
        estimator: Optional[ClassifierMixin] = None,
        method: str = "score",
        cv: Optional[str] = "prefit",
        n_jobs: Optional[int] = None,
        random_state: Optional[Union[int, np.random.RandomState]] = None,
        verbose: int = 0
    ) -> None:
        """Store the constructor parameters unchanged.

        Per scikit-learn convention, ``__init__`` performs no validation;
        all checks happen in ``fit``/``predict`` (see ``_check_parameters``).
        """
        self.estimator = estimator
        self.method = method
        self.cv = cv
        self.n_jobs = n_jobs
        self.random_state = random_state
        self.verbose = verbose
def _check_parameters(self) -> None:
"""
Perform several checks on input parameters.
Raises
------
ValueError
If parameters are not valid.
"""
if self.method not in self.valid_methods_:
raise ValueError(
"Invalid method. "
"Allowed values are 'score' or 'cumulated_score'."
)
check_n_jobs(self.n_jobs)
check_verbose(self.verbose)
check_random_state(self.random_state)
def _check_estimator(
self,
X: ArrayLike,
y: ArrayLike,
estimator: Optional[ClassifierMixin] = None,
) -> ClassifierMixin:
"""
Check if estimator is ``None``,
and returns a ``LogisticRegression`` instance if necessary.
If the ``cv`` attribute is ``"prefit"``,
check if estimator is indeed already fitted.
Parameters
----------
X : ArrayLike of shape (n_samples, n_features)
Training data.
y : ArrayLike of shape (n_samples,)
Training labels.
estimator : Optional[ClassifierMixin], optional
Estimator to check, by default ``None``
Returns
-------
ClassifierMixin
The estimator itself or a default ``LogisticRegression`` instance.
Raises
------
ValueError
If the estimator is not ``None``
and has no fit, predict, nor predict_proba methods.
NotFittedError
If the estimator is not fitted and ``cv`` attribute is "prefit".
"""
if estimator is None:
if not self.image_input:
return LogisticRegression(multi_class="multinomial").fit(X, y)
else:
raise ValueError(
"Default LogisticRegression's input can't be an image."
"Please provide a proper model."
)
if isinstance(estimator, Pipeline):
est = estimator[-1]
else:
est = estimator
if (
not hasattr(est, "fit")
and not hasattr(est, "predict")
and not hasattr(est, "predict_proba")
):
raise ValueError(
"Invalid estimator. "
"Please provide a classifier with fit,"
"predict, and predict_proba methods."
)
if self.cv == "prefit":
check_is_fitted(est)
if not hasattr(est, "classes_"):
raise AttributeError(
"Invalid classifier. "
"Fitted classifier does not contain "
"'classes_' attribute."
)
return estimator
def _check_cv(
self, cv: Optional[Union[int, str, BaseCrossValidator]] = None
) -> Optional[Union[float, str]]:
"""
Check if cross-validator is ``None`` or ``"prefit"``.
Else raise error.
Parameters
----------
cv : Optional[Union[int, str, BaseCrossValidator]], optional
Cross-validator to check, by default ``None``.
Returns
-------
Optional[Union[float, str]]
'prefit' or None.
Raises
------
ValueError
If the cross-validator is not valid.
"""
if cv is None:
return "prefit"
if cv == "prefit":
return cv
raise ValueError("Invalid cv argument." "Allowed value is 'prefit'.")
def _check_include_last_label(
self,
include_last_label: Optional[Union[bool, str]]
) -> Optional[Union[bool, str]]:
"""
Check if include_last_label is a boolean or a string.
Else raise error.
Parameters
----------
include_last_label : Optional[Union[bool, str]]
Whether or not to include last label in
prediction sets for the "cumulated_score" method. Choose among:
- False, does not include label whose cumulated score is just over
the quantile.
- True, includes label whose cumulated score is just over the
quantile, unless there is only one label in the prediction set.
- "randomized", randomly includes label whose cumulated score is
just over the quantile based on the comparison of a uniform number
and the difference between the cumulated score of the last label
and the quantile.
Returns
-------
Optional[Union[bool, str]]
Raises
------
ValueError
"Invalid include_last_label argument. "
"Should be a boolean or 'randomized'."
"""
if (
(not isinstance(include_last_label, bool)) and
(not include_last_label == "randomized")
):
raise ValueError(
"Invalid include_last_label argument. "
"Should be a boolean or 'randomized'."
)
else:
return include_last_label
def _check_proba_normalized(
self,
y_pred_proba: ArrayLike
) -> Optional[ArrayLike]:
"""
Check if, for all the observations, the sum of
the probabilities is equal to one.
Parameters
----------
y_pred_proba : ArrayLike of shape (n_samples, n_classes)
Softmax output of a model.
Returns
-------
Optional[ArrayLike] of shape (n_samples, n_classes)
Softmax output of a model if the scores all sum
to one.
Raises
------
ValueError
If the sum of the scores is not equal to one.
"""
np.testing.assert_allclose(
np.sum(y_pred_proba, axis=1),
1,
err_msg="The sum of the scores is not equal to one.",
rtol=1e-5
)
return y_pred_proba
    def _get_last_index_included(
        self,
        y_pred_proba_cumsum: ArrayLike,
        include_last_label: Optional[Union[bool, str]]
    ) -> ArrayLike:
        """
        Return the index of the last included sorted probability
        depending if we included the first label over the quantile
        or not.

        Parameters
        ----------
        y_pred_proba_cumsum : ArrayLike of shape (n_samples, n_classes)
            Cumsumed probabilities in the original order.
        include_last_label : Union[bool, str]
            Whether or not include the last label. If 'randomized',
            the last label is included.

        Returns
        -------
        Optional[ArrayLike] of shape (n_samples, n_classes)
            Index of the last included sorted probability.
        """
        if (
            (include_last_label is True) or
            (include_last_label == 'randomized')
        ):
            # Include the first label whose cumulated score reaches the
            # quantile: mask entries strictly below the quantile, then
            # argmin picks the smallest remaining cumsum (one column of
            # indices per alpha/quantile).
            y_pred_index_last = np.stack(
                [
                    np.argmin(
                        np.ma.masked_less(
                            y_pred_proba_cumsum,
                            quantile
                        ),
                        axis=1
                    )
                    for quantile in self.quantiles_
                ], axis=1
            )
        elif (include_last_label is False):
            # Exclude the label that crosses the quantile: mask entries
            # strictly above the quantile and take the largest remaining
            # cumsum. The max(quantile, row-min + EPSILON) floor keeps at
            # least one label per row in the prediction set.
            y_pred_index_last = np.stack(
                [
                    np.argmax(
                        np.ma.masked_where(
                            y_pred_proba_cumsum > np.maximum(
                                quantile,
                                np.min(y_pred_proba_cumsum, axis=1) + EPSILON
                            ).reshape(-1, 1),
                            y_pred_proba_cumsum
                        ),
                        axis=1
                    )
                    for quantile in self.quantiles_
                ], axis=1
            )
        else:
            # Defensive: _check_include_last_label should already have
            # rejected any other value.
            raise ValueError(
                "Invalid include_last_label argument. "
                "Should be a boolean or 'randomized'."
            )
        return y_pred_index_last
    def _add_random_tie_breaking(
        self,
        prediction_sets: ArrayLike,
        y_pred_index_last: ArrayLike,
        y_pred_proba_cumsum: ArrayLike,
        y_pred_proba_last: ArrayLike
    ) -> ArrayLike:
        """
        Randomly remove last label from prediction set based on the
        comparison between a random number and the difference between
        cumulated score of the last included label and the quantile.

        Parameters
        ----------
        prediction_sets : ArrayLike of shape (n_samples, n_classes, n_alpha)
            Prediction set for each observation and each alpha.
        y_pred_index_last : ArrayLike of shape (n_samples, n_alpha)
            Index of the last included label.
        y_pred_proba_cumsum : ArrayLike of shape (n_samples, n_classes)
            Cumsumed probability of the model in the original order.
        y_pred_proba_last : ArrayLike of shape (n_samples, n_alpha)
            Last included probability.

        Returns
        -------
        ArrayLike of shape (n_samples, n_classes, n_alpha)
            Updated version of prediction_sets with randomly removed
            labels.
        """
        # Gather, for each sample and each alpha, the cumulated score of
        # the last included label (filter sorting probabilities with kept
        # labels).
        y_proba_last_cumsumed = np.stack(
            [
                np.squeeze(
                    np.take_along_axis(
                        y_pred_proba_cumsum,
                        y_pred_index_last[:, iq].reshape(-1, 1),
                        axis=1
                    )
                )
                for iq, _ in enumerate(self.quantiles_)
            ], axis=1
        )
        # Compute V parameter from Romano+(2020): the overshoot of the
        # last label's cumulated score past the quantile, expressed as a
        # fraction of that label's own probability.
        vs = np.stack(
            [
                (
                    y_proba_last_cumsumed[:, iq]
                    - quantile
                ) / np.squeeze(y_pred_proba_last[:, :, iq])
                for iq, quantile in enumerate(self.quantiles_)
            ], axis=1,
        )
        # Get random numbers for each observation and alpha value;
        # a seeded self.random_state makes the tie-breaking reproducible.
        random_state = check_random_state(self.random_state)
        us = random_state.uniform(size=prediction_sets.shape[0])
        # Remove last label from comparison between uniform number and V:
        # keep the last label only when V >= U.
        vs_less_than_us = vs < us[:, np.newaxis]
        np.put_along_axis(
            prediction_sets,
            y_pred_index_last[:, np.newaxis, :],
            vs_less_than_us[:, np.newaxis, :],
            axis=1
        )
        return prediction_sets
def fit(
self,
X: ArrayLike,
y: ArrayLike,
image_input: Optional[bool] = False,
sample_weight: Optional[ArrayLike] = None,
) -> MapieClassifier:
"""
Fit the base estimator or use the fitted base estimator.
Parameters
----------
X : ArrayLike of shape (n_samples, n_features)
Training data.
y : ArrayLike of shape (n_samples,)
Training labels.
image_input: Optional[bool] = False
Whether or not the X input is an image. If True, you must provide
a model that accepts image as input (e.g., a Neural Network). All
Scikit-learn classifiers only accept two-dimensional inputs.
By default False.
sample_weight : Optional[ArrayLike] of shape (n_samples,)
Sample weights for fitting the out-of-fold models.
If None, then samples are equally weighted.
If some weights are null,
their corresponding observations are removed
before the fitting process and hence have no prediction sets.
By default None.
Returns
-------
MapieClassifier
The model itself.
"""
# Checks
self.image_input = image_input
self._check_parameters()
cv = self._check_cv(self.cv)
estimator = self._check_estimator(X, y, self.estimator)
if self.image_input:
check_input_is_image(X)
X, y = check_X_y(
X, y, force_all_finite=False, ensure_2d=self.image_input,
allow_nd=self.image_input, dtype=["float64", "int", "object"]
)
assert type_of_target(y) == "multiclass"
self.n_features_in_ = check_n_features_in(X, cv, estimator)
sample_weight, X, y = check_null_weight(sample_weight, X, y)
# Work
self.single_estimator_ = estimator
y_pred_proba = self.single_estimator_.predict_proba(X)
y_pred_proba = self._check_proba_normalized(y_pred_proba)
self.n_samples_val_ = X.shape[0]
if self.method == "naive":
self.conformity_scores_ = np.empty(y_pred_proba.shape)
elif self.method == "score":
self.conformity_scores_ = np.take_along_axis(
1 - y_pred_proba, y.reshape(-1, 1), axis=1
)
elif self.method == "cumulated_score":
y_true = label_binarize(y=y, classes=estimator.classes_)
index_sorted = np.fliplr(np.argsort(y_pred_proba, axis=1))
y_pred_proba_sorted = np.take_along_axis(
y_pred_proba, index_sorted, axis=1
)
y_true_sorted = np.take_along_axis(y_true, index_sorted, axis=1)
y_pred_proba_sorted_cumsum = np.cumsum(y_pred_proba_sorted, axis=1)
cutoff = np.argmax(y_true_sorted, axis=1)
self.conformity_scores_ = np.take_along_axis(
y_pred_proba_sorted_cumsum, cutoff.reshape(-1, 1), axis=1
)
y_proba_true = np.take_along_axis(
y_pred_proba, y.reshape(-1, 1), axis=1
)
random_state = check_random_state(self.random_state)
u = random_state.uniform(size=len(y_pred_proba)).reshape(-1, 1)
self.conformity_scores_ -= u*y_proba_true
elif self.method == "top_k":
# Here we reorder the labels by decreasing probability
# and get the position of each label from decreasing probability
index = np.argsort(
np.fliplr(np.argsort(y_pred_proba, axis=1))
)
self.conformity_scores_ = np.take_along_axis(
index,
y.reshape(-1, 1),
axis=1
)
else:
raise ValueError(
"Invalid method. "
"Allowed values are 'score' or 'cumulated_score'."
)
return self
def predict(
self,
X: ArrayLike,
alpha: Optional[Union[float, Iterable[float]]] = None,
include_last_label: Optional[Union[bool, str]] = True,
) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
"""
Prediction prediction sets on new samples based on target confidence
interval.
Prediction sets for a given ``alpha`` are deduced from :
- quantiles of softmax scores ("score" method)
- quantiles of cumulated scores ("cumulated_score" method)
Parameters
----------
X : ArrayLike of shape (n_samples, n_features)
Test data.
alpha: Optional[Union[float, Iterable[float]]]
Can be a float, a list of floats, or a ``np.ndarray`` of floats.
Between 0 and 1, represent the uncertainty of the confidence
interval.
Lower ``alpha`` produce larger (more conservative) prediction
sets.
``alpha`` is the complement of the target coverage level.
By default ``None``.
include_last_label: Optional[Union[bool, str]]
Whether or not to include last label in
prediction sets for the "cumulated_score" method. Choose among:
- False, does not include label whose cumulated score is just over
the quantile.
- True, includes label whose cumulated score is just over the
quantile, unless there is only one label in the prediction set.
- "randomized", randomly includes label whose cumulated score is
just over the quantile based on the comparison of a uniform number
and the difference between the cumulated score of the last label
and the quantile.
By default ``True``.
Returns
-------
Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]
- np.ndarray of shape (n_samples,) if alpha is None.
- Tuple[np.ndarray, np.ndarray] of shapes
(n_samples,) and (n_samples, n_classes, n_alpha) if alpha is not None.
"""
# Checks
include_last_label = self._check_include_last_label(include_last_label)
alpha_ = check_alpha(alpha)
check_is_fitted(
self,
[
"single_estimator_",
"conformity_scores_",
"n_features_in_",
"n_samples_val_",
],
)
if self.image_input:
check_input_is_image(X)
X = check_array(
X, force_all_finite=False, ensure_2d=self.image_input,
allow_nd=self.image_input, dtype=["float64", "object"]
)
y_pred = self.single_estimator_.predict(X)
y_pred_proba = self.single_estimator_.predict_proba(X)
y_pred_proba = self._check_proba_normalized(y_pred_proba)
n = self.n_samples_val_
if alpha_ is None:
return np.array(y_pred)
else:
# Choice of the quantile
check_alpha_and_n_samples(alpha_, n)
if self.method == "naive":
self.quantiles_ = 1 - alpha_
else:
self.quantiles_ = np.stack([
np.quantile(
self.conformity_scores_,
((n + 1) * (1 - _alpha)) / n,
interpolation="higher"
) for _alpha in alpha_
])
if self.method == "score":
prediction_sets = np.stack(
[
y_pred_proba > 1 - quantile
for quantile in self.quantiles_
],
axis=2,
)
elif self.method in ["cumulated_score", "naive"]:
# sort labels by decreasing probability
index_sorted = np.fliplr(np.argsort(y_pred_proba, axis=1))
# sort probabilities by decreasing order
y_pred_proba_sorted = np.take_along_axis(
y_pred_proba, index_sorted, axis=1
)
# get sorted cumulated score
y_pred_proba_sorted_cumsum = np.cumsum(
y_pred_proba_sorted, axis=1
)
# get cumulated score at their original position
y_pred_proba_cumsum = np.take_along_axis(
y_pred_proba_sorted_cumsum,
np.argsort(index_sorted),
axis=1
)
# get index of the last included label
y_pred_index_last = self._get_last_index_included(
y_pred_proba_cumsum,
include_last_label
)
# get the probability of the last included label
y_pred_proba_last = np.stack(
[
np.take_along_axis(
y_pred_proba,
y_pred_index_last[:, iq].reshape(-1, 1),
axis=1
)
for iq, _ in enumerate(self.quantiles_)
], axis=2
)
# get the prediction set by taking all probabilities above the
# last one
prediction_sets = np.stack(
[
y_pred_proba >= y_pred_proba_last[:, :, iq] - EPSILON
for iq, _ in enumerate(self.quantiles_)
], axis=2
)
# remove last label randomly
if include_last_label == 'randomized':
prediction_sets = self._add_random_tie_breaking(
prediction_sets,
y_pred_index_last,
y_pred_proba_cumsum,
y_pred_proba_last
)
elif self.method == "top_k":
index_sorted = np.fliplr(np.argsort(y_pred_proba, axis=1))
y_pred_index_last = np.stack(
[
index_sorted[:, quantile]
for quantile in self.quantiles_
], axis=1
)
y_pred_proba_last = np.stack(
[
np.take_along_axis(
y_pred_proba,
y_pred_index_last[:, iq].reshape(-1, 1),
axis=1
)
for iq, _ in enumerate(self.quantiles_)
], axis=2
)
prediction_sets = np.stack(
[
y_pred_proba >= y_pred_proba_last[:, :, iq] - EPSILON
for iq, _ in enumerate(self.quantiles_)
], axis=2
)
else:
raise ValueError(
"Invalid method. "
"Allowed values are 'score' or 'cumulated_score'."
)
return y_pred, prediction_sets
|
#!/usr/bin/env
"""
CTD2NCheader.py
When run independantly, this program will allow the creation of a header text file for
all ctd casts in a directory. It relys mostly on the ship logs and not on the meta information
within the ctd files.
Using Anaconda packaged Python
"""
import os, datetime
#user defined
from utilities import utilities
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__created__ = datetime.datetime(2014, 01, 13)
__modified__ = datetime.datetime(2014, 01, 13)
__version__ = "0.1.0"
__status__ = "Development"
"""-----------------------------Cruise Log Class---------------------------------------"""
class CruiseLogHeader(object):
    """Holds the header fields recorded for a single CTD cast.

    ``Vessel_ID`` and ``Cruise_ID`` are class-level attributes shared by
    every cast of the cruise currently being processed; the read/create
    routines assign them.
    """
    Vessel_ID = []
    Cruise_ID = []

    def __init__(self, CTD_num='n/a', lat='n/a', lon='n/a', day='n/a',
                 date='n/a', time_gmt='n/a', dry_bulb='n/a', wet_bulb='n/a',
                 press='n/a', wind_dir='n/a', wind_speed='n/a',
                 bttm_depth='n/a', stat_name='n/a', water_mass_code='G'):
        # Store each field on the instance; 'n/a' marks missing log data.
        pairs = (('CTD_num', CTD_num), ('lat', lat), ('lon', lon),
                 ('day', day), ('date', date), ('time_gmt', time_gmt),
                 ('dry_bulb', dry_bulb), ('wet_bulb', wet_bulb),
                 ('press', press), ('wind_dir', wind_dir),
                 ('wind_speed', wind_speed), ('bttm_depth', bttm_depth),
                 ('stat_name', stat_name),
                 ('water_mass_code', water_mass_code))
        for attr, value in pairs:
            setattr(self, attr, value)

    def hardcodedvars(self):
        """Fill weather-observation fields that are never read from the log."""
        for attr in ('seastate', 'visibility', 'cloud_amt',
                     'cloud_type', 'weather'):
            setattr(self, attr, 'n/a')

    def print2file(self, ofile):
        """Append this cast's header as one comma-separated line to *ofile*."""
        record = ("Castnum=%s, Lat(N)=%s, Lon(W)=%s W, Day=%s, Date=%s, time (GMT)=%s, Dry_bulb=%s, Wet_bulb=%s, Pressure=%s, wind dir=%s, wind speed=%s, bottom depth=%s, station name/id=%s, water mass code=%s \n"
                  % (self.CTD_num, self.lat, self.lon, self.day, self.date,
                     self.time_gmt, self.dry_bulb, self.wet_bulb, self.press,
                     self.wind_dir, self.wind_speed, self.bttm_depth,
                     self.stat_name, self.water_mass_code))
        with open(ofile, "a") as myfile:
            myfile.write(record)
"""-----------------------------Cruise Log---------------------------------------------"""
def CruiseLogHeaderRead(ifile_clh, idir):
    """ Get Existing header info from text file"""
    # Cruise ID is taken from the second-to-last component of the data path.
    CruiseLogHeader.Cruise_ID = idir.split('/')[-2]
    dic_casts = {}
    print "Reading in " + ifile_clh
    with open(ifile_clh, "r") as myfile:
        #expecting format from CruiseLogHeader.print2file()
        # Each line is "key=value, key=value, ..."; field POSITIONS (not the
        # key names) must match the 14 positional args of CruiseLogHeader.
        # NOTE(review): values that themselves contain ',' or '=' would break
        # this split-based parsing — confirm upstream data never does.
        for line in myfile:
            line_split = line.split(',')
            # The cast number (first field) doubles as the dictionary key.
            dic_casts[line_split[0].split('=')[1]] = CruiseLogHeader(line_split[0].split('=')[1],line_split[1].split('=')[1],line_split[2].split('=')[1],
            line_split[3].split('=')[1],line_split[4].split('=')[1],line_split[5].split('=')[1],line_split[6].split('=')[1],
            line_split[7].split('=')[1],line_split[8].split('=')[1],line_split[9].split('=')[1],line_split[10].split('=')[1],
            line_split[11].split('=')[1],line_split[12].split('=')[1],line_split[13].split('=')[1])
    return(dic_casts)
def CruiseLogHeaderCreate(ifile_clh, idir, castfiles):
""" Create header info from existing files and cruise log"""
#get info from cast files first
CruiseLogHeader.Vessel_ID = idir.split('/')[-2][0:2]
CruiseLogHeader.Cruise_ID = idir.split('/')[-2]
CID = raw_input("The Cruise ID is listed as: [" + CruiseLogHeader.Cruise_ID + "]. If this is correct, press enter or input new ID. \n")
if CID:
CruiseLogHeader.Cruise_ID = CID
dic_casts = {}
### add cruise header info
for i, fid in enumerate(castfiles):
idname = fid.split('.')[0]
dic_casts[idname] = CruiseLogHeader()
dic_casts[idname].CTD_num = idname
#cast
print "For the following information: enter a new value or press return \n"
t_var = raw_input("ctd number: " + dic_casts[idname].CTD_num)
if t_var:
dic_casts[idname].CTD_num = t_var
#lat
print "For the following information: enter a new value or press return \n"
t_var = raw_input("Latitude (N): " + dic_casts[idname].lat)
if t_var:
dic_casts[idname].lat = t_var
#lon
print "For the following information: enter a new value or press return \n"
t_var = raw_input("Longitude (W): " + dic_casts[idname].lon)
if t_var:
dic_casts[idname].lon = t_var
#day
print "For the following information: enter a new value or press return \n"
t_var = raw_input("Day: " + dic_casts[idname].day)
if t_var:
dic_casts[idname].day = t_var
#date
print "For the following information: enter a new value or press return \n"
t_var = raw_input("Date (Sep 13): " + dic_casts[idname].date)
if t_var:
dic_casts[idname].date = t_var
#time
print "For the following information: enter a new value or press return \n"
t_var = raw_input("24hr GMT time (hh:mm): " + dic_casts[idname].time_gmt)
if t_var:
dic_casts[idname].time_gmt = t_var
#dry bulb
print "For the following information: enter a new value or press return \n"
t_var = raw_input("Dry Bulb (deg C): " + dic_casts[idname].dry_bulb)
if t_var:
dic_casts[idname].dry_bulb = t_var
#wet bulb
print "For the following information: enter a new value or press return \n"
t_var = raw_input("Wet Bulb (deg C): " + dic_casts[idname].wet_bulb)
if t_var:
dic_casts[idname].wet_bulb = t_var
#pres
print "For the following information: enter a new value or press return \n"
t_var = raw_input("ctd number: " + dic_casts[idname].press)
if t_var:
dic_casts[idname].press = t_var
#wind dir
print "For the following information: enter a new value or press return \n"
t_var = raw_input("Wind Dir.: " + dic_casts[idname].wind_dir)
if t_var:
dic_casts[idname].wind_dir = t_var
#wind speed
print "For the following information: enter a new value or press return \n"
t_var = raw_input("Wind Speed (kts): " + dic_casts[idname].wind_speed)
if t_var:
dic_casts[idname].wind_speed = t_var
#bottom depth
print "For the following information: enter a new value or press return \n"
t_var = raw_input("bottom depth (m): " + dic_casts[idname].bttm_depth)
if t_var:
dic_casts[idname].bttm_depth = t_var
#station name
print "For the following information: enter a new value or press return \n"
t_var = raw_input("Station Name/ID: " + dic_casts[idname].stat_name)
if t_var:
dic_casts[idname].stat_name = t_var
#wind speed
print "For the following information: enter a new value or press return \n"
t_var = raw_input("Water Mass Code: (A)rctic, (G)ulf of Alaska, (B)ering Sea,\n \
(S)helikof Strait, (P)uget Sound, (V)ents: " + dic_casts[idname].water_mass_code)
if t_var:
dic_casts[idname].water_mass_code = t_var
dic_casts[idname].print2file(ifile_clh)
return(dic_casts)
"""-----------------------------Main---------------------------------------------------"""
def header_main():
idir = utilities.ChooseDirectoryofCruise() #user defined path to cruise
(castfiles, btlfiles, ioerror) = utilities.GetCNVorBTL(idir) #check for .cnv and .btl files and retrieve
if not ioerror == 0:
print "No .cnv files are found, will not be able to generate header updates. \n"
sys.exit()
print "Looking for cruise header text file. \n"
if os.path.exists(idir + 'cruiselogheader.txt'):
print "Found existing cruiselogheader.txt \n"
dic_casts = CruiseLogHeaderRead(idir + 'cruiselogheader.txt', idir)
else:
print """No cruiselogheader.txt file. Proceeding to generate it. \n
You will need to have the cruise logs to proceed. \n"""
dic_casts = CruiseLogHeaderCreate(idir + 'cruiselogheader.txt', idir, castfiles) #from Ship Logs
# Script entry point: build or read the cruise-log header file interactively.
if __name__ == "__main__":
    header_main()
<gh_stars>1-10
import time
import sys
import numpy as np
import scipy.stats
import librosa
from matplotlib import pyplot as plt
from tqdm.notebook import tqdm
import gc
from face_rhythm.util import helpers
def prepare_freqs(config_filepath):
    """Derive the CQT frequency axis and persist it for every session.

    Computes bins_per_octave, fmin, fmax and the per-bin frequency vector
    from the rough CQT settings in the config, writes them back into the
    config file, and stores the frequency vector in each session's NWB file.

    Args:
        config_filepath (Path): path to the config file
    """
    config = helpers.load_config(config_filepath)
    for session in config['General']['sessions']:
        eps = 1.19209e-07 #float32 eps
        fmin_rough = config['CQT']['fmin_rough']
        sampling_rate = config['CQT']['sampling_rate']
        n_bins = config['CQT']['n_bins']
        # Spread n_bins log-uniformly between fmin_rough and Nyquist.
        bins_per_octave = int(np.round((n_bins) / np.log2((sampling_rate / 2) / fmin_rough)))
        # Back out the exact fmin so the top bin lands at Nyquist; the
        # 2*eps nudge keeps fmax strictly below it — presumably to satisfy
        # librosa's fmax < Nyquist check (TODO confirm).
        fmin = ((sampling_rate / 2) / (2 ** ((n_bins) / bins_per_octave))) - (2 * eps)
        fmax = fmin * (2 ** ((n_bins) / bins_per_octave))
        # Center frequency of each of the n_bins CQT bins.
        freqs_Sxx = fmin * (2 ** ((np.arange(n_bins) + 1) / bins_per_octave))
        print(f'bins_per_octave: {round(bins_per_octave)} bins/octave')
        print(f'minimum frequency (fmin): {round(fmin, 3)} Hz')
        print(f'maximum frequency (fmax): {round(fmax, 8)} Hz')
        print(f'Nyquist : {sampling_rate / 2} Hz')
        print(f'number of frequencies : {n_bins} bins')
        print(f'Frequencies: {np.round(freqs_Sxx, 3)}')
        plt.figure()
        plt.plot(freqs_Sxx)
        # Persist the derived parameters back into the shared config so the
        # cqt_workflow step below can reuse them.
        config['CQT']['bins_per_octave'] = bins_per_octave
        config['CQT']['fmin'] = fmin
        config['CQT']['fmax'] = fmax
        helpers.save_config(config, config_filepath)
        helpers.create_nwb_group(session['nwb'], 'CQT')
        helpers.create_nwb_ts(session['nwb'], 'CQT', 'freqs_Sxx', freqs_Sxx, 1.0)
def cqt_workflow(config_filepath, data_key):
    """
    computes spectral analysis on the cleaned optic flow output

    For every session: loads the named Optic Flow trace, computes a CQT
    spectrogram per point and per x/y axis, frequency-weights it,
    normalizes across points, and stores the results in the session's
    NWB file.

    Args:
        config_filepath (Path): path to the config file
        data_key (str): data name on which to perform cqt
    Returns:
        None (results are written to the NWB file)
    """
    print(f'== Beginning Spectrogram Computation ==')
    tic_all = time.time()
    ## get parameters
    config = helpers.load_config(config_filepath)
    general = config['General']
    cqt = config['CQT']
    hop_length = cqt['hop_length']
    sampling_rate = cqt['sampling_rate']
    n_bins = cqt['n_bins']
    bins_per_octave = cqt['bins_per_octave']
    fmin = cqt['fmin']
    for session in general['sessions']:
        tic_session = time.time()
        freqs_Sxx = helpers.load_nwb_ts(session['nwb'], 'CQT', 'freqs_Sxx')
        positions_convDR_meanSub = helpers.load_nwb_ts(session['nwb'], 'Optic Flow', data_key)
        ## define positions traces to use
        # input_sgram = np.single(np.squeeze(positions_new_sansOutliers))[:,:,:]
        # NOTE(review): assumes shape (n_points, 2, n_frames) — TODO confirm.
        input_sgram = np.single(np.squeeze(positions_convDR_meanSub))[:,:,:]
        ## make a single spectrogram to get some size parameters for preallocation
        Sxx = librosa.cqt(np.squeeze(input_sgram[0,0,:]),
                          sr=sampling_rate,
                          hop_length=hop_length,
                          fmin=fmin,
                          n_bins=n_bins,
                          bins_per_octave=bins_per_octave,
                          window='hann')
        # preallocation
        tic = time.time()
        Sxx_allPixels = np.single(np.zeros((input_sgram.shape[0] , Sxx.shape[0] , Sxx.shape[1] , 2)))
        # NOTE(review): this reports time since tic_all, not since tic —
        # looks like it should be `time.time() - tic`; confirm intent.
        helpers.print_time('Preallocation completed', time.time() - tic_all)
        print(f'starting spectrogram calculation')
        tic = time.time()
        for ii in tqdm(range(input_sgram.shape[0]),total=Sxx_allPixels.shape[0]):
            ## iterated over x and y
            for jj in range(2):
                # NOTE(review): tmp_input_sgram is computed but unused.
                tmp_input_sgram = np.squeeze(input_sgram[ii,jj,:])
                tmp = librosa.cqt(np.squeeze(input_sgram[ii,jj,:]),
                                  sr=sampling_rate,
                                  hop_length=hop_length,
                                  fmin=fmin,
                                  n_bins=n_bins,
                                  bins_per_octave=bins_per_octave,
                                  window='hann')
                ## normalization: magnitude weighted by bin frequency
                tmp = abs(tmp) * freqs_Sxx[:,None]
                # tmp = scipy.stats.zscore(tmp , axis=0)
                # tmp = test - np.min(tmp , axis=0)[None,:]
                # tmp = scipy.stats.zscore(tmp , axis=1)
                # tmp = tmp - np.min(tmp , axis=1)[:,None]
                Sxx_allPixels[ii,:,:,jj] = tmp
        # Sxx_allPixels = Sxx_allPixels / np.std(Sxx_allPixels , axis=1)[:,None,:,:]
        print(f'completed spectrogram calculation')
        print('Info about Sxx_allPixels:\n')
        print(f'Shape: {Sxx_allPixels.shape}')
        print(f'Number of elements: {Sxx_allPixels.shape[0]*Sxx_allPixels.shape[1]*Sxx_allPixels.shape[2]*Sxx_allPixels.shape[3]}')
        print(f'Data type: {Sxx_allPixels.dtype}')
        print(f'size of Sxx_allPixels: {round(sys.getsizeof(Sxx_allPixels)/1000000000,3)} GB')
        helpers.print_time('Spectrograms computed', time.time() - tic)
        ### Normalize the spectrograms so that each time point has a similar cumulative spectral amplitude across all dots (basically, sum of power of all frequencies from all dots at a particular time should equal one)
        ## hold onto the normFactor variable because you can use to it to undo the normalization after subsequent steps
        Sxx_allPixels_normFactor = np.mean(np.sum(Sxx_allPixels , axis=1) , axis=0)
        Sxx_allPixels_norm = Sxx_allPixels / Sxx_allPixels_normFactor[None,None,:,:]
        #Sxx_allPixels_norm.shape
        helpers.create_nwb_ts(session['nwb'], 'CQT', 'Sxx_allPixels', Sxx_allPixels,1.0)
        helpers.create_nwb_ts(session['nwb'], 'CQT', 'Sxx_allPixels_norm', Sxx_allPixels_norm,1.0)
        helpers.create_nwb_ts(session['nwb'], 'CQT', 'Sxx_allPixels_normFactor', Sxx_allPixels_normFactor,1.0)
        helpers.print_time(f'Session {session["name"]} completed', time.time() - tic_session)
        # Free the large per-session arrays before starting the next session.
        del Sxx, Sxx_allPixels, Sxx_allPixels_norm, Sxx_allPixels_normFactor, positions_convDR_meanSub, input_sgram
    helpers.print_time('total elapsed time', time.time() - tic_all)
    print(f'== End spectrogram computation ==')
    gc.collect()
# from https://github.com/Moonrise55/Mbot/blob/f4e19df1df9fa4ef1a7730e63aa8009894aa304c/utils/paginator.py#L8
import discord
import asyncio
# set up pagination of results
class Pages:
    """Reaction-driven paginator for a Discord embed.

    (self, ctx, *, solutions, weights=None, embedTemp, endflag=None)
    solutions, weights: lists
    """

    def __init__(self, ctx, *, solutions, weights=None, embedTemp, endflag=None):
        # Discord plumbing pulled off the invoking context.
        self.bot = ctx.bot
        self.message = ctx.message
        self.channel = ctx.channel
        self.author = ctx.author
        # Content to page through (optionally weighted), plus a trailer
        # string appended only on the final page.
        self.solutions = solutions
        self.weights = weights
        self.endflag = endflag
        # Paging state: 15 entries per page, starting on page 1.
        self.page = 1
        self.numsol = 15
        # Embed template; the original title is kept so the page number can
        # be re-appended fresh on every render.
        self.embed = embedTemp
        self.title = embedTemp.title
        self.description = None
        # True until the first message is sent (first send vs. later edits).
        self.first = True
        self.loopState = True
        # Controls, in order: first page, previous, next, stop.
        self.reactAll = [
            "\u23EA",
            "\u25C0",
            "\u25B6",
            "\u274C",
        ]

    def extractData(self):
        """Build the text body for the current page (mutating self.page
        back by one if it points past the data)."""
        lo = (self.page - 1) * self.numsol
        hi = lo + self.numsol
        # Requested page is entirely past the data: fall back one page.
        if lo > len(self.solutions):
            self.page = self.page - 1
            lo = (self.page - 1) * self.numsol
            hi = lo + self.numsol
        # Last (possibly partial) page: clamp and attach the trailer.
        trailer = ""
        if hi >= len(self.solutions):
            hi = len(self.solutions)
            if self.endflag:
                trailer = self.endflag
        if self.weights:
            rows = [
                self.solutions[i]
                + "...................."
                + str(round(self.weights[i], 3))
                for i in range(lo, hi)
            ]
        else:
            rows = [self.solutions[i] for i in range(lo, hi)]
        body = "\n".join(rows)
        if trailer:
            body = body + "\n" + trailer
        return body

    async def sendPage(self):
        """Render the current page into the embed and send or edit it."""
        self.embed.description = self.extractData()
        self.embed.title = self.title + " (pg:" + str(self.page) + ")"
        if not self.first:
            await self.message.edit(embed=self.embed)
        else:
            self.message = await self.channel.send(embed=self.embed)
            self.first = False
        for emoji in self.reactAll:
            await self.message.add_reaction(emoji)

    async def pageLoop(self):
        """Keep rendering pages and reacting to the author's emoji until
        the stop control is pressed or the 120 s timeout expires."""
        def check(reaction, user):
            # Only the invoking author's control emoji on our message count.
            return (
                user == self.author
                and str(reaction.emoji) in self.reactAll
                and reaction.message.id == self.message.id
            )

        while self.loopState:
            await self.sendPage()
            try:
                reaction, user = await self.bot.wait_for(
                    "reaction_add", check=check, timeout=120.0
                )
            except asyncio.TimeoutError:
                # Idle too long: drop the controls and stop looping.
                await self.message.clear_reactions()
                self.loopState = False
            else:
                await self.message.remove_reaction(reaction, user)
                choice = self.reactAll.index(str(reaction.emoji))
                if choice == 0:
                    self.page = 1
                elif choice == 1:
                    self.page = max(1, self.page - 1)
                elif choice == 2:
                    self.page = self.page + 1
                else:
                    await self.message.clear_reactions()
                    self.loopState = False
|
from __future__ import absolute_import, unicode_literals
import logging
from .common import *
# Django production settings: hardened security, Sentry error reporting,
# cached templates and file/console logging layered on top of .common.
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ
# NOTE(review): `env` (and the other names used below, e.g. INSTALLED_APPS,
# TEMPLATES, DATABASES, SIMPLE_JWT, datetime) come from .common's star
# import — confirm they are all exported there.
SECRET_KEY = env('DJANGO_SECRET_KEY')
# raven sentry client
# See https://docs.getsentry.com/hosted/clients/python/integrations/django/
INSTALLED_APPS += ('raven.contrib.django.raven_compat', )
# SECURITY CONFIGURATION
# ------------------------------------------------------------------------------
# See https://docs.djangoproject.com/en/1.9/ref/middleware/#module-django.middleware.security
# and https://docs.djangoproject.com/ja/1.9/howto/deployment/checklist/#run-manage-py-check-deploy
# HSTS max-age is one year (31536000 s); lower it (e.g. to 60 s) while first
# proving that HTTPS works across the whole site.
SECURE_HSTS_SECONDS = 31536000
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool('DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS', default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool('DJANGO_SECURE_CONTENT_TYPE_NOSNIFF', default=True)
# Trust the reverse proxy's forwarded-proto header for HTTPS detection.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
SECURE_BROWSER_XSS_FILTER = True
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
SECURE_SSL_REDIRECT = env.bool('DJANGO_SECURE_SSL_REDIRECT', default=True)
CSRF_COOKIE_SECURE = True
CSRF_COOKIE_HTTPONLY = True
X_FRAME_OPTIONS = 'DENY'
# SITE CONFIGURATION
# ------------------------------------------------------------------------------
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS')
# END SITE CONFIGURATION
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# Cache compiled templates in production; APP_DIRS must be False when
# loaders are set explicitly.
# See:
# https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader
TEMPLATES[0]['OPTIONS']['loaders'] = [
    ('django.template.loaders.cached.Loader', [
        'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ]),
]
TEMPLATES[0]['APP_DIRS'] = False
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
DATABASES['default'] = env.db('DATABASE_URL')
# Sentry Configuration
SENTRY_DSN = env('DJANGO_SENTRY_DSN')
SENTRY_CLIENT = env('DJANGO_SENTRY_CLIENT', default='raven.contrib.django.raven_compat.DjangoClient')
# Root logger sends WARNING+ to Sentry and the log file; per-app loggers
# below override with console (and sentry/file where noted).
LOGGING = {
    'version': 1,
    'disable_existing_loggers': True,
    'root': {
        'level': 'WARNING',
        'handlers': ['sentry', 'file'],
    },
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s '
                      '%(process)d %(thread)d %(message)s'
        },
    },
    'handlers': {
        'sentry': {
            'level': 'ERROR',
            'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
        },
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'verbose'
        },
        'file': {
            'level': 'DEBUG',
            'class': 'logging.FileHandler',
            # NOTE(review): hardcoded path — the app process must have write
            # access to /var/log/budgetme.log or startup will fail.
            'filename': '/var/log/budgetme.log'
        }
    },
    'loggers': {
        'django.db.backends': {
            'level': 'ERROR',
            'handlers': ['console'],
            'propagate': False,
        },
        'raven': {
            'level': 'DEBUG',
            'handlers': ['console'],
            'propagate': False,
        },
        'sentry.errors': {
            'level': 'DEBUG',
            'handlers': ['console'],
            'propagate': False,
        },
        'django.security.DisallowedHost': {
            'level': 'ERROR',
            'handlers': ['console', 'sentry'],
            'propagate': False,
        },
        'budgetme': {
            'level': 'ERROR',
            'handlers': ['console', 'sentry', 'file'],
            'propagate': False,
        },
    },
}
SENTRY_CELERY_LOGLEVEL = env.int('DJANGO_SENTRY_LOG_LEVEL', logging.WARNING)
RAVEN_CONFIG = {
    'CELERY_LOGLEVEL': env.int('DJANGO_SENTRY_LOG_LEVEL', logging.WARNING),
    'DSN': SENTRY_DSN
}
# Custom Admin URL, use {% url 'admin:index' %}
ADMIN_URL = env('DJANGO_ADMIN_URL')
# Shorter JWT lifetimes for production; signing key reuses the Django secret.
SIMPLE_JWT['ACCESS_TOKEN_LIFETIME'] = datetime.timedelta(minutes=30)
SIMPLE_JWT['REFRESH_TOKEN_LIFETIME'] = datetime.timedelta(days=1)
SIMPLE_JWT['SIGNING_KEY'] = env('DJANGO_SECRET_KEY')
|
<gh_stars>0
from time import sleep
from bitcointx.core import coins_to_satoshi, satoshi_to_coins
from bitcointx.wallet import CCoinAddress as ExternalAddress
from PyQt6.QtCore import *
from PyQt6.QtGui import *
from PyQt6.QtWidgets import *
from serial import SerialException
from bitcoin_coin_selection.selection_types.coin_selection import CoinSelection
from controllers.main_controller import MainController
from errors.tx_broadcast_api_error import TxBroadcastAPIError
from models.watch_only_wallet import WatchOnlyWallet
from persistence.config import Network
from utils.coin_selection_utils import map_coin_selection_to_utxos
from views.modal_view import Message, Modal
from views.send.fee_selection_form_view import FeeSelectionForm
from views.send.send_amount_form_view import SendAmountForm
from views.send.target_address_form_view import TargetAddressForm
class SendView(QWidget):
    """Qt widget for composing and broadcasting a Bitcoin transaction.

    Collects a target address, fee priority and amount, performs coin
    selection through the MainController, and reports outcomes via a modal.
    The send button is only enabled once a hardware wallet is connected.
    """
    # Block-explorer URL templates for linking to a broadcast transaction.
    TESTNET_TX_URL = "https://live.blockcypher.com/btc-testnet/tx/{tx_id}"
    MAINNET_TX_URL ="https://live.blockcypher.com/btc/tx/{tx_id}"
    def __init__(self, controller: MainController, watch_only_wallet: WatchOnlyWallet):
        super().__init__()
        self.controller = controller
        self.watch_only_wallet = watch_only_wallet
        self.layout = QVBoxLayout(self)
        # NOTE(review): PyQt6 normally exposes alignment flags as
        # Qt.AlignmentFlag, not Qt.Alignment — confirm this runs on the
        # pinned PyQt6 version.
        self.layout.setAlignment(None, Qt.Alignment.AlignVCenter)
        self.layout.setSpacing(40)
        self.setLayout(self.layout)
        self.size_policy = QSizePolicy(QSizePolicy.Policy.Preferred, QSizePolicy.Policy.Preferred)
        self.setSizePolicy(self.size_policy)
        self.modal = Modal()
        # Sub-forms: address entry, fee priority, and amount.
        self.target_address_form = TargetAddressForm()
        self.fee_selection_form = FeeSelectionForm()
        self.send_amount_form = SendAmountForm()
        self.send_amount_form.send_max_amount_button.clicked.connect(
            self.handle_click_send_max_amount_button
        )
        # Send button starts disabled until a hardware wallet is available.
        self.send_button_form = QWidget()
        self.send_button_form.layout = QVBoxLayout()
        self.send_button_form.setLayout(self.send_button_form.layout)
        self.send_button = QPushButton("Send transaction")
        self.send_button.setMaximumWidth(300)
        self.send_button.clicked.connect(self.handle_click_send_button)
        self.send_button.setEnabled(False)
        self.send_button.setToolTip("Connect hardware wallet")
        self.send_button_form.layout.addWidget(self.send_button, alignment=Qt.Alignment.AlignCenter)
        self.layout.addWidget(self.target_address_form)
        self.layout.addWidget(self.fee_selection_form)
        self.layout.addWidget(self.send_amount_form)
        self.layout.addWidget(self.send_button_form, alignment=Qt.Alignment.AlignVCenter)
        self.init_event_handlers()
    def init_event_handlers(self):
        """Wire controller/serial signals to the handlers below."""
        self.controller.hardware_wallet_initialized.connect(
            self.handle_hardware_wallet_initialized
        )
        self.controller.hardware_wallet_loaded.connect(
            self.handle_hardware_wallet_loaded
        )
        self.controller.serial_client.connection.connect(
            self.handle_serial_connection_change
        )
    @property
    def address_input_text(self):
        """The address field's text, stripped of surrounding whitespace."""
        return self.target_address_form.target_address_input.text().strip()
    def address_is_not_none(self, address_str: str) -> bool:
        """True if the address string is non-empty."""
        return bool(len(address_str))
    def address_is_valid(self, address_str: str) -> bool:
        """True if the string parses as a Bitcoin address."""
        try:
            address = ExternalAddress(address_str)
            return True
        except Exception:
            return False
    @property
    def spend_amount(self):
        """Amount entered by the user, in BTC (float)."""
        return float(self.send_amount_form.send_amount_input.text())
    def set_spend_amount(self, amount: float):
        """Write *amount* (BTC) back into the amount input field."""
        self.send_amount_form.send_amount_input.setText(str(amount))
    @property
    def priority_fee_selected(self):
        """True if the user picked the priority (higher) fee option."""
        return self.fee_selection_form.priority_fee_button.isChecked()
    def get_max_spend_btc(self) -> float:
        """Return the maximum spendable amount in BTC for the current
        fee priority and (optionally) the entered address."""
        address_str = self.address_input_text
        address = (
            ExternalAddress(address_str)
            if self.address_is_valid(address_str) else None
        )
        # NOTE(review): "satosis" is a typo for "satoshis" (local name only).
        max_spend_satosis = self.controller.get_max_possible_spend(
            self.priority_fee_selected, address
        )
        return satoshi_to_coins(max_spend_satosis)
    def select_coins(self, address: ExternalAddress):
        """Run coin selection for the entered amount toward *address*."""
        target_value = coins_to_satoshi(self.spend_amount, False)
        return self.controller.select_coins(
            target_value, address, self.priority_fee_selected)
    def attempt_spend(self, address: ExternalAddress, coin_selection: CoinSelection):
        """Sign and broadcast the selected coins; surface the outcome in a
        modal and always re-enable the send button."""
        try:
            tx_id = self.controller.orchestrate_spend(address, coin_selection)
            self.send_button.setEnabled(True)
            if not tx_id:
                self.modal.show(Message.TX_REJECTED)
            else:
                self.modal.show(Message.TX_SENT)
        except SerialException:
            self.modal.show(Message.SERIAL_DISCONNECT)
        except TxBroadcastAPIError:
            self.modal.show(Message.TX_BROADCAST_FAILED)
        finally:
            self.send_button.setEnabled(True)
    ############################ Event Handlers ##############################
    def handle_click_send_button(self):
        """Validate the form, select coins, and attempt the spend.
        The button is disabled during the attempt to prevent double-sends."""
        self.send_button.setEnabled(False)
        address_str = self.address_input_text
        if not self.address_is_not_none(address_str):
            self.send_button.setEnabled(True)
            self.modal.show(Message.NO_ADDRESS)
            return
        elif not self.address_is_valid(address_str):
            self.send_button.setEnabled(True)
            self.modal.show(Message.INVALID_ADDRESS)
            return
        address = ExternalAddress(address_str)
        selection = self.select_coins(address)
        if selection.outcome == selection.Outcome.INSUFFICIENT_FUNDS:
            self.send_button.setEnabled(True)
            self.modal.show(Message.INSUFFICIENT_FUNDS)
            return
        elif selection.outcome == selection.Outcome.INSUFFICIENT_FUNDS_AFTER_FEES:
            self.send_button.setEnabled(True)
            self.modal.show(Message.INSUFFICIENT_FUNDS_AFTER_FEES)
            return
        elif selection.outcome == selection.Outcome.INVALID_SPEND:
            self.send_button.setEnabled(True)
            self.modal.show(Message.INVALID_SPEND)
            return
        # spinner or something
        self.attempt_spend(address, selection)
    def handle_click_send_max_amount_button(self):
        """Fill the amount field with the maximum spendable BTC."""
        max_spend_btc = self.get_max_spend_btc()
        self.set_spend_amount(max_spend_btc)
    @pyqtSlot(bool)
    def handle_serial_connection_change(self, is_connected):
        """Disable sending while the hardware wallet is disconnected."""
        if not is_connected:
            self.send_button.setEnabled(False)
            self.send_button.setToolTip("Connect hardware wallet")
    def handle_hardware_wallet_loaded(self):
        """Enable sending once the hardware wallet is loaded."""
        self.send_button.setEnabled(True)
        self.send_button.setToolTip("")
    def handle_hardware_wallet_initialized(self):
        """Enable sending once the hardware wallet is initialized."""
        self.send_button.setEnabled(True)
        self.send_button.setToolTip("")
|
<filename>libs/envs/modules/sprites.py
import os
import pygame
class pusherSprite(pygame.sprite.Sprite):
    """Player/robot pusher moving on a (col, row) grid in 0.7-cell steps.

    Positions are kept rounded to one decimal place to avoid float drift.
    """

    def __init__(self, col, row, cfg, id):
        """Load the player (id == 1) or robot image and place the sprite."""
        pygame.sprite.Sprite.__init__(self)
        if id == 1:
            self.image_path = os.path.join(cfg.IMAGESDIR, 'player.png')
        else:
            self.image_path = os.path.join(cfg.IMAGESDIR, 'robot.png')
        self.image = pygame.image.load(self.image_path).convert()
        # Use the top-left pixel's color as the transparency key.
        color = self.image.get_at((0, 0))
        self.image.set_colorkey(color, pygame.RLEACCEL)
        self.rect = self.image.get_rect()
        self.col = col
        self.row = row
        self.speed_col = 0.7
        self.speed_row = 0.7
        self.friction = 0.1  # NOTE(review): unused here — confirm callers need it

    def update_speed(self, direction):
        """Return the (col, row) one step in *direction* would reach.

        Does not mutate the sprite; returns None for an unknown direction.
        """
        if direction == 'up':
            return self.col, round(self.row - self.speed_row, 1)
        elif direction == 'down':
            return self.col, round(self.row + self.speed_row, 1)
        elif direction == 'left':
            return round(self.col - self.speed_col, 1), self.row
        elif direction == 'right':
            return round(self.col + self.speed_col, 1), self.row
        return None

    def move_delta(self, col, row):
        """Shift the sprite by (col, row), rounding to one decimal."""
        self.col = round(self.col + col, 1)
        self.row = round(self.row + row, 1)

    def move_to(self, col, row):
        """Place the sprite at (col, row), rounding to one decimal."""
        self.col = round(col, 1)
        self.row = round(row, 1)

    def move(self, direction, is_test=False):
        """Step in *direction*; with is_test=True only return the target.

        Cleanup: the is_test branch previously duplicated update_speed()
        verbatim; both paths now share it.
        """
        if is_test:
            return self.update_speed(direction)
        target = self.update_speed(direction)
        if target is not None:
            self.col, self.row = target

    def draw(self, screen):
        """Blit the sprite at its grid position (cell size = image size)."""
        self.rect.x = self.rect.width * self.col
        self.rect.y = self.rect.height * self.row
        screen.blit(self.image, self.rect)
class elementSprite(pygame.sprite.Sprite):
    """Generic board element (e.g. wall, target, diamond) on the grid.

    The sprite type is derived from the image filename; only 'diamond'
    sprites respond to move requests.
    """

    def __init__(self, sprite_name, col, row, cfg):
        """Load *sprite_name* from the image dir and place the sprite."""
        pygame.sprite.Sprite.__init__(self)
        self.image_path = os.path.join(cfg.IMAGESDIR, sprite_name)
        self.image = pygame.image.load(self.image_path).convert()
        # Use the top-left pixel's color as the transparency key.
        color = self.image.get_at((0, 0))
        self.image.set_colorkey(color, pygame.RLEACCEL)
        self.rect = self.image.get_rect()
        # e.g. 'diamond.png' -> 'diamond'
        self.sprite_type = sprite_name.split('.')[0]
        self.col = col
        self.row = row
        self.speed_col = 0.7
        self.speed_row = 0.7

    def draw(self, screen):
        """Blit the sprite at its grid position (cell size = image size)."""
        self.rect.x = self.rect.width * self.col
        self.rect.y = self.rect.height * self.row
        screen.blit(self.image, self.rect)

    def move_delta(self, col, row):
        """Shift the sprite by (col, row), rounding to one decimal."""
        self.col = round(self.col + col, 1)
        self.row = round(self.row + row, 1)

    def move_to(self, col, row):
        """Place the sprite at (col, row), rounding to one decimal."""
        self.col = round(col, 1)
        self.row = round(row, 1)

    def _step_target(self, direction):
        """Return the (col, row) one step in *direction*, or None if the
        direction is unknown. Does not mutate the sprite."""
        if direction == 'up':
            return self.col, round(self.row - self.speed_row, 1)
        elif direction == 'down':
            return self.col, round(self.row + self.speed_row, 1)
        elif direction == 'left':
            return round(self.col - self.speed_col, 1), self.row
        elif direction == 'right':
            return round(self.col + self.speed_col, 1), self.row
        return None

    def move(self, direction, is_test=False):
        """Move a diamond one step; with is_test=True only return the target.

        Non-diamond sprites never move. Cleanup: the four-way direction
        ladder was duplicated between the test and mutating branches; both
        now share _step_target().
        """
        if self.sprite_type != 'diamond':
            return None
        if is_test:
            return self._step_target(direction)
        target = self._step_target(direction)
        if target is not None:
            self.col, self.row = target
<filename>regression_tests/parsers/c_parser/exprs/expression.py
"""
A base class of all expressions.
"""
from abc import ABCMeta
from abc import abstractmethod
from clang import cindex
from regression_tests.parsers.c_parser.utils import first_child_node
from regression_tests.parsers.c_parser.utils import has_token
from regression_tests.parsers.c_parser.utils import has_token_in_position
class Expression(metaclass=ABCMeta):
    """A base class of all expressions."""

    def __init__(self, node):
        """
        :param node: Internal node representing the expression.
        """
        self._node = node

    # --- Operator/kind predicates ------------------------------------------
    # Every predicate defaults to False here; the concrete subclass for a
    # given operator/kind overrides the matching predicate to return True.

    def is_eq_op(self) -> bool:
        """Is the expression an equals operator?"""
        return False

    def is_neq_op(self) -> bool:
        """Is the expression a not equals operator?"""
        return False

    def is_gt_op(self) -> bool:
        """Is the expression a greater than operator?"""
        return False

    def is_gt_eq_op(self) -> bool:
        """Is the expression a greater than or equal operator?"""
        return False

    def is_lt_op(self) -> bool:
        """Is the expression a less than operator?"""
        return False

    def is_lt_eq_op(self) -> bool:
        """Is the expression a less than or equal operator?"""
        return False

    def is_add_op(self) -> bool:
        """Is the expression an add operator?"""
        return False

    def is_sub_op(self) -> bool:
        """Is the expression a subtraction operator?"""
        return False

    def is_mul_op(self) -> bool:
        """Is the expression a multiplication operator?"""
        return False

    def is_mod_op(self) -> bool:
        """Is the expression a modulo operator?"""
        return False

    def is_div_op(self) -> bool:
        """Is the expression a division operator?"""
        return False

    def is_and_op(self) -> bool:
        """Is the expression an and operator?"""
        return False

    def is_or_op(self) -> bool:
        """Is the expression an or operator?"""
        return False

    def is_bit_and_op(self) -> bool:
        """Is the expression a bit-and operator?"""
        return False

    def is_bit_or_op(self) -> bool:
        """Is the expression a bit-or operator?"""
        return False

    def is_bit_xor_op(self) -> bool:
        """Is the expression a bit-xor operator?"""
        return False

    def is_bit_shl_op(self) -> bool:
        """Is the expression a bit left shift operator?"""
        return False

    def is_bit_shr_op(self) -> bool:
        """Is the expression a bit right shift operator?"""
        return False

    def is_not_op(self) -> bool:
        """Is the expression a not operator?"""
        return False

    def is_neg_op(self) -> bool:
        """Is the expression a negation operator?"""
        return False

    def is_assign_op(self) -> bool:
        """Is the expression an assignment operator?"""
        return False

    def is_address_op(self) -> bool:
        """Is the expression an address operator?"""
        return False

    def is_deref_op(self) -> bool:
        """Is the expression a dereference operator?"""
        return False

    def is_array_index_op(self) -> bool:
        """Is the expression an array subscript operator?"""
        return False

    def is_comma_op(self) -> bool:
        """Is the expression a comma operator?"""
        return False

    def is_ternary_op(self) -> bool:
        """Is the expression a ternary operator?"""
        return False

    def is_call(self) -> bool:
        """Is the expression a call expression?"""
        return False

    def is_cast(self) -> bool:
        """Is the expression a cast expression?"""
        return False

    def is_pre_increment_op(self) -> bool:
        """Is the expression a pre increment operator?"""
        return False

    def is_post_increment_op(self) -> bool:
        """Is the expression a post increment operator?"""
        return False

    def is_pre_decrement_op(self) -> bool:
        """Is the expression a pre decrement operator?"""
        return False

    def is_post_decrement_op(self) -> bool:
        """Is the expression a post decrement operator?"""
        return False

    def is_compound_assign_op(self) -> bool:
        """Is the expression a compound assignment operator?"""
        return False

    def is_struct_ref_op(self) -> bool:
        """Is the expression a struct reference operator?"""
        return False

    def is_struct_deref_op(self) -> bool:
        """Is the expression a struct dereference operator?"""
        return False

    @staticmethod
    def _from_clang_node(node):
        """Creates a new expression from the given clang node.

        :param node: Internal node representing the expression.

        :raises AssertionError: If the expression is not supported.
        """
        # The concrete expression classes referenced below are imported at
        # the bottom of this module (to break circular imports), so they are
        # resolved lazily when this function runs.
        node = Expression._skip_unconvertable_nodes(node)
        # Literals.
        if node.kind == cindex.CursorKind.INTEGER_LITERAL:
            return IntegralLiteral(node)
        elif node.kind == cindex.CursorKind.FLOATING_LITERAL:
            return FloatingPointLiteral(node)
        elif node.kind == cindex.CursorKind.CHARACTER_LITERAL:
            return CharacterLiteral(node)
        elif node.kind == cindex.CursorKind.STRING_LITERAL:
            return StringLiteral(node)
        # Array initializer.
        elif node.kind == cindex.CursorKind.INIT_LIST_EXPR:
            return InitListExpr(node)
        # Identifier.
        elif node.kind == cindex.CursorKind.DECL_REF_EXPR:
            from regression_tests.parsers.c_parser.exprs.variable import Variable
            return Variable(node)
        # Ternary operator.
        elif node.kind == cindex.CursorKind.CONDITIONAL_OPERATOR:
            return TernaryOpExpr(node)
        # Call expression.
        elif node.kind == cindex.CursorKind.CALL_EXPR:
            return CallExpr(node)
        # Cast expression.
        elif node.kind == cindex.CursorKind.CSTYLE_CAST_EXPR:
            return CastExpr(node)
        # Binary operators.
        # NOTE(review): this dispatch assumes `has_token` matches whole clang
        # tokens (so e.g. the '=' check cannot shadow '==') — confirm in
        # regression_tests.parsers.c_parser.utils.has_token.
        elif node.kind == cindex.CursorKind.COMPOUND_ASSIGNMENT_OPERATOR:
            return CompoundAssignOpExpr(node)
        elif node.kind == cindex.CursorKind.BINARY_OPERATOR:
            if has_token(node, '='):
                return AssignOpExpr(node)
            elif has_token(node, '=='):
                return EqOpExpr(node)
            elif has_token(node, '!='):
                return NeqOpExpr(node)
            elif has_token(node, '>'):
                return GtOpExpr(node)
            elif has_token(node, '>='):
                return GtEqOpExpr(node)
            elif has_token(node, '<'):
                return LtOpExpr(node)
            elif has_token(node, '<='):
                return LtEqOpExpr(node)
            elif has_token(node, '+'):
                return AddOpExpr(node)
            elif has_token(node, '-'):
                return SubOpExpr(node)
            elif has_token(node, '*'):
                return MulOpExpr(node)
            elif has_token(node, '%'):
                return ModOpExpr(node)
            elif has_token(node, '/'):
                return DivOpExpr(node)
            elif has_token(node, '&&'):
                return AndOpExpr(node)
            elif has_token(node, '||'):
                return OrOpExpr(node)
            elif has_token(node, '&'):
                return BitAndOpExpr(node)
            elif has_token(node, '|'):
                return BitOrOpExpr(node)
            elif has_token(node, '^'):
                return BitXorOpExpr(node)
            elif has_token(node, '<<'):
                return BitShlOpExpr(node)
            elif has_token(node, '>>'):
                return BitShrOpExpr(node)
            elif has_token(node, ','):
                return CommaOpExpr(node)
        elif node.kind == cindex.CursorKind.ARRAY_SUBSCRIPT_EXPR:
            return ArrayIndexOpExpr(node)
        elif node.kind == cindex.CursorKind.MEMBER_REF_EXPR:
            if has_token(node, '.'):
                return StructRefOpExpr(node)
            elif has_token(node, '->'):
                return StructDerefOpExpr(node)
        # Unary operators.
        elif node.kind == cindex.CursorKind.UNARY_OPERATOR:
            if has_token(node, '!'):
                return NotOpExpr(node)
            elif has_token(node, '-'):
                return NegOpExpr(node)
            elif has_token(node, '&'):
                return AddressOpExpr(node)
            elif has_token(node, '*'):
                return DerefOpExpr(node)
            # Position 0 = prefix, position 1 = postfix.
            elif has_token_in_position(node, '++', 0):
                return PreIncrementOpExpr(node)
            elif has_token_in_position(node, '++', 1):
                return PostIncrementOpExpr(node)
            elif has_token_in_position(node, '--', 0):
                return PreDecrementOpExpr(node)
            elif has_token_in_position(node, '--', 1):
                return PostDecrementOpExpr(node)
        raise AssertionError('unsupported expression `{}` of kind {}'.format(
            node.spelling, node.kind))

    @property
    def type(self):
        """Type of the expression (:class:`.Type`)."""
        return Type._from_clang_type(self._node.type)

    @abstractmethod
    def __eq__(self, other):
        raise NotImplementedError

    def __ne__(self, other):
        # Derived from __eq__ so subclasses only implement equality once.
        return not self == other

    @abstractmethod
    def __hash__(self):
        raise NotImplementedError

    @staticmethod
    def _skip_unconvertable_nodes(node):
        """Skips nodes that cannot be directly converted and returns the first
        node that can.
        """
        # _UNCONVERTABLE_NODES is the module-level list defined right below
        # this class.
        while node.kind in _UNCONVERTABLE_NODES:
            node = first_child_node(node)
        return node
#: Nodes that cannot be converted into expressions.
#: Consumed by Expression._skip_unconvertable_nodes() above.
_UNCONVERTABLE_NODES = [
    cindex.CursorKind.UNEXPOSED_EXPR,
    # For `(x)`, we want to get `x` as there is no support for representing
    # parentheses in the framework.
    cindex.CursorKind.PAREN_EXPR,
]
from regression_tests.parsers.c_parser.exprs.init_list_expr import InitListExpr
from regression_tests.parsers.c_parser.exprs.binary_ops.add_op_expr import AddOpExpr
from regression_tests.parsers.c_parser.exprs.binary_ops.and_op_expr import AndOpExpr
from regression_tests.parsers.c_parser.exprs.binary_ops.array_index_op_expr import ArrayIndexOpExpr
from regression_tests.parsers.c_parser.exprs.binary_ops.assign_op_expr import AssignOpExpr
from regression_tests.parsers.c_parser.exprs.binary_ops.bit_and_op_expr import BitAndOpExpr
from regression_tests.parsers.c_parser.exprs.binary_ops.bit_or_op_expr import BitOrOpExpr
from regression_tests.parsers.c_parser.exprs.binary_ops.bit_shl_op_expr import BitShlOpExpr
from regression_tests.parsers.c_parser.exprs.binary_ops.bit_shr_op_expr import BitShrOpExpr
from regression_tests.parsers.c_parser.exprs.binary_ops.bit_xor_op_expr import BitXorOpExpr
from regression_tests.parsers.c_parser.exprs.binary_ops.comma_op_expr import CommaOpExpr
from regression_tests.parsers.c_parser.exprs.binary_ops.compound_assign_op_expr import CompoundAssignOpExpr
from regression_tests.parsers.c_parser.exprs.binary_ops.div_op_expr import DivOpExpr
from regression_tests.parsers.c_parser.exprs.binary_ops.eq_op_expr import EqOpExpr
from regression_tests.parsers.c_parser.exprs.binary_ops.gt_op_expr import GtOpExpr
from regression_tests.parsers.c_parser.exprs.binary_ops.gt_eq_op_expr import GtEqOpExpr
from regression_tests.parsers.c_parser.exprs.binary_ops.lt_op_expr import LtOpExpr
from regression_tests.parsers.c_parser.exprs.binary_ops.lt_eq_op_expr import LtEqOpExpr
from regression_tests.parsers.c_parser.exprs.binary_ops.mod_op_expr import ModOpExpr
from regression_tests.parsers.c_parser.exprs.binary_ops.mul_op_expr import MulOpExpr
from regression_tests.parsers.c_parser.exprs.binary_ops.neq_op_expr import NeqOpExpr
from regression_tests.parsers.c_parser.exprs.binary_ops.or_op_expr import OrOpExpr
from regression_tests.parsers.c_parser.exprs.binary_ops.struct_deref_op_expr import StructDerefOpExpr
from regression_tests.parsers.c_parser.exprs.binary_ops.struct_ref_op_expr import StructRefOpExpr
from regression_tests.parsers.c_parser.exprs.binary_ops.sub_op_expr import SubOpExpr
from regression_tests.parsers.c_parser.exprs.call_expr import CallExpr
from regression_tests.parsers.c_parser.exprs.cast_expr import CastExpr
from regression_tests.parsers.c_parser.exprs.literals.character_literal import CharacterLiteral
from regression_tests.parsers.c_parser.exprs.literals.floating_point_literal import FloatingPointLiteral
from regression_tests.parsers.c_parser.exprs.literals.integral_literal import IntegralLiteral
from regression_tests.parsers.c_parser.exprs.literals.string_literal import StringLiteral
from regression_tests.parsers.c_parser.exprs.ternary_op_expr import TernaryOpExpr
from regression_tests.parsers.c_parser.exprs.unary_ops.address_op_expr import AddressOpExpr
from regression_tests.parsers.c_parser.exprs.unary_ops.deref_op_expr import DerefOpExpr
from regression_tests.parsers.c_parser.exprs.unary_ops.neg_op_expr import NegOpExpr
from regression_tests.parsers.c_parser.exprs.unary_ops.not_op_expr import NotOpExpr
from regression_tests.parsers.c_parser.exprs.unary_ops.post_decrement_op_expr import PostDecrementOpExpr
from regression_tests.parsers.c_parser.exprs.unary_ops.post_increment_op_expr import PostIncrementOpExpr
from regression_tests.parsers.c_parser.exprs.unary_ops.pre_decrement_op_expr import PreDecrementOpExpr
from regression_tests.parsers.c_parser.exprs.unary_ops.pre_increment_op_expr import PreIncrementOpExpr
from regression_tests.parsers.c_parser.types.type import Type
|
from typing import *
import random
import enum
from enum import Enum
# Please see https://namu.wiki/w/%ED%99%94%ED%88%AC/%ED%8C%A8
# Following cards are defined in the same order as appeared in the above link
# For the rules and terminologies: please see https://www.pagat.com/fishing/gostop.html
# card constants
class CardCode(Enum):
    """One code per physical card, in the order of the namu.wiki listing.

    Codes 1..48 cover the twelve months, four cards per month (so
    `(code - 1) // 4 + 1` is the month); codes 49..51 are the bonus
    (joker) cards with no month.
    """
    # January
    JanBright=1
    JanRedRibbon=2
    Jan1=3
    Jan2=4
    # February
    FebBird=5
    FebRedRibbon=6
    Feb1=7
    Feb2=8
    # March
    MarBright=9
    MarRedRibbon=10
    Mar1=11
    Mar2=12
    # April
    AprBird=13
    AprRibbon=14
    Apr1=15
    Apr2=16
    # May
    MayBridge=17
    MayRibbon=18
    May1=19
    May2=20
    # June
    JunButterfly=21
    JunBlueRibbon=22
    Jun1=23
    Jun2=24
    # July
    JulPig=25
    JulRibbon=26
    Jul1=27
    Jul2=28
    # August
    AugBright=29
    AugBird=30
    Aug1=31
    Aug2=32
    # September
    SepFlask=33
    SepBlueRibbon=34
    Sep1=35
    Sep2=36
    # October
    OctDeer=37
    OctBlueRibbon=38
    Oct1=39
    Oct2=40
    # November
    NovBright=41
    NovDouble=42
    Nov1=43
    Nov2=44
    # December
    DecBright=45
    DecBird=46
    DecRibbon=47
    DecDoor=48
    # Bonus (joker) cards — no month.
    JokerDouble1=49
    JokerDouble2=50
    JokerTriple=51
class Card(object):
    """A single Go-Stop playing card identified by its :class:`CardCode`."""

    def __init__(self, code: CardCode):
        """:raises Exception: if `code` is not a CardCode member."""
        # BUG fix: `code in CardCode` raises TypeError for non-member values
        # on older Pythons instead of the intended exception.
        if not isinstance(code, CardCode):
            raise Exception('Illegal code number')
        self._code = code

    @property
    def month(self) -> int:
        """Month of the card (1..12), or None for bonus (joker) cards."""
        code = self._code.value
        if 1 <= code <= 48:
            # Four consecutive codes per month; use floor division instead
            # of int(float-division).
            return (code - 1) // 4 + 1
        return None

    @property
    def is_bright(self) -> bool:
        """Whether the card is one of the five 'bright' (gwang) cards."""
        return self._code in [CardCode.JanBright, CardCode.MarBright, CardCode.AugBright, CardCode.NovBright, CardCode.DecBright]

    @property
    def is_subbright(self) -> bool:
        """Whether the card is the December 'rain' bright (scores less)."""
        return self._code == CardCode.DecBright

    @property
    def is_bird(self) -> bool:
        """Whether the card is one of the three 'godori' bird cards."""
        return self._code in [CardCode.FebBird, CardCode.AprBird, CardCode.AugBird]

    @property
    def is_animal(self) -> bool:
        """Whether the card counts as an animal (yeol) card."""
        return self._code in [CardCode.FebBird, CardCode.AprBird, CardCode.MayBridge, CardCode.JunButterfly, CardCode.JulPig,
                              CardCode.AugBird, CardCode.SepFlask, CardCode.OctDeer, CardCode.DecBird]

    @property
    def is_red_ribbon(self) -> bool:
        """Whether the card is a red-ribbon (hongdan) card."""
        return self._code in [CardCode.FebRedRibbon, CardCode.MarRedRibbon, CardCode.JanRedRibbon]

    @property
    def is_blue_ribbon(self) -> bool:
        """Whether the card is a blue-ribbon (cheongdan) card."""
        return self._code in [CardCode.JunBlueRibbon, CardCode.SepBlueRibbon, CardCode.OctBlueRibbon]

    @property
    def is_plain_ribbon(self) -> bool:
        """Whether the card is a plain-ribbon (chodan) card."""
        return self._code in [CardCode.AprRibbon, CardCode.MayRibbon, CardCode.JulRibbon, CardCode.DecRibbon]

    @property
    def is_ribbon(self) -> bool:
        """Whether the card is any ribbon card (red, blue, or plain)."""
        return self._code in [CardCode.FebRedRibbon, CardCode.MarRedRibbon, CardCode.JanRedRibbon,
                              CardCode.JunBlueRibbon, CardCode.SepBlueRibbon, CardCode.OctBlueRibbon,
                              CardCode.AprRibbon, CardCode.MayRibbon, CardCode.JulRibbon, CardCode.DecRibbon]

    @property
    def pi_cnt(self) -> int:
        """Number of 'pi' (junk) points this card is worth (0..3)."""
        if self._code in [CardCode.Jan1, CardCode.Jan2, CardCode.Feb1, CardCode.Feb2, CardCode.Mar1, CardCode.Mar2,
                          CardCode.Apr1, CardCode.Apr2, CardCode.May1, CardCode.May2, CardCode.Jun1, CardCode.Jun2,
                          CardCode.Jul1, CardCode.Jul2, CardCode.Aug1, CardCode.Aug2, CardCode.Sep1, CardCode.Sep2,
                          CardCode.Oct1, CardCode.Oct2, CardCode.Nov1, CardCode.Nov2]:
            return 1
        if self._code in [CardCode.DecDoor, CardCode.NovDouble, CardCode.JokerDouble1, CardCode.JokerDouble2]:
            return 2
        if self._code == CardCode.JokerTriple: return 3
        return 0

    @property
    def is_bonus(self) -> bool:
        """Whether the card is one of the three bonus (joker) cards."""
        return self._code in [CardCode.JokerDouble1, CardCode.JokerDouble2, CardCode.JokerTriple]

    def __eq__(self, other: 'Card'): return self._code.value == other._code.value

    def __gt__(self, other: 'Card'):
        # BUG fix: the original compared `self > other._code.value`, which
        # re-entered __gt__ with an int and crashed (or recursed).
        return self._code.value > other._code.value

    def __lt__(self, other: 'Card'):
        # BUG fix: same defect as __gt__.
        return self._code.value < other._code.value

    def __hash__(self): return self._code.value

    def __str__(self):
        return '[' + self._code.name + ']'
CardSet = Set[Card]
def _cardset_to_str(cardset:CardSet):
if len(cardset) == 0: return '-'
res = ' '.join([str(c) for c in cardset])
return res
class Player(object):
    """Per-player state for a Go-Stop match: hand, captured cards, shake
    announcements, and the counters (go/shake/bomb/bbuck) that modify
    scoring."""

    def __init__(self):
        self._hand = set()            # cards currently in hand
        self._acquired = set()        # captured cards
        self._shaked = set()          # cards announced in a 'shake'
        self._go_cnt = 0              # number of 'go' declarations
        self._shake_cnt = 0           # number of shakes performed
        self._bomb_cnt = 0            # number of bombs performed
        self._kukjin_as_doublepi = False  # treat the September flask as a double pi
        self._latest_go_score = 0     # score at the moment of the last 'go'
        self._bomb_card_cnt = 0       # free throws earned by a bomb
        self._bbuck_cnt = 0
        self._consec_bbuck_cnt = 0
        self._president_cnt = 0       # number of president (4-of-a-month) claims

    def has(self, card: Card) -> bool:
        """Whether `card` is currently in this player's hand."""
        return card in self._hand

    def dump_str(self, indent=0):
        """Multi-line human-readable dump of this player's state."""
        blank = ''
        if indent > 0: blank = ' ' * indent
        # BUG fix: the original printed _bbuck_cnt twice and never _bomb_cnt.
        res = blank + ("Go:{0} Shake:{1} Bomb:{2} BBuck:{3} BombCard:{4} Kukjin->TwoPi:{5}\n"
                       .format(self._go_cnt, self._shake_cnt, self._bomb_cnt,
                               self._bbuck_cnt, self._bomb_card_cnt,
                               self._kukjin_as_doublepi))
        res += blank + 'Hand : ' + _cardset_to_str(self._hand) + '\n'
        res += blank + 'Acquired: ' + _cardset_to_str(self._acquired) + '\n'
        res += blank + 'Shaked : ' + _cardset_to_str(self._shaked) + '\n'
        return res

    def shakable_months(self) -> List[int]:
        """Months for which this player holds exactly three cards."""
        cnt = dict()
        for c in self._hand:  # type: Card
            m = c.month
            cnt[m] = cnt.get(m, 0) + 1
        res = []
        for k in cnt.keys():
            # BUG fix: the original appended the loop-stale `m` instead of `k`.
            if cnt[k] == 3: res.append(k)
        return res

    def can_say_go(self) -> bool:
        """Whether the current (un-amplified) score permits declaring 'go'."""
        cur_score = self.score(amplifier=False)
        return cur_score > self._latest_go_score and cur_score > 0

    def president_months(self) -> List[int]:
        """Months for which this player holds all four cards."""
        cnt = dict()
        for c in self._hand:  # type: Card
            m = c.month
            cnt[m] = cnt.get(m, 0) + 1
        res = []
        for k in cnt.keys():
            # BUG fix: the original appended the loop-stale `m` instead of `k`.
            if cnt[k] == 4: res.append(k)
        return res

    def score(self, amplifier=True) -> int:
        """Current score from the acquired pile.

        :param amplifier: when True, also apply go/shake/bomb/mungbak
                          multipliers and bonuses.
        """
        if self._president_cnt > 0: return 7
        bright_cnt = 0
        pi_cnt = 0
        subbright = False
        has_kukjin = False
        ribbon_cnt = 0
        red_ribbon_cnt = 0
        blue_ribbon_cnt = 0
        plain_ribbon_cnt = 0
        animal_cnt = 0
        bird_cnt = 0
        for c in self._acquired:  # type: Card
            subbright |= c.is_subbright
            has_kukjin |= (c._code == CardCode.SepFlask)
            # BUG fix: pi_cnt was accumulated twice per card in the original.
            pi_cnt += c.pi_cnt
            red_ribbon_cnt += 1 if c.is_red_ribbon else 0
            blue_ribbon_cnt += 1 if c.is_blue_ribbon else 0
            plain_ribbon_cnt += 1 if c.is_plain_ribbon else 0
            ribbon_cnt += 1 if c.is_ribbon else 0
            animal_cnt += 1 if c.is_animal else 0
            bird_cnt += 1 if c.is_bird else 0
            bright_cnt += 1 if c.is_bright else 0
        res = 0
        if self._kukjin_as_doublepi and has_kukjin:
            # The September flask may count as two pi instead of an animal.
            pi_cnt += 2
            animal_cnt -= 1
        if pi_cnt >= 10: res += pi_cnt - 9
        if animal_cnt >= 5 and animal_cnt <= 7: res += animal_cnt - 4
        elif animal_cnt > 7: res += 3
        if bird_cnt == 3: res += 5  # godori
        if ribbon_cnt >= 5: res += ribbon_cnt - 4
        if bright_cnt == 3 and subbright: res += 2
        elif bright_cnt == 3 and not subbright: res += 3
        elif bright_cnt == 4: res += 4
        elif bright_cnt == 5: res += 15
        if red_ribbon_cnt == 3: res += 3
        if blue_ribbon_cnt == 3: res += 3
        if plain_ribbon_cnt >= 3: res += 3
        if amplifier:
            if self._go_cnt == 1: res += 1
            elif self._go_cnt == 2: res += 2
            elif self._go_cnt > 0: res = res * (2 ** (self._go_cnt - 2))
            if animal_cnt >= 7: res *= 2  # mungbak
            if self._shake_cnt > 0: res = res * (2 ** self._shake_cnt)
            if self._bomb_cnt > 0: res = res * (2 ** self._bomb_cnt)
        return res

    def by_month(self, set_name: str, month: int) -> Set[Card]:
        """Cards of `month` within one of this player's piles.

        :param set_name: 'hand', 'acquired' or 'shaked'.
        :raises ValueError: for any other set name (the original crashed
                            with UnboundLocalError instead).
        """
        res = set()
        if set_name == 'hand': used_set = self._hand
        elif set_name == 'acquired': used_set = self._acquired
        elif set_name == 'shaked': used_set = self._shaked
        else: raise ValueError('unknown set name: {0}'.format(set_name))
        for c in used_set:  # type: Card
            if c.month == month: res.add(c)
        return res

    @property
    def pibakable(self) -> bool:
        """Whether this player could be hit by 'pibak' (1..5 pi acquired)."""
        cnt = 0
        for c in list(self._acquired):  # type: Card
            cnt += c.pi_cnt
        if cnt == 0 or cnt > 5: return False
        return True

    @property
    def gwangbakable(self) -> bool:
        """Whether this player could be hit by 'gwangbak' (no brights)."""
        cnt = 0
        for c in list(self._acquired):  # type: Card
            cnt += 1 if c.is_bright else 0
        return cnt == 0

    @property
    def bomb_cnt(self) -> int:
        """Number of bombs this player has performed."""
        return self._bomb_cnt

    ## Functions whose names start with an underscore should not be called by the user

    def _claim_president(self) -> bool:
        """Claim a president (4 cards of one month in hand), if any."""
        if len(self.president_months()) > 0:
            self._president_cnt += 1
            return True
        return False

    def _remove_pi(self) -> Union[Card, None]:
        """Remove and return the lowest-valued pi card from the acquired
        pile (e.g. when an opponent steals a pi), or None if there is none."""
        pi = None
        pi_cnt = 0
        for c in self._acquired:  # type: Card
            if c.pi_cnt > 0 and (pi_cnt == 0 or c.pi_cnt < pi_cnt):
                pi = c
                pi_cnt = c.pi_cnt
        if pi_cnt > 0: self._acquired.remove(pi)
        return pi

    def _shake(self, c: Card) -> bool:
        """Announce a 'shake' for the month of card `c`, if allowed."""
        m = c.month
        shakables = self.shakable_months()
        if m in shakables:
            for cc in self._hand:  # type: Card
                # BUG fix: set has no `insert`; use `add`.
                if cc.month == m: self._shaked.add(cc)
            return True
        return False

    def _claim_go(self) -> bool:
        """Declare 'go'; records the score threshold for the next go."""
        if not self.can_say_go(): return False
        self._latest_go_score = self.score(amplifier=False)
        self._go_cnt += 1
        return True

    def _throw(self, c: Union[None, Card]) -> bool:
        """Throw a card from hand; None means spending a bomb free-throw.

        :returns: True if the throw was legal.
        """
        # BUG fix: the original tested `c._code is None`, which raised
        # AttributeError for the documented None (bomb) case.
        if c is None:
            if self._bomb_card_cnt > 0:
                self._bomb_card_cnt -= 1
                return True
            return False
        else:
            if not c in self._hand: return False
            self._hand.remove(c)
            if c in self._shaked: self._shaked.remove(c)
            return True

    def _acquire_bomb(self, bomb_cnt):
        """Grant `bomb_cnt` free throws earned by a bomb."""
        self._bomb_card_cnt += bomb_cnt

    def _get(self, cards: Set[Card]):
        """Add `cards` to the acquired pile."""
        self._acquired.update(cards)
class GameState(Enum):
    """Phase of the Game state machine.

    Ask*/Answered* pairs bracket a question posed to the current player and
    the answer they supplied via Game.action().
    """
    Initialized = 0
    AskPresident = 1
    AnsweredPresident = 2
    AskCardToThrow = 3
    AnsweredCardToThrow = 4
    AskCardToCapture = 5
    AnsweredCardToCapture = 6
    Draw = 254
    Done = 255
class Board(object):
    """The face-up cards on the table, grouped by month (1..12), plus the
    per-month record of which player owns a pending 'bbuck'."""

    # Class-level defaults kept for backward compatibility; both attributes
    # are re-assigned in __init__.
    _cards = None
    _bbuck_player = None

    def __init__(self, cards: List[Card]):
        """:param cards: cards to lay on the board. Cards without a month
        (bonus/joker cards) are silently skipped."""
        self._cards = dict()
        self._bbuck_player = dict()
        for i in range(1, 13):
            self._cards[i] = set()
            self._bbuck_player[i] = None
        for card in cards:
            month = card.month
            # BUG fix: bonus cards have month None, and `1 <= None` raises
            # TypeError; skip them explicitly.
            if month is None or not (1 <= month <= 12):
                continue
            self._cards[month].add(card)

    def by_month(self, month: int) -> Set[Card]:
        """All board cards of the given month (empty set when out of range).

        NOTE(review): returns the internal set, not a copy — callers must
        not mutate the result.
        """
        if month < 1 or month > 12: return set()
        return self._cards[month]

    def as_set(self) -> Set[Card]:
        """Union of all cards currently on the board."""
        res = set()
        for key in self._cards.keys():
            res.update(self._cards[key])
        return res

    def count(self) -> int:
        """Total number of cards on the board."""
        res = 0
        for key in self._cards.keys():
            res += len(self._cards[key])
        return res

    def whose_bbuck(self, month: int) -> Union[None, int]:
        """Index of the player holding a 'bbuck' on `month`, or None."""
        if not month in self._bbuck_player: return None
        return self._bbuck_player[month]

    def _set_bbuck(self, player_idx: Union[None, int], month: int) -> bool:
        """Record (or clear, with None) the bbuck owner for a month."""
        if month < 1 or month > 12: return False
        self._bbuck_player[month] = player_idx
        return True
class Game(object):
    """Driver for a single Go-Stop match between 2 or 3 players.

    NOTE(review): large parts of `_deal` (capture resolution, bbuck, jjock,
    dda-dack) are still unimplemented stubs — the `pass` branches below mark
    the TODOs. The 'go' polarity in the AskPresident answer (`go` False
    triggers the president claim) looks inverted; confirm with the intended
    question wording.
    """

    # Class-level defaults kept for backward compatibility; all fields are
    # re-assigned in __init__.
    _round_cnt = 0
    _players = None
    _stock = None
    _board = None
    _state = None
    _round = None
    _turn = None
    _answer = None
    _winner = None

    @property
    def num_player(self):
        """Number of players in this match."""
        return len(self._players)

    @property
    def goable_score(self):
        """Minimum score needed to say 'go' (None for unsupported counts)."""
        if self.num_player == 2: return 7
        elif self.num_player == 3: return 3

    def __init__(self, num_players: int = 2):
        self._state = GameState.Initialized
        self._turn = 0
        self._answer = None
        if num_players == 2:
            num_hand = 10
            num_board = 8
        elif num_players == 3:
            num_hand = 7
            num_board = 6
        else:
            # BUG fix: `raise NotImplemented` raises a TypeError; raise the
            # proper exception class instead.
            raise NotImplementedError('only 2 or 3 players are supported')
        need_init = True
        self._round_cnt = 0
        while need_init:
            need_init = False
            self._players = list()
            self._stock = list()
            # BUG fix: was initialized to None and then `.update()`ed below,
            # which raised AttributeError; start from an empty set.
            self._board = set()
            for _ in range(num_players):
                self._players.append(Player())
            for i in CardCode:
                self._stock.append(Card(i))
            # dealing cards
            random.shuffle(self._stock)
            for i in range(self.num_player):
                hand, self._stock = self._stock[:num_hand], self._stock[num_hand:]
                self._players[i]._hand.update(hand)
            board, self._stock = self._stock[:num_board], self._stock[num_board:]
            self._board.update(board)
            # the 1st player gets the bonus cards dealt to the board
            for c in self._board:  # type: Card
                if c.is_bonus: self._players[0]._acquired.add(c)
            self._board = Board(set([c for c in self._board if not c.is_bonus]))
            # if a 'president' (all four cards of one month) occurred on the
            # board, we re-initialize the whole game.
            cnt = dict()
            for c in self._board.as_set():  # type: Card
                m = c.month
                cnt[m] = cnt.get(m, 0) + 1
            for k in cnt.keys():
                if cnt[k] == 4:
                    need_init = True
                    break
        self._deal()
        return

    def winner(self):
        """Index of the winning player, or None while the game is running.

        NOTE(review): the original also compared against GameState.Draw after
        the Done check, which was unreachable; a drawn game already falls into
        the first branch and returns None.
        """
        if self._state != GameState.Done: return None
        return self._winner

    def _deal(self):
        """Advance the internal state machine after an answer was supplied."""
        # BUG fix: `turn_player` is a property; the original called it.
        cur_player = self.turn_player
        if self._state in [GameState.Initialized, GameState.AnsweredPresident]:
            while self._turn < len(self._players):
                # BUG fix: the original always queried the player captured
                # before the loop; ask the player whose turn it currently is.
                months = self.turn_player.president_months()
                if len(months) > 0:
                    self._state = GameState.AskPresident
                    return
                self._turn += 1
            self._turn = 0
            self._state = GameState.AskCardToThrow
            return
        elif self._state == GameState.AnsweredCardToThrow:
            hand_card = self._answer['card']
            shake = self._answer['shake_or_bomb']
            throw_res = cur_player._throw(hand_card)
            assert throw_res
            # BUG fix: Card has no `is_special` (bonus cards are flagged by
            # `is_bonus`), and `hand_card` may be None for a bomb throw.
            if hand_card is not None and hand_card.is_bonus:
                cur_player._get(set([hand_card]))
                for p in self._players:
                    if p is cur_player:
                        continue  # a bonus card only steals pi from opponents
                    res = p._remove_pi()
                    if res is not None:
                        cur_player._get(set([res]))
                self._state = GameState.AskCardToThrow
                return
            if len(self._stock) == 0:
                self._state = GameState.Done
                return
            # Flip stock cards until a non-bonus card appears; bonus cards
            # flipped on the way are kept in `stock_cards` too.
            stock_cards = []
            while True:
                stock_card = self._stock.pop(0)
                stock_cards.append(stock_card)
                if not stock_card.is_bonus: break
            hand_month = None
            stock_month = None
            if not hand_card is None: hand_month = hand_card.month
            for stock_card in stock_cards:
                stock_month = stock_card.month
                # BUG fix: the original referenced the undefined name
                # `stock_card_month` and compared None against 1.
                if stock_month is not None and 1 <= stock_month <= 12: break
            hand_cnt = 0 if hand_month is None else len(self._board.by_month(hand_month))
            stock_cnt = 0 if stock_month is None else len(self._board.by_month(stock_month))
            gather_pi_from_others = 0  # TODO: unused until capture logic is implemented
            if hand_month is None:
                # used a bomb card
                if stock_cnt == 0:
                    pass  # wasted
                elif stock_cnt == 1:
                    pass  # acquired
                elif stock_cnt == 2:
                    pass  # choose what to acquire
                elif stock_cnt == 3:
                    pass  # resolve bbuck
            else:
                # used a normal card
                if shake:
                    if hand_cnt == 0:
                        pass  # shaked
                    else:
                        pass  # bomb
                else:
                    if hand_month == stock_month:
                        if hand_cnt == 0:
                            pass  # jjock
                        elif hand_cnt == 1:
                            pass  # bbuck
                        elif hand_cnt == 2:
                            pass  # dda dack
                    else:
                        # TODO: what do we have to do when hand_cnt == 2 and stock_cnt == 2?
                        if hand_cnt == 0:
                            pass  # wasted
                        elif hand_cnt == 1:
                            pass  # acquired
                        elif hand_cnt == 2:
                            pass  # choose what to acquire
                        elif hand_cnt == 3:
                            pass  # resolve bbuck
                        if stock_cnt == 0:
                            pass  # wasted
                        elif stock_cnt == 1:
                            pass  # acquired
                        elif stock_cnt == 2:
                            pass  # choose what to acquire
                        elif stock_cnt == 3:
                            pass  # resolve bbuck

    def action_reqfields(self):
        """Keys the next `action()` dict must contain for the current state.

        NOTE(review): returns None for states that expect no answer.
        """
        if self._state == GameState.AskPresident:
            return ['go']
        elif self._state == GameState.AskCardToThrow:
            return ['card', 'shake_or_bomb']

    # for a valid action, this function returns True. if not, it returns False
    def action(self, ans: dict) -> bool:
        """Apply the current player's answer to the pending question."""
        # BUG fix: the method is named `action_reqfields`, and `turn_player`
        # is a property, not a method.
        required = self.action_reqfields()
        cur_player = self.turn_player
        for key in required:
            if not key in ans: return False
        self._answer = ans
        if self._state == GameState.AskPresident:
            if not self._answer['go']:
                cur_player._claim_president()
                self._state = GameState.Done
                self._winner = self._turn
                return True
            self._state = GameState.AnsweredPresident
            # BUG fix: a valid answer fell through and returned None. The
            # player declined to end the game, so continue scanning the
            # remaining players for a president claim.
            self._turn += 1
            self._deal()
            return True
        elif self._state == GameState.AskCardToThrow:
            hand_card = ans['card']
            shake = ans['shake_or_bomb']
            if hand_card is None:
                if cur_player.bomb_cnt < 1: return False
            else:
                if not cur_player.has(hand_card): return False
                if shake:
                    shakable_months = cur_player.shakable_months()
                    hand_month = hand_card.month
                    if not hand_month in shakable_months:
                        board_cnt = len(self._board.by_month(hand_month))
                        # BUG fix: by_month returns a set; count its size
                        # before adding it to an int.
                        hand_cnt = len(cur_player.by_month('hand', hand_month))
                        if board_cnt + hand_cnt < 4: return False
            self._state = GameState.AnsweredCardToThrow
            self._deal()
            return True
        return False

    @property
    def state(self):
        """Current GameState."""
        return self._state

    @property
    def turn(self):
        """Index of the player whose turn it is."""
        return self._turn

    @property
    def turn_player(self):
        """The Player whose turn it is."""
        return self._players[self._turn]

    def dump_str(self, indent: int = 4):
        """Human-readable dump of the whole game state."""
        res = 'Round #{0}\n'.format(self._round_cnt)
        # BUG fix: _board is a Board instance, not a card set.
        res += 'Board: {0}\n'.format(_cardset_to_str(self._board.as_set()))
        res += 'Stock: {0}\n'.format(_cardset_to_str(self._stock))
        for pidx, p in enumerate(self._players):
            res += 'Player #{0} ====\n'.format(pidx)
            res += p.dump_str(indent=indent)
        return res
class TestConsole(object):
    """Minimal harness that owns a Game instance for manual testing."""
    def __init__(self, num_players=2):
        """:param num_players: number of players to start the Game with."""
        self._game = Game(num_players)
# Start the test console only when run as a script so that importing this
# module stays free of the side effect of dealing a whole game.
if __name__ == '__main__':
    console = TestConsole(2)
|
<gh_stars>0
"""Gibbs sampling kernel"""
import collections
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.mcmc.internal import util as mcmc_util
from tensorflow_probability.python.experimental import unnest
from tensorflow_probability.python.internal import prefer_static
# Short aliases for the TensorFlow Probability namespaces used below.
tfd = tfp.distributions  # pylint: disable=no-member
tfb = tfp.bijectors  # pylint: disable=no-member
mcmc = tfp.mcmc  # pylint: disable=no-member
class GibbsKernelResults(
    mcmc_util.PrettyNamedTupleMixin,
    collections.namedtuple(
        "GibbsKernelResults", ["target_log_prob", "inner_results",],
    ),
):
    """Results for GibbsKernel: the overall target log prob (in
    untransformed space) plus the per-component inner kernel results, in
    kernel_list order."""
    __slots__ = ()
def get_target_log_prob(results):
    """Fetches a target log prob from a (possibly nested) results structure."""
    tlp = unnest.get_innermost(results, "target_log_prob")
    return tlp
def update_target_log_prob(results, target_log_prob):
    """Puts a target log prob into a (possibly nested) results structure."""
    updated = unnest.replace_innermost(results, target_log_prob=target_log_prob)
    return updated
def maybe_transform_value(tlp, state, kernel, direction):
    """Adjust `tlp` by the kernel's bijector log-det-Jacobian when `kernel`
    is a TransformedTransitionKernel; return it unchanged otherwise.

    :param direction: "forward" (add the Jacobian term) or "inverse"
                      (subtract it).
    :raises AttributeError: for any other direction (transformed kernels only).
    """
    if not isinstance(kernel, tfp.mcmc.TransformedTransitionKernel):
        return tlp
    if direction not in ("forward", "inverse"):
        raise AttributeError("`direction` must be `forward` or `inverse`")
    event_ndims = prefer_static.rank(state) - prefer_static.rank(tlp)
    ldj = kernel.bijector.inverse_log_det_jacobian(state, event_ndims=event_ndims)
    return tlp + ldj if direction == "forward" else tlp - ldj
class GibbsKernel(mcmc.TransitionKernel):
    """Gibbs sampling scheme composed of per-state-part transition kernels."""

    def __init__(self, target_log_prob_fn, kernel_list, name=None):
        """Build a Gibbs sampling scheme from component kernels.

        :param target_log_prob_fn: a function that takes `state` arguments
                                   and returns the target log probability
                                   density.
        :param kernel_list: a list of tuples `(state_part_idx, kernel_make_fn)`.
                            `state_part_idx` denotes the index (relative to
                            positional args in `target_log_prob_fn`) of the
                            state the kernel updates. `kernel_make_fn` takes
                            arguments `target_log_prob_fn` and `state`, returning
                            a `tfp.mcmc.TransitionKernel`.
        :returns: an instance of `GibbsKernel`
        """
        # TODO(review): should verify that every component kernel reports
        # is_calibrated before claiming calibration below.
        self._parameters = dict(
            target_log_prob_fn=target_log_prob_fn,
            kernel_list=kernel_list,
            name=name,
        )

    @property
    def is_calibrated(self):
        """Whether the kernel produces correctly-distributed samples."""
        return True

    @property
    def target_log_prob_fn(self):
        """The joint target log prob function this scheme samples from."""
        return self._parameters["target_log_prob_fn"]

    @property
    def kernel_list(self):
        """The `(state_part_idx, kernel_make_fn)` tuples, in sweep order."""
        return self._parameters["kernel_list"]

    def one_step(self, current_state, previous_results, seed=None):
        """We iterate over the state elements, calling each kernel in turn.

        The `target_log_prob` is forwarded to the next `previous_results`
        such that each kernel has a current `target_log_prob` value.
        Transformations are automatically performed if the kernel is of
        type tfp.mcmc.TransformedTransitionKernel.
        In graph and XLA modes, the for loop should be unrolled.
        """
        # BUG fix: work on a shallow copy so the caller's state list is not
        # mutated in place by the per-part updates below.
        next_state = list(current_state)
        next_results = []
        untransformed_target_log_prob = previous_results.target_log_prob
        for i, (state_part_idx, kernel_fn) in enumerate(self.kernel_list):

            def target_log_prob_fn(state_part):
                # Evaluate the joint target with only this state part swapped
                # in. Closes over the loop variables by design; it is only
                # used within the iteration that defines it.
                next_state[
                    state_part_idx  # pylint: disable=cell-var-from-loop
                ] = state_part
                return self.target_log_prob_fn(*next_state)

            kernel = kernel_fn(target_log_prob_fn, next_state)
            # Seed the inner kernel with the log prob expressed in its own
            # (possibly transformed) space.
            previous_kernel_results = update_target_log_prob(
                previous_results.inner_results[i],
                maybe_transform_value(
                    tlp=untransformed_target_log_prob,
                    state=next_state[state_part_idx],
                    kernel=kernel,
                    direction="inverse",
                ),
            )
            next_state[state_part_idx], next_kernel_results = kernel.one_step(
                next_state[state_part_idx], previous_kernel_results, seed
            )
            next_results.append(next_kernel_results)
            # Convert the inner kernel's log prob back to untransformed
            # space for the next component in the sweep.
            untransformed_target_log_prob = maybe_transform_value(
                tlp=get_target_log_prob(next_kernel_results),
                state=next_state[state_part_idx],
                kernel=kernel,
                direction="forward",
            )
        return (
            next_state,
            GibbsKernelResults(
                target_log_prob=untransformed_target_log_prob,
                inner_results=next_results,
            ),
        )

    def bootstrap_results(self, current_state):
        """Initialize a GibbsKernelResults for the given starting state."""
        inner_results = []
        untransformed_target_log_prob = 0.0
        for state_part_idx, kernel_fn in self.kernel_list:

            def target_log_prob(state_part):
                # NOTE(review): `state_part` is intentionally ignored —
                # bootstrap only needs the log prob at the initial state.
                return self.target_log_prob_fn(*current_state)

            kernel = kernel_fn(target_log_prob, current_state)
            kernel_results = kernel.bootstrap_results(
                current_state[state_part_idx]
            )
            inner_results.append(kernel_results)
            untransformed_target_log_prob = maybe_transform_value(
                tlp=get_target_log_prob(kernel_results),
                state=current_state[state_part_idx],
                kernel=kernel,
                direction="forward",
            )
        # BUG fix: removed leftover debug `tf.print` calls that spammed every
        # bootstrap with the full inner-results structure.
        return GibbsKernelResults(
            target_log_prob=untransformed_target_log_prob,
            inner_results=inner_results,
        )
|
# import onnx
# import torch.onnx
# from models import cropper
# from models import build_model
#
#
# device = torch.device('cpu')
#
#
# def static_onnx_converter(model_path, onnx_file):
# # load checkpoint
# # checkpoint = torch.load(model_path, map_location=device)
# # # config for model architecture
# # config = checkpoint['config']
# # model = build_model(config['arch'])
# # model.load_state_dict(checkpoint['state_dict'])
# # model.to(device)
# # model.eval()
#
# model = cropper.U2NET()
# if torch.cuda.is_available():
# model.load_state_dict(torch.load(model_path))
# model.to(torch.device("cuda"))
# else:
# model.load_state_dict(torch.load(model_path, map_location='cpu'))
# model.eval()
#
# x = torch.rand(1, 3, 1024, 512, requires_grad=True)
#
# print('Converting static model ...')
# torch.onnx.export(model=model, # model to be exported
# args=x, # model input (or a tuple for multiple inputs)
# f=onnx_file, # where to save onnx model (a file-like object)
# # export_params=True, # store the trained parameter weights inside the model file
# opset_version=12, # the ONNX version that the model is exported to (Default = 9)
# # main opset = 13; stable opsets = [7, 8, 9, 10, 11, 12]
# # do_constant_folding=True, # whether to execute constant folding for optimization
# input_names=['input'], # the model's input names
# output_names=['output']) # the model's output names
# print('Converted static onnx')
#
#
# def dynamic_onnx_converter(static_onnx_path,
# dynamic_onnx_path):
# # save_folder = path.dirname(path.dirname(path.join(model_path)))
# # static_onnx_path = path.join(save_folder, static_onnx_file)
# # dynamic_onnx_path = path.join(save_folder, dynamic_onnx_file)
#
# model = onnx.load(static_onnx_path)
# model.graph.input[0].type.tensor_type.shape.dim[0].dim_param = '?'
# model.graph.input[0].type.tensor_type.shape.dim[2].dim_param = '?'
# model.graph.input[0].type.tensor_type.shape.dim[3].dim_param = '?'
#
# print('Converting dynamic model ...')
# onnx.save(model, dynamic_onnx_path)
# print('Converted dynamic onnx')
#
#
# # def init_args():
# # parser = argparse.ArgumentParser(description='Convert_torch_to_onnx')
# # parser.add_argument('--model_path',
# # # default='C:/Users/ADMIN/Desktop/u2net.pth',
# # default=r'D:\OCR\Localization\text_localization\models\detect_r34.pth',
# # type=str,
# # help='pytorch model path for conversion')
# # # parser.add_argument('--onnx_path', default='model.onnx', type=str, help='onnx model file name after conversion')
# # args = parser.parse_args()
# # return args
#
#
# if __name__ == '__main__':
# torch_file = 'C:/Users/ADMIN/Desktop/u2net.pth'
# static_onnx_file = 'model.onnx'
# dynamic_onnx_file = 'model_dynamic.onnx'
# import gc
# gc.collect()
# # static_onnx_converter(model_path=torch_file,
# # onnx_file=static_onnx_file)
#
# dynamic_onnx_converter(static_onnx_path=static_onnx_file,
# dynamic_onnx_path=dynamic_onnx_file)
# # import gc
# # gc.collect()
# # np.testing.assert_allclose(to_numpy(torch_out), ort_outputs[0], rtol=1e-03, atol=1e-05)
|
<filename>jftools/ipynbimport.py
# coding: utf-8
## Importing IPython Notebooks as Modules
# It is a common problem that people want to import code from IPython Notebooks.
# This is made difficult by the fact that Notebooks are not plain Python files,
# and thus cannot be imported by the regular Python machinery.
#
# Fortunately, Python provides some fairly sophisticated [hooks](http://www.python.org/dev/peps/pep-0302/) into the import machinery,
# so we can actually make IPython notebooks importable without much difficulty,
# and only using public APIs.
import io, os, sys, types
try:
import nbformat
except ImportError:
from IPython import nbformat
from IPython.core.interactiveshell import InteractiveShell
# Import hooks typically take the form of two objects:
#
# 1. a Module **Loader**, which takes a module name (e.g. `'IPython.display'`), and returns a Module
# 2. a Module **Finder**, which figures out whether a module might exist, and tells Python what **Loader** to use
def find_notebook(fullname, path=None):
    """Locate the notebook file for a dotted module name.

    "foo.bar" is looked up as "bar.ipynb" in each directory of *path*
    (default: the current directory).  As a fallback, underscores are
    swapped for spaces so ``import Foo_Bar`` can match "Foo Bar.ipynb".
    Returns the notebook path, or None when nothing matches.
    """
    leaf = fullname.rsplit('.', 1)[-1]
    search_dirs = path if path else ['']
    for directory in search_dirs:
        candidate = os.path.join(directory, leaf + ".ipynb")
        if os.path.isfile(candidate):
            return candidate
        # let `import Notebook_Name` find "Notebook Name.ipynb"
        spaced = candidate.replace("_", " ")
        if os.path.isfile(spaced):
            return spaced
### Notebook Loader
# Here we have our Notebook Loader.
# It's actually quite simple - once we figure out the filename of the module,
# all it does is:
#
# 1. load the notebook document into memory
# 2. create an empty Module
# 3. execute every cell in the Module namespace
#
# Since IPython cells can have extended syntax,
# the IPython transform is applied to turn each of these cells into their pure-Python counterparts before executing them.
# If all of your notebook cells are pure-Python,
# this step is unnecessary.
class NotebookLoader(object):
    """Module Loader for IPython Notebooks"""
    def __init__(self, path=None):
        # path: optional list of directories in which to search for notebooks
        self.shell = InteractiveShell.instance()
        self.path = path
    def load_module(self, fullname):
        """import a notebook as a module

        Executes every code cell of the notebook in a fresh module
        namespace and registers the module in sys.modules.
        """
        path = find_notebook(fullname, self.path)
        print ("importing IPython notebook from %s" % path)
        # load the notebook object (4 = notebook format version)
        with io.open(path, 'r', encoding='utf-8') as f:
            nb = nbformat.read(f, 4)
        # create the module and add it to sys.modules
        # if name in sys.modules:
        #    return sys.modules[name]
        mod = types.ModuleType(fullname)
        mod.__file__ = path
        mod.__loader__ = self
        sys.modules[fullname] = mod
        # extra work to ensure that magics that would affect the user_ns
        # actually affect the notebook module's ns
        save_user_ns = self.shell.user_ns
        self.shell.user_ns = mod.__dict__
        try:
            for cell in nb.cells:
                if cell.cell_type == 'code':
                    # transform the input to executable Python
                    code = self.shell.input_transformer_manager.transform_cell(cell.source)
                    # run the code in the module
                    exec(code, mod.__dict__)
        finally:
            # always restore the shell's original user namespace
            self.shell.user_ns = save_user_ns
        return mod
### The Module Finder
# The finder is a simple object that tells you whether a name can be imported,
# and returns the appropriate loader.
# All this one does is check, when you do:
#
# ```python
# import mynotebook
# ```
#
# it checks whether `mynotebook.ipynb` exists.
# If a notebook is found, then it returns a NotebookLoader.
#
# Any extra logic is just for resolving paths within packages.
class NotebookFinder(object):
    """Module finder that locates IPython Notebooks"""
    def __init__(self):
        # cache of NotebookLoader instances, keyed by the search path
        self.loaders = {}
    def find_module(self, fullname, path=None):
        """Return a cached NotebookLoader when a matching notebook exists."""
        if not find_notebook(fullname, path):
            return None
        # lists aren't hashable, so collapse the path into a string key
        key = os.path.sep.join(path) if path else path
        try:
            return self.loaders[key]
        except KeyError:
            loader = NotebookLoader(path)
            self.loaders[key] = loader
            return loader
### Register the hook
# Now we register the `NotebookFinder` with `sys.meta_path`
def install():
    """Register a NotebookFinder on sys.meta_path so notebooks are importable."""
    sys.meta_path.append(NotebookFinder())
|
#!/usr/bin/env python
#coding:utf-8
from django.db import models
# Create your models here
from django.contrib.auth.models import AbstractUser
###---------- users------------------###
'''
class userType(models.Model):
name = models.CharField('用户类型',max_length=200,default='user')
create_time = models.DateTimeField('创建时间', auto_now_add=True)
update_time = models.DateTimeField('更新时间', auto_now=True)
class Meta:
verbose_name = '用户类型'
verbose_name_plural = verbose_name
def __unicode__(self):
return self.name
'''
'''
class userGroup(models.Model):
name = models.CharField('所属组',max_length=200,default='default')
create_time = models.DateTimeField('创建时间', auto_now_add=True)
update_time = models.DateTimeField('更新时间', auto_now=True)
class Meta:
verbose_name = '用户组'
verbose_name_plural = verbose_name
def __unicode__(self):
return self.name
'''
class userInfo(AbstractUser):
    """Login user: Django's AbstractUser plus workflow ordering and a memo."""
    workflow_order = models.IntegerField('工作流编号',default=0,)
    memo = models.TextField('备注', blank=True, null=True)
    class Meta:
        verbose_name = '登录用户'
        verbose_name_plural = verbose_name
    def __unicode__(self):
        # Bug fix: the old body returned a tuple and referenced a
        # non-existent ``usertype`` attribute (AbstractUser declares no such
        # field); Django expects a plain string here.
        return self.username
    # Django on Python 3 calls __str__; alias so admin listings render.
    __str__ = __unicode__
'''
class userInfo(models.Model):
username = models.CharField('用户名',max_length=200)
email = models.EmailField('邮箱')
password = models.CharField('密码',max_length=100)
usertype = models.ForeignKey(userType,verbose_name='用户类型',on_delete=models.CASCADE)
#STATUS_CHOICE = (
# ('0', '否'),
# ('1', '是'),
#)
is_active = models.BooleanField('激活',max_length=200,default=True,)
group = models.ManyToManyField(userGroup,verbose_name='用户组',blank=True,)
workflow_order = models.IntegerField('工作流编号',default=0,)
#ROLE_CHOICE = (
# ('0', '否'),
# ('1', '是'),
#)
#approval = models.CharField('是否部门主管',max_length=128,choices=ROLE_CHOICE,default='0')
create_time = models.DateTimeField('创建时间', auto_now_add=True)
update_time = models.DateTimeField('更新时间', auto_now=True)
memo = models.TextField('备注', blank=True, null=True)
class Meta:
verbose_name = '登录用户'
verbose_name_plural = verbose_name
def __unicode__(self):
return self.username,self.usertype
'''
'''
class UserProfile(models.Model):
name = models.CharField('名字',max_length=256)
email = models.EmailField('邮箱',max_length=256)
mobile = models.CharField('手机',max_length=256)
memo = models.TextField('备注',blank=True,null=True)
create_time = models.DateTimeField('创建时间',auto_now_add=True)
update_time = models.DateTimeField('更新时间',auto_now=True)
class Meta:
verbose_name = '管理用户信息'
verbose_name_plural = verbose_name
def __unicode__(self):
return self.name
'''
'''
class Admininfo(models.Model):
user = models.OneToOneField(UserProfile,on_delete=models.CASCADE)
username = models.CharField('用户名',max_length=256)
password = models.CharField('密码',max_length=256)
class Meta:
verbose_name = '管理用户'
verbose_name_plural = verbose_name
def __unicode__(self):
return self.username
'''
class monitor(models.Model):
    """Monitoring permission entry."""
    name = models.CharField('名称',max_length=256)
    class Meta:
        verbose_name = '监控权限'
        verbose_name_plural = verbose_name
    def __unicode__(self):
        return self.name
###---------- workflow------------------###
class wf_type(models.Model):
    """Workflow ticket type (e.g. deployment requests)."""
    name = models.CharField('请求类型', max_length=128, default='上线', )
    create_time = models.DateTimeField('创建时间', auto_now_add=True)
    update_time = models.DateTimeField('更新时间', auto_now=True)
    class Meta:
        verbose_name = '工单类型'
        verbose_name_plural = verbose_name
    def __unicode__(self):
        return self.name
class wf_business(models.Model):
    """Business unit with its repository, admin and approvers."""
    name = models.CharField('业务单元', max_length=128, default='default', )
    # NOTE(review): default=183 is an int for a CharField -- confirm intent.
    proj_id = models.CharField('项目id',max_length=128,default=183)
    repo = models.CharField('项目地址',max_length=128,)
    admin = models.ForeignKey('userInfo',on_delete=models.CASCADE,verbose_name='业务管理员',related_name='admin',)
    approval = models.ManyToManyField('userInfo', verbose_name='审批人', blank=True, related_name='approval',)
    create_time = models.DateTimeField('创建时间', auto_now_add=True)
    update_time = models.DateTimeField('更新时间', auto_now=True)
    class Meta:
        verbose_name = '业务单元'
        verbose_name_plural = verbose_name
    def __unicode__(self):
        return self.name
class wf_info(models.Model):
    """A workflow ticket moving through its approval flow."""
    sn = models.CharField('请求编号',max_length=128,)
    title = models.CharField('标题',max_length=256,)
    sponsor = models.CharField('发起人',max_length=128,)
    type = models.ForeignKey(wf_type,verbose_name='请求类型',on_delete=models.CASCADE)
    content = models.TextField('请求内容',blank=True,null=True)
    status = models.CharField('工单状态',max_length=128,default='未提交')
    business = models.ForeignKey('wf_business',on_delete=models.CASCADE)
    flow_id = models.IntegerField('流程id',default=-1)
    assignee = models.CharField('当前处理人',max_length=128,)
    next_assignee = models.CharField('下个处理人',max_length=128,)
    create_time = models.DateTimeField('创建时间',auto_now_add=True)
    update_time = models.DateTimeField('更新时间', auto_now=True)
    finish_time = models.DateTimeField('完成时间', auto_now=True)
    #duration = models.BigIntegerField('耗时')
    memo = models.TextField('备注',blank=True,null=True)
    class Meta:
        verbose_name = '工单流程'
        verbose_name_plural = verbose_name
    def __unicode__(self):
        # Bug fix: previously returned a tuple; Django expects a string.
        return '%s:%s' % (self.type, self.status)
    # Django on Python 3 calls __str__; alias so admin listings render.
    __str__ = __unicode__
class wf_info_process_history(models.Model):
    """History snapshot of a workflow ticket, including approval outcome."""
    sn = models.CharField('请求编号',max_length=128,)
    title = models.CharField('标题',max_length=256,)
    sponsor = models.CharField('发起人',max_length=128,)
    type = models.ForeignKey(wf_type,verbose_name='请求类型',on_delete=models.CASCADE)
    content = models.TextField('请求内容',blank=True,null=True)
    status = models.CharField('工单状态',max_length=128,default='处理中')
    business = models.ForeignKey('wf_business', on_delete=models.CASCADE)
    flow_id = models.IntegerField('流程id',)
    assignee = models.CharField('当前处理人', max_length=128, )
    next_assignee = models.CharField('下个处理人', max_length=128, )
    create_time = models.DateTimeField('创建时间',auto_now_add=True)
    update_time = models.DateTimeField('更新时间', auto_now=True)
    finish_time = models.DateTimeField('完成时间', auto_now=True)
    #duration = models.BigIntegerField('耗时')
    memo = models.TextField('备注',blank=True,null=True)
    suggest = models.CharField('审批结果', max_length=128,blank=True,null=True )
    suggest_content = models.TextField('审批意见', blank=True,null=True)
    class Meta:
        verbose_name = '工单流程历史'
        verbose_name_plural = verbose_name
    def __unicode__(self):
        # Bug fix: previously returned a tuple; Django expects a string.
        return '%s:%s' % (self.type, self.status)
    # Django on Python 3 calls __str__; alias so admin listings render.
    __str__ = __unicode__
###---------- task-deploy------------------###
class scriptType(models.Model):
    """Deployment script language/type (e.g. shell)."""
    type = models.CharField('脚本类型',max_length=200,default='shell',)
    class Meta:
        verbose_name = '脚本类型'
        verbose_name_plural = verbose_name
    def __unicode__(self):
        return self.type
class wf_business_deploy_history(models.Model):
    """One deployment run of a business unit: who, what ref, result, logs."""
    name = models.CharField('业务单元', max_length=128, default='default', )
    proj_id = models.CharField('项目id',max_length=128,default=183)
    repo = models.CharField('项目地址',max_length=128,)
    branch = models.CharField('项目分支', max_length=128, )
    tag = models.CharField('项目tag', max_length=128, )
    # NOTE(review): field name "opertator" is a typo for "operator"; renaming
    # would require a schema migration, so it is left as-is.
    opertator = models.ForeignKey('userInfo',on_delete=models.CASCADE)
    update_time = models.DateTimeField('发布时间', auto_now=True)
    state = models.CharField('发布状态',max_length=128)
    logs = models.TextField('发布日志',blank=True,null=True)
    class Meta:
        verbose_name = '发布历史'
        verbose_name_plural = verbose_name
    def __unicode__(self):
        return self.name
###---------- assets------------------###
'''
class UserProfile(models.Model):
name = models.CharField('名字',max_length=256)
email = models.EmailField('邮箱',max_length=256)
mobile = models.CharField('手机',max_length=256)
memo = models.TextField('备注',blank=True,null=True)
create_time = models.DateTimeField('创建时间',auto_now_add=True)
update_time = models.DateTimeField('更新时间',auto_now=True)
class Meta:
verbose_name = '管理用户信息'
verbose_name_plural = verbose_name
def __unicode__(self):
return self.name
class Admininfo(models.Model):
user = models.OneToOneField(UserProfile,on_delete=models.CASCADE)
username = models.CharField('用户名',max_length=256)
password = models.CharField('密码',max_length=256)
class Meta:
verbose_name = '管理用户'
verbose_name_plural = verbose_name
def __unicode__(self):
return self.username
'''
class DeviceStatus(models.Model):
    """Lifecycle status of a device (default: not yet online)."""
    name = models.CharField('名字', max_length=256,default='未上线')
    memo = models.TextField('备注', blank=True, null=True)
    class Meta:
        verbose_name = '设备状态'
        verbose_name_plural = verbose_name
    def __unicode__(self):
        return self.name
class DeviceType(models.Model):
    """Category of a device (server, network gear, ...)."""
    name = models.CharField('名称',max_length=256)
    memo = models.TextField('备注',blank=True,null=True)
    class Meta:
        verbose_name = '设备类型'
        verbose_name_plural = verbose_name
    def __unicode__(self):
        return self.name
class Asset(models.Model):
    """Master asset record; hardware detail tables hang off it one-to-one."""
    device_type =models.ForeignKey(DeviceType,on_delete=models.CASCADE)
    device_status = models.ForeignKey(DeviceStatus,on_delete=models.CASCADE)
    cabinet_num = models.CharField('机柜号',max_length=256,blank=True,null=True)
    cabinet_order = models.CharField('机架号',max_length=256,blank=True,null=True)
    create_time = models.DateTimeField('创建时间',auto_now_add=True,blank=True,null=True)
    update_time = models.DateTimeField('更新时间',auto_now=True,blank=True,null=True)
    idc = models.ForeignKey('IDC',verbose_name='idc机房',blank=True,null=True,on_delete=models.CASCADE)
    business_unit = models.ForeignKey('wf_business',verbose_name='所属业务线',blank=True,null=True,on_delete=models.CASCADE)
    # related_name='+' disables the reverse relation from userInfo
    admin = models.ForeignKey('userInfo',verbose_name='设备管理员',blank=True,null=True,related_name='+',on_delete=models.CASCADE)
    contract = models.ForeignKey('Contract',verbose_name='合同',blank=True,null=True,on_delete=models.CASCADE)
    tag = models.ManyToManyField('Tag',verbose_name='标签',blank=True)
    memo = models.TextField('备注', blank=True, null=True)
    class Meta:
        verbose_name = '资产总表'
        verbose_name_plural = verbose_name
    def __unicode__(self):
        return 'type:%s %s:%s' %(self.device_type,self.cabinet_num,self.cabinet_order)
class Server(models.Model):
    """Server hardware details attached to an Asset."""
    asset = models.OneToOneField(Asset,on_delete=models.CASCADE)
    hostname = models.CharField('主机名', max_length=128, blank=True, unique=True)
    sn = models.CharField('SN号',max_length=256)
    manufactory = models.CharField('厂商',max_length=256,blank=True,null=True)
    model = models.CharField('型号',max_length=256,blank=True,null=True)
    bios = models.CharField('BIOS', max_length=256, blank=True, null=True)
    # True when the machine is a virtual machine
    type = models.BooleanField('虚拟机',default=False)
    memo = models.TextField('备注', blank=True, null=True)
    create_time = models.DateTimeField('创建时间',auto_now_add=True)
    update_time = models.DateTimeField('更新时间',auto_now=True)
    class Meta:
        verbose_name = '服务器信息'
        verbose_name_plural = verbose_name
        # NOTE(review): index_together is deprecated in newer Django versions
        # in favor of Meta.indexes -- confirm the project's Django version.
        index_together = ['sn','asset']
    def __unicode__(self):
        return self.sn
class NetworkDevice(models.Model):
    """Network equipment details attached to an Asset."""
    name = models.CharField('设备名称', max_length=256, blank=True)
    asset = models.OneToOneField(Asset, on_delete=models.CASCADE)
    sn = models.CharField('SN号', max_length=256)
    manufactory = models.CharField('厂商', max_length=256, blank=True, null=True)
    model = models.CharField('型号', max_length=256, blank=True, null=True)
    memo = models.TextField('备注', blank=True)
    create_time = models.DateTimeField('创建时间', auto_now_add=True, blank=True)
    update_time = models.DateTimeField('更新时间', auto_now=True, blank=True)
    class Meta:
        verbose_name = '网络设备信息'
        verbose_name_plural = verbose_name
    def __unicode__(self):
        return '%s:%s' % (self.name, self.sn)
class CPU(models.Model):
    """CPU component of a Server."""
    name = models.CharField('CPU名称', max_length=256, blank=True)
    model = models.CharField('CPU型号', max_length=256, blank=True)
    core_num = models.IntegerField('CPU核数',blank=True,default=1)
    create_time = models.DateTimeField('创建时间', auto_now_add=True, blank=True)
    update_time = models.DateTimeField('更新时间', auto_now=True, blank=True)
    server_info = models.ForeignKey(Server,on_delete=models.CASCADE)
    memo = models.TextField('备注', blank=True, null=True)
    class Meta:
        verbose_name = 'CPU信息'
        verbose_name_plural = verbose_name
    def __unicode__(self):
        return '%s:%s' % (self.name, self.model)
class Memory(models.Model):
    """Memory (RAM) component of a Server."""
    slot = models.CharField('插槽名称', max_length=256, blank=True)
    model = models.CharField('内存型号', max_length=256, blank=True)
    capacity = models.FloatField('内存容量', blank=True)
    ifac_type = models.CharField('接口类型', max_length=256, blank=True)
    create_time = models.DateTimeField('创建时间', auto_now_add=True, blank=True)
    update_time = models.DateTimeField('更新时间', auto_now=True, blank=True)
    server_info = models.ForeignKey(Server,on_delete=models.CASCADE)
    memo = models.TextField('备注', blank=True, null=True)
    class Meta:
        verbose_name = '内存信息'
        verbose_name_plural = verbose_name
    def __unicode__(self):
        return '%s:%s' % (self.slot, self.capacity)
class Disk(models.Model):
    """Disk component of a Server."""
    slot = models.CharField('插槽名称', max_length=256, blank=True)
    model = models.CharField('磁盘型号', max_length=256, blank=True)
    capacity = models.FloatField('磁盘容量',blank=True)
    ifac_type = models.CharField('接口类型',max_length=256,blank=True)
    create_time = models.DateTimeField('创建时间', auto_now_add=True, blank=True)
    update_time = models.DateTimeField('更新时间', auto_now=True, blank=True)
    server_info = models.ForeignKey(Server,on_delete=models.CASCADE)
    memo = models.TextField('备注', blank=True, null=True)
    class Meta:
        verbose_name = '磁盘信息'
        verbose_name_plural = verbose_name
    def __unicode__(self):
        return '%s:%s' %(self.slot,self.capacity)
class NIC(models.Model):
    """Network interface card of a Server."""
    name = models.CharField('网卡名称',max_length=256,blank=True)
    model = models.CharField('网卡型号',max_length=256,blank=True)
    ipaddr = models.GenericIPAddressField('ip地址')
    mac = models.CharField('MAC地址',max_length=256)
    netmask = models.CharField('子网掩码',max_length=256,blank=True)
    create_time = models.DateTimeField('创建时间', auto_now_add=True, blank=True)
    update_time = models.DateTimeField('更新时间', auto_now=True, blank=True)
    server_info = models.ForeignKey(Server,on_delete=models.CASCADE)
    memo = models.TextField('备注', blank=True, null=True)
    class Meta:
        verbose_name = '网卡信息'
        verbose_name_plural = verbose_name
    def __unicode__(self):
        return '%s:%s' %(self.name,self.ipaddr)
class Contract(models.Model):
    """Purchase/support contract an Asset can belong to."""
    sn = models.CharField('合同号',max_length=128,unique=True)
    name = models.CharField('合同名称',max_length=256)
    cost = models.IntegerField('合同金额')
    start_date = models.DateTimeField(blank=True)
    end_date = models.DateTimeField(blank=True)
    license_num = models.IntegerField('license数量',blank=True)
    memo = models.TextField('备注', blank=True, null=True)
    create_time = models.DateTimeField('创建时间',auto_now_add=True,blank=True)
    update_time = models.DateTimeField('更新时间',auto_now=True,blank=True)
    class Meta:
        verbose_name = '合同'
        verbose_name_plural = verbose_name
    def __unicode__(self):
        return self.name
'''
class BusinessUnit(models.Model):
name = models.CharField('业务线',max_length=128,unique=True)
contact = models.ForeignKey('UserProfile',default=None,on_delete=models.CASCADE)
memo = models.TextField('备注', blank=True, null=True)
class Meta:
verbose_name = '业务线'
verbose_name_plural = verbose_name
def __unicode__(self):
return self.name
'''
class Tag(models.Model):
    """Free-form label attachable to assets."""
    name = models.CharField('标签名',max_length=256)
    memo = models.TextField('备注', blank=True)
    class Meta:
        verbose_name = '标签'
        verbose_name_plural = verbose_name
    def __unicode__(self):
        return self.name
class IDC(models.Model):
    """Data-center room: region, room name and floor."""
    region_display_name = models.CharField('区域名称',max_length=256,default=None)
    display_name = models.CharField('机房名称',max_length=256,default=None)
    floor = models.IntegerField('楼层',default=1)
    memo = models.TextField('备注', blank=True)
    class Meta:
        verbose_name = '机房'
        verbose_name_plural = verbose_name
    def __unicode__(self):
        return 'region:%s idc:%s floor:%s' %(self.region_display_name,self.display_name,self.floor)
class HandleLog(models.Model):
    """Audit log entry recording who did what, and when."""
    handle_type = models.CharField('操作类型',max_length=256)
    summary = models.CharField(max_length=256)
    detail = models.TextField()
    creater = models.ForeignKey(userInfo,on_delete=models.CASCADE)
    create_at = models.DateTimeField('创建时间',auto_now_add=True,blank=True)
    memo = models.TextField('备注', blank=True)
    class Meta:
        verbose_name = '操作日志'
        verbose_name_plural = verbose_name
    def __unicode__(self):
        return self.handle_type
|
<filename>certgen.py
#!/usr/bin/env python3
# MIT License
# Copyright (c) 2020 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# This is a python script to create a certificate request and then process it
# using the radius server.
import argparse
import pipes
import subprocess
import sys
import time
import paramiko
class Color:
    """ANSI escape sequences for colored/styled terminal output."""
    PURPLE = "\033[95m"
    CYAN = "\033[96m"
    DARKCYAN = "\033[36m"
    BLUE = "\033[94m"
    GREEN = "\033[92m"
    YELLOW = "\033[93m"
    RED = "\033[91m"
    BOLD = "\033[1m"
    UNDERLINE = "\033[4m"
    END = "\033[0m"  # resets all attributes
def cred():
    """Print the CertGen credits banner in dark cyan."""
    banner_lines = (
        "*********************************",
        "* CertGen Utility *",
        "* *",
        "* Written and maintained by: *",
        "* <NAME> *",
        "* <EMAIL> *",
        "* https://github.com/strohmy86 *",
        "* *",
        "*********************************",
    )
    print(Color.DARKCYAN + "\n" + "\n".join(banner_lines) + "\n\n" + Color.END)
# Define global variables
path = "/media/nss/VOL1/shared/madhs01rad1/requests/"
parser = argparse.ArgumentParser(
description="This is a python script\
to create a certificate request and\
then process it using the radius server."
)
parser.add_argument(
"name",
metavar="Name",
default="",
type=str,
help="Name of the PC that a certificate\
is being generated for.",
)
args = parser.parse_args()
name = args.name
# Specify private key file
k = paramiko.RSAKey.from_private_key_file("/home/lstrohm/.ssh/id_rsa")
# Configure SSH connections
fp = paramiko.SSHClient()
fp.set_missing_host_key_policy(paramiko.AutoAddPolicy())
fp.connect("10.14.10.12", username="root", pkey=k)
rad = paramiko.SSHClient()
rad.set_missing_host_key_policy(paramiko.AutoAddPolicy())
rad.connect("10.14.0.26", username="root", pkey=k)
# Function to close all SSH connections and exit the script
def close():
    """Close both SSH connections and exit the script (status 0)."""
    time.sleep(1)
    fp.close()
    rad.close()
    sys.exit()
# Initiates certificate request
def start(path, name):
    """Kick off a certificate request for *name* under *path*.

    Creates an empty request file on the file server over SSH, then hands
    off to gen().  Exits with status 1 when no machine name was given.
    """
    if not name:
        print(Color.RED + "No machine name specified." + Color.END)
        time.sleep(1)
        # Bug fix: close the SSH connections before bailing out; the old
        # error path exited and left them open.
        fp.close()
        rad.close()
        sys.exit(1)
    print(
        Color.YELLOW
        + "Creating certificate request for "
        + Color.BOLD
        + name
        + Color.END
    )
    stdin, stdout, stderr = fp.exec_command("touch " + path + name)
    time.sleep(2)
    gen(path, name)
# Checks to make sure the request was successful, then runs the cert-gen script
def gen(path, name):
    """Verify the request file landed, then run the radius cert-gen script.

    On success hands off to certcheck(); on failure closes the SSH
    connections and exits with status 1.
    """
    resp = subprocess.call(  # Checks for the request file
        [
            "ssh",
            "-q",
            "-i",
            "/home/lstrohm/.ssh/id_rsa",
            "root@10.14.10.12",
            "test -e " + pipes.quote(path + name),
        ]
    )
    if resp == 0:  # If the file is present, runs the cert-gen script
        print(
            Color.CYAN + "Request created successfully, awaiting certificate",
            "generation..." + Color.END,
        )
        stdin, stdout, stderr = rad.exec_command("/root/certgen/cert-gen")
        time.sleep(3)
        certcheck(path, name)
    else:  # If request file is missing, recommends trying again
        print(
            Color.RED
            + "Certificate request failed! Please try again."
            + Color.END
        )
        # Bug fix: close() calls sys.exit() (status 0), which made the old
        # sys.exit(1) after it unreachable -- the failure path reported
        # success.  Close the connections explicitly, then exit 1.
        time.sleep(1)
        fp.close()
        rad.close()
        sys.exit(1)
# Checks to see if the certificate was generated correctly
def certcheck(path, name):
cert = "/media/nss/VOL1/shared/madhs01rad1/certs/" + name + "_cert.p12"
# Checks to see if the certificate file was generated correctly
gen1 = subprocess.call(
[
"ssh",
"-q",
"-i",
"/home/lstrohm/.ssh/id_rsa",
"root@10.14.10.12",
"test -e " + pipes.quote(cert),
]
)
if gen1 == 0: # If the certificate exists, exits cleanly
print(Color.GREEN + "Certificate generated successfully!" + Color.END)
close()
sys.exit(0)
# If certificate generation failed, prompts user to try again
elif gen1 != 0:
print(Color.RED + "Certificate generation failed!" + Color.END)
close()
sys.exit(1)
# Entry point: show the credits banner, then start the request.
cred()
start(path, name)  # Initiates the script
|
<gh_stars>0
import requests
import json
from retriever_library import fixtures_stats_csv_generator
import time
import pandas as pd
import numpy as np
# Convert each season's raw fixtures JSON into a flat CSV under ../data/.
in_file_path = "serieA-fixtures/all-fixtures-"
for season in range(2010,2020):
    fixtures_stats_csv_generator(input_file_json=in_file_path + str(season) + ".json",
                                 out_file_csv_path="../data/" + in_file_path + str(season) + ".csv")
'''
Now we want to generate a file for each season with the following structure:
each row represents one team for that season.
Each row must contain the following elements:
 - team id
 - team name
 - one column for each round, holding the points the team has earned so far.
'''
#using pandas
# Annotate each season's fixtures CSV with a per-match "winner" column and
# strip the "Regular Season - " prefix from the round column.
for season in range(2010, 2020):
    data = pd.read_csv("../data/serieA-fixtures/all-fixtures-" + str(season) + ".csv")
    winners = []
    rounds = []
    #while reading each row of the dataframe i want to add a column containing the result of the match as follow
    #the name of the team that has won, otherwise the word "draw"
    for index, row in data.iterrows():
        if row["goals_home_team"] > row["goals_away_team"]:
            winners.append(row["home_team_name"])
        if row["goals_home_team"] < row["goals_away_team"]:
            winners.append(row["away_team_name"])
        if row["goals_home_team"] == row["goals_away_team"]:
            winners.append("draw")
        rounds.append(row["round"].replace("Regular Season - ", ""))
    # Pad unfinished seasons out to the full 380 fixtures.
    # NOTE(review): only `winners` is padded; `rounds` is not, so the
    # data["round"] assignment below would fail for a season with fewer than
    # 380 rows -- confirm against the input data.
    if(len(winners) != 380):
        for index in range(len(winners) - 1, 379):
            winners.append("TBD")
    np_winners = np.array(winners)
    data["winner"] = np_winners
    data["round"] = rounds
    data.to_csv("../data/" + in_file_path + str(season) + ".csv")
# Column labels round_1 .. round_38 for the per-round points table below.
cols = []
for index in range(0, 38):
    cols.append("round_" + str(index + 1))
#now we have to: for each season generate a new csv file containing the points, updated per each round for every team of such season
for season in range(2010, 2020):
    print("Processing season " + str(season))
    data = pd.read_csv("../data/serieA-fixtures/all-fixtures-" + str(season) + ".csv")
    data = data.sort_values(by='round', ascending=True)
    teams = data["home_team_name"]
    #getting the list of the unique teams names
    teams_set = set(teams)
    teams = list(teams_set)
    # points[team][r] = cumulative points of `team` after round r+1
    points = {}
    for team in teams:
        points[team] = []
        for i in range(0, 38):
            points[team].append(0)
    for index, row in data.iterrows():
        round_index = int(row["round"]) - 1
        if round_index == 0:
            # First round: totals start from zero (win=3, draw=1 each).
            if row["winner"] == "draw":
                points[row["home_team_name"]][round_index] += 1
                points[row["away_team_name"]][round_index] += 1
            else:
                points[row["winner"]][round_index] += 3
        else:
            # Later rounds: extend the running total from the previous round.
            if row["winner"] == "draw":
                points[row["home_team_name"]][round_index] = points[row["home_team_name"]][round_index - 1] + 1
                points[row["away_team_name"]][round_index] = points[row["away_team_name"]][round_index - 1] + 1
            else:
                points[row["winner"]][round_index] = points[row["winner"]][round_index - 1] + 3
                #managing the losing team
                if row["winner"] == row["home_team_name"]:
                    points[row["away_team_name"]][round_index] = points[row["away_team_name"]][round_index - 1]
                else:
                    points[row["home_team_name"]][round_index] = points[row["home_team_name"]][round_index - 1]
    #now we have to save the data on csv files!
    #print(points["Inter"], " Inter")
    #for each season we have, in dictionary points, the evolution of points per each team
    out_data = pd.DataFrame.from_dict(points, orient="index", columns=cols).reset_index()
    # Rank teams by their final (round 38) points total.
    out_data = out_data.sort_values(by="round_38", ascending=False)
    out_path = "../data/season_points/" + str(season) + "-points.csv"
    out_data.to_csv(out_path)
|
<gh_stars>0
"""Jumper_model.py: Create the agent of model of the one legged jumping tensegrity robot and provide an easy interface to be used with RL algorithms"""
__author__ = "<NAME>"
__credits__ = ["<NAME>", "<NAME>", "Prof. <NAME>", "<NAME>"]
__version__ = "1.0.0"
__email__ = "<EMAIL> / <EMAIL>"
__status__ = "Developing"
import socket
import sys
import signal
import json
from time import *
import os
import subprocess
import numpy as np
# Path to the compiled simulator binary, resolved from $TENSEGRITY_HOME;
# sim_exec launches it in a new gnome-terminal window.
path_to_model = os.path.join(os.environ["TENSEGRITY_HOME"], "build/dev/jumper/AppJumperModel")
sim_exec = "gnome-terminal -e {}".format(path_to_model)
class JumperModel():
    """TCP-server interface to the one-legged jumper tensegrity simulator.

    Opens a listening socket, launches the simulator executable as a child
    process that connects back, and exchanges JSON messages with it:
    controller actions out (``actions_json``), sensor state in
    (``sim_json``).
    """
    # NOTE(review): the list defaults below are mutable and shared across
    # instances; setStartingHeight() mutates starting_coordinates in place,
    # which would leak into later default-constructed instances — confirm.
    def __init__(self, host_name='localhost', port_num=10040, packet_size=5000,
                 sim_exec=sim_exec, dl=0.1, controllers_num=8, control_type="rest_length",
                 starting_coordinates=[0,100,0], starting_angle=[0,0], starting_leg_angle=[0,0]):
        self.starting_coordinates = starting_coordinates
        self.starting_angle = starting_angle
        self.starting_leg_angle = starting_leg_angle
        self.host_name = host_name
        self.port_num = port_num
        self.packet_size = packet_size
        # Outgoing message template: 8 controller commands plus a reset flag.
        self.actions_json = {
            'Controllers_val': [0,0,0,0,0,0,0,0],
            'Reset': 0
        }
        # Last state received from the simulator (initialised to zeros so
        # getters work before the first step).
        self.sim_json = {"Rest_cables_lengths":
                         [0,0,0,0,0,0,0,0],
                         "Current_cables_lengths":
                         [0,0,0,0,0,0,0,0],
                         "End_points":
                         [[0.,0.,0.], [0.,0.,0.], [0.,0.,0.], [0.,0.,0.],
                          [0.,0.,0.],[0.,0.,0.]],
                         "End_points_velocities":
                         [[0.,0.,0.], [0.,0.,0.], [0.,0.,0.], [0.,0.,0.],
                          [0.,0.,0.],[0.,0.,0.]],
                         "Leg_end_points_world":
                         [[0.,0.,0.], [0.,0.,0.]],
                         "Time": 0.,
                         "ZFinished": 1,
                         "Flags":[1,0,0]}
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # print(self.port_num)
        # port_num None means "let the OS pick a free port" (bind to 0).
        if(self.port_num is None):
            self.port_num = 0
        self.server_address = [self.host_name, self.port_num] # Bind the socket to the port
        self.connection = None
        self.client_address = None
        self.child_process = None
        print('#########\nstarting up on {}\n#########'.format(self.server_address))
        try:
            self.sock.bind(tuple(self.server_address))
        except socket.error as exc:
            # Port busy: retry once on the next port number.
            self.port_num += 1
            self.server_address[1] = self.port_num
            print('#########\nstarting up on {} after getting an error of busy port first\n#########'.format(self.server_address))
            self.sock.bind(tuple(self.server_address))
        print('#########\nConnected to port: {:}\n#########'.format(self.sock.getsockname()[1]))
        print('#########\nServer binding is finished\n#########')
        self.sock.listen(1) # Listen for incoming connections
        self.reset_flag = False
        self.close_flag = False
        self.dl = dl # Self modified parameter
        self.end_points_num = 6
        self.controllers_num = controllers_num # Self modified parameter
        # Indices (into End_points) of the leg's two end points.
        self.leg_end_points = [4,5]
        self.leg_length = 20
        # Record the port actually bound (relevant when the OS chose it).
        self.port_num = self.sock.getsockname()[1]
        self.control_type = control_type
        self.control_type_index = {"rest_length": 0, "current_length": 1, "rest_length_mod": 2, "current_length_mod": 3}
        self.orginal_sim_exec = sim_exec
        self.set_sim_exec(self.orginal_sim_exec)
    def set_sim_exec(self, sim_exec):
        # Rebuild the simulator command line with the current connection and
        # starting-pose parameters appended as positional arguments.
        self.sim_exec = sim_exec + ' {:} {:} {:} {:} {:} {:} {:} {:} {:} {:}'.format(self.host_name, self.port_num, self.control_type_index[self.control_type], self.starting_coordinates[0], self.starting_coordinates[1], self.starting_coordinates[2] , self.starting_angle[0], self.starting_angle[1], self.starting_leg_angle[0], self.starting_leg_angle[1])
        print("EXEC: {:}".format(self.sim_exec))
    def __del__(self):
        # Best-effort cleanup of the socket and child process.
        self.closeSimulator()
        # sys.exit(0)
    # function for writing data into TCP connection
    def write(self, data):
        # print('sending data to the client:"{}"'.format(data))
        try:
            self.connection.sendall(data.encode())
        except Exception as e:
            print("$$$$$$$$$$$$ ERROR in Writing $$$$$$$$$$$$")
            print("Error: " + str(e))
    # function for reading data from TCP connection
    def read(self):
        # Accumulate chunks until the 'ZFinished' sentinel (placed near the
        # end of each JSON message by the simulator) appears in the tail.
        try:
            data = []
            counter = 1
            # Receive the data in small chunks and retransmit it
            while True:
                data.append(self.connection.recv(self.packet_size).decode("utf-8")) #reading part
                # print('{} received "{}"'.format(counter,data[-1]))
                if 'ZFinished' in str(data[-1][-14:-1]):
                    break
                counter += 1
            return "".join(data)
        except ValueError:
            # NOTE(review): prints the ValueError *class*, not the caught
            # instance — likely meant `except ValueError as e: print(e)`.
            print(ValueError)
            print("$$$$$$$$$$$$ ERROR in Reading $$$$$$$$$$$$")
            # sleep(2)
            return None
    def startSimulator(self):
        # Launch the simulator subprocess and block until it connects back.
        self.close_flag = False
        self.reset_flag = False
        if(self.sim_exec == sim_exec):
            print("#Warning: Starting an old version")
        # sleep(0.5)
        # subprocess_args = [self.sim_exec[:14], self.sim_exec[15:]]
        #Machine with Xscreen
        # Pack everything after the first two tokens into one argument
        # (the terminal wrapper expects the full command as a single arg).
        subprocess_args = self.sim_exec.split(" ")
        subprocess_args[2] = " ".join(subprocess_args[2:])
        self.child_process = subprocess.Popen(subprocess_args[:3])
        #Headless
        # self.child_process = subprocess.Popen(self.sim_exec, shell=True)
        #print('#########\nwaiting for a connection\n#########')
        self.connection, self.clientAddress = self.sock.accept() #wait until it get a client
        #print('connection from', self.clientAddress)
    def closeSimulator(self):
        self.close_flag = True
        # kill the shell script of the simulator
        if self.connection is not None:
            self.connection.close()
        if self.child_process is not None:
            os.kill(self.child_process.pid, signal.SIGKILL)
    def render(self):
        # Rendering happens in the simulator itself; nothing to do here.
        pass
    def reset(self):
        # Restart the simulator process (fresh episode).
        self.reset_flag = True
        self.closeSimulator()
        # os.kill(self.child_process.pid, signal.SIGTERM)
        # sleep(1)
        self.startSimulator()
    def step(self):
        """Send the current actions_json and read back the new sim_json."""
        if (self.close_flag == False):
            if (self.reset_flag == True):
                self.reset()
            self.write(json.dumps(self.actions_json)) # Write to the simulator module the json object with the required info
            sim_raw_data = self.read()
            # print(sim_raw_data)
            if(sim_raw_data is not None):
                self.sim_json = json.loads(sim_raw_data) # Parse the data from string to json
            else:
                # Read failure: tear the connection down.
                self.closeSimulator()
    def getRestCablesLengths(self, i=None):
        # All rest lengths, or a single cable's if an index is given.
        if(i is None):
            return self.sim_json["Rest_cables_lengths"]
        return self.sim_json["Rest_cables_lengths"][i]
    def getCurrentCablesLengths(self, i=None):
        # All current lengths, or a single cable's if an index is given.
        if(i is None):
            return self.sim_json["Current_cables_lengths"]
        return self.sim_json["Current_cables_lengths"][i]
    def getEndPoints(self):
        end_points = []
        # Notice that the end_points are in the form (y,z,x) as it is coming from the simulator like this
        for i in range(self.end_points_num):
            end_points.append(self.sim_json["End_points"][i])
        return end_points
    def getEndPointsVelocities(self):
        end_points_velocities = []
        # Notice that the end_points are in the form (y,z,x) as it is coming from the simulator like this
        for i in range(self.end_points_num):
            end_points_velocities.append(self.sim_json["End_points_velocities"][i])
        return end_points_velocities
    def getLegEndPoints(self):
        # The two leg end points in world coordinates.
        return [self.sim_json["Leg_end_points_world"][0], self.sim_json["Leg_end_points_world"][1]]
    # point_a: is the end point of the leg from down
    # point_b: is the end point of the virtual horizontal leg from up
    # point_c: is the end point of the actual leg from up
    def getLegAngle(self):
        """Angle (rad) between the actual leg and a vertical reference leg."""
        point_a = np.array(self.sim_json["End_points"][self.leg_end_points[0]])
        point_b = [0,0,0]
        point_b[:] = point_a[:]
        # Virtual reference point straight above point_a (index 1 is the
        # vertical axis in the simulator's (y,z,x) convention).
        point_b[1] += self.leg_length
        point_c = np.array(self.sim_json["End_points"][self.leg_end_points[1]])
        v1 = point_b - point_a
        v2 = point_c - point_a
        dot_product = np.dot(v1,v2)
        v1_mag = np.linalg.norm(v1)
        v2_mag = np.linalg.norm(v2)
        # print("pointa", point_a)
        # print("pointb",point_b)
        # print("pointc", point_c)
        # print("v1", v1)
        # print("v2", v2)
        # print("dot_product", dot_product)
        # print("1 mag", v1_mag)
        # print("2 mag", v2_mag)
        # print("arccos", np.arccos(dot_product/(v1_mag*v2_mag)))
        angle = np.arccos(dot_product/(v1_mag*v2_mag))
        # print("angle", angle)
        return angle
    """
      (b)|
         |
      (a)|_____(c)
    """
    def getSquareSidesAngles(self):
        """Angles (rad) of two square sides relative to the horizontal
        plane through point_a, computed against projected helper points."""
        point_a = np.array(self.sim_json["End_points"][1])
        point_b = np.array(self.sim_json["End_points"][2])
        point_c = np.array(self.sim_json["End_points"][0])
        point_d = [0,0,0]
        point_e = [0,0,0]
        # d and e are b and c projected to point_a's height (same index-1
        # coordinate), giving horizontal reference vectors.
        point_d[:] = point_b[:]
        point_e[:] = point_c[:]
        point_d[1] = point_a[1]
        point_e[1] = point_a[1]
        # print("point_a: {:}".format(point_a))
        # print("point_b: {:}".format(point_b))
        # print("point_c: {:}".format(point_c))
        # print("point_d: {:}".format(point_d))
        # print("point_e: {:}".format(point_e))
        v_ab = point_b - point_a
        v_ac = point_c - point_a
        v_ad = point_d - point_a
        v_ae = point_e - point_a
        # print("v_ab: {:}".format(v_ab))
        # print("v_ac: {:}".format(v_ac))
        # print("v_ad: {:}".format(v_ad))
        # print("v_ae: {:}".format(v_ae))
        dot_v_ad_v_ab = np.dot(v_ad, v_ab)
        dot_v_ae_v_ac = np.dot(v_ae, v_ac)
        # print("v_ad . v_ab: {:}".format(dot_v_ad_v_ab))
        # print("v_ae . v_ac: {:}".format(dot_v_ae_v_ac))
        mag_v_ab = np.linalg.norm(v_ab)
        mag_v_ac = np.linalg.norm(v_ac)
        mag_v_ad = np.linalg.norm(v_ad)
        mag_v_ae = np.linalg.norm(v_ae)
        # print("mag_v_ab: {:}".format(mag_v_ab))
        # print("mag_v_ac: {:}".format(mag_v_ac))
        # print("mag_v_ad: {:}".format(mag_v_ad))
        # print("mag_v_ae: {:}".format(mag_v_ae))
        angle_x = np.arccos(dot_v_ad_v_ab/(mag_v_ad*mag_v_ab))
        angle_y = np.arccos(dot_v_ae_v_ac/(mag_v_ae*mag_v_ac))
        return [angle_x, angle_y]
    def getTime(self):
        # Simulation time reported by the simulator.
        return self.sim_json["Time"]
    def setStartingAngle(self, angle):
        # Takes effect on the next (re)start via the rebuilt command line.
        self.starting_angle = angle
        self.set_sim_exec(self.orginal_sim_exec)
    def setStartingLegAngle(self, angle):
        # Takes effect on the next (re)start via the rebuilt command line.
        self.starting_leg_angle = angle
        self.set_sim_exec(self.orginal_sim_exec)
    def setStartingHeight(self, height):
        # Mutates starting_coordinates in place (see class NOTE) and
        # rebuilds the command line.
        self.starting_coordinates[1] = height
        self.set_sim_exec(self.orginal_sim_exec)
def main():
    """Manual smoke test: start the simulator, reset it once, shut down."""
    jumper = JumperModel()
    jumper.actions_json["Controllers_val"][2] = 0
    jumper.actions_json["Controllers_val"][5] = 0
    def cleanExit(signal, frame):
        # Ctrl+C handler: release the socket/child process before exiting.
        print("HANDLER")
        jumper.__del__()
        exit()
    signal.signal(signal.SIGINT, cleanExit) # Activate the listen to the Ctrl+C
    jumper.startSimulator()
    sleep(1)
    jumper.reset()
    sleep(1)
    jumper.closeSimulator()
    sleep(5)
if __name__ == "__main__":
    main()
|
<filename>lagom/core/policies/base_gaussian_policy.py
from .base_policy import BasePolicy
import torch
from torch.distributions import Normal
class BaseGaussianPolicy(BasePolicy):
    """
    Base class of Gaussian policy (independent) for continuous action space.
    Action can be sampled from a Normal distribution.
    Note that the user-defined network should return a dictionary
    from its forward function. At least with the key ['mean', 'logvar'].
    It can also contain the key 'value' for the value function of actor-critic network.
    All inherited subclasses should implement the following function
    1. process_network_output(self, network_out)
    2. constraint_action(self, action)
    Examples:
        env = gym.make('Pendulum-v0')
        env_spec = EnvSpec(GymEnv(env))
        class MLP(BaseMLP):
            def make_params(self, config):
                self.fc1 = nn.Linear(in_features=3, out_features=32)
                self.mean_head = nn.Linear(in_features=32, out_features=1)
                self.logvar_head = nn.Linear(in_features=32, out_features=1)
            def init_params(self, config):
                gain = nn.init.calculate_gain(nonlinearity='relu')
                nn.init.orthogonal_(self.fc1.weight, gain=gain)
                nn.init.constant_(self.fc1.bias, 0.0)
                nn.init.orthogonal_(self.mean_head.weight, gain=gain)
                nn.init.constant_(self.mean_head.bias, 0.0)
                nn.init.orthogonal_(self.logvar_head.weight, gain=gain)
                nn.init.constant_(self.logvar_head.bias, 0.0)
            def forward(self, x):
                x = F.relu(self.fc1(x))
                mean = self.mean_head(x)
                logvar = self.logvar_head(x)
                # Output dictionary
                out = {}
                out['mean'] = mean
                out['logvar'] = logvar
                return out
        class GaussianPolicy(BaseGaussianPolicy):
            def process_network_output(self, network_out):
                return {}
            def constraint_action(self, action):
                return 2*torch.tanh(action)
        network = MLP(config=None)
        policy = GaussianPolicy(network=network, env_spec=env_spec)
    """
    def __call__(self, x):
        """Run the network on `x` and sample an action.

        Returns a dict with keys 'action', 'action_logprob', 'entropy',
        'perplexity', merged with whatever process_network_output() adds.
        """
        network_out = self.network(x)
        assert isinstance(network_out, dict) and 'mean' in network_out and 'logvar' in network_out
        # Get mean and logvar for the action
        mean = network_out['mean']
        logvar = network_out['logvar']
        # Obtain std: exp(0.5*log(std**2))
        std = torch.exp(0.5*logvar)
        # Create independent normal distribution
        action_dist = Normal(loc=mean, scale=std)
        # Sample an action from the distribution
        # We use PyTorch built-in reparameterized version, rsample()
        action = action_dist.rsample()
        # Calculate log-probability of sampled action
        action_logprob = action_dist.log_prob(action)
        # Calculate entropy of the policy conditional on state
        entropy = action_dist.entropy()
        # Calculate perplexity of the policy, i.e. exp(entropy)
        perplexity = action_dist.perplexity()
        # Constraint action with lower/upper bounds
        # TODO: where should we put before/after logprob ?
        # https://discuss.pytorch.org/t/should-action-log-probability-computed-after-or-before-constraining-the-action/20976
        # Note that it will be completely wrong if put constraint transformation
        # before computing the log-probability. Because log-prob with transformed action is
        # definitely a wrong value, it's equivalent to transformation of a Gaussian distribution
        # and compute transformed samples with Gaussian density.
        action = self.constraint_action(action)
        # User-defined function to process any possible other output
        processed_network_out = self.process_network_output(network_out)
        # Dictionary of output
        out = {}
        out['action'] = action
        out['action_logprob'] = action_logprob
        out['entropy'] = entropy
        out['perplexity'] = perplexity
        # Augment with dictionary returned from processed network output
        out = {**out, **processed_network_out}
        return out
    def constraint_action(self, action):
        """
        User-defined function to smoothly constraint the action with upper/lower bounds.
        The constraint must be smooth (differentiable), it is recommended to use functions
        like tanh, or sigmoid. For example the action is in the range of [-2, 2], one can define
        `constrained_action = 2*torch.tanh(action)`.
        If there is no need to constraint, then it is required to send the action back.
        i.e. `return action`
        Args:
            action (Tensor): action sampled from Normal distribution.
        Returns:
            constrained_action (Tensor): constrained action.
        """
        raise NotImplementedError
import regex as re
import requests
from time import sleep
from digi.xbee.devices import XBeeDevice, RemoteXBeeDevice, XBee64BitAddress
from digi.xbee.exception import TimeoutException
from datetime import datetime
class MSG_TYPES:
    """Integer message-type codes used in the HMS frame wire protocol."""
    ACKN = 0    # acknowledgement of a received frame
    SYNC = 1    # sync request from a sensor node
    UPDA = 2    # sensor data update
    SYNACK = 3  # sync acknowledgement sent by the gateway
class UpdatePayload:
    # Sensor readings carried by an UPDA frame (all parsed as ints).
    lightIntensity: int = 0
    temperature: int = 0
    batteryLevel: int = 0
    rssiToGateway: int = 0
    motionDetected: int = 0
class AckPayload:
    # Payload of an ACKN frame: the sequence number being acknowledged.
    seqNumToAck: int = 0
class SynAckPayload:
    # Payload of a SYNACK frame sent by the gateway to a syncing node.
    nodeId: int = 0       # node id assigned by the gateway
    utcSec: str = ""      # current time string ("%y:%m:%d:0%w:%H:%M:%S")
    defaultSleep: int = 0 # sleep interval (s) the node should adopt
class HMSFrame:
    # One frame of the HMS protocol; serialized as ';'-separated fields.
    seqNum: int = 0      # per-sender sequence number
    nodeId: int = 0      # logical id of the sending node
    srcAddr: int = 0     # sender address
    dstAddr: int = 0     # destination address; 0 addresses the gateway
    msgType: int = 0     # one of MSG_TYPES
    payloadLen: int = 0  # length of the encoded payload string
    payload = ""         # str on the wire; payload object once decoded
    cksum: int = 0       # checksum field (not currently verified)
class HMSGateway():
    """Gateway node of the Home Monitoring System.

    Receives ';'-separated HMS frames from sensor nodes over an XBee
    radio, acknowledges and syncs them, and posts sensor readings to a
    local REST backend.
    """
    SENSOR_NODE_ID = "SENSOR_NODE"
    SENSOR_NODE_ADDR = "0013A200416B4BA2"
    #SENSOR_NODE_ADDR = "0000000000000001"
    # REST endpoints of the local backend.
    nodeUrl = "http://127.0.0.1:8000/rest/node/"
    dataUrl = "http://127.0.0.1:8000/rest/data/"
    defaultSleep = 30          # sleep interval (s) handed out in SYNACK
    ACKS = []                  # sequence numbers acknowledged by nodes
    LAST_UPDA = []             # (nodeId, epoch s) of last stored update
    lastSyncedAt = []          # (nodeId, epoch s) of last sync per node
    src_node = None            # local XBeeDevice handle
    sequenceNum = 0            # next outgoing frame sequence number
    nodeID = 0                 # the gateway's own node id
    nodeAddr = 0               # the gateway's own address
    SYNC_IN_PROGRESS = False
    NODE_ID_WITH_ADDRESS = []  # (nodeId, srcAddr) id assignments
    def postNodeInfo(self, nodeID, rssi, motionDetected):
        # POST node-level status (RSSI, motion, timestamp) to the backend.
        postData = {
            "nodeId": nodeID,
            "rssi": rssi,
            "motionDetected": motionDetected,
            "updated_at": "{}".format(datetime.now())
        }
        requests.post(self.nodeUrl, data = postData)
    def postNodeData(self, nodeID, updatePayload):
        # POST one sensor reading to the backend.
        postData = {
            "fromNodeID": nodeID,
            "lightIntensity": updatePayload.lightIntensity,
            "temperature": updatePayload.temperature,
            "batteryLevel": updatePayload.batteryLevel
        }
        requests.post(self.dataUrl, data = postData)
    def encode_hms_frame(self, txFrame):
        """Serialize txFrame to bytes: 8 ';'-terminated fields."""
        txFrame.payloadLen, txFrame.payload = self.encode_hmsframe_payload(txFrame)
        frameAsStr = ''.join((
            str(txFrame.seqNum) + ";",
            str(txFrame.nodeId) + ";",
            str(txFrame.srcAddr) + ";",
            str(txFrame.dstAddr) + ";",
            str(txFrame.msgType) + ";",
            str(txFrame.payloadLen) + ";",
            str(txFrame.payload) + ";",
            str(txFrame.cksum) + ";",
        ))
        print(frameAsStr)
        return bytearray(frameAsStr, 'utf-8')
    def decode_hms_frame(self, rxMsg):
        """Parse a received frame string; returns an HMSFrame or None.

        Expects 9 split parts: 8 fields plus the empty string after the
        trailing ';'.
        """
        frameData = rxMsg.split(";")
        if len(frameData) != 9:
            return None
        rxFrame = HMSFrame()
        rxFrame.seqNum = int(frameData[0])
        rxFrame.nodeId = int(frameData[1])
        rxFrame.srcAddr = int(frameData[2])
        rxFrame.dstAddr = int(frameData[3])
        rxFrame.msgType = int(frameData[4])
        rxFrame.payloadLen = int(frameData[5])
        rxFrame.payload = frameData[6]
        rxFrame.cksum = int(frameData[7])
        # check cksum
        rxFrame.payload = self.decode_hmsframe_payload(rxFrame)
        return rxFrame
    def encode_hmsframe_payload(self, txFrame):
        """Serialize the frame's payload object to a '|'-separated string.

        Returns (length, string); (0, "") when there is nothing to encode.
        """
        if txFrame.payload == "":
            print("No payload in frame")
            return 0, ""
        if txFrame.msgType == MSG_TYPES.ACKN:
            print("ACK payload")
            ackPayloadAsStr = str(txFrame.payload.seqNumToAck) + "|"
            return len(ackPayloadAsStr), ackPayloadAsStr
        elif txFrame.msgType == MSG_TYPES.SYNACK:
            print("SYNACK payload")
            synAckPayloadAsStr = ''.join((
                str(txFrame.payload.nodeId) + "|",
                str(txFrame.payload.utcSec) + "|",
                str(txFrame.payload.defaultSleep) + "|",
            ))
            return len(synAckPayloadAsStr), synAckPayloadAsStr
        else:
            print("Payload not known")
            return 0, ""
    def decode_hmsframe_payload(self, rxFrame):
        """Parse the '|'-separated payload string into a payload object.

        Split counts include the empty element after the trailing '|'
        (2 for ACKN, 6 for UPDA).
        """
        if rxFrame.payloadLen == 0:
            return ""
        payload = rxFrame.payload.split("|")
        if rxFrame.msgType == MSG_TYPES.ACKN:
            if len(payload) != 2:
                return ""
            acknPayload = AckPayload()
            acknPayload.seqNumToAck = int(payload[0])
            return acknPayload
        elif rxFrame.msgType == MSG_TYPES.UPDA:
            if len(payload) != 6:
                return ""
            print("Updating")
            updatePayload = UpdatePayload()
            updatePayload.lightIntensity = int(payload[0])
            updatePayload.temperature = int(payload[1])
            updatePayload.batteryLevel = int(payload[2])
            updatePayload.rssiToGateway = int(payload[3])
            updatePayload.motionDetected = int(payload[4])
            return updatePayload
        elif rxFrame.msgType == MSG_TYPES.SYNC:
            # SYNC carries no payload fields.
            return ""
        else:
            print("Unknown msg type to decode")
            return ""
    def process_received_frame(self, rxFrame):
        """Dispatch a decoded frame addressed to the gateway (dstAddr 0)."""
        if rxFrame.dstAddr == 0:
            if rxFrame.msgType == MSG_TYPES.ACKN and rxFrame.payload != "":
                self.ACKS.append(rxFrame.payload.seqNumToAck)
                print("ACK RECEVIED")
            elif rxFrame.msgType == MSG_TYPES.SYNC:
                print("SYNC RECEVIED")
                self.handle_sync_request(rxFrame)
            elif rxFrame.msgType == MSG_TYPES.UPDA:
                print("UPDA RECEVIED")
                # Node id doesn't match our record for this address:
                # drop the stale mapping and force a re-sync.
                if rxFrame.nodeId != self.getNextSensorIdOrSync(rxFrame)[1]:
                    self.NODE_ID_WITH_ADDRESS = [item for item in self.NODE_ID_WITH_ADDRESS if item[1] != rxFrame.srcAddr]
                    self.handle_sync_request(rxFrame)
                else:
                    # Periodic re-sync (clock drift); otherwise ACK and store.
                    if self.store_node_sync_if_needed(rxFrame) == True:
                        self.handle_sync_request(rxFrame)
                    else:
                        txFrame = HMSFrame()
                        txFrame.msgType = MSG_TYPES.ACKN
                        txFrame.dstAddr = rxFrame.srcAddr
                        acknPayload = AckPayload()
                        acknPayload.seqNumToAck = rxFrame.seqNum
                        txFrame.payload = acknPayload
                        print("SENDING ACK")
                        self.send_HMS_Frame(txFrame)
                        sleep(0.2)
                        # Rate-limit persistence: store at most once per
                        # defaultSleep window per node.
                        current = int((datetime.utcnow()-datetime(1970,1,1)).total_seconds())
                        nodeNotFound = True
                        for i in range(0, len(self.LAST_UPDA)):
                            if self.LAST_UPDA[i][0] == rxFrame.nodeId:
                                nodeNotFound = False
                                if self.LAST_UPDA[i][1] < current - self.defaultSleep:
                                    self.LAST_UPDA[i] = (rxFrame.nodeId, current)
                                    self.postNodeData(rxFrame.nodeId, rxFrame.payload)
                                    self.postNodeInfo(rxFrame.nodeId, rxFrame.payload.rssiToGateway, rxFrame.payload.motionDetected)
                        if nodeNotFound == True:
                            self.LAST_UPDA.append((rxFrame.nodeId, current))
                            self.postNodeData(rxFrame.nodeId, rxFrame.payload)
                            self.postNodeInfo(rxFrame.nodeId, rxFrame.payload.rssiToGateway, rxFrame.payload.motionDetected)
            elif rxFrame.msgType == MSG_TYPES.SYNACK:
                print("SYNACK RECEVIED")
        else:
            print("Msg not for Gateway")
    def store_node_sync_if_needed(self, rxFrame):
        """Record the node's sync time; True when a re-sync is due (>600 s)."""
        nodeNotFound = True
        syncNode = False
        current = int((datetime.utcnow()-datetime(1970,1,1)).total_seconds())
        for i in range(0, len(self.lastSyncedAt)):
            if self.lastSyncedAt[i][0] == rxFrame.nodeId and self.lastSyncedAt[i][1] < (current - 600):
                self.lastSyncedAt[i] = (rxFrame.nodeId, current)
                nodeNotFound = False
                syncNode = True
        if nodeNotFound == True:
            self.lastSyncedAt.append((rxFrame.nodeId, current))
        return syncNode
    def send_HMS_Frame(self, txFrame):
        """Stamp gateway ids onto txFrame, broadcast it, and return its seqNum.

        NOTE(review): the frame is broadcast 5 times unconditionally
        (no break on success) — presumably redundancy for a lossy link;
        confirm this is intended.
        """
        txFrame.nodeId = self.nodeID
        txFrame.seqNum = self.sequenceNum
        txFrame.cksum = 0
        txFrame.srcAddr = self.nodeAddr
        encodedFrame = self.encode_hms_frame(txFrame)
        self.src_node.set_sync_ops_timeout(0.8)
        for i in range(0, 5):
            try:
                self.src_node.send_data_broadcast(encodedFrame)
            except Exception as e:
                # Best-effort send; failures are silently retried above.
                pass
        self.sequenceNum += 1
        return txFrame.seqNum
    def handle_sync_request(self, rxFrame):
        """Answer a SYNC/stale-UPDA with a SYNACK carrying id, time, sleep."""
        self.SYNC_IN_PROGRESS = True
        txFrame = HMSFrame()
        txFrame.msgType = MSG_TYPES.SYNACK
        txFrame.dstAddr = rxFrame.srcAddr
        synAckPayload = SynAckPayload()
        synAckPayload.nodeId = self.getNextSensorIdOrSync(rxFrame)[1]
        now = datetime.now()
        synAckPayload.utcSec = now.strftime("%y:%m:%d:0%w:%H:%M:%S")
        synAckPayload.defaultSleep = self.defaultSleep
        txFrame.payload = synAckPayload
        self.send_frame_and_wait_for_ack(txFrame, synAckPayload)
    def getNextSensorIdOrSync(self, rxFrame):
        """Return (known, nodeId) for the frame's source address,
        assigning and recording the next free id when unknown."""
        for item in self.NODE_ID_WITH_ADDRESS:
            if item[1] == rxFrame.srcAddr:
                return True, item[0]
        maxNodeId = len(self.NODE_ID_WITH_ADDRESS) + 1
        self.NODE_ID_WITH_ADDRESS.append((maxNodeId, rxFrame.srcAddr))
        return False, maxNodeId
    def data_receive_callback(self, frame):
        """XBee RX callback: extract the 'STR:'-prefixed message and process it."""
        if frame is not None:
            rx_data = frame.data.decode(errors='replace')
            if rx_data != "":
                rxMsg = rx_data.split("STR:")[1]
                if rxMsg != "":
                    # '#' characters are padding on the wire.
                    rxMsg = rxMsg.replace("#", "")
                    print(rxMsg)
                    hmsFrame = self.decode_hms_frame(rxMsg)
                    self.process_received_frame(hmsFrame)
    def send_frame_and_wait_for_ack(self, txFrame, payload, waitForAck=False):
        """Send txFrame up to 5 times, waiting 1 s for an ACK each round.

        `payload` is re-assigned each retry because send_HMS_Frame
        replaces txFrame.payload with its encoded string form.
        """
        max_retries = 5
        num_retry = 0
        while(num_retry < max_retries):
            seqNumToAck = self.send_HMS_Frame(txFrame)
            sleep(1)
            if seqNumToAck in self.ACKS:
                self.ACKS.remove(seqNumToAck)
                break
            num_retry += 1
            txFrame.payload = payload
            print("RETRYING - NO ACK RECEIVED")
    def init_and_open_xbee_device(self):
        """Prompt for serial settings and open the local XBee device.

        NOTE(review): the failure path also returns True, so runApp()'s
        `if not ret` check can never fire — looks like it should
        `return False` after the except block; confirm and fix.
        """
        serialPort = input("Serial Port [COM4]: ")
        if serialPort == "":
            serialPort = "COM4"
        bdrate = input("Baudrate [115200]: ")
        if bdrate == "":
            bdrate = 115200
        else:
            bdrate = int(bdrate)
        try:
            self.src_node = XBeeDevice(serialPort, bdrate)
            self.src_node.open()
            return True
        except Exception as e:
            pass
        return True
    ####################################
    def runApp(self):
        """Entry point: open the radio, register the RX callback, idle forever."""
        print("\n\n### HOME MONITORING SYSTEM - GATEWAY ###\n\n")
        ret = self.init_and_open_xbee_device()
        if not ret:
            print("Initialization failed -> check log\n")
        print("XBEE Device initialized\n")
        self.src_node.add_data_received_callback(self.data_receive_callback)
        print("# CALLBACK ADDED #\n")
        # All work happens on the XBee callback thread; keep the main
        # thread alive.
        while(1):
            sleep(1)
|
<filename>testcases/basic_func_tests/tc_008_storage_check.py<gh_stars>0
import sys
import os
from robot.libraries.BuiltIn import BuiltIn
from robot.api import logger
from decorators_for_robot_functionalities import *
from test_constants import *
# Make the shared test helpers importable from this test-case directory.
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '../libraries/common'))
import common_utils # noqa
# Shared Robot Framework library instances used by the steps below.
BuiltIn().import_library('pabot.PabotLib')
pabot = BuiltIn().get_library_instance('pabot.PabotLib')
execute = BuiltIn().get_library_instance('execute_command')
stack_infos = BuiltIn().get_library_instance('stack_infos')
# Name of the PersistentVolume bound by install_charts(); module-global so
# later steps can refer to it.
pv_name = ""
def tc_008_storage_check():
    """Entry keyword: run the persistent-volume write/retain/re-read scenario."""
    steps = [
        'step1_read_write_pv',
        'step2_check_pv_retaining',
        'step3_read_write_pv',
    ]
    BuiltIn().run_keyword("tc_008_storage_check.Setup")
    common_utils.keyword_runner(steps)
def Setup():
    # Serialize PV tests across parallel pabot runs, then install the charts.
    pabot.acquire_lock("pv_test_ip")
    install_charts()
def step1_read_write_pv():
    # Baseline: every pod can write to and read back from the shared PV.
    read_write_pv("step1.log")
@pabot_lock("health_check_2")
def step2_check_pv_retaining():
    # Delete the storage-test release, wait until its pods are gone, then
    # reinstall — data written in step1 must survive (the PV is retained).
    common_utils.helm_delete("storage-test")
    common_utils.check_kubernetes_object(kube_object=pv_test_pod,
                                         tester_function=common_utils.test_kubernetes_object_not_available,
                                         timeout=90)
    _install_storage_test_helm_chart()
    pabot.release_lock("pv_test_ip")
def step3_read_write_pv():
    # After reinstall: the volume still works and step1's data is visible.
    read_write_pv("step3.log")
def read_write_pv(file_name):
    """Write one marker line from every running pv-test pod into *file_name*
    on the shared volume, then verify every pod can read every marker."""
    pod_list = execute.execute_unix_command("kubectl get pod | grep pv-test-deployment | grep -i running | "
                                            "awk '{print $1}'")
    pods = [name.strip() for name in pod_list.split("\n")]
    # write log on persistent storage from pods
    for pod in pods:
        logger.info("POD NAME: " + pod)
        execute.execute_unix_command(
            "kubectl exec " + pod + " -- sh -c 'echo test_log_" + pod + " >> /usr/share/storage_test/" + file_name +
            "'")
    # check if logs can be reached from containers
    for pod in pods:
        log = execute.execute_unix_command(
            "kubectl exec " + pod + " -- sh -c 'cat /usr/share/storage_test/" + file_name + "'")
        for pod_log in pods:
            if pod_log not in log:
                raise Exception("Log entry: test_log_" + pod_log + " is not found in log file")
@pabot_lock("health_check_2")
def install_charts():
    # Install the PVC chart, wait until the claim is Bound, install the
    # storage-test chart, and record the bound PV's name for later steps.
    common_utils.helm_install(chart_name="default/persistentvolume-claim", release_name="pvc")
    common_utils.wait_if_pressure()
    common_utils.check_kubernetes_object(kube_object=pv_test_pvc,
                                         tester_function=common_utils.test_kubernetes_object_available,
                                         additional_filter="Bound", timeout=90)
    _install_storage_test_helm_chart()
    global pv_name # pylint: disable=global-statement
    pv_name = execute.execute_unix_command("kubectl get pvc | grep pvc- | awk {'print$3'}")
def _install_storage_test_helm_chart():
    """Install the storage-test chart (worker flavor when worker nodes
    exist, oam flavor otherwise) and wait until its pods are Running."""
    if stack_infos.get_worker_nodes():
        chart = "default/storage-test-worker"
    else:
        chart = "default/storage-test-oam"
    common_utils.helm_install(chart_name=chart, release_name="storage-test")
    common_utils.wait_if_pressure()
    common_utils.check_kubernetes_object(kube_object=pv_test_pod,
                                         tester_function=common_utils.test_kubernetes_object_available,
                                         additional_filter="Running", timeout=60)
|
# Load API credentials; scaffold a template auth.py on first run.
try:
    from auth import auth
except ImportError:
    # Only a *missing* auth module should trigger scaffolding — the old
    # bare `except:` would also overwrite an existing auth.py whose import
    # failed for an unrelated reason (e.g. a syntax error in it).
    with open("auth.py","w") as a:
        a.write("auth = ('<username>','<password>')")
    print("Add login info to auth.py!")
    # SystemExit works even without the `site` module that defines quit().
    raise SystemExit
import trainInfomation
import pygame
import datetime
import threading
import time
def firstLetterVowelDetect(string):
    """Return True if *string* starts with a vowel (case-insensitive).

    Fixes an IndexError in the original: an empty string now returns
    False instead of crashing on string[0].
    """
    return bool(string) and string[0].lower() in "aeiou"
def updateInfomation(code):
    # Background-thread loop: refresh the global `station` every 10 s
    # until the main thread sets the global `stopThread` flag.
    global station
    while True:
        station = trainInfomation.station(code)
        if stopThread:
            break
        time.sleep(10)
def TrainInfo(station):
    """Build the scrolling description text for the first departure."""
    train = station.trains[0]
    # "an" before operators starting with a vowel, "a" otherwise.
    article = "an" if firstLetterVowelDetect(train.operator) else "a"
    scrollText = f"This is {article} {train.operator} service to {train.destination}"
    scrollText = f"{scrollText} Calling at: "
    for stop in train.callingAt:
        if stop == train.callingAt[-1]:
            scrollText = f"{scrollText}and {stop}"
        else:
            scrollText = f"{scrollText}{stop}, "
    return scrollText
print("\n\nBritishTrainsDepartureBoard")
print("Powered by RealTimeTrains API (https://api.rtt.io/)")
print("--------------------------------------------------------")
code = input("Type in a station code: ")
print("Please wait")
station = trainInfomation.station(code)
# `shownStation` is the snapshot being rendered; it is swapped to the
# latest `station` only when the scrolling text wraps, so the ticker
# never changes mid-scroll.
shownStation = station
pygame.init()
clock = pygame.time.Clock() # Clock is capital C
height = 320
width = 1200
gameDisplay = pygame.display.set_mode((width, height))
pygame.display.set_caption(f'Train Infomation: {station.inputStaion} - {code.upper()}')
pygame.display.update()
font = pygame.font.SysFont(None, 75)
# Current x offset of the scrolling ticker (moves left 5 px per frame).
scrollTextAmount = 0
# Background thread refreshes `station` every 10 s (see updateInfomation).
updateThread = threading.Thread(target=updateInfomation, args=(code,))
stopThread = False
updateThread.start()
while True:
    current_time = datetime.datetime.now().strftime("%H:%M:%S")
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            # Signal the updater thread to stop before exiting.
            stopThread = True
            print("Closing update thread, this may take a few seconds...")
            quit()
    gameDisplay.fill((0,0,0))
    # Row 1: first departure (order, time, destination, expected info).
    TrainOrderIndicator1 = font.render("1st", True, (255, 165, 0))
    gameDisplay.blit(TrainOrderIndicator1, (20, 20))
    TrainTimeIndicator1 = font.render(f"{shownStation.trains[0].time}", True, (255, 165, 0))
    gameDisplay.blit(TrainTimeIndicator1, (140, 20))
    TrainDestinationIndicator1 = font.render(f"{shownStation.trains[0].destination}", True, (255, 165, 0))
    gameDisplay.blit(TrainDestinationIndicator1, (320, 20))
    TrainEstimation1 = font.render(f"{shownStation.trains[0].getExpectedInfo()}", True, (255, 165, 0))
    gameDisplay.blit(TrainEstimation1, (width - TrainEstimation1.get_rect().width-20, 20))
    # Scrolling calling-points ticker for the first departure.
    TrainInfomation1 = font.render(f"{TrainInfo(shownStation)}", True, (255, 165, 0))
    gameDisplay.blit(TrainInfomation1, (scrollTextAmount, 100))
    scrollTextAmount -= 5
    if scrollTextAmount < (TrainInfomation1.get_rect().width+5)*-1:
        # Ticker fully scrolled off: restart from the right edge and pick
        # up the freshest data from the updater thread.
        scrollTextAmount = width
        shownStation = station
    # Row 2: second departure.
    TrainOrderIndicator2 = font.render("2nd", True, (255, 165, 0))
    gameDisplay.blit(TrainOrderIndicator2, (20, 180))
    TrainTimeIndicator2 = font.render(f"{shownStation.trains[1].time}", True, (255, 165, 0))
    gameDisplay.blit(TrainTimeIndicator2, (140, 180))
    TrainDestinationIndicator2 = font.render(f"{shownStation.trains[1].destination}", True, (255, 165, 0))
    gameDisplay.blit(TrainDestinationIndicator2, (320, 180))
    TrainEstimation2 = font.render(f"{shownStation.trains[1].getExpectedInfo()}", True, (255, 165, 0))
    gameDisplay.blit(TrainEstimation2, (width - TrainEstimation2.get_rect().width-20, 180))
    # Clock centered at the bottom.
    CurrentTime = font.render(f"{current_time}", True, (255, 165, 0))
    gameDisplay.blit(CurrentTime, ((width / 2) - (CurrentTime.get_rect().width / 2), height - CurrentTime.get_rect().height-20))
    clock.tick(120)
    pygame.display.update()
"""
flask_security.datastore
~~~~~~~~~~~~~~~~~~~~~~~~
This module contains an user datastore classes.
:copyright: (c) 2012 by <NAME>.
:copyright: (c) 2019-2020 by <NAME> (jwag).
:license: MIT, see LICENSE for more details.
"""
import json
import uuid
from .utils import config_value
class Datastore:
    """Minimal persistence interface; ORM-specific backends override
    ``put``/``delete`` and, where sessions exist, ``commit``."""
    def __init__(self, db):
        self.db = db
    def commit(self):
        """Flush pending changes; a no-op by default."""
        pass
    def put(self, model):
        """Persist *model*; must be implemented by a backend subclass."""
        raise NotImplementedError
    def delete(self, model):
        """Remove *model*; must be implemented by a backend subclass."""
        raise NotImplementedError
class SQLAlchemyDatastore(Datastore):
    """Datastore backend for Flask-SQLAlchemy session objects."""
    def commit(self):
        """Commit the current SQLAlchemy session."""
        self.db.session.commit()
    def put(self, model):
        """Stage *model* for insert/update and return it."""
        self.db.session.add(model)
        return model
    def delete(self, model):
        """Stage *model* for deletion."""
        self.db.session.delete(model)
class MongoEngineDatastore(Datastore):
    """Datastore backend for MongoEngine documents (save/delete directly)."""
    def put(self, model):
        """Save *model* immediately and return it."""
        model.save()
        return model
    def delete(self, model):
        """Delete *model* immediately."""
        model.delete()
class PeeweeDatastore(Datastore):
    """Datastore backend for peewee models."""
    def put(self, model):
        """Save *model* immediately and return it."""
        model.save()
        return model
    def delete(self, model):
        """Delete *model* and its dependent rows."""
        model.delete_instance(recursive=True)
def with_pony_session(f):
    """Decorator: run *f* inside a Pony ORM ``db_session``.

    A session is opened only when this call is the outermost one
    (``db_context_counter == 0``) and a Flask app/request context exists;
    it is closed after the current request, or when the app context pops.
    """
    from functools import wraps
    @wraps(f)
    def decorator(*args, **kwargs):
        from pony.orm import db_session
        from pony.orm.core import local
        from flask import (
            after_this_request,
            current_app,
            has_app_context,
            has_request_context,
        )
        from flask.signals import appcontext_popped
        # Only the outermost call is responsible for the session lifecycle.
        register = local.db_context_counter == 0
        if register and (has_app_context() or has_request_context()):
            db_session.__enter__()
        result = f(*args, **kwargs)
        if register:
            if has_request_context():
                # Close the session once the current request finishes.
                @after_this_request
                def pop(request):
                    db_session.__exit__()
                    return request
            elif has_app_context():
                # Close any remaining sessions when the app context pops.
                @appcontext_popped.connect_via(current_app._get_current_object())
                def pop(sender, *args, **kwargs):
                    while local.db_context_counter:
                        db_session.__exit__()
            else:
                raise RuntimeError("Needs app or request context")
        return result
    return decorator
class PonyDatastore(Datastore):
    # Pony ORM backend; mutating operations run inside a db_session.
    def commit(self):
        self.db.commit()
    @with_pony_session
    def put(self, model):
        # Pony tracks entities itself; exiting the session persists them.
        return model
    @with_pony_session
    def delete(self, model):
        model.delete()
class UserDatastore:
"""Abstracted user datastore.
:param user_model: A user model class definition
:param role_model: A role model class definition
.. important::
For mutating operations, the user/role will be added to the
datastore (by calling self.put(<object>). If the datastore is session based
(such as for SQLAlchemyDatastore) it is up to caller to actually
commit the transaction by calling datastore.commit().
"""
    def __init__(self, user_model, role_model):
        # Model *classes* (not instances) used to create/query users/roles.
        self.user_model = user_model
        self.role_model = role_model
def _prepare_role_modify_args(self, role):
if isinstance(role, str):
role = self.find_role(role)
return role
    def _prepare_create_user_args(self, **kwargs):
        """Normalize user-creation kwargs: default ``active`` to True,
        resolve role names to Role instances (in place), and ensure an
        ``fs_uniquifier`` is present."""
        kwargs.setdefault("active", True)
        roles = kwargs.get("roles", [])
        for i, role in enumerate(roles):
            rn = role.name if isinstance(role, self.role_model) else role
            # see if the role exists
            roles[i] = self.find_role(rn)
        kwargs["roles"] = roles
        kwargs.setdefault("fs_uniquifier", uuid.uuid4().hex)
        return kwargs
    def find_user(self, *args, **kwargs):
        """Returns a user matching the provided parameters.

        Backend-specific; implemented by the per-ORM datastore subclass.
        """
        raise NotImplementedError
    def find_role(self, *args, **kwargs):
        """Returns a role matching the provided name.

        Backend-specific; implemented by the per-ORM datastore subclass.
        """
        raise NotImplementedError
def add_role_to_user(self, user, role):
"""Adds a role to a user.
:param user: The user to manipulate. Can be an User object or email
:param role: The role to add to the user. Can be a Role object or
string role name
:return: True is role was added, False if role already existed.
"""
role = self._prepare_role_modify_args(role)
if role not in user.roles:
user.roles.append(role)
self.put(user)
return True
return False
def remove_role_from_user(self, user, role):
"""Removes a role from a user.
:param user: The user to manipulate. Can be an User object or email
:param role: The role to remove from the user. Can be a Role object or
string role name
:return: True if role was removed, False if role doesn't exist or user didn't
have role.
"""
rv = False
role = self._prepare_role_modify_args(role)
if role in user.roles:
rv = True
user.roles.remove(role)
self.put(user)
return rv
def add_permissions_to_role(self, role, permissions):
"""Add one or more permissions to role.
:param role: The role to modify. Can be a Role object or
string role name
:param permissions: a set, list, or single string.
:return: True if permissions added, False if role doesn't exist.
Caller must commit to DB.
.. versionadded:: 4.0.0
"""
rv = False
role = self._prepare_role_modify_args(role)
if role:
rv = True
role.add_permissions(permissions)
self.put(role)
return rv
def remove_permissions_from_role(self, role, permissions):
"""Remove one or more permissions from a role.
:param role: The role to modify. Can be a Role object or
string role name
:param permissions: a set, list, or single string.
:return: True if permissions removed, False if role doesn't exist.
Caller must commit to DB.
.. versionadded:: 4.0.0
"""
rv = False
role = self._prepare_role_modify_args(role)
if role:
rv = True
role.remove_permissions(permissions)
self.put(role)
return rv
def toggle_active(self, user):
"""Toggles a user's active status. Always returns True."""
user.active = not user.active
self.put(user)
return True
def deactivate_user(self, user):
"""Deactivates a specified user. Returns `True` if a change was made.
This will immediately disallow access to all endpoints that require
authentication either via session or tokens.
The user will not be able to log in again.
:param user: The user to deactivate
"""
if user.active:
user.active = False
self.put(user)
return True
return False
def activate_user(self, user):
"""Activates a specified user. Returns `True` if a change was made.
:param user: The user to activate
"""
if not user.active:
user.active = True
self.put(user)
return True
return False
def set_uniquifier(self, user, uniquifier=None):
"""Set user's Flask-Security identity key.
This will immediately render outstanding auth tokens,
session cookies and remember cookies invalid.
:param user: User to modify
:param uniquifier: Unique value - if none then uuid.uuid4().hex is used
This method is a no-op if the user model doesn't contain the attribute
``fs_uniquifier``
.. versionadded:: 3.3.0
"""
if not uniquifier:
uniquifier = uuid.uuid4().hex
user.fs_uniquifier = uniquifier
self.put(user)
def create_role(self, **kwargs):
"""
Creates and returns a new role from the given parameters.
Supported params (depending on RoleModel):
:kwparam name: Role name
:kwparam permissions: a comma delimited list of permissions, a set or a list.
These are user-defined strings that correspond to strings used with
@permissions_required()
.. versionadded:: 3.3.0
"""
# By default we just use raw DB model create - for permissions we want to
# be nicer and allow sending in a list or set or comma separated string.
if "permissions" in kwargs and hasattr(self.role_model, "permissions"):
perms = kwargs["permissions"]
if isinstance(perms, list) or isinstance(perms, set):
perms = ",".join(perms)
elif isinstance(perms, str):
# squash spaces.
perms = ",".join([p.strip() for p in perms.split(",")])
kwargs["permissions"] = perms
role = self.role_model(**kwargs)
return self.put(role)
def find_or_create_role(self, name, **kwargs):
"""Returns a role matching the given name or creates it with any
additionally provided parameters.
"""
kwargs["name"] = name
return self.find_role(name) or self.create_role(**kwargs)
def create_user(self, **kwargs):
"""Creates and returns a new user from the given parameters.
:kwparam email: required.
:kwparam password: <PASSWORD>.
:kwparam roles: list of roles to be added to user.
Can be Role objects or strings
.. note::
No normalization is done on email - it is assumed the caller has already
done that.
.. danger::
Be aware that whatever `password` is passed in will
be stored directly in the DB. Do NOT pass in a plaintext password!
Best practice is to pass in ``hash_password(plaintext_password)``.
Furthermore, no validation nor normalization is done on the password
(e.g for minimum length).
Best practice is::
pbad, pnorm = app.security._password_util.validate(password, True)
Look for `pbad` being None. Pass the normalized password `pnorm` to this
method.
The new user's ``active`` property will be set to ``True``
unless explicitly set to ``False`` in `kwargs`.
"""
kwargs = self._prepare_create_user_args(**kwargs)
user = self.user_model(**kwargs)
return self.put(user)
def delete_user(self, user):
"""Deletes the specified user.
:param user: The user to delete
"""
self.delete(user)
def reset_user_access(self, user):
"""
Use this method to reset user authentication methods in the case of compromise.
This will:
* reset fs_uniquifier - which causes session cookie, remember cookie, auth
tokens to be unusable
* remove all unified signin TOTP secrets so those can't be used
* remove all two-factor secrets so those can't be used
Note that if using unified sign in and allow 'email' as a way to receive a code;
if the email is compromised - login is still possible. To handle this - it
is better to deactivate the user.
Note - this method isn't used directly by Flask-Security - it is provided
as a helper for an application's administrative needs.
Remember to call commit on DB if needed.
.. versionadded:: 3.4.1
"""
self.set_uniquifier(user)
if hasattr(user, "us_totp_secrets"):
self.us_reset(user)
if hasattr(user, "tf_primary_method"):
self.tf_reset(user)
def tf_set(self, user, primary_method, totp_secret=None, phone=None):
"""Set two-factor info into user record.
This carefully only changes things if different.
If totp_secret isn't provided - existing one won't be changed.
If phone isn't provided, the existing phone number won't be changed.
This could be called from an application to apiori setup a user for two factor
without the user having to go through the setup process.
To get a totp_secret - use ``app.security._totp_factory.generate_totp_secret()``
.. versionadded: 3.4.1
"""
changed = False
if user.tf_primary_method != primary_method:
user.tf_primary_method = primary_method
changed = True
if totp_secret and user.tf_totp_secret != totp_secret:
user.tf_totp_secret = totp_secret
changed = True
if phone and user.tf_phone_number != phone:
user.tf_phone_number = phone
changed = True
if changed:
self.put(user)
def tf_reset(self, user):
"""Disable two-factor auth for user
.. versionadded: 3.4.1
"""
user.tf_primary_method = None
user.tf_totp_secret = None
user.tf_phone_number = None
self.put(user)
def us_get_totp_secrets(self, user):
"""Return totp secrets.
These are json encoded in the DB.
Returns a dict with methods as keys and secrets as values.
.. versionadded:: 3.4.0
"""
if not user.us_totp_secrets:
return {}
return json.loads(user.us_totp_secrets)
def us_put_totp_secrets(self, user, secrets):
"""Save secrets. Assume to be a dict (or None)
with keys as methods, and values as (encrypted) secrets.
.. versionadded:: 3.4.0
"""
user.us_totp_secrets = json.dumps(secrets) if secrets else None
self.put(user)
def us_set(self, user, method, totp_secret=None, phone=None):
"""Set unified sign in info into user record.
If totp_secret isn't provided - existing one won't be changed.
If phone isn't provided, the existing phone number won't be changed.
This could be called from an application to apiori setup a user for unified
sign in without the user having to go through the setup process.
To get a totp_secret - use ``app.security._totp_factory.generate_totp_secret()``
.. versionadded: 3.4.1
"""
if totp_secret:
totp_secrets = self.us_get_totp_secrets(user)
totp_secrets[method] = totp_secret
self.us_put_totp_secrets(user, totp_secrets)
if phone and user.us_phone_number != phone:
user.us_phone_number = phone
self.put(user)
def us_reset(self, user):
"""Disable unified sign in for user.
Be aware that if "email" is an allowed way to receive codes, they
will still work (as totp secrets are generated on the fly).
This will disable authenticator app and SMS.
.. versionadded: 3.4.1
"""
user.us_totp_secrets = None
user.us_phone_number = None
self.put(user)
class SQLAlchemyUserDatastore(SQLAlchemyDatastore, UserDatastore):
    """A SQLAlchemy datastore implementation for Flask-Security that assumes the
    use of the Flask-SQLAlchemy extension.
    """
    def __init__(self, db, user_model, role_model):
        SQLAlchemyDatastore.__init__(self, db)
        UserDatastore.__init__(self, user_model, role_model)
    def find_user(self, case_insensitive=False, **kwargs):
        """Return the first user matching ``kwargs``, or None.
        :param case_insensitive: if True, exactly one kwarg may be given and
            it is compared case-insensitively via SQL ``lower()``.
        """
        from sqlalchemy import func as alchemyFn
        query = self.user_model.query
        if config_value("JOIN_USER_ROLES") and hasattr(self.user_model, "roles"):
            # Eager-load roles in the same query to avoid an N+1 lookup.
            # NOTE(review): passing a string to joinedload() is deprecated in
            # SQLAlchemy 1.4+ - confirm the targeted SQLAlchemy version.
            from sqlalchemy.orm import joinedload
            query = query.options(joinedload("roles"))
        if case_insensitive:
            # While it is of course possible to pass in multiple keys to filter on
            # that isn't the normal use case. If caller asks for case_insensitive
            # AND gives multiple keys - throw an error.
            if len(kwargs) > 1:
                raise ValueError("Case insensitive option only supports single key")
            attr, identifier = kwargs.popitem()
            subquery = alchemyFn.lower(
                getattr(self.user_model, attr)
            ) == alchemyFn.lower(identifier)
            return query.filter(subquery).first()
        else:
            return query.filter_by(**kwargs).first()
    def find_role(self, role):
        """Return the role with the given name, or None."""
        return self.role_model.query.filter_by(name=role).first()
class SQLAlchemySessionUserDatastore(SQLAlchemyUserDatastore, SQLAlchemyDatastore):
    """A SQLAlchemy datastore implementation for Flask-Security that assumes the
    use of the flask_sqlalchemy_session extension.
    """
    def __init__(self, session, user_model, role_model):
        class PretendFlaskSQLAlchemyDb:
            """This is a pretend db object, so we can just pass in a session."""
            def __init__(self, session):
                self.session = session
        SQLAlchemyUserDatastore.__init__(
            self, PretendFlaskSQLAlchemyDb(session), user_model, role_model
        )
    def commit(self):
        # Delegates to SQLAlchemyDatastore.commit() via the MRO.
        super().commit()
class MongoEngineUserDatastore(MongoEngineDatastore, UserDatastore):
    """A MongoEngine datastore implementation for Flask-Security that assumes
    the use of the Flask-MongoEngine extension.
    """
    def __init__(self, db, user_model, role_model):
        MongoEngineDatastore.__init__(self, db)
        UserDatastore.__init__(self, user_model, role_model)
    def find_user(self, case_insensitive=False, **kwargs):
        """Return the first user matching ``kwargs``, or None.
        Invalid query values (e.g. a malformed ObjectId) return None rather
        than raising.
        """
        from mongoengine.queryset.visitor import Q, QCombination
        from mongoengine.errors import ValidationError
        try:
            if case_insensitive:
                # While it is of course possible to pass in multiple keys to filter on
                # that isn't the normal use case. If caller asks for case_insensitive
                # AND gives multiple keys - throw an error.
                if len(kwargs) > 1:
                    raise ValueError("Case insensitive option only supports single key")
                attr, identifier = kwargs.popitem()
                # __iexact performs a case-insensitive exact match.
                query = {f"{attr}__iexact": identifier}
                return self.user_model.objects(**query).first()
            else:
                # AND together one Q() per keyword filter.
                queries = map(lambda i: Q(**{i[0]: i[1]}), kwargs.items())
                query = QCombination(QCombination.AND, queries)
                return self.user_model.objects(query).first()
        except ValidationError:  # pragma: no cover
            return None
    def find_role(self, role):
        """Return the role with the given name, or None."""
        return self.role_model.objects(name=role).first()
class PeeweeUserDatastore(PeeweeDatastore, UserDatastore):
    """A Peewee datastore implementation for Flask-Security that assumes the
    use of Peewee Flask utils.
    :param user_model: A user model class definition
    :param role_model: A role model class definition
    :param role_link: A model implementing the many-to-many user-role relation
    """
    def __init__(self, db, user_model, role_model, role_link):
        PeeweeDatastore.__init__(self, db)
        UserDatastore.__init__(self, user_model, role_model)
        # Peewee has no native many-to-many field here; association rows are
        # managed explicitly through this link model.
        self.UserRole = role_link
    def find_user(self, case_insensitive=False, **kwargs):
        """Return the first matching user, or None if there is no match."""
        from peewee import fn as peeweeFn
        try:
            if case_insensitive:
                # While it is of course possible to pass in multiple keys to filter on
                # that isn't the normal use case. If caller asks for case_insensitive
                # AND gives multiple keys - throw an error.
                if len(kwargs) > 1:
                    raise ValueError("Case insensitive option only supports single key")
                attr, identifier = kwargs.popitem()
                return self.user_model.get(
                    peeweeFn.lower(getattr(self.user_model, attr))
                    == peeweeFn.lower(identifier)
                )
            else:
                return self.user_model.filter(**kwargs).get()
        except self.user_model.DoesNotExist:
            return None
    def find_role(self, role):
        """Return the role with the given name, or None."""
        try:
            return self.role_model.filter(name=role).get()
        except self.role_model.DoesNotExist:
            return None
    def create_user(self, **kwargs):
        """Creates and returns a new user from the given parameters."""
        # Roles are attached through the link model after the user row
        # exists, so strip them out of the creation kwargs first.
        roles = kwargs.pop("roles", [])
        user = self.user_model(**self._prepare_create_user_args(**kwargs))
        user = self.put(user)
        for role in roles:
            self.add_role_to_user(user, role)
        self.put(user)
        return user
    def add_role_to_user(self, user, role):
        """Adds a role to a user.
        :param user: The user to manipulate
        :param role: The role to add to the user
        :return: True if the link row was created, False if it already existed
        """
        role = self._prepare_role_modify_args(role)
        result = self.UserRole.select().where(
            self.UserRole.user == user.id, self.UserRole.role == role.id
        )
        if result.count():
            return False
        else:
            self.put(self.UserRole.create(user=user.id, role=role.id))
            return True
    def remove_role_from_user(self, user, role):
        """Removes a role from a user.
        :param user: The user to manipulate
        :param role: The role to remove from the user
        :return: True if a link row was deleted, False if none existed
        """
        role = self._prepare_role_modify_args(role)
        # NOTE(review): this filters on model instances while
        # add_role_to_user filters on .id - peewee coerces both, but the
        # styles should probably be unified.
        result = self.UserRole.select().where(
            self.UserRole.user == user, self.UserRole.role == role
        )
        if result.count():
            query = self.UserRole.delete().where(
                self.UserRole.user == user, self.UserRole.role == role
            )
            query.execute()
            return True
        else:
            return False
class PonyUserDatastore(PonyDatastore, UserDatastore):
    """A Pony ORM datastore implementation for Flask-Security.
    Code primarily from https://github.com/ET-CS but taken over after
    being abandoned.
    """
    def __init__(self, db, user_model, role_model):
        PonyDatastore.__init__(self, db)
        UserDatastore.__init__(self, user_model, role_model)
    @with_pony_session
    def find_user(self, case_insensitive=False, **kwargs):
        """Return the first matching user (or None); runs in a db_session."""
        if case_insensitive:
            # While it is of course possible to pass in multiple keys to filter on
            # that isn't the normal use case. If caller asks for case_insensitive
            # AND gives multiple keys - throw an error.
            if len(kwargs) > 1:
                raise ValueError("Case insensitive option only supports single key")
            # TODO - implement case insensitive look ups.
        return self.user_model.get(**kwargs)
    @with_pony_session
    def find_role(self, role):
        """Return the role with the given name, or None; runs in a db_session."""
        return self.role_model.get(name=role)
    @with_pony_session
    def add_role_to_user(self, *args, **kwargs):
        # Same semantics as UserDatastore.add_role_to_user, wrapped in a
        # Pony db_session.
        return super().add_role_to_user(*args, **kwargs)
    @with_pony_session
    def create_user(self, **kwargs):
        # Same semantics as UserDatastore.create_user, wrapped in a db_session.
        return super().create_user(**kwargs)
    @with_pony_session
    def create_role(self, **kwargs):
        # Same semantics as UserDatastore.create_role, wrapped in a db_session.
        return super().create_role(**kwargs)
|
# std
from __future__ import annotations
from abc import ABC, abstractclassmethod, abstractmethod
from typing import Callable, List, Optional, Text, Tuple, Union
# 3rd party
import numpy as np
from math import degrees, atan2
from blessed import Terminal
# local
from .exceptions import (BorderOutOfBounds, CellOutOfBounds, ElementNotPlaced,
InvalidAttributes, InvalidElement, InvalidLayout,
PaddingOverflow, RectangleTooSmall)
from .helpers import gaussian, getFirstAssigned
from .constants import (BorderStyle, Direction, HAlignment, Layout, Response,
VAlignment, State, Side, WindowState, MAX_ANGLE)
class Point():
    """An integer (x, y) coordinate pair supporting ``+``, ``-`` and ``str()``."""
    def __init__(self, x: int, y: int) -> None:
        self.x = x
        self.y = y
    def __add__(self, p: "Point") -> "Point":
        # Component-wise addition; both operands are left untouched.
        return Point(p.x + self.x, p.y + self.y)
    def __sub__(self, p: "Point") -> "Point":
        # Component-wise subtraction (self minus p).
        return Point(self.x - p.x, self.y - p.y)
    def __str__(self) -> str:
        return "({},{})".format(self.x, self.y)
class Element(ABC):
    """Base class for every widget in the layout tree.
    An element registers itself with its *parent* on construction and must
    later be positioned with :meth:`place` (absolute layout) or :meth:`grid`
    (grid layout) before it can be drawn.
    :param parent: containing frame (or window) this element attaches to
    :param width: element width in character cells
    :param height: element height in character cells
    """
    def __init__(self, parent: Parent, width: int, height: int) -> None:
        self.border: Optional[Box] = None  # assigned once the element is placed
        self.parent = parent
        self.parent.addElement(self)
        self.width = width
        self.height = height
        self.active = False
    def getWidth(self) -> int:
        return self.width
    def getHeight(self) -> int:
        return self.height
    def place(self, x: int, y: int) -> None:
        """Position the element at (x, y). Works with Layout.ABSOLUTE.
        :raises InvalidLayout: if the parent is not an AbsoluteFrame
        """
        if not isinstance(self.parent, AbsoluteFrame):
            raise InvalidLayout("Frame is not of type AbsoluteFrame")
        self.border = self.parent.placeElement(self, x, y)
        self.activate()
    def grid(self,
             column: int,
             row: int,
             rowspan: int = 1,
             columnspan: int = 1,
             padx: int = 0,
             pady: int = 0) -> None:
        """Position the element in a cell (or cell span) of a GridFrame.
        :raises InvalidLayout: if the parent is not a GridFrame
        """
        if not isinstance(self.parent, GridFrame):
            raise InvalidLayout("Frame is not of type GridFrame")
        self.border = self.parent.placeElement(element=self,
                                               padx=padx,
                                               pady=pady,
                                               row=row,
                                               column=column,
                                               rowspan=rowspan,
                                               columnspan=columnspan)
        self.activate()
    def getWindow(self) -> Window:
        return self.parent.getWindow()
    def getBorder(self) -> Box:
        """Return the element's bounding Box.
        :raises ElementNotPlaced: if the element has not been placed yet
        """
        if self.border is None:
            raise ElementNotPlaced("Element must be placed before drawing")
        return self.border
    def isActive(self) -> bool:
        return self.active
    def activate(self, draw: bool = True) -> None:
        """Mark the element visible and, unless *draw* is False, draw it."""
        self.active = True
        if draw:
            self.draw()
    def deactivate(self) -> None:
        """Mark the element invisible and blank out its screen area."""
        self.active = False
        self.clear()
    def toggle(self) -> None:
        if self.active:
            self.deactivate()
        else:
            self.activate()
    def isPlaced(self) -> bool:
        # Truthiness of self.border (None until placed).
        return bool(self.border)
    def raiseIfNotPlaced(self) -> None:
        if not self.isPlaced():
            raise ElementNotPlaced(
                "Element must first be placed on parent frame")
    @abstractmethod
    def draw(self) -> None:
        """Render the element inside its border; implemented by subclasses."""
        pass
    def clear(self) -> None:
        """Overwrite the element's area with spaces (in the parent's bg color
        when the parent is a Frame with one)."""
        self.raiseIfNotPlaced()
        # Remove after MainFrame solution TODO
        command = ''
        if isinstance(self.parent, Frame):
            bg_color = self.parent.getStyle().bg_color
            if bg_color:
                command += bg_color
        else:
            command += self.getWindow().term.normal
        # Remove after MainFrame solution
        for row in range(self.getBorder().getEdge(Side.TOP),
                         self.getBorder().getEdge(Side.BOTTOM) + 1):
            command += self.getWindow().moveXY(
                Point(self.getBorder().getEdge(Side.LEFT), row))
            command += " " * self.getBorder().getWidth()
        print(command)
    def remove(self) -> None:
        """Deactivate this element and detach it from its parent frame."""
        self.deactivate()
        self.parent.removeElement(self)
class HasText(ABC):
    """Mixin for elements that display text with alignment and padding.
    :param text: the text to display, or None for no text
    :param padding: four padding values; indices 1 and 3 pad the x axis,
        indices 0 and 2 pad the y axis
    :param h_align: horizontal alignment of the text
    :param v_align: vertical alignment of the text
    :param width: total element width, including padding
    :param height: total element height, including padding
    :raises PaddingOverflow: if the padding does not fit within width/height
    """
    def __init__(self, text: Optional[str], padding: List[int],
                 h_align: HAlignment, v_align: VAlignment, width: int,
                 height: int) -> None:
        self.setText(text)
        self.h_align = h_align
        self.v_align = v_align
        self.setPadding(padding, width, height)
    def setText(self, text: Optional[str]) -> None:
        self.text = text
    def setPadding(self, padding: List[int], width: int, height: int) -> None:
        """Validate that *padding* fits inside *width* x *height* and store it."""
        if width < padding[1] + padding[3]:
            raise PaddingOverflow(
                "Amount of padding on x axis exceeds element width")
        if height < padding[0] + padding[2]:
            raise PaddingOverflow(
                "Amount of padding on y axis exceeds element height")
        self.padding = padding
class Visible(Element):  # Frame vs Label
    """An Element that is rendered with a BoxStyle and tracks a UI State."""
    def __init__(
            self,
            parent: Parent,
            width: int,
            height: int,  # Element
            style: Optional[BoxStyle] = None) -> None:  # Visible
        super().__init__(parent, width, height)
        self.setStyle(style)
        self.state = State.IDLE
    @abstractmethod
    def constructDefaultStyle(self, style: Optional[BoxStyle]) -> BoxStyle:
        """Return the effective style for this element; each subclass picks
        its own defaults and inheritance vector."""
        pass
    def constructDefaultStyleTemplate(
            self,
            default_style: BoxStyle,
            style: Optional[BoxStyle] = None,
            inheritance_vector: Tuple[bool, bool, bool,
                                      bool] = (False, False, False, False)
    ) -> BoxStyle:
        """
        Constructs the default style with the following priority, descending:
        1. Given style
        2. Inherited style (from the parent, where enabled)
        3. Default style
        The inheritance vector controls which features are inherited and has
        the form (bg_color, text_style, border_color, border_style).
        """
        if style is None:
            style = BoxStyle()
        inheritanceStyle = BoxStyle()
        if not isinstance(self.parent, Window):  # TODO: Make MainFrame class?
            parentStyle = self.parent.getStyle()
            # Controls which features are inherited
            if inheritance_vector[0]:
                inheritanceStyle.bg_color = parentStyle.bg_color
            if inheritance_vector[1]:
                inheritanceStyle.text_style = parentStyle.text_style
            if inheritance_vector[2]:
                inheritanceStyle.border_color = parentStyle.border_color
            if inheritance_vector[3]:
                inheritanceStyle.border_style = parentStyle.border_style
        bg_color: Optional[str] = getFirstAssigned(
            [style.bg_color, inheritanceStyle.bg_color],
            default=default_style.bg_color)
        text_style: Optional[str] = getFirstAssigned(
            [style.text_style, inheritanceStyle.text_style],
            default=default_style.text_style)
        border_color: Optional[str] = getFirstAssigned(
            [style.border_color, inheritanceStyle.border_color],
            default=default_style.border_color)
        # NOTE(review): border_style ignores the inherited value - only the
        # explicit style or the default is consulted. Confirm this is intended.
        border_style: Optional[BorderStyle] = getFirstAssigned(
            [style.border_style], default=default_style.border_style)
        return BoxStyle(bg_color=bg_color,
                        text_style=text_style,
                        border_color=border_color,
                        border_style=border_style)
    def setStyle(self, style: Optional[BoxStyle]) -> None:
        self.style = self.constructDefaultStyle(style)
    def getStyle(self) -> BoxStyle:
        return self.style
class Interactable(Visible):
    """A Visible element the user can select, click, or disable.
    Carries one style per interaction State; getStyle() resolves the style
    for the current state.
    """
    def __init__(
            self,
            parent: Parent,
            width: int,
            height: int,  # Element
            style: Optional[BoxStyle] = None,  # Visible
            selected_style: Optional[BoxStyle] = None,
            clicked_style: Optional[BoxStyle] = None,
            disabled_style: Optional[BoxStyle] = None) -> None:
        super().__init__(parent, width, height, style)  # Visible
        self.setSelectedStyle(selected_style)
        self.setClickedStyle(clicked_style)
        self.setDisabledStyle(disabled_style)
        # Explicit per-direction navigation targets; None means no override.
        self.navigation_override: dict[Direction, Optional[Interactable]] = {
            Direction.UP: None,
            Direction.DOWN: None,
            Direction.LEFT: None,
            Direction.RIGHT: None
        }
    def overrideNavigation(self, direction: Direction, element: Interactable):
        """Force navigation from this element in *direction* to *element*."""
        self.navigation_override[direction] = element
    def navigate(self, direction: Direction) -> Optional[Interactable]:
        """Return the override target for *direction*, or None if unset."""
        return self.navigation_override[direction]
    def setSelectedStyle(self, selected_style: Optional[BoxStyle]) -> None:
        self.selected_style = self.constructDefaultStyle(selected_style)
    def setDisabledStyle(self, disabled_style: Optional[BoxStyle]) -> None:
        self.disabled_style = self.constructDefaultStyle(disabled_style)
    def setClickedStyle(self, clicked_style: Optional[BoxStyle]) -> None:
        self.clicked_style = self.constructDefaultStyle(clicked_style)
    def getSelectedStyle(self) -> BoxStyle:
        return self.selected_style
    def getDisabledStyle(self) -> BoxStyle:
        return self.disabled_style
    def getClickedStyle(self) -> BoxStyle:
        return self.clicked_style
    def getStyle(self) -> BoxStyle:
        """Return the style matching the current State."""
        if self.state is State.DISABLED:
            return self.getDisabledStyle()
        elif self.state is State.SELECTED:
            return self.getSelectedStyle()
        elif self.state is State.CLICKED:
            return self.getClickedStyle()
        return super().getStyle()
    @abstractmethod
    def click(self) -> Response:
        "returns response"
        pass
    def onClick(self, command):
        # NOTE(review): this shadows the click() method with *command* on the
        # instance, so the callback must take no arguments.
        self.click = command
    def toggleSelected(self) -> None:
        if self.state is State.SELECTED:
            self.unselect()
        else:
            self.select()
    def select(self) -> None:
        self.state = State.SELECTED
        self.draw()
    def unselect(self) -> None:
        self.state = State.IDLE
        self.draw()
class Focusable(Interactable):
    """An Interactable that can hold keyboard focus (e.g. a text entry).
    Adds a focused style plus optional callbacks fired when focus is gained
    or lost.
    """
    def __init__(
            self,
            parent: Parent,
            width: int,
            height: int,  # Element
            style: Optional[BoxStyle] = None,  # Visible
            selected_style: Optional[BoxStyle] = None,
            clicked_style: Optional[BoxStyle] = None,
            disabled_style: Optional[BoxStyle] = None,
            focused_style: Optional[BoxStyle] = None,
            on_focused_command: Optional[Callable] = None,
            on_unfocused_command: Optional[Callable] = None,
    ) -> None:
        super().__init__(
            parent,
            width,
            height,  # Element
            style,  # Visible
            selected_style,
            clicked_style,
            disabled_style)  # Interactable
        self.setFocusedStyle(focused_style)
        self.setOnFocused(on_focused_command)
        self.setOnUnfocused(on_unfocused_command)
    def setOnFocused(self, command: Optional[Callable]) -> None:
        "Triggered when entry is focused"
        self.onFocused = command
    def setOnChange(self, command: Optional[Callable]) -> None:
        "Triggered when any character gets changed"
        self.onChange = command
    def setOnUnfocused(self, command: Optional[Callable]) -> None:
        "Triggered when entry is unfocused"
        self.onUnfocused = command
    @abstractmethod
    def handleKeyEvent(self, val) -> Response:
        """Handle one key event while focused; return a Response."""
        pass
    def setFocusedStyle(self, focused_style: Optional[BoxStyle]) -> None:
        """Set the style used while the element holds focus."""
        self.focused_style = self.constructDefaultStyle(focused_style)
    # Backward-compatible alias for the original (misspelled) method name.
    setFocudesStyle = setFocusedStyle
    def getFocusedStyle(self) -> BoxStyle:
        return self.focused_style
    def getStyle(self) -> BoxStyle:
        if self.state is State.FOCUSED:
            return self.getFocusedStyle()
        return super().getStyle()
    def focus(self) -> Response:
        """Give this element focus; runs the on-focused callback first."""
        if self.onFocused:
            self.onFocused()
        self.state = State.FOCUSED
        self.draw()
        return Response.FOCUSED
    def unfocus(self) -> Response:
        """Drop focus back to the selected state; runs the callback first."""
        if self.onUnfocused:
            self.onUnfocused()
        self.state = State.SELECTED
        self.draw()
        return Response.UNFOCUSED
    def toggleFocused(self) -> Response:
        if self.state is State.FOCUSED:
            return self.unfocus()
        else:
            return self.focus()
class Frame(Visible):
    """A Visible element that contains child elements.
    Concrete subclasses (AbsoluteFrame, GridFrame) decide how children are
    positioned; Frame itself tracks the child list and style inheritance.
    """
    def __init__(self,
                 parent: Parent,
                 width: int,
                 height: int,
                 style: BoxStyle = None) -> None:
        super().__init__(parent, width, height, style)
        self.elements: List[Element] = []
    def getAnchor(self) -> Point:
        # Top-left corner of this frame; children are positioned relative
        # to it.
        self.raiseIfNotPlaced()
        return self.getBorder().corners["tl"]
    def constructDefaultStyle(self,
                              style: Optional[BoxStyle] = None) -> BoxStyle:
        # Frames inherit all four style features from their parent.
        # (constructDefaultStyleTemplate is defined on Visible; calling it
        # through Interactable resolves to the same inherited function.)
        return Interactable.constructDefaultStyleTemplate(
            self,
            style=style,
            default_style=BoxStyle(border_color=self.getWindow().term.white),
            inheritance_vector=(True, True, True, True))
    def checkOutOfBounds(self, border: Box, element: Element) -> None:
        """Raise BorderOutOfBounds if *border* lies outside this frame.
        NOTE(review): only referenced from commented-out call sites; the
        LEFT/BOTTOM checks use ``<`` while TOP/RIGHT use ``>`` - confirm
        the intended coordinate orientation before re-enabling.
        """
        if border.getEdge(Side.LEFT) < self.getBorder().getEdge(Side.LEFT):
            raise BorderOutOfBounds(
                f"{type(element).__name__} {str(border)} "
                f"LEFT border edge ({border.getEdge(Side.LEFT)}) "
                f"exceeds {type(element.parent).__name__} {str(self.border)} "
                f"LEFT border edge ({self.getBorder().getEdge(Side.LEFT)})")
        elif border.getEdge(Side.BOTTOM) < self.getBorder().getEdge(
                Side.BOTTOM):
            raise BorderOutOfBounds(
                f"{type(element).__name__} {str(border)} "
                f"BOTTOM border edge ({border.getEdge(Side.BOTTOM)}) "
                f"exceeds {type(element.parent).__name__} {str(self.border)} "
                f"BOTTOM border edge ({self.getBorder().getEdge(Side.BOTTOM)})")
        elif border.getEdge(Side.TOP) > self.getBorder().getEdge(Side.TOP):
            raise BorderOutOfBounds(
                f"{type(element).__name__} {str(border)} "
                f"top border edge ({border.getEdge(Side.TOP)}) "
                f"exceeds {type(element.parent).__name__} {str(self.border)} "
                f"top border edge ({self.getBorder().getEdge(Side.TOP)})")
        elif border.getEdge(Side.RIGHT) > self.getBorder().getEdge(Side.RIGHT):
            raise BorderOutOfBounds(
                f"{type(element).__name__} {str(border)} "
                f"RIGHT border edge ({border.getEdge(Side.RIGHT)}) "
                f"exceeds {type(element.parent).__name__} {str(self.border)} "
                f"RIGHT border edge ({self.getBorder().getEdge(Side.RIGHT)})")
    def addElement(self, element: Element) -> None:
        # if self.checkOutOfBounds(element):
        # raise BorderOutOfBounds("Child coordinates are out of bounds of the parent")
        self.elements.append(element)
    def removeElement(self, element: Element) -> None:
        self.elements.remove(element)
    def addElements(self, *elements: Element) -> None:
        for element in elements:
            self.addElement(element)
    def getAllElements(self,
                       element_filter: Optional[Callable] = None
                       ) -> List[Element]:
        """Recursively collect active leaf elements, optionally filtered."""
        elements = []
        for element in self.elements:
            if element.isActive(
            ):  # TODO: Only searching active elements, might want to change
                if isinstance(element, Frame):
                    elements.extend(element.getAllElements(element_filter))
                else:
                    if element_filter:
                        if element_filter(element):
                            elements.append(element)
                    else:
                        elements.append(element)
        return elements
class AbsoluteFrame(Frame):
    """A Frame whose children are positioned with explicit x/y coordinates."""
    def __init__(self,
                 parent: Parent,
                 width: int,
                 height: int,
                 style: BoxStyle = None) -> None:
        super().__init__(parent, width, height, style=style)
    def placeElement(self, element: Element, x: int, y: int) -> Box:
        """Compute a child's bounding Box at (x, y) relative to this frame.
        NOTE(review): the second corner adds (width, height - 1) - the x
        extent looks one cell wider than the y extent; confirm against Box's
        corner conventions.
        """
        border = Box(
            self.getAnchor() + Point(x, y),
            self.getAnchor() + Point(x, y) +
            Point(element.width, element.height - 1))
        # self.checkOutOfBounds(border, element)
        return border
    def draw(self) -> None:
        """Draw the background, then every placed-and-active child."""
        self.raiseIfNotPlaced()
        if self.isActive():
            self.getBorder().drawBackground(self.getWindow(), self.getStyle())
            for element in self.elements:
                if element.isPlaced() and element.isActive():
                    element.draw()
class GridFrame(Frame):
def __init__(self,
parent: Parent,
style: BoxStyle,
widths: List[int],
heights: List[int],
inner_border: bool = False) -> None:
self.width = sum(widths)
self.height = sum(heights)
self.widths = widths
self.heights = heights
self.setMatrix()
if inner_border:
self.width += len(widths) + 1
self.height += len(heights) + 1
elif style.border_style is not None:
self.width += 2
self.height += 2
super().__init__(parent, self.width, self.height, style=style)
self.inner_border = inner_border
if self.inner_border and self.style.border_style is None:
raise InvalidAttributes(
"Must declare outter border style if inner border is used")
self.setRows(len(heights))
self.setColumns(len(widths))
def setMatrix(self) -> None:
self.matrix: List[List[Optional[Element]]] = []
for r in range(len(self.heights)):
temp: List[Optional[Element]] = []
for c in range(len(self.widths)):
temp.append(None)
self.matrix.append(temp)
def assignCells(self, element: Element, row: int, column: int, rowspan: int,
columnspan: int) -> None:
for r in range(row, row + rowspan):
for c in range(column, column + columnspan):
if self.matrix[r][c] is not None:
raise CellOutOfBounds("Cell is out of bounds")
else:
self.matrix[r][c] = element
def raiseIfBorderOutOfBounds(self, element: Element, padx: int, pady: int,
row: int, column: int, rowspan: int,
columnspan: int) -> None:
element_height = pady + element.getHeight()
element_width = padx + element.getWidth()
cell_height = sum(self.heights[row:row + rowspan])
cell_width = sum(self.widths[column:column + columnspan])
if self.inner_border:
cell_height += rowspan - 1
cell_width += columnspan - 1
if element_height > cell_height:
raise BorderOutOfBounds(f"Height of element: {element_height} "
f"exceeds height of cell: {cell_height}")
if element_width > cell_width:
raise BorderOutOfBounds(f"Width of element: {element_width} "
f"exceeds width of cell: {cell_width}")
def placeElement(self,
element: Element,
padx: int,
pady: int,
row: int,
column: int,
rowspan: int = 1,
columnspan: int = 1) -> Box:
self.assignCells(element, row, column, rowspan, columnspan)
# self.raiseIfBorderOutOfBounds(element, padx, pady, row, column, rowspan, columnspan)
border = Box(
self.getAnchor() + Point(
sum(self.widths[:column]) + padx,
sum(self.heights[:row]) + pady),
self.getAnchor() + Point(
sum(self.widths[:column]) + element.getWidth() + padx,
sum(self.heights[:row]) + element.getHeight() + pady - 1))
if self.style.border_style is not None:
border.p1 = border.p1 + Point(1, 1)
border.p2 = border.p2 + Point(1, 1)
border.updateCorners()
if self.inner_border:
border.p1 = border.p1 + Point(column, row)
border.p2 = border.p2 + Point(column, row)
border.updateCorners()
# self.checkOutOfBounds(border, element)
return border
def setRows(self, rows: int) -> None:
maxplaces = self.height
if rows < 1:
raise ValueError("Rows must be >= 1")
if self.inner_border:
maxplaces -= rows + 1
if maxplaces < rows:
raise ValueError("Not enough space for each row")
self.rows = rows
def setColumns(self, columns: int) -> None:
maxplaces = self.width
if columns < 1:
raise ValueError("Columns must be >= 1")
if self.inner_border:
maxplaces -= columns + 1
if maxplaces < columns:
raise ValueError("Not enough space for each column")
self.columns = columns
def compareCells(self, x1: int, y1: int, x2: int, y2: int) -> bool:
return self.matrix[y1][x1] == self.matrix[y2][x2] and self.matrix[y1][
x1] is not None
    def drawGrid(self) -> None:
        """Render the grid borders, honouring merged (spanning) cells.

        Builds one escape-sequence string and prints it once.  Glyph
        variables are named after the directions in which the character has
        connecting arms: t(op), b(ottom), l(eft), r(ight) — e.g. ``br`` has
        arms going bottom+right ("┌") and ``tblr`` is the full cross ("┼").

        Raises:
            RectangleTooSmall: if the grid is smaller than 2x2.
        """
        command = ''
        border = self.getBorder()
        style = self.getStyle()
        window = self.getWindow()
        if style.bg_color:
            command += style.bg_color
        if self.getWidth() < 2 or self.getHeight() < 2:
            raise RectangleTooSmall(
                "Unable to fit border on such small rectangle, must be at least 2x2"
            )
        else:
            # Pick the glyph set for the configured border style; the
            # fallback draws the same layout with blanks.
            if style.border_style is BorderStyle.SINGLE:
                br = "┌"
                lr = "─"
                blr = "┬"
                bl = "┐"
                tb = "│"
                tbr = "├"
                tblr = "┼"
                tbl = "┤"
                tr = "└"
                tlr = "┴"
                tl = "┘"
            elif style.border_style is BorderStyle.DOUBLE:
                br = "╔"
                lr = "═"
                blr = "╦"
                bl = "╗"
                tb = "║"
                tbr = "╠"
                tblr = "╬"
                tbl = "╣"
                tr = "╚"
                tlr = "╩"
                tl = "╝"
            else:
                br = " "
                lr = " "
                blr = " "
                bl = " "
                tb = " "
                tbr = " "
                tblr = " "
                tbl = " "
                tr = " "
                tlr = " "
                tl = " "
            if style.border_color:
                command += style.border_color
            # For each row, slice 0 is the horizontal separator line drawn
            # above the row; slices 1..height are the row's content lines.
            # NOTE: ``slice`` shadows the builtin of the same name.
            for y, height in enumerate(self.heights):
                for slice in range(height + 1):
                    command += window.moveXY(
                        Point(
                            border.getEdge(Side.LEFT),
                            border.getEdge(Side.TOP) + sum(self.heights[:y]) +
                            y + slice))
                    if y == 0 and slice == 0:
                        # Topmost border line.
                        command += br
                        for x, width in enumerate(self.widths):
                            command += lr * width
                            if x == len(self.widths) - 1:
                                command += bl
                            else:
                                # Merged neighbours get a straight run
                                # instead of a tee.
                                if self.compareCells(x, y, x + 1, y):
                                    command += lr
                                else:
                                    command += blr
                    elif slice == 0 and y != 0:
                        # Separator line between row y-1 and row y.
                        x = 0
                        if self.compareCells(x, y, x, y - 1):
                            command += tb
                        else:
                            command += tbr
                        for x, width in enumerate(self.widths):
                            if self.compareCells(x, y, x, y - 1):
                                # Vertically merged cell: leave the run open.
                                command += " " * width
                            else:
                                command += lr * width
                            if x == len(self.widths) - 1:
                                if self.compareCells(x, y - 1, x, y):
                                    command += tb
                                else:
                                    command += tbl
                            else:
                                # Choose the junction glyph from which of the
                                # four arms around this intersection are open
                                # (a closed arm means the adjacent cells
                                # merge across it).
                                bot = not self.compareCells(x, y, x + 1, y)
                                top = not self.compareCells(
                                    x, y - 1, x + 1, y - 1)
                                left = not self.compareCells(x, y - 1, x, y)
                                right = not self.compareCells(
                                    x + 1, y - 1, x + 1, y)
                                if not top and not bot and not left and not right:
                                    command += " "
                                elif not top and not bot and left and right:
                                    command += lr
                                elif not top and bot and not left and right:
                                    command += br
                                elif not top and bot and left and not right:
                                    command += bl
                                elif not top and bot and left and right:
                                    command += blr
                                elif top and not bot and not left and right:
                                    command += tr
                                elif top and not bot and left and not right:
                                    command += tl
                                elif top and not bot and left and right:
                                    command += tlr
                                elif top and bot and not left and not right:
                                    command += tb
                                elif top and bot and not left and right:
                                    command += tbr
                                elif top and bot and left and not right:
                                    command += tbl
                                elif top and bot and left and right:
                                    command += tblr
                    else:
                        # Content line: vertical separators only.
                        command += tb
                        for x, width in enumerate(self.widths):
                            command += " " * width
                            if x != len(self.widths) - 1:
                                if self.compareCells(x, y, x + 1, y):
                                    command += " "
                                else:
                                    command += tb
                        command += tb
            # Bottom border.  Relies on ``y`` keeping its final loop value
            # (len(self.heights) - 1) after the loop above.
            command += window.moveXY(
                Point(border.getEdge(Side.LEFT),
                      border.getEdge(Side.TOP) + sum(self.heights) + y + 1))
            command += tr
            for x, width in enumerate(self.widths):
                if x == len(self.widths) - 1:
                    command += lr * width + tl
                else:
                    command += lr * width
                    if self.compareCells(x, y, x + 1, y):
                        command += lr
                    else:
                        command += tlr
        print(command)
        window.flush()
def draw(self) -> None:
self.raiseIfNotPlaced()
if self.isActive():
if self.inner_border:
self.drawGrid()
else:
self.getBorder().drawBackground(self.getWindow(),
self.getStyle())
for element in self.elements:
if element.isPlaced() and element.isActive():
element.draw()
class Window():
    """Top-level container tied to one terminal.

    Owns a single root AbsoluteFrame, global hotkey bindings and the
    keyboard-driven state machine: VIEW (nothing selected) -> SELECTION
    (an Interactable is highlighted) -> FOCUSED (a Focusable consumes keys).
    """

    def __init__(self, term: Terminal) -> None:
        self.term = term
        self.window_state = WindowState.VIEW
        self.active_element: Optional[Interactable] = None
        # The frame registers itself with this window during construction
        # (addElement sets self.mainframe), so the instance is not bound here.
        AbsoluteFrame(self, self.term.width, self.term.height)
        self.mainframe.activate()
        self.hotkeys: dict[str, Callable] = {}

    def bind(self, val: str, command: Callable) -> None:
        """Register *command* to run whenever key *val* is pressed."""
        self.hotkeys[val] = command

    def checkBindings(self, val) -> Response:
        """Fire a matching hotkey, if any; COMPLETE when one fired.

        Sequence keys are matched by name (e.g. "KEY_UP"), printable keys
        case-insensitively.
        """
        if val.is_sequence:
            val = val.name
        else:
            val = val.lower()
        for hotkey in self.hotkeys:
            if hotkey == val:
                self.hotkeys[val]()
                return Response.COMPLETE
        return Response.CONTINUE

    def getWindow(self) -> Window:
        # Terminates the getWindow() chain that child elements walk upwards.
        return self

    def draw(self) -> None:
        """Clear the screen and redraw the entire element tree."""
        self.clear()
        self.mainframe.draw()

    def moveXY(self, p: Point) -> str:  # TODO issue 17
        """Return the escape sequence that moves the cursor to *p*."""
        return self.term.move_xy(p.x, p.y)

    def clear(self) -> None:
        """Reset attributes and wipe the screen."""
        print(self.term.normal + self.term.clear)

    def flush(self) -> None:
        "Resets pointer and color"
        print(self.term.home + self.term.normal)

    def getAllElements(self,
                       element_filter: Optional[Callable] = None
                       ) -> List[Element]:
        """Return every element in the tree, optionally filtered."""
        return self.mainframe.getAllElements(element_filter)

    def getAllInteractive(self):
        """Return all active elements the user can select."""
        return self.getAllElements(lambda element: isinstance(
            element, Interactable) and element.isActive())

    def getExtremeElement(self, direction: Direction) -> Optional[Interactable]:
        """Pick the element selected first when entering SELECTION mode.

        Coordinates are y-down.  For DOWN/RIGHT the element with the
        greatest top/right edge wins; for UP/LEFT the smallest bottom/left
        edge wins.  Ties go to the last element scanned.
        """
        extreme_element: Optional[Interactable] = None
        if direction is Direction.DOWN:
            highest_y = 0
            for element in self.getAllInteractive():
                if element.getBorder().getEdge(Side.TOP) >= highest_y:
                    highest_y = element.getBorder().getEdge(Side.TOP)
                    extreme_element = element
        elif direction is Direction.RIGHT:
            highest_x = 0
            for element in self.getAllInteractive():
                if element.getBorder().getEdge(Side.RIGHT) >= highest_x:
                    highest_x = element.getBorder().getEdge(Side.RIGHT)
                    extreme_element = element
        elif direction is Direction.UP:
            lowest_y = float('inf')
            for element in self.getAllInteractive():
                if element.getBorder().getEdge(Side.BOTTOM) <= lowest_y:
                    lowest_y = element.getBorder().getEdge(Side.BOTTOM)
                    extreme_element = element
        elif direction is Direction.LEFT:
            lowest_x = float('inf')
            for element in self.getAllInteractive():
                if element.getBorder().getEdge(Side.LEFT) <= lowest_x:
                    lowest_x = element.getBorder().getEdge(Side.LEFT)
                    extreme_element = element
        return extreme_element

    def calculateWeightedDistance(self, p1: Point, p2: Point,
                                  direction: Direction) -> float:
        """Distance from *p1* to *p2*, penalized by deviation from *direction*.

        Candidates more than MAX_ANGLE off-axis are rejected (inf); the rest
        are divided by a gaussian falloff so straight-ahead elements win.
        """
        delta_x = p1.x - p2.x
        delta_y = p1.y - p2.y
        argument = degrees(atan2(delta_y, delta_x))
        delta_angle = abs(direction.value - argument)
        if delta_angle > 180:
            # Wrap to the short way around the circle.
            delta_angle = 360 - delta_angle
        if delta_angle > MAX_ANGLE:
            return float('inf')
        distance = np.linalg.norm(np.array((delta_x, delta_y)))
        return distance / gaussian(x=delta_angle / 90, mean=0, std=0.45)

    def getActivePoint(self, element: Element, direction: Direction) -> Point:
        """Return the departure point on *element*'s edge facing *direction*."""
        border = element.getBorder()
        if direction is Direction.UP:
            return Point(border.getCenter().x, border.getEdge(Side.TOP))
        elif direction is Direction.DOWN:
            return Point(border.getCenter().x, border.getEdge(Side.BOTTOM))
        elif direction is Direction.RIGHT:
            return Point(border.getEdge(Side.RIGHT), border.getCenter().y)
        elif direction is Direction.LEFT:
            return Point(border.getEdge(Side.LEFT), border.getCenter().y)

    def getCandidatePoint(self, element: Element,
                          direction: Direction) -> Point:
        """Return the arrival point on *element*'s edge nearest the mover."""
        border = element.getBorder()
        if direction is Direction.UP:
            return Point(border.getCenter().x, border.getEdge(Side.BOTTOM))
        elif direction is Direction.DOWN:
            return Point(border.getCenter().x, border.getEdge(Side.TOP))
        elif direction is Direction.RIGHT:
            return Point(border.getEdge(Side.LEFT), border.getCenter().y)
        elif direction is Direction.LEFT:
            return Point(border.getEdge(Side.RIGHT), border.getCenter().y)

    def findElement(self, direction: Direction) -> Optional[Interactable]:
        """Return the nearest interactive element in *direction*, or None."""
        if self.active_element is None:
            raise TypeError(
                "Unable to find element if active_element isn't set")
        else:
            assert (isinstance(self.active_element, Element))
            active_point = self.getActivePoint(self.active_element, direction)
            min_wighted_distance = float('inf')
            closest_element: Optional[Interactable] = None
            for element in self.getAllInteractive():
                if element != self.active_element and element.isActive():
                    point = self.getCandidatePoint(element, direction)
                    weighted_distance = self.calculateWeightedDistance(
                        active_point, point, direction)
                    if weighted_distance < min_wighted_distance:
                        min_wighted_distance = weighted_distance
                        closest_element = element
            return closest_element

    def addElement(self, element: Element) -> None:
        "Allows for only one element to be added, which is a single Frame"
        if not isinstance(element, AbsoluteFrame):
            raise InvalidLayout(
                "Only a single element of type AbsoluteFrame can be added to a Window"
            )
        self.mainframe = element
        self.mainframe.border = Box(Point(0, 0),
                                    Point(self.term.width, self.term.height))

    def removeElement(self, element: Element) -> None:
        """Windows never release their root frame."""
        raise Exception("Not allowed to remove elements from Window")

    def handleKeyEvent(self, val) -> Response:
        """Dispatch one keypress according to the current window state.

        Returns QUIT when the application should exit, otherwise CONTINUE.
        """
        if not val:
            pass
        else:
            if self.window_state is WindowState.VIEW:
                # Active element can't be set if WindowState.VIEW
                assert (self.active_element is None)
                res = self.checkBindings(val)
                if res is Response.COMPLETE:
                    return Response.COMPLETE
                # An arrow key enters SELECTION mode starting at the extreme
                # element for that direction.
                if val.is_sequence:
                    if val.name == "KEY_UP":
                        self.active_element = self.getExtremeElement(
                            Direction.UP)
                    elif val.name == "KEY_RIGHT":
                        self.active_element = self.getExtremeElement(
                            Direction.RIGHT)
                    elif val.name == "KEY_DOWN":
                        self.active_element = self.getExtremeElement(
                            Direction.DOWN)
                    elif val.name == "KEY_LEFT":
                        self.active_element = self.getExtremeElement(
                            Direction.LEFT)
                    if self.active_element is not None:
                        self.active_element.toggleSelected()
                        self.window_state = WindowState.SELECTION
                elif val:
                    if val.lower() == 'q':
                        return Response.QUIT
            elif self.window_state is WindowState.SELECTION:
                # Active element must be set if WindowState.SELECTION
                assert (isinstance(self.active_element, Interactable))
                res = self.checkBindings(val)
                if res is Response.COMPLETE:
                    return Response.COMPLETE
                direction = None
                next_element = None
                if val.is_sequence:
                    if val.name == "KEY_UP":
                        direction = Direction.UP
                    elif val.name == "KEY_RIGHT":
                        direction = Direction.RIGHT
                    elif val.name == "KEY_DOWN":
                        direction = Direction.DOWN
                    elif val.name == "KEY_LEFT":
                        direction = Direction.LEFT
                    elif val.name == "KEY_ENTER":
                        # Clicking may hand focus to the element.
                        res = self.active_element.click()
                        if res is Response.FOCUSED:
                            self.window_state = WindowState.FOCUSED
                    elif val.name == "KEY_ESCAPE" or val.name == "KEY_BACKSPACE":
                        # Drop back to VIEW mode.
                        self.window_state = WindowState.VIEW
                        self.active_element.toggleSelected()
                        self.active_element = None
                if direction:  # If a key is pressed which gives direction
                    # If a direction is given the active element couldn't have been set to None
                    assert (self.active_element is not None)
                    # The element may handle navigation itself; otherwise
                    # fall back to the geometric search.
                    next_element = self.active_element.navigate(direction)
                    if not next_element:
                        next_element = self.findElement(direction)
                    if next_element:  # If a good next element is found
                        self.active_element.toggleSelected()
                        self.active_element = next_element
                        self.active_element.toggleSelected()
                elif val:
                    if val.lower() == 'q':
                        return Response.QUIT
            elif self.window_state is WindowState.FOCUSED:
                # The focused element consumes keys first.
                assert (isinstance(self.active_element, Focusable))
                res = self.active_element.handleKeyEvent(val)
                if res is Response.UNFOCUSED:
                    self.window_state = WindowState.SELECTION
                elif res is Response.CONTINUE:
                    if val:
                        if val.lower() == 'q':
                            return Response.QUIT
                elif res is Response.QUIT:
                    return Response.QUIT
        return Response.CONTINUE

    def loop(self):
        """Run the blocking event loop until a handler returns QUIT."""
        with self.term.cbreak():
            self.clear()
            self.draw()
            res = Response.CONTINUE
            while res != Response.QUIT:
                # 3s timeout so the loop stays responsive to external state.
                val = self.term.inkey(timeout=3)
                res = self.handleKeyEvent(val)
            self.clear()
            self.flush()
# Type alias: anything that can own child elements — a Frame or the Window.
Parent = Union[Frame, Window]
class Box():
    """An axis-aligned rectangle defined by two corner points.

    The screen coordinate system is y-down: Side.TOP is the minimum y and
    Side.BOTTOM the maximum y (drawBackground renders the "┌" row at TOP
    and the "└" row at BOTTOM).  Edge coordinates are inclusive.
    """

    def __init__(self, p1: Point, p2: Point) -> None:
        self.setP1(p1)
        self.setP2(p2)
        self.updateCorners()

    def __str__(self) -> str:
        return f"[{self.p1}, {self.p2}]"

    def setP1(self, p1: Point) -> None:
        # NOTE: does not refresh corners; callers must call updateCorners().
        self.p1 = p1

    def setP2(self, p2: Point) -> None:
        # NOTE: does not refresh corners; callers must call updateCorners().
        self.p2 = p2

    def updateCorners(self) -> None:
        """Recompute the normalized corner map from p1/p2 (any orientation)."""
        self.corners = {
            "tl": Point(min(self.p1.x, self.p2.x), min(self.p1.y, self.p2.y)),
            "tr": Point(max(self.p1.x, self.p2.x), min(self.p1.y, self.p2.y)),
            "bl": Point(min(self.p1.x, self.p2.x), max(self.p1.y, self.p2.y)),
            "br": Point(max(self.p1.x, self.p2.x), max(self.p1.y, self.p2.y))
        }

    def getEdge(self, side: Side) -> int:
        """Return one edge coordinate (x for LEFT/RIGHT, y for TOP/BOTTOM)."""
        if side is Side.TOP:
            return self.corners["tl"].y
        elif side is Side.RIGHT:
            return self.corners["tr"].x
        elif side is Side.BOTTOM:
            return self.corners["bl"].y
        else:  # LEFT
            return self.corners["bl"].x

    def getWidth(self) -> int:
        return self.getEdge(Side.RIGHT) - self.getEdge(Side.LEFT)

    def getHeight(self) -> int:
        return self.getEdge(Side.BOTTOM) - self.getEdge(Side.TOP)

    def getMiddleX(self) -> int:
        return self.getEdge(Side.LEFT) + self.getWidth() // 2

    def getMiddleY(self) -> int:
        # BUG FIX: previously computed BOTTOM + height // 2, a point strictly
        # below the box, which skewed getCenter() (used by keyboard
        # navigation).  Mirror getMiddleX(): minimum coordinate (TOP) plus
        # half the extent.
        return self.getEdge(Side.TOP) + self.getHeight() // 2

    def getCenter(self) -> Point:
        """Return the box's midpoint (integer division rounds toward TOP/LEFT)."""
        return Point(self.getMiddleX(), self.getMiddleY())

    def drawBackground(self, window: Window, style: BoxStyle) -> None:
        """Paint the box: optional background fill plus optional border.

        Raises:
            RectangleTooSmall: if a border is requested on a box < 2x2.
        """
        command = ''
        if style.bg_color:
            command += style.bg_color
        if style.border_style is not BorderStyle.NONE and style.border_style is not None:
            if self.getWidth() < 2 or self.getHeight() < 2:
                raise RectangleTooSmall(
                    "Unable to fit border on such small rectangle, must be at least 2x2"
                )
            else:
                if style.border_color:
                    command += style.border_color
                # Row by row: top border, interior rows, bottom border.
                for row in range(self.getEdge(Side.TOP),
                                 self.getEdge(Side.BOTTOM) + 1):
                    command += window.moveXY(Point(self.getEdge(Side.LEFT),
                                                   row))
                    if row == self.getEdge(Side.BOTTOM):
                        if style.border_style is BorderStyle.SINGLE:
                            command += "└" + "─" * (self.getWidth() - 2) + "┘"
                        elif style.border_style is BorderStyle.DOUBLE:
                            command += "╚" + "═" * (self.getWidth() - 2) + "╝"
                    elif row == self.getEdge(Side.TOP):
                        if style.border_style is BorderStyle.SINGLE:
                            command += "┌" + "─" * (self.getWidth() - 2) + "┐"
                        elif style.border_style is BorderStyle.DOUBLE:
                            command += "╔" + "═" * (self.getWidth() - 2) + "╗"
                    else:
                        if style.border_style is BorderStyle.SINGLE:
                            command += "│" + " " * (self.getWidth() - 2) + "│"
                        elif style.border_style is BorderStyle.DOUBLE:
                            command += "║" + " " * (self.getWidth() - 2) + "║"
        else:
            # Borderless: just flood-fill the area when a bg color is set.
            if style.bg_color:
                for row in range(self.getEdge(Side.TOP),
                                 self.getEdge(Side.BOTTOM) + 1):
                    command += window.moveXY(Point(self.getEdge(Side.LEFT),
                                                   row))
                    command += " " * self.getWidth()
        print(command)
        window.flush()

    def writeText(self, window: Window, style: BoxStyle, text: Optional[str],
                  padding: List[int], h_align: HAlignment,
                  v_align: VAlignment) -> None:
        """Write *text* inside the box with the given alignment and padding.

        padding order is [top, right, bottom, left].  Text is truncated to
        the horizontally available space.
        """
        command = ''
        if text:
            # Text style
            if style.bg_color:
                command += style.bg_color
            if style.text_style:
                command += style.text_style
            # Cut off text if it won't fit horizontally.
            max_text_len = self.getWidth() - (padding[1] + padding[3])
            text = text[:max_text_len]
            # Text alignment
            # Horizontal
            if h_align is HAlignment.LEFT:
                text_start_x = self.getEdge(Side.LEFT) + padding[3]
            elif h_align is HAlignment.MIDDLE:
                text_start_x = self.getEdge(Side.LEFT) + padding[3] + (
                    self.getWidth() // 2) - (len(text) // 2)
            elif h_align is HAlignment.RIGHT:
                text_start_x = self.getEdge(Side.RIGHT) - padding[1] - len(text)
            # Vertical
            # NOTE(review): TOP alignment anchors to the BOTTOM edge here,
            # while Entry.drawCursor anchors TOP to the TOP edge — one of the
            # two looks inverted; confirm before changing either.
            if v_align is VAlignment.TOP:
                text_start_y = self.getEdge(Side.BOTTOM) - padding[0]
            elif v_align is VAlignment.MIDDLE:
                text_start_y = self.getEdge(
                    Side.BOTTOM) - padding[0] - (self.getHeight() // 2)
            elif v_align is VAlignment.BOTTOM:
                text_start_y = self.getEdge(Side.TOP) + padding[2]
            command += window.moveXY(Point(text_start_x, text_start_y))
            command += text
        print(command)
        window.flush()

    def draw(self, window: Window, style: BoxStyle, text: Optional[str],
             padding: List[int], h_align: HAlignment,
             v_align: VAlignment) -> None:
        """Paint the background/border, then the aligned text on top."""
        self.drawBackground(window, style)
        self.writeText(window, style, text, padding, h_align, v_align)
class BoxStyle():
    """Bundle of visual attributes for a boxed element.

    Any attribute left as None falls back to the element's default or
    inherited styling.
    """

    def __init__(self,
                 bg_color: Optional[str] = None,
                 text_style: Optional[str] = None,
                 border_color: Optional[str] = None,
                 border_style: Optional[BorderStyle] = None) -> None:
        """Leave all parameters empty for the default style."""
        # Each attribute mirrors its constructor argument verbatim.
        self.border_style = border_style
        self.border_color = border_color
        self.text_style = text_style
        self.bg_color = bg_color
class Label(Visible, HasText):
    """A non-interactive box of aligned text."""

    def __init__(
            self,
            parent: Parent,
            width: int,
            height: int,
            text: Optional[str] = None,
            style: BoxStyle = None,
            padding: List[int] = [0] * 4,
            h_align: HAlignment = HAlignment.MIDDLE,
            v_align: VAlignment = VAlignment.MIDDLE,
    ) -> None:
        # Element/Visible construction first, then the text mixin.
        Visible.__init__(self, parent, width, height, style)
        HasText.__init__(self, text, padding, h_align, v_align, width, height)

    def constructDefaultStyle(self, style: Optional[BoxStyle] = None):
        """Build the effective style; labels inherit every style channel."""
        term = self.getWindow().term
        return Interactable.constructDefaultStyleTemplate(
            self,
            style=style,
            default_style=BoxStyle(bg_color=term.normal,
                                   text_style=term.white),
            inheritance_vector=(True, True, True, True))

    def draw(self) -> None:
        """Paint the label's box and its aligned text."""
        window = self.getWindow()
        self.getBorder().draw(window, self.getStyle(), self.text,
                              self.padding, self.h_align, self.v_align)
class Button(Interactable, HasText):
    """A clickable text box that invokes a registered callback."""

    def __init__(self,
                 parent: Parent,
                 width: int,
                 height: int,
                 command: Optional[Callable] = None,
                 style: BoxStyle = None,
                 text: Optional[str] = None,
                 h_align: HAlignment = HAlignment.MIDDLE,
                 v_align: VAlignment = VAlignment.MIDDLE,
                 padding: List[int] = [0] * 4,
                 disabled_style: BoxStyle = None,
                 selected_style: BoxStyle = None,
                 clicked_style: BoxStyle = None) -> None:
        # Element/Visible/Interactable construction, then the text mixin.
        Interactable.__init__(self, parent, width, height, style,
                              selected_style, clicked_style, disabled_style)
        HasText.__init__(self, text, padding, h_align, v_align, width, height)
        self.onClick(command)

    def constructDefaultStyle(self,
                              style: Optional[BoxStyle] = None) -> BoxStyle:
        """Build the effective style; buttons inherit only the text style."""
        term = self.getWindow().term
        return Interactable.constructDefaultStyleTemplate(
            self,
            style=style,
            default_style=BoxStyle(bg_color=term.on_white,
                                   text_style=term.black),
            inheritance_vector=(False, True, False, False))

    def draw(self) -> None:
        """Paint the button's box and its aligned text."""
        window = self.getWindow()
        self.getBorder().draw(window, self.getStyle(), self.text,
                              self.padding, self.h_align, self.v_align)

    def onClick(self, command: Optional[Callable]) -> None:
        """Register *command* to be invoked by click()."""
        self.command = command

    def click(self) -> Response:
        """Run the registered command, or fall through to CONTINUE."""
        if self.command:
            return self.command()
        return Response.CONTINUE
class Entry(Focusable, HasText):
    """Single-line text input with a movable cursor.

    Edits are made against ``self.text``; Enter commits them to
    ``self.saved_text``, Escape reverts to the last committed value.
    """

    def __init__(
            self,
            parent: Parent,
            width: int,
            height: int,
            default_text: Optional[str] = None,
            style: BoxStyle = None,
            padding: List[int] = [0] * 4,
            h_align: HAlignment = HAlignment.LEFT,
            v_align: VAlignment = VAlignment.TOP,
            selected_style: BoxStyle = None,
            clicked_style: BoxStyle = None,
            disabled_style: BoxStyle = None,
            focused_style: BoxStyle = None,
            cursor_style: str = None,
            cursor_bg_color: str = None,
            highlight_color: str = None,
            on_focused_command: Optional[Callable] = None,
            on_unfocused_command: Optional[Callable] = None,
            on_change_command: Optional[Callable] = None,
    ) -> None:
        # Assigning text
        Focusable.__init__(
            self,
            parent,
            width,
            height,  # Element
            style,  # Visible
            selected_style,
            clicked_style,
            disabled_style,  # Interactable
            focused_style,  # Focusable
            on_focused_command=on_focused_command,
            on_unfocused_command=on_unfocused_command)
        HasText.__init__(self, default_text, padding, h_align, v_align, width,
                         height)
        self.saved_text = ''  # last committed (Enter) value
        self.state = State.IDLE
        self.cursor_pos = 0
        # Fall back to defaults for any unset optional appearance arguments.
        self.text: str = getFirstAssigned([self.text], '')
        self.cursor_style: str = getFirstAssigned(
            [cursor_style],
            self.getWindow().term.on_goldenrod1)
        self.cursor_bg_color: str = getFirstAssigned(
            [cursor_bg_color],
            self.getWindow().term.gray33)
        self.highlight_color: str = getFirstAssigned(
            [highlight_color],
            self.getWindow().term.on_gray38)
        self.setOnChange(on_change_command)

    def constructDefaultStyle(self, style: Optional[BoxStyle] = None):
        """Build the effective style; entries inherit all but bg_color."""
        return Interactable.constructDefaultStyleTemplate(
            self,
            style=style,
            default_style=BoxStyle(bg_color=self.getWindow().term.normal,
                                   text_style=self.getWindow().term.white,
                                   border_color=self.getWindow().term.white,
                                   border_style=BorderStyle.SINGLE),
            inheritance_vector=(False, True, True,
                                True))  # Doesn't inherit bg_color

    def setText(self, text: Optional[str] = None) -> None:
        """Replace the working text and reposition the cursor."""
        HasText.setText(self, text)
        self.updateCursor()

    def updateCursor(self) -> None:
        # The cursor always snaps to the end of the current text.
        if self.text is None or self.text == '':
            self.cursor_pos = 0
        else:
            self.cursor_pos = len(self.text)

    def clear(self) -> None:
        """Discard both the working and the committed text."""
        self.text = ''
        self.saved_text = ''
        self.updateCursor()

    def click(self) -> Response:
        """Enter edit mode, starting from the last committed text."""
        self.text = self.saved_text
        self.focus()
        return Response.FOCUSED

    def getSavedText(self) -> str:
        """Return the last committed (Enter-ed) value."""
        return self.saved_text

    def handleKeyEvent(self, val) -> Response:
        """Handle one keypress while focused.

        Returns COMPLETE when the key was consumed, UNFOCUSED on
        Enter (commit) / Escape (revert), otherwise CONTINUE.
        """
        if val.is_sequence:
            if val.name == "KEY_UP":
                return Response.COMPLETE
            elif val.name == "KEY_RIGHT":
                self.cursor_pos = min(self.cursor_pos + 1, len(self.text))
                self.draw()
                return Response.COMPLETE
            elif val.name == "KEY_DOWN":
                return Response.COMPLETE
            elif val.name == "KEY_LEFT":
                self.cursor_pos = max(self.cursor_pos - 1, 0)
                self.draw()
                return Response.COMPLETE
            elif val.name == "KEY_BACKSPACE":
                # Delete the character before the cursor.
                # NOTE(review): setText() resets cursor_pos to the end of the
                # text, so deleting mid-string jumps the cursor — confirm
                # whether that is intended.
                if self.cursor_pos > 0:
                    self.setText(self.text[:self.cursor_pos - 1] +
                                 self.text[self.cursor_pos:])
                self.draw()
                return Response.COMPLETE
            elif val.name == "KEY_ENTER":
                # Commit and leave edit mode.
                self.saved_text = self.text
                self.unfocus()
                return Response.UNFOCUSED
            elif val.name == "KEY_ESCAPE":
                # Revert and leave edit mode.
                self.text = self.saved_text
                self.unfocus()
                return Response.UNFOCUSED
        elif val:
            # Printable character: append if it fits.
            # NOTE(review): on_change fires before the text mutates, and is
            # not fired for backspace — confirm intended.
            if self.onChange:
                self.onChange()
            if len(self.text) < self.getWidth(
            ):  # TODO: Support text larger than width
                self.setText(self.text + val)
            self.draw()
            return Response.COMPLETE
        else:
            pass
        return Response.CONTINUE

    def drawCursor(self, border: Box, window: Window) -> None:
        """Overlay the cursor cell on top of the already-drawn text."""
        command = ''
        # Cursor style
        command += self.cursor_style
        command += self.cursor_bg_color
        # Cut off text if it won't fit horizontally.
        max_text_len = border.getWidth() - (self.padding[1] + self.padding[3])
        text = self.text[:max_text_len]
        # Get cursor character (blank when the cursor sits past the text).
        if self.cursor_pos >= len(text):
            cursor_character = ' '
        else:
            cursor_character = text[self.cursor_pos]
        # Text alignment
        # Horizontal
        if self.h_align is HAlignment.LEFT:
            text_start_x = border.getEdge(Side.LEFT) + self.padding[3]
        elif self.h_align is HAlignment.MIDDLE:
            text_start_x = border.getEdge(Side.LEFT) + self.padding[3] + (
                border.getWidth() // 2) - (len(text) // 2)
        elif self.h_align is HAlignment.RIGHT:
            text_start_x = border.getEdge(
                Side.RIGHT) - self.padding[1] - max_text_len
        # Vertical
        # NOTE(review): the vertical anchors here differ from Box.writeText
        # (TOP anchors to TOP here, to BOTTOM there) — confirm which is right.
        if self.v_align is VAlignment.TOP:
            text_start_y = border.getEdge(Side.TOP) - self.padding[0]
        elif self.v_align is VAlignment.MIDDLE:
            text_start_y = border.getEdge(
                Side.TOP) - self.padding[0] - (border.getHeight() // 2)
        elif self.v_align is VAlignment.BOTTOM:
            text_start_y = border.getEdge(Side.BOTTOM) + self.padding[2]
        command += window.moveXY(
            Point(text_start_x + self.cursor_pos, text_start_y))
        command += cursor_character
        print(command)
        window.flush()

    def draw(self) -> None:
        """Paint the entry; add the cursor overlay while focused."""
        self.getBorder().draw(self.getWindow(), self.getStyle(), self.text,
                              self.padding, self.h_align, self.v_align)
        if self.state is State.FOCUSED:
            self.drawCursor(self.getBorder(), self.getWindow())
class DropdownMenu(Focusable, HasText):
    """A button that, while focused, expands into a vertical list of item
    buttons sharing its width and height.

    The main button is always itemButtons[0]; added items stack below it
    inside a dedicated AbsoluteFrame that is activated only while focused.
    """

    def __init__(self,
                 parent: Parent,
                 width: int,
                 height: int,
                 text: Optional[str],
                 auto_redraw: bool = True,
                 style: Optional[BoxStyle] = None,
                 padding: List[int] = [0] * 4,
                 h_align: HAlignment = HAlignment.LEFT,
                 v_align: VAlignment = VAlignment.MIDDLE,
                 selected_style: Optional[BoxStyle] = None,
                 clicked_style: Optional[BoxStyle] = None,
                 disabled_style: Optional[BoxStyle] = None,
                 focused_style: Optional[BoxStyle] = None) -> None:
        Focusable.__init__(
            self,
            parent,
            width,
            height,  # Element
            style,  # Visible
            selected_style,
            clicked_style,
            disabled_style,  # Interactable
            focused_style)  # Focusable
        HasText.__init__(self, None, padding, h_align, v_align, width, height)
        # The frame hosts the main button plus every added item button.
        self.itemFrame = AbsoluteFrame(parent, width, height)
        # TODO: add ▼ to main button
        self.mainButton = Button(self.itemFrame,
                                 width,
                                 height,
                                 command=self.toggleFocused,
                                 style=self.style,
                                 text=text,
                                 selected_style=self.selected_style,
                                 clicked_style=self.clicked_style,
                                 disabled_style=self.disabled_style)
        self.itemButtons: List[Button] = [self.mainButton]
        self.active_index = 0
        self.active_item = self.mainButton
        self.auto_redraw = auto_redraw

    def place(self, x: int, y: int) -> None:
        """Position the menu; items are stacked below the main button."""
        # Grow the frame so every item button fits.
        self.itemFrame.height = self.getItemHeight() * len(self.itemButtons)
        self.itemFrame.place(x, y)
        self.mainButton.place(0, 0)
        # The menu's own border aliases the main button's border.
        self.border = self.mainButton.getBorder()
        for i, itemButton in enumerate(self.itemButtons):
            itemButton.place(0, i * self.getItemHeight())
        self.activate()
        # The expanded list stays hidden until the menu is focused.
        self.itemFrame.deactivate()

    def focus(self) -> Response:
        """Expand the item list and take focus."""
        self.itemFrame.activate()
        return super().focus()

    def unfocus(self) -> Response:
        """Collapse the list, reset selection to the main button."""
        self.active_item.toggleSelected()
        self.active_index = 0
        self.active_item = self.itemButtons[self.active_index]
        self.active_item.toggleSelected()
        self.itemFrame.deactivate()
        if self.auto_redraw:
            # Redraw the whole window so the collapsed list is erased.
            self.getWindow().draw()
        return super().unfocus()

    def click(self) -> Response:
        """Delegate to the main button (toggles focus)."""
        assert (self.mainButton.command is not None)  # Always declared in init
        return self.mainButton.command()

    def selectNext(self) -> None:
        """Move the highlight one item down, clamped at the last item."""
        if self.active_index < len(self.itemButtons) - 1:
            self.active_item.toggleSelected()
            self.active_index += 1
            self.active_item = self.itemButtons[self.active_index]
            self.active_item.toggleSelected()

    def selectPrev(self) -> None:
        """Move the highlight one item up, clamped at the main button."""
        if self.active_index > 0:
            self.active_item.toggleSelected()
            self.active_index -= 1
            self.active_item = self.itemButtons[self.active_index]
            self.active_item.toggleSelected()

    def handleKeyEvent(self, val) -> Response:
        """Handle a keypress while the menu is focused (expanded)."""
        if val.is_sequence:
            if val.name == "KEY_UP":
                self.selectPrev()
                return Response.COMPLETE
            elif val.name == "KEY_RIGHT":
                pass
            elif val.name == "KEY_DOWN":
                self.selectNext()
                return Response.COMPLETE
            elif val.name == "KEY_LEFT":
                pass
            elif val.name == "KEY_ENTER":
                # Activate the highlighted item's command.
                res = self.active_item.click()
                if res:
                    return res
                return Response.COMPLETE
            elif val.name == "KEY_BACKSPACE":
                return self.toggleFocused()
            elif val.name == "KEY_ESCAPE":
                return self.toggleFocused()
        return Response.CONTINUE

    def toggleSelected(self) -> None:
        """Mirror selection state onto the main button."""
        self.mainButton.toggleSelected()
        if self.state is State.SELECTED:
            self.unselect()
        else:
            self.select()

    def constructDefaultStyle(self, style: Optional[BoxStyle] = None):
        """Build the effective style; menus inherit only the bg color."""
        return Interactable.constructDefaultStyleTemplate(
            self,
            default_style=BoxStyle(bg_color=self.getWindow().term.on_white,
                                   text_style=self.getWindow().term.black),
            style=style,
            inheritance_vector=(True, False, False, False))  # Doesn't inherit

    def getItemHeight(self) -> int:
        # Every item button is as tall as the menu itself.
        return self.height

    def addItem(self,
                text: str,
                command: Callable,
                style: Optional[BoxStyle] = None,
                padding: Optional[List[int]] = None,
                h_align: Optional[HAlignment] = None,
                v_align: Optional[VAlignment] = None,
                selected_style: Optional[BoxStyle] = None,
                clicked_style: Optional[BoxStyle] = None,
                disabled_style: Optional[BoxStyle] = None) -> None:
        """Append an item button running *command*; unset appearance
        parameters fall back to the main button's values."""
        # Extend frame to fit option
        # Match mainButton params if none are given
        padding = getFirstAssigned([padding], self.mainButton.padding)
        h_align = getFirstAssigned([h_align], self.mainButton.h_align)
        v_align = getFirstAssigned([v_align], self.mainButton.v_align)
        style = getFirstAssigned([style], self.mainButton.style)
        selected_style = getFirstAssigned([selected_style],
                                          self.mainButton.selected_style)
        clicked_style = getFirstAssigned([clicked_style],
                                         self.mainButton.clicked_style)
        disabled_style = getFirstAssigned([disabled_style],
                                          self.mainButton.disabled_style)
        optionButton = Button(parent=self.itemFrame,
                              width=self.width,
                              height=self.height,
                              command=command,
                              text=text,
                              style=style,
                              selected_style=selected_style,
                              clicked_style=clicked_style,
                              disabled_style=disabled_style)
        self.itemButtons.append(optionButton)

    def draw(self) -> None:
        """Draw the main button and (when active) the expanded item list."""
        self.raiseIfNotPlaced()
        self.mainButton.draw()
        self.itemFrame.draw()
class OptionMenu(DropdownMenu):
    """A dropdown that lets the user pick exactly one option from a list.

    The first option (or *default_text*) becomes the main button's label;
    choosing an item copies that item's text and styles onto the main
    button and collapses the menu.
    """

    def __init__(self,
                 parent: Parent,
                 width: int,
                 height: int,
                 default_text: Optional[str] = None,
                 options: Optional[List[str]] = None,
                 style: Optional[BoxStyle] = None,
                 padding: List[int] = [0] * 4,
                 h_align: HAlignment = HAlignment.LEFT,
                 v_align: VAlignment = VAlignment.MIDDLE,
                 selected_style: Optional[BoxStyle] = None,
                 clicked_style: Optional[BoxStyle] = None,
                 disabled_style: Optional[BoxStyle] = None,
                 focused_style: Optional[BoxStyle] = None) -> None:
        # BUG FIX: the signature previously read ``default_text=Optional[str]``
        # and ``options=List[str]`` — ``=`` instead of ``:`` made the typing
        # constructs themselves the *default values*.  They are now proper
        # annotations with None defaults (backward compatible for callers
        # that passed both arguments, which any working caller had to).
        super().__init__(parent=parent,
                         width=width,
                         height=height,
                         text=default_text,
                         style=style,
                         padding=padding,
                         h_align=h_align,
                         v_align=v_align,
                         selected_style=selected_style,
                         clicked_style=clicked_style,
                         disabled_style=disabled_style,
                         focused_style=focused_style)
        self.options = options if options is not None else []
        for option in self.options:
            self.addOption(text=option,
                           style=style,
                           padding=padding,
                           h_align=h_align,
                           v_align=v_align,
                           selected_style=selected_style,
                           clicked_style=clicked_style,
                           disabled_style=disabled_style)

    def getValue(self) -> Optional[str]:
        """Return the currently selected option's text."""
        return self.mainButton.text

    def switchOptions(self, optionIndex: int) -> Response:
        """Adopt the chosen item's text/styles, then collapse the menu."""
        optionButton = self.itemButtons[optionIndex]
        self.mainButton.text = optionButton.text
        self.mainButton.style = optionButton.style
        self.mainButton.selected_style = optionButton.selected_style
        self.mainButton.clicked_style = optionButton.clicked_style
        self.mainButton.disabled_style = optionButton.disabled_style
        return self.unfocus()

    def addOption(self,
                  text: str,
                  style: Optional[BoxStyle] = None,
                  padding: Optional[List[int]] = None,
                  h_align: Optional[HAlignment] = None,
                  v_align: Optional[VAlignment] = None,
                  selected_style: Optional[BoxStyle] = None,
                  clicked_style: Optional[BoxStyle] = None,
                  disabled_style: Optional[BoxStyle] = None) -> None:
        """Add one selectable option.

        The very first option (when no default text is set) only labels the
        main button; every later option becomes a clickable item that
        switches the menu to itself.
        """
        if self.mainButton.text is None:
            self.mainButton.text = text
        else:
            # Capture the index eagerly; binding it as a default would also
            # work, but the value must be fixed before itemButtons grows.
            optionIndex = len(self.itemButtons)
            super().addItem(text=text,
                            command=lambda: self.switchOptions(optionIndex),
                            style=style,
                            padding=padding,
                            h_align=h_align,
                            v_align=v_align,
                            selected_style=selected_style,
                            clicked_style=clicked_style,
                            disabled_style=disabled_style)
# ---------------------------------------------------------------------------
# repository: xmrsmoothx/Python-world-gen
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 25 21:04:55 2021
@author: Bri
"""
import random
from src_tools import *
from src_events import *
import string
class Magic:
    """A randomly generated spell tied to a creator and their culture.

    Construction rolls a kind (cosmetic), an effect and a target; the
    magnitude product |effect * target| is rejection-sampled against a
    uniform roll so powerful, wide-reaching spells are generated more
    rarely.  The spell registers itself with both the creator's and the
    culture's ``magic`` lists.
    """

    def __init__(self, c):
        self.tt = "magic"
        # Choose a type of magic spell. This is (mostly?) cosmetic.
        kinds = ["incantation","meditation","spell","prayer","invocation","channeling","concoction","ritual","song","divination"]
        # Choose one effect to do to the target; greater magnitudes are harder/less likely to be cast and generated
        effects = {"curse":-1,"bless":1,"destroy":-2.5,"create":2,"transmute":-0.25,"transport":-0.25,"damage":-0.75,"heal":0.75,"resurrect":3.5}
        # Choose a target. Greater magnitudes are harder/less likely to be cast and generated
        targets = {"item":2,"person":3,"group":4,"bloodline":5,"location":6,"city":8,"region":12,"nation":21}
        # These combinations of effects and targets won't be allowed.
        impossibleSpells = ["create region","create location",
                            "transport nation","transport location","transport region",
                            "transmute nation","transmute region","transmute location",
                            "transmute city"]
        self.kind = random.choice(kinds)
        self.effect = random.choice(list(effects.keys()))
        self.target = random.choice(list(targets.keys()))
        # Re-roll until the combination is allowed and its magnitude passes
        # the rarity gate (larger magnitudes fail the uniform roll more often).
        while self.effect + " " + self.target in impossibleSpells or abs(effects[self.effect]*targets[self.target]) > random.uniform(0,120):
            self.effect = random.choice(list(effects.keys()))
            self.target = random.choice(list(targets.keys()))
        self.strength = random.random()
        self.creator = c
        self.creator.magic.append(self)
        self.culture = self.creator.culture
        suffixes = {"curse":["doom","curse","hate","hex","spite"],
                    "bless":["blessing","sanctity","beatitude","consecration","purification"],
                    "destroy":["death","fire","inferno","mortality","horror","finality","nightmare","doom","destruction"],
                    "create":["genesis","forge","creation","primality","molding","conjuration"],
                    "transmute":["transmutation","transformation","alchemy","recreation"],
                    "transport":["teleportation","translocation","transportation","flicker"],
                    "damage":["fire","vitriol","brimstone","meteor","pain","blood"],
                    "heal":["blessing","purification","mending","healing","touch","light"],
                    "resurrect":["necromancy","revival","resurrection","unearthing","resuscitation","light"]}
        prefixes = {"curse":["doom","curse","hate","hex","spite"],
                    "bless":["holy","sacrosanct","consecrating","purifying"],
                    "destroy":["deadly","death","inferno","fire","skull","mortal","horror","final","nightmare","doom","destruction"],
                    "create":["genesis","primal","conjure"],
                    "transmute":[],
                    "transport":[],
                    "damage":["fire","vitriol","brimstone","meteor","pain","blood"],
                    "heal":["blessed","purification","mending","healing","light"],
                    "resurrect":["necromantic","soul","light","blessed","corpse","grave"]}
        # Name: a culture-language word, optionally combined with a second
        # word or an effect-themed prefix/suffix (25% chance each branch).
        s = self.culture.language.genName()
        roll = random.random()
        if roll < 0.25:
            s = s + " " + self.culture.language.genName()
        elif roll < 0.5 and len(suffixes[self.effect]) > 0:
            s = s + " " + random.choice(suffixes[self.effect])
        elif roll < 0.75 and len(prefixes[self.effect]) > 0:
            s = random.choice(prefixes[self.effect]) + " " + s
        # Otherwise keep the single bare name (previous code had a no-op
        # ``else: s = s`` branch here).
        self.name = string.capwords(s)
        self.culture.magic.append(self)

    def justName(self):
        """Return just the capitalized spell name."""
        return self.name

    def nameFull(self):
        """Return "<kind> <name> by the <creator>"."""
        s = self.kind
        s += " " + self.name
        # BUG FIX (idiom): was ``!= None``; identity comparison is correct
        # for None checks.
        if self.creator is not None:
            s += " by the " + self.creator.nameFull()
        return s

    def description(self):
        """Return a short human-readable description of the spell."""
        vowels = ["a","e","i","o","u"]
        s = self.name + " is a "
        # Bucket the 0..1 strength roll into three adjectives.
        if self.strength < 0.333:
            s += "weak"
        elif self.strength < 0.666:
            s += "strong"
        else:
            s += "powerful"
        s += " magic "
        s += self.kind
        s += " to "
        s += self.effect
        s += " a"
        # "a" vs "an" depending on the target's leading vowel.
        s = s + "n " + self.target if self.target[0].lower() in vowels else s + " " + self.target
        s += ".\n"
        s += "It was created by the " + self.creator.nameFull() + "."
        return s
import time
import attr
import pytest
from saltfactories.daemons.container import Container
from saltfactories.utils import random_string
from saltfactories.utils.ports import get_unused_localhost_port
docker = pytest.importorskip("docker")
@attr.s(kw_only=True, slots=True)
class MySQLImage:
    """Docker image coordinates (name/tag) plus the container id to run it as."""

    name = attr.ib()
    tag = attr.ib()
    container_id = attr.ib()

    def __str__(self):
        return f"{self.name}:{self.tag}"
@attr.s(kw_only=True, slots=True)
class MySQLCombo:
    # Connection details for one MySQL-flavour container under test.
    mysql_name = attr.ib()      # image name (e.g. "mariadb")
    mysql_version = attr.ib()   # image tag (e.g. "10.5")
    mysql_port = attr.ib()      # host port mapped to the container's 3306/tcp
    mysql_user = attr.ib()
    mysql_passwd = attr.ib()
    @mysql_port.default
    def _mysql_port(self):
        # Default: grab a free localhost port so parallel runs don't collide.
        return get_unused_localhost_port()
def get_test_versions():
    """Build the list of MySQL-flavour images to parametrize the tests with.

    Replaces three near-identical copy/paste loops with a single data-driven
    table of (image name, container-id prefix, tags).

    Returns:
        list[MySQLImage]: one entry per (image, tag) combination.
    """
    flavours = (
        ("mysql/mysql-server", "mysql", ("5.5", "5.6", "5.7", "8.0")),
        ("mariadb", "mariadb", ("10.1", "10.2", "10.3", "10.4", "10.5")),
        ("percona", "percona", ("5.5", "5.6", "5.7", "8.0")),
    )
    test_versions = []
    for name, prefix, versions in flavours:
        for version in versions:
            test_versions.append(
                MySQLImage(
                    name=name,
                    tag=version,
                    container_id=random_string("{}-{}-".format(prefix, version)),
                )
            )
    return test_versions
@pytest.fixture(scope="module")
def docker_client():
    """Module-scoped docker client; skips the module when docker is unusable."""
    try:
        client = docker.from_env()
    except docker.errors.DockerException:
        pytest.skip("Failed to get a connection to docker running on the system")
    connectable = Container.client_connectable(client)
    # client_connectable returns True on success, otherwise an error message.
    if connectable is not True:  # pragma: nocover
        pytest.skip(connectable)
    return client
def get_test_version_id(value):
    """Render a human-readable pytest id for a parametrized image value."""
    return f"container={value}"
@pytest.fixture(scope="module", params=get_test_versions(), ids=get_test_version_id)
def mysql_image(request, docker_client):
    """Parametrized fixture: pull one MySQL-flavour image per test parameter.

    Skips (rather than fails) the parametrization when the pull fails,
    e.g. offline CI or a removed tag.
    """
    image = request.param
    try:
        docker_client.images.pull(image.name, tag=image.tag)
    except docker.errors.APIError as exc:
        pytest.skip(
            "Failed to pull docker image '{}:{}': {}".format(image.name, image.tag, exc)
        )
    return image
@pytest.fixture(scope="module")
def mysql_container(salt_factories, docker_client, salt_call_cli, mysql_image):
    """Start the parametrized MySQL container and yield its connection combo.

    Bug fix: the redacted `<PASSWORD>` placeholders were literal syntax
    errors. The root password is now held in `mysql_passwd` and threaded
    consistently through the combo, the container environment, and the
    login probe command (which previously hard-coded it).
    """
    mysql_user = "root"
    mysql_passwd = "password"
    combo = MySQLCombo(
        mysql_name=mysql_image.name,
        mysql_version=mysql_image.tag,
        mysql_user=mysql_user,
        mysql_passwd=mysql_passwd,
    )
    container = salt_factories.get_container(
        mysql_image.container_id,
        "{}:{}".format(combo.mysql_name, combo.mysql_version),
        docker_client=docker_client,
        check_ports=[combo.mysql_port],
        container_run_kwargs={
            "ports": {"3306/tcp": combo.mysql_port},
            "environment": {
                "MYSQL_ROOT_PASSWORD": mysql_passwd,
                "MYSQL_ROOT_HOST": "%",
            },
        },
    )
    with container.started():
        authenticated = False
        login_attempts = 6
        while login_attempts:
            login_attempts -= 1
            # Keep probing until the server inside the container accepts logins.
            ret = salt_call_cli.run(
                "docker.run",
                name=mysql_image.container_id,
                cmd="mysql --user={} --password={} -e 'SELECT 1'".format(
                    combo.mysql_user, combo.mysql_passwd
                ),
            )
            authenticated = ret.exitcode == 0
            if authenticated:
                break
            time.sleep(2)
        if authenticated:
            yield combo
        else:
            pytest.fail(
                "Failed to login into mysql server running in container(id: {})".format(
                    mysql_image.container_id
                )
            )
|
# repository: sjklipp/automol
""" vector functions
"""
import numbers
import numpy
import transformations as tf
def unit_norm(xyz):
    """ Scale a vector (xyz) to unit length.

    :param xyz: vector
    :type xyz: tuple, list, or numpy nd.array
    :rtype: numpy.ndarray
    """
    length = numpy.linalg.norm(xyz)
    scaled = numpy.divide(xyz, length)
    # Sanity check: the result must have norm 1 (fails for a zero vector).
    assert numpy.allclose(numpy.linalg.norm(scaled), 1.0)
    return scaled
def unit_direction(xyz1, xyz2):
    """ Calculate the unit vector pointing from `xyz1` toward `xyz2`.

    :param xyz1: 3D vector
    :type xyz1: tuple, list, or numpy nd.array
    :param xyz2: 3D vector
    :type xyz2: tuple, list, or numpy nd.array
    :rtype: numpy.ndarray
    """
    disp = numpy.subtract(xyz2, xyz1)
    # Normalize in place (inlined unit_norm, same check included).
    udisp = numpy.divide(disp, numpy.linalg.norm(disp))
    assert numpy.allclose(numpy.linalg.norm(udisp), 1.0)
    return udisp
def are_parallel(xyz1, xyz2, orig_xyz=(0., 0., 0.), tol=1e-7):
    """ Assess if two vectors are parallel to each other.

    The vectors are measured relative to `orig_xyz`; they are parallel
    when their cross product (nearly) vanishes.

    :param xyz1: 3D vector
    :type xyz1: tuple, list, or numpy nd.array
    :param xyz2: 3D vector
    :type xyz2: tuple, list, or numpy nd.array
    :param orig_xyz: origin of coordinate system `xyz1` and `xyz2` are in
    :type orig_xyz: tuple, list, or numpy nd.array
    :param tol: tolerance on the cross-product norm
    :type tol: float
    :rtype: bool
    """
    # Bug fix: the old implementation returned `det > tol`, which answered
    # "NOT parallel", and with the default origin the determinant row of
    # zeros made det == 0 unconditionally. Use the cross product instead.
    vec1 = numpy.subtract(xyz1, orig_xyz)
    vec2 = numpy.subtract(xyz2, orig_xyz)
    cross_norm = numpy.linalg.norm(numpy.cross(vec1, vec2))
    return bool(cross_norm < tol)
def orthogonalize(xyz1, xyz2, normalize=False):
    """ Project out of `xyz2` its component along `xyz1` (one Gram-Schmidt step).
    """
    # Projection coefficient of xyz2 onto xyz1.
    proj = numpy.dot(xyz1, xyz2) / numpy.dot(xyz1, xyz1)
    ortho = numpy.subtract(xyz2, numpy.multiply(proj, xyz1))
    if normalize:
        ortho = unit_norm(ortho)
    return ortho
def arbitrary_unit_perpendicular(xyz, orig_xyz=(0., 0., 0.)):
    """ Find a unit vector perpendicular to `xyz`; direction otherwise arbitrary.
    """
    # Try each Cartesian axis until one is not (anti)parallel to `xyz`;
    # unit_perpendicular returns a zero vector for the parallel case.
    for axis in ([1., 0., 0.], [0., 1., 0.], [0., 0., 1]):
        uxyz = unit_perpendicular(xyz, axis, orig_xyz=orig_xyz)
        if numpy.linalg.norm(uxyz) > 1e-7:
            break
    return uxyz
def unit_perpendicular(xyz1, xyz2, orig_xyz=(0., 0., 0.), allow_parallel=True):
    """ Calculate a unit vector perpendicular to both `xyz1` and `xyz2`.

    :param xyz1: 3D vector
    :type xyz1: tuple, list, or numpy nd.array
    :param xyz2: 3D vector
    :type xyz2: tuple, list, or numpy nd.array
    :param orig_xyz: origin of coordinate system `xyz1` and `xyz2` are in
    :type orig_xyz: tuple, list, or numpy nd.array
    :param allow_parallel: return zeros (instead of raising) for parallel input
    :type allow_parallel: bool
    :rtype: numpy.ndarray
    """
    vec1 = numpy.subtract(xyz1, orig_xyz)
    vec2 = numpy.subtract(xyz2, orig_xyz)
    cross = numpy.cross(vec1, vec2)
    if numpy.linalg.norm(cross) > 1e-7:
        return unit_norm(cross)
    if allow_parallel:
        # Parallel vectors have no unique perpendicular; signal with zeros.
        return numpy.zeros((3,))
    raise ValueError
def unit_bisector(xyz1, xyz2, orig_xyz):
    """ Calculate the unit vector bisecting the angle xyz1--orig_xyz--xyz2.

    :param xyz1: 3D vector
    :type xyz1: tuple, list, or numpy nd.array
    :param xyz2: 3D vector
    :type xyz2: tuple, list, or numpy nd.array
    :param orig_xyz: origin of coordinate system `xyz1` and `xyz2` are in
    :type orig_xyz: tuple, list, or numpy nd.array
    :rtype: numpy.ndarray
    """
    # Rotate xyz1 halfway toward xyz2 about their mutual perpendicular.
    half_angle = central_angle(xyz1, orig_xyz, xyz2) / 2.0
    rotate = rotater(
        axis=unit_perpendicular(xyz1, xyz2, orig_xyz), angle=half_angle,
        orig_xyz=orig_xyz)
    return unit_norm(numpy.subtract(rotate(xyz1), orig_xyz))
def from_internals(dist=0., xyz1=(0., 0., 0.), ang=0., xyz2=(0., 0., 1.),
                   dih=0., xyz3=(0., 1., 0.)):
    """ Determine the position of a point (xyz4) in Cartesian coordinates whose
    position is related to three other points (xyz1, xyz2, xyz3) via a set
    of internal coordinates.

    :param dist: distance between `xyz1` and the new point (in bohr)
    :type dist: float
    :param xyz1: 3D vector to point 1
    :type xyz1: tuple, list, or numpy nd.array
    :param ang: angle between the new point, `xyz1`, `xyz2` (in radians)
    :type ang: float
    :param xyz2: 3D vector to point 2
    :type xyz2: tuple, list, or numpy nd.array
    :param dih: dihedral from `xyz1`, `xyz2`, `xyz3` to the new point (radians)
    :type dih: float
    :param xyz3: 3D vector to point 3
    :type xyz3: tuple, list, or numpy nd.array
    :rtype: tuple(float)
    """
    # Express the new point in the local frame, then map back to the lab
    # frame anchored at xyz1.
    rel_xyz = _local_position(dist=dist, ang=ang, dih=dih)
    axes = _local_axes(xyz1=xyz1, xyz2=xyz2, xyz3=xyz3)
    return tuple(xyz1 + numpy.dot(rel_xyz, axes))
def _local_position(dist=0., ang=0., dih=0.):
""" Determine the xyz coordinates of a point in the local axis frame
defined by a set of internal coordinates.
:param dist: distance between `xyz1` and `xyz2` (in bohr)
:type dist: float
:param angle: angle between `xyz1`, `xyz2`, `xyz3` (in radians)
:type angle: float
:param dih: dihedral from `xyz1`, `xyz2`, `xyz3` to `xyz4` (in radians)
:type dih: float
:rtyp: tuple(float)
"""
x_comp = dist * numpy.sin(ang) * numpy.sin(dih)
y_comp = dist * numpy.sin(ang) * numpy.cos(dih)
z_comp = dist * numpy.cos(ang)
return (x_comp, y_comp, z_comp)
def _local_axes(xyz1=(0., 0., 0.), xyz2=(0., 0., 1.), xyz3=(0., 1., 0.)):
    """ Determine the local axes for defining bond, angle, dihedral from
    the Cartesian coordinates of three support atoms.

    :param xyz1: 3D vector to point 1
    :type xyz1: tuple, list, or numpy nd.array
    :param xyz2: 3D vector to point 2
    :type xyz2: tuple, list, or numpy nd.array
    :param xyz3: 3D vector to point 3
    :type xyz3: tuple, list, or numpy nd.array
    :rtype: tuple(tuple(float))
    """
    # z axis along the 1->2 bond; y and x complete a right-handed frame.
    z_ax = unit_direction(xyz1, xyz2)
    uxyz23 = unit_direction(xyz2, xyz3)
    perp = unit_perpendicular(uxyz23, z_ax)
    y_ax = unit_perpendicular(z_ax, perp)
    x_ax = unit_perpendicular(tuple(y_ax), tuple(z_ax))
    return (tuple(x_ax), tuple(y_ax), tuple(z_ax))
def distance(xyz1, xyz2):
    """ Measure the Euclidean distance between two points.

    :param xyz1: 3D vector to point 1
    :type xyz1: tuple, list, or numpy nd.array
    :param xyz2: 3D vector to point 2
    :type xyz2: tuple, list, or numpy nd.array
    :rtype: float
    """
    return numpy.linalg.norm(numpy.subtract(xyz1, xyz2))
def central_angle(xyz1, xyz2, xyz3):
    """ Measure the angle inscribed by three atoms (vertex at `xyz2`).

    :param xyz1: 3D vector to point 1
    :type xyz1: tuple, list, or numpy nd.array
    :param xyz2: 3D vector to point 2 (the vertex)
    :type xyz2: tuple, list, or numpy nd.array
    :param xyz3: 3D vector to point 3
    :type xyz3: tuple, list, or numpy nd.array
    :rtype: float
    """
    cos_ang = numpy.dot(unit_direction(xyz2, xyz1), unit_direction(xyz2, xyz3))
    # Clamp cosine values marginally outside [-1, 1] caused by round-off.
    if cos_ang < -1.0:
        assert numpy.allclose(cos_ang, -1.0)
        cos_ang = -1.0
    elif cos_ang > 1.0:
        assert numpy.allclose(cos_ang, 1.0)
        cos_ang = 1.0
    return numpy.arccos(cos_ang)
def projected_central_angle(xyz1, xyz2, xyz3):
    """ Measure the angle inscribed by three atoms,
    projected onto the normal plane of the central atom.

    :param xyz1: 3D vector to point 1
    :type xyz1: tuple, list, or numpy nd.array
    :param xyz2: 3D vector to point 2 (the central atom)
    :type xyz2: tuple, list, or numpy nd.array
    :param xyz3: 3D vector to point 3
    :type xyz3: tuple, list, or numpy nd.array
    :rtype: float
    """
    # Angle between the unit perpendiculars of (xyz2, xyz1) and (xyz2, xyz3).
    cos_ang = numpy.dot(unit_perpendicular(xyz2, xyz1),
                        unit_perpendicular(xyz2, xyz3))
    # Clamp cosine values marginally outside [-1, 1] caused by round-off.
    if cos_ang < -1.0:
        assert numpy.allclose(cos_ang, -1.0)
        cos_ang = -1.0
    elif cos_ang > 1.0:
        assert numpy.allclose(cos_ang, 1.0)
        cos_ang = 1.0
    return numpy.arccos(cos_ang)
def dihedral_angle(xyz1, xyz2, xyz3, xyz4):
    """ Measure the dihedral angle defined by four atoms.

    The result is normalized to the range [0, 2*pi).

    :param xyz1: 3D vector to point 1
    :type xyz1: tuple, list, or numpy nd.array
    :param xyz2: 3D vector to point 2
    :type xyz2: tuple, list, or numpy nd.array
    :param xyz3: 3D vector to point 3
    :type xyz3: tuple, list, or numpy nd.array
    :param xyz4: 3D vector to point 4
    :type xyz4: tuple, list, or numpy nd.array
    :rtype: float
    """
    # Get the cosine of the angle between the 1-2-3 and 2-3-4 plane normals
    uxyz21 = unit_direction(xyz2, xyz1)
    uxyz23 = unit_direction(xyz2, xyz3)
    uxyz32 = unit_direction(xyz3, xyz2)
    uxyz34 = unit_direction(xyz3, xyz4)
    uxyz123_perp = unit_perpendicular(uxyz21, uxyz23)
    uxyz234_perp = unit_perpendicular(uxyz32, uxyz34)
    cos = numpy.dot(uxyz123_perp, uxyz234_perp)
    # Get the sign of the angle from the projection of the 3->4 direction
    # onto the 1-2-3 plane normal (clamped before the sign test)
    val = numpy.dot(uxyz123_perp, uxyz34)
    val = max(min(val, 1.), -1.)
    sign = 2 * (val < 0) - 1
    # Before plugging it into the arccos function, make sure we haven't
    # slightly run out of bounds due to floating-point round-off
    if cos < -1.0:
        assert numpy.allclose(cos, -1.0)
        cos = -1.0
    elif cos > 1.0:
        assert numpy.allclose(cos, 1.0)
        cos = 1.0
    dih = sign * numpy.arccos(cos)
    dih = numpy.mod(dih, 2*numpy.pi)
    return dih
# transformations
def rotater(axis, angle, orig_xyz=None):
    """ A function to rotate vectors about an axis at a particular point.

    :param axis: axis to rotate about
    :type axis: list, tuple, or nd.array
    :param angle: angle by which to rotate the vectors (in radians)
    :type angle: float
    :param orig_xyz: point the rotation axis passes through (origin if None)
    :type: list, tuple, or nd.array
    :rtype: function mapping a 3-vector to a tuple of 3 floats
    """
    # Build the 4x4 augmented (homogeneous) rotation matrix once; the
    # returned closure applies it to individual points.
    aug_rot_mat = tf.rotation_matrix(angle, axis, point=orig_xyz)
    return _transformer(aug_rot_mat)
def aligner(xyz1a, xyz1b, xyz2a, xyz2b):
    """ a function to translate and rotate a system, bringing two points into
    alignment

    Takes 1a-1b into 2a-2b if they are equidistant; otherwise, takes 1a into 2a
    and takes 1b onto the 2a-2b line

    :param xyz1a: point mapped exactly onto `xyz2a`
    :param xyz1b: point rotated onto the `xyz2a`-`xyz2b` line
    :param xyz2a: target position for `xyz1a`
    :param xyz2b: point defining the target direction from `xyz2a`
    :rtype: function mapping a 3-vector to a tuple of 3 floats
    """
    xyz1a, xyz1b, xyz2a, xyz2b = map(numpy.array, (xyz1a, xyz1b, xyz2a, xyz2b))
    # Translation taking xyz1a onto xyz2a
    trans = xyz2a - xyz1a
    trans_mat = tf.translation_matrix(trans)
    # Rotate the translated xyz1b about xyz2a onto the xyz2a-xyz2b line
    xyz1b = xyz1b + trans
    rot_vec = unit_perpendicular(xyz1b, xyz2b, orig_xyz=xyz2a)
    rot_ang = central_angle(xyz1b, xyz2a, xyz2b)
    rot_mat = tf.rotation_matrix(rot_ang, rot_vec, point=xyz2a)
    # Compose: translate first, then rotate
    mat = numpy.dot(rot_mat, trans_mat)
    return _transformer(mat)
def _transformer(aug_mat):
    """ Wrap a 4x4 augmented transformation matrix as a point-mapping function.
    """
    def _transform(xyz):
        # Homogeneous multiply, then drop the augmentation component.
        return tuple(numpy.dot(aug_mat, _augmented(xyz))[:3])
    return _transform
def _augmented(xyz):
assert len(xyz) == 3
assert all(isinstance(val, numbers.Real) for val in xyz)
xyz_aug = tuple(xyz) + (1.,)
return xyz_aug
# I/O
def string(vec, num_per_row=None, val_format='{0:>8.3f}'):
    """ Write a vector to a string.

    :param vec: vector to form string with
    :type vec: list, tuple, or nd.array
    :param num_per_row: number of vector elements to write to a row
        (defaults to all elements on one row)
    :type num_per_row: int
    :param val_format: format spec applied to each element
    :type val_format: str
    :rtype: str
    """
    if num_per_row is None:
        num_per_row = len(vec)
    assert isinstance(num_per_row, int), 'num_per_row must be an integer'
    total = len(vec)
    pieces = []
    for idx, val in enumerate(vec):
        pieces.append(val_format.format(val))
        # Break the row, except after the very last element.
        if (idx + 1) % num_per_row == 0 and (idx + 1) != total:
            pieces.append('\n')
    return ''.join(pieces)
|
import pymongo
import requests
import json
'''
This module polls Mate3 solar controller devices
'''
def targetVoltage(
    batt: float
) -> tuple[float, float]:
    '''
    Calculates the target and low voltages for a battery bank, inferring
    the nominal system size (12/24/48 V) from the measured voltage.
    Accounts for night mode: the target never drops below the present
    battery voltage.
    '''
    # Fixes: docstring typo ("Caclulates") and an `elif batt >= 30` final
    # branch that left `target`/`low` unbound for pathological (NaN) input.
    if batt <= 18:          # 12 V bank
        target = 12.65
        low = 11.89
    elif batt <= 30:        # 24 V bank
        target = 25.3
        low = 23.78
    else:                   # 48 V bank
        target = 50.6
        low = 47.56
    # Night mode / surface charge: battery may float above the resting target.
    if batt > target:
        target = batt
    return (target, low)
def calcSOC(
    batt_volts: float,
    target: float,
    low: float
) -> float:
    '''
    Calculates the state of charge as a whole-number percentage,
    interpolating the battery voltage between `low` and `target`.
    '''
    volts = float(batt_volts)
    span = target - low
    return int(((volts - low) / span) * 100)
def percentage(
    total: float,
    value: float
) -> float:
    '''
    Calculates `value` as a whole-number percentage of `total`, capped at 100.
    '''
    total = float(total)
    value = float(value)
    pct = 100 if value > total else (value / total) * 100
    return int(pct)
def getColor(
    soc: float
) -> str:
    ''' Maps a state-of-charge percentage onto a bootstrap-style color name.
    '''
    try:
        if soc > 90:
            return "success"
        if soc > 50:
            return "warning"
        if soc <= 50:
            return "danger"
        # Unreachable for ordinary numbers (NaN falls through to here).
        return "info"
    except ZeroDivisionError:
        # Defensive handler kept from the original implementation.
        return "info"
def getData(
    ip: str
) -> dict:
    '''
    Polls the controller over HTTP and returns a dictionary of values.
    data['online'] records whether the device was reachable/parseable.
    '''
    data = {}
    try:
        # Battery status
        batt = json.loads(requests.get(f"http://{ip}/Dev_batt.cgi", timeout=2).text)
        data['sys_battery'] = batt['sys_battery']
        # Device/port status
        status = json.loads(
            requests.get(f"http://{ip}/Dev_status.cgi?&Port=0", timeout=2).text)
        data['devstatus'] = status['devstatus']
        data['online'] = True
    except (requests.exceptions.ConnectTimeout,
            requests.exceptions.ConnectionError,
            json.decoder.JSONDecodeError):
        # Any connectivity or parse failure marks the controller offline.
        data['online'] = False
    return data
def formatData(
    controller: dict,
    data: dict
) -> list:
    '''
    Combines and formats the data prior to updating the database.

    Returns one record per charge-controller ("CC") port when the device is
    online, or a single OFFLINE placeholder record otherwise.
    '''
    if not data['online']:
        return [_offline_record(controller)]
    strings = []
    for port in data['devstatus']['ports']:
        # Only charge-controller ports carry per-string solar data.
        if port['Dev'] == "CC":
            strings.append(_string_record(controller, port))
    return strings


def _string_record(controller: dict, port: dict) -> dict:
    '''Builds the live record for a single online charge-controller port.'''
    target, low = targetVoltage(port['Batt_V'])
    soc = calcSOC(port['Batt_V'], target, low)
    solar_percentage = percentage(100, port['In_V'])
    return {
        'ip': controller['ip'],
        'name': f"{controller['name']} - String {port['Port']}",
        'location': controller['location'],
        'controller_oid': controller["_id"],
        'string': port['Port'],
        'live': {
            'online': True,
            'charge_state': port['CC_mode'],
            'batt': {
                'volts': port['Batt_V'],
                'current': port['Out_I'],
                'target': target,
                'min': low,
                'soc': soc,
                'color': getColor(soc)
            },
            'solar': {
                'volts': round(port['In_V']),
                'current': port['In_I'],
                'max': 100,
                'percentage': solar_percentage,
                'color': getColor(solar_percentage)
            }
        }
    }


def _offline_record(controller: dict) -> dict:
    '''Builds the placeholder record for an unreachable controller.'''
    return {
        'ip': controller['ip'],
        'name': f"{controller['name']}",
        'location': controller['location'],
        'controller_oid': controller["_id"],
        'string': "-",
        'live': {
            'online': False,
            'charge_state': "OFFLINE",
            'batt': {
                'volts': "", 'current': "", 'target': "",
                'min': "", 'soc': "", 'color': ""
            },
            'solar': {
                'volts': "", 'current': "", 'max': "",
                'percentage': "", 'color': ""
            }
        }
    }
def updateDB(
    strings: list,
    db: pymongo.MongoClient
) -> None:
    ''' Inserts or refreshes string records in the solar_data collection.

    Online strings are upserted in place; for an offline controller every
    existing record is dropped and replaced with the single placeholder.
    '''
    collection = db['solar_data']
    for record in strings:
        if record['live']['online']:
            collection.find_one_and_update(
                {
                    'controller_oid': record['controller_oid'],
                    'string': record['string'],
                },
                {'$set': record},
                upsert=True,
            )
        else:
            collection.delete_many({'controller_oid': record['controller_oid']})
            collection.insert_one(record)
def parse(
    controller: dict,
    db: pymongo.MongoClient
) -> None:
    '''
    Main entry point: poll one controller and persist its readings.
    '''
    raw = getData(controller['ip'])
    records = formatData(controller, raw)
    updateDB(records, db)
if __name__ == '__main__':
    # Ad-hoc smoke test: needs a reachable controller and a local MongoDB.
    # Bug fix: parse() requires a db handle (the old call omitted it and
    # always raised TypeError), and formatData() reads name/location/_id
    # from the controller dict.
    parse(
        {'ip': '172.19.246.194', 'name': 'test', 'location': 'test', '_id': None},
        pymongo.MongoClient(),
    )
# pyOCD debugger
# Copyright (c) 2020 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from time import sleep
from ...flash.flash import Flash
from ...core.coresight_target import CoreSightTarget
from ...coresight import (ap, dap)
from ...core.memory_map import (RomRegion, FlashRegion, RamRegion, MemoryMap)
from ...core.target import Target
from ...coresight.cortex_m import CortexM
from ...debug.svd.loader import SVDFile
from ...core import exceptions
from ...utility.timeout import Timeout
LOG = logging.getLogger(__name__)
flash_algo = {
'load_address': 0x00100000,
# Flash algorithm as a hex string
'instructions': [
0xE00ABE00, 0x062D780D, 0x24084068, 0xD3000040, 0x1E644058, 0x1C49D1FA, 0x2A001E52, 0x4770D1F2,
0x280a49fa, 0x698ad10a, 0x7f80f012, 0x698ad1fb, 0x0f7ff412, 0x220dd1fb, 0x2020f881, 0xf012698a,
0xd1fb7f80, 0xf412698a, 0xd1fb0f7f, 0x0020f881, 0xb1284770, 0x70116802, 0x1c496801, 0x47706001,
0x48eab2ca, 0xd10a2a0a, 0xf0116981, 0xd1fb7f80, 0xf4116981, 0xd1fb0f7f, 0xf880210d, 0x69811020,
0x7f80f011, 0x6981d1fb, 0x0f7ff411, 0xf880d1fb, 0x47702020, 0x41f0e92d, 0x460e1e14, 0xf04f4680,
0xf04f0500, 0xdd100720, 0x21007832, 0xb1224630, 0x2f01f810, 0x2a001c49, 0x42a1d1fa, 0x2400bfac,
0xf0131a64, 0xbf180f02, 0xf0132730, 0xd1090f01, 0xdd072c00, 0x46404639, 0xffbbf7ff, 0x1c6d1e64,
0xdcf72c00, 0xb1407830, 0x4640b2c1, 0xffb1f7ff, 0x0f01f816, 0x28001c6d, 0x2c00d1f6, 0x4639dd07,
0xf7ff4640, 0x1e64ffa6, 0x2c001c6d, 0x4628dcf7, 0x81f0e8bd, 0x45f0e92d, 0x469cb083, 0xe9dd4680,
0x2000730b, 0x46059e0a, 0xb1294682, 0x0f00f1bc, 0x2a0ad016, 0xe013d00e, 0xf88d2030, 0xf88d0000,
0x463ba001, 0x46694632, 0xf7ff4640, 0xb003ffa3, 0x85f0e8bd, 0x0c00f1b1, 0x2001bfbc, 0x0100f1cc,
0x040bf10d, 0xa00bf88d, 0xfbb1b189, 0xfb02fcf2, 0xf1bc1c1c, 0xbfa40f0a, 0xf1ac449c, 0xf10c0c3a,
0xfbb10c30, 0xf804f1f2, 0x2900cd01, 0xb178d1ed, 0xbf182e00, 0x0f02f017, 0xf04fd007, 0x4640012d,
0xff57f7ff, 0x1e761c6d, 0x202de002, 0x0d01f804, 0x4632463b, 0x46404621, 0xff6cf7ff, 0x4428b003,
0x85f0e8bd, 0xe92db40f, 0xb0844df0, 0x9c0c2700, 0xad0d463e, 0x28007820, 0xf04fd075, 0xf04f0b41,
0x46ba0861, 0x2825b2c0, 0x2200d17b, 0x0f01f814, 0x28004613, 0x2825d07f, 0x282dd073, 0x2301bf04,
0x78201c64, 0xd1052830, 0x0f01f814, 0x0302f043, 0xd0f92830, 0x3830b2c0, 0xd80a2809, 0x0082eb02,
0xf8140040, 0x38301b01, 0x7820180a, 0x28093830, 0x7820d9f4, 0xd00a2873, 0xd0112864, 0xd01c2878,
0xd0272858, 0xd0322875, 0xd03f2863, 0xf855e04e, 0x29001b04, 0xa16ebf08, 0xf7ff4638, 0xe024ff1b,
0x2300e9cd, 0x8008f8cd, 0x1b04f855, 0x220a2301, 0xf7ff4638, 0x4406ff4f, 0xe9cde038, 0xf8cd2300,
0xf8558008, 0x23001b04, 0x46382210, 0xff42f7ff, 0xe02b4406, 0x2300e9cd, 0xb008f8cd, 0x1b04f855,
0x22102300, 0xf7ff4638, 0x4406ff35, 0xe9cde01e, 0xf8cd2300, 0xf8558008, 0x23001b04, 0x4638220a,
0xff28f7ff, 0xe01be7f1, 0xe014e00b, 0x0b04f815, 0x000cf88d, 0xa00df88d, 0x4638a903, 0xfedaf7ff,
0xb2c1e7e3, 0xf7ff4638, 0x1c76feb4, 0x0f01f814, 0xf47f2800, 0x2f00af77, 0x6838bf1c, 0xa000f880,
0xb0044630, 0x0df0e8bd, 0xfb14f85d, 0xea236803, 0x43110101, 0x47706001, 0x42812100, 0x1c49bfb8,
0x4770dbfb, 0xf44f493c, 0xf84140c6, 0x60480f70, 0x60c86088, 0x61486108, 0x21014838, 0x21006741,
0x49376781, 0x21646041, 0x1c402000, 0xdbfc4288, 0xf04f4770, 0x48334202, 0xf8c24934, 0x48320100,
0x49336008, 0x60081200, 0x12001d09, 0x49316008, 0x1d096008, 0x3001f04f, 0x210a6008, 0x1c402000,
0xdbfc4288, 0x2136482c, 0x21106241, 0x21706281, 0xf24062c1, 0x63013101, 0x60012155, 0x210a4827,
0x0100f8c2, 0x1c402000, 0xdbfc4288, 0xf3bf4770, 0xf3bf8f6f, 0x49228f4f, 0x60082000, 0x8f6ff3bf,
0x8f4ff3bf, 0x481f4920, 0x47706008, 0x8f6ff3bf, 0x8f4ff3bf, 0x21e0f04f, 0x61082000, 0x8f6ff3bf,
0x8f4ff3bf, 0xf3bf4770, 0xf3bf8f6f, 0x20008f4f, 0xf1004601, 0x1d0022e0, 0x1100f8c2, 0xdbf82820,
0x8f6ff3bf, 0x8f4ff3bf, 0x00004770, 0x83015000, 0x6c756e28, 0x0000296c, 0x82021000, 0x85020000,
0x8660061a, 0xc1900d01, 0x01000001, 0x82000a04, 0x83000a00, 0x85000a00, 0x85024000, 0xc1900d11,
0xe000ef50, 0x00040200, 0xe000ed14, 0x20e0f04f, 0xf8402100, 0x4ab31f10, 0x7100f04f, 0x17516011,
0x1170f8c0, 0x1174f8c0, 0x1270f8c0, 0x1274f8c0, 0x4bad4770, 0xf0406818, 0x60180002, 0x680849ab,
0x0001f040, 0x49aa6008, 0xf0406808, 0x60080001, 0xf8d149a8, 0xb1780118, 0xf8c12200, 0x200f2100,
0x0104f8c1, 0x0108f8d1, 0xd1fb280f, 0x2104f8c1, 0x0108f8d1, 0xd1fb2800, 0x20002264, 0x42901c40,
0xf8d1dbfc, 0xf0200110, 0xf8c10003, 0xf8d10110, 0xf0200118, 0xf8c10003, 0xf8d10118, 0xf0200114,
0xf8c10003, 0x68180114, 0x0002f020, 0x47706018, 0x21004891, 0x68016201, 0x0104f021, 0x47706001,
0x45f0e92d, 0xf04f488c, 0xf8c00c00, 0x6801c020, 0x0104f021, 0xf7ff6001, 0xf3bfff1c, 0xf3bf8f6f,
0x48868f4f, 0xc000f8c0, 0x8f6ff3bf, 0x8f4ff3bf, 0x48834984, 0xf3bf6008, 0xf3bf8f6f, 0xf04f8f4f,
0xf3bf20e0, 0xf3bf8f6f, 0xf8c08f4f, 0x4a75c010, 0x7100f04f, 0x17516011, 0x1180f8c0, 0x1184f8c0,
0x1280f8c0, 0x1284f8c0, 0x8f6ff3bf, 0x8f4ff3bf, 0xf1002000, 0x1d0021e0, 0xc100f8c1, 0xdbf82820,
0x8f6ff3bf, 0x8f4ff3bf, 0xff73f7ff, 0x81bcf8df, 0x486d2201, 0xf8c84b6e, 0xf2472038, 0xf8c03101,
0xf8c3c0c4, 0xf8c31130, 0xe9c01134, 0x26032c02, 0xe9c02406, 0xe9c04c04, 0xf243c206, 0xf04f3785,
0xe9c04502, 0xf8d56700, 0xf0155100, 0xf04f4f00, 0xbf190504, 0x6aa1f44f, 0xa20ae9c0, 0x0aa8f04f,
0xa50ae9c0, 0x0a07f04f, 0xc038f8c0, 0x20c4f8c0, 0x2038f8c8, 0xc0c4f8c0, 0x1130f8c3, 0x1134f8c3,
0x60476006, 0x2121f240, 0xf8c06081, 0x6104c00c, 0xc014f8c0, 0xc018f8c0, 0x216a61c2, 0x62c46281,
0x63456305, 0xc03cf8c0, 0xa008f8c0, 0x20002164, 0x42881c40, 0xf44fdbfc, 0x671840c6, 0x67986758,
0xf8c367d8, 0xf8c30080, 0x48420084, 0x60414942, 0x2c1de9c0, 0x20002164, 0x42881c40, 0x2000dbfc,
0x85f0e8bd, 0xb510493b, 0x6048483c, 0xf7ffa03c, 0x2000fda9, 0xb510bd10, 0xf7ffa03b, 0x4b3dfda3,
0xf44f4934, 0x20001480, 0x0cfff04f, 0x20dcf891, 0x0f01f012, 0xeb03d1fa, 0xf1a23200, 0x610a4280,
0xc05ef881, 0x20dcf891, 0x0f01f012, 0x1c40d1fa, 0x3f14ebb0, 0xa030d3ea, 0xfd84f7ff, 0xbd102000,
0x4c24b570, 0x68624605, 0xa02d4601, 0xfd7af7ff, 0x20dcf894, 0x0f01f012, 0xf1a5d1fa, 0x61204080,
0xf88420ff, 0xf894005e, 0xf01000dc, 0xd1fa0001, 0xb570bd70, 0x44a0f100, 0x46154816, 0x6843460e,
0x4621460a, 0xf7ffa028, 0x1cf0fd5d, 0x0003f030, 0xf855d005, 0xf8441b04, 0x1f001b04, 0x2000d1f9,
0x0000bd70, 0xe000ed04, 0x82020460, 0x82020500, 0x82020520, 0x82020000, 0x83011000, 0xe000ef50,
0x00040200, 0xe000ed14, 0x83015000, 0x85041000, 0x82021000, 0x85020000, 0x8660061a, 0x0660860a,
0x6e696e55, 0x000a7469, 0x53415245, 0x48432045, 0x000a5049, 0x406f4000, 0x454e4f44, 0x0000000a,
0x20337773, 0x53415245, 0x45532045, 0x524f5443, 0x7830202c, 0x202c7825, 0x73616c66, 0x78305b68,
0x0a5d7825, 0x00000000, 0x676f7250, 0x206d6172, 0x3d726461, 0x78257830, 0x7a73202c, 0x2578303d,
0x66202c78, 0x6873616c, 0x2578305b, 0x000a5d78, 0x00000000
],
# Relative function addresses
'pc_init': 0x00100501,
'pc_unInit': 0x00100665,
'pc_program_page': 0x001006f3,
'pc_erase_sector': 0x001006c1,
'pc_eraseAll': 0x00100677,
'static_base': 0x001007d0,
'begin_stack': 0x00100a00,
'begin_data': 0x00100000 + 0x1000,
'page_size': 0x400,
'analyzer_supported': False,
'analyzer_address': 0x00000000,
# 'page_buffers': [0x00101000, 0x00101400], # Enable double buffering
'min_program_length': 0x400
}
class Flash_s5js100(Flash):
    """Flash driver for the S5JS100; re-inits the algo when the operation changes."""
    def __init__(self, target, flash_algo):
        super(Flash_s5js100, self).__init__(target, flash_algo)
        self._did_prepare_target = False
        # LOG.info("S5JS100.Flash_s5js100.__init__ c")
    def init(self, operation, address=None, clock=0, reset=True):
        # LOG.info("S5JS100.Flash_s5js100.init c")
        # Switching operations (e.g. erase -> program) requires tearing down
        # the previous operation before the base class re-initializes.
        if self._active_operation != operation and self._active_operation is not None:
            self.uninit()
        super(Flash_s5js100, self).init(operation, address, clock, reset)
    def uninit(self):
        # LOG.info("S5JS100.Flash_s5js100.uninit c")
        # No-op when no operation is active.
        if self._active_operation is None:
            return
        super(Flash_s5js100, self).uninit()
# Timing weights (seconds) used by pyOCD to estimate flash-operation progress.
ERASE_ALL_WEIGHT = 140 # Time it takes to perform a chip erase
ERASE_SECTOR_WEIGHT = 1 # Time it takes to erase a page
# Time it takes to program a page (Not including data transfer time)
PROGRAM_PAGE_WEIGHT = 1
class S5JS100(CoreSightTarget):
    """Samsung S5JS100 SoC target: a single Cortex-M core on AP #0."""
    VENDOR = "Samsung"
    AP_NUM = 0
    # Fixed ROM table address; forced in _fixup_ap_base_addrs because the AP
    # does not advertise it during discovery.
    ROM_ADDR = 0xE00FE000
    # Boot flash is memory-mapped at 0x406f4000; the flash loader and RAM
    # live at 0x00100000.
    memoryMap = MemoryMap(
        FlashRegion(start=0x406f4000, length=0x00100000,
                    page_size=0x400, blocksize=0x1000,
                    is_boot_memory=True,
                    erased_byte_value=0xFF,
                    algo=flash_algo,
                    erase_all_weight=ERASE_ALL_WEIGHT,
                    erase_sector_weight=ERASE_SECTOR_WEIGHT,
                    program_page_weight=PROGRAM_PAGE_WEIGHT,
                    flash_class=Flash_s5js100),
        RamRegion(start=0x00100000, length=0x80000)
    )
    def __init__(self, link):
        super(S5JS100, self).__init__(link, self.memoryMap)
        self.AP_NUM = 0
    def create_init_sequence(self):
        # Customize discovery: skip the AP scan, build our own core object,
        # and force the ROM table address before component discovery runs.
        seq = super(S5JS100, self).create_init_sequence()
        seq.wrap_task(
            'discovery', lambda seq: seq.replace_task(
                'find_aps', self.find_aps).replace_task(
                'create_cores', self.create_s5js100_core).insert_before(
                'find_components', ('fixup_ap_base_addrs', self._fixup_ap_base_addrs), ))
        return seq
    def _fixup_ap_base_addrs(self):
        # Point the AP at the known fixed ROM table address.
        self.dp.aps[self.AP_NUM].rom_addr = self.ROM_ADDR
    def find_aps(self):
        # Skip probing entirely: the device has exactly one usable AP.
        if self.dp.valid_aps is not None:
            return
        self.dp.valid_aps = (self.AP_NUM,)
    def create_s5js100_core(self):
        core = CortexM_S5JS100(
            self.session, self.aps[self.AP_NUM], self.memory_map, 0)
        # Default to software reset (see CortexM_S5JS100.reset).
        core.default_reset_type = self.ResetType.SW
        self.aps[self.AP_NUM].core = core
        core.init()
        self.add_core(core)
class CortexM_S5JS100(CortexM):
    """Cortex-M core on the S5JS100 using a watchdog-driven software reset."""
    def reset(self, reset_type=None):
        # A watchdog (software) reset is always performed regardless of
        # `reset_type`; the requested type is only recorded.
        self.session.notify(Target.Event.PRE_RESET, self)
        # LOG.info("s5js100 reset HW")
        self.S5JS100_reset_type = reset_type
        self.write_memory(0x82020018, 0x1 << 1)
        self.write_memory(0x83011000, 0x4 << 0)  # enable watchdog
        self.write_memory(0x8301100c, 0x1 << 0)
        self.write_memory(0x83011010, 0x1 << 0)
        self.write_memory(0x83011020, 0x1 << 0)
        self.write_memory(0x83011800, 0x1 << 0)  # clock gating disable
        # set 1s to be reset , 1 sec=32768
        self.write_memory(0x83011004, 32768 << 0)
        # force to load value to be reset
        self.write_memory(0x83011008, 0xFF << 0)
        # Park the core on a branch-to-self (0xe7fe) at 0x120000, with the
        # Thumb bit forced, while waiting for the watchdog to fire.
        xpsr = self.read_core_register('xpsr')
        if xpsr & self.XPSR_THUMB == 0:
            self.write_core_register('xpsr', xpsr | self.XPSR_THUMB)
        self.write_memory(0x120000, 0xe7fe)
        self.write_core_register('pc', 0x120000)
        self.flush()
        self.resume()
        # Poll DHCSR for the sticky S_RESET_ST bit to confirm the reset.
        with Timeout(5.0) as t_o:
            while t_o.check():
                try:
                    dhcsr_reg = self.read32(CortexM.DHCSR)
                    LOG.debug("reg = %x", dhcsr_reg)
                    if (dhcsr_reg & CortexM.S_RESET_ST) != 0:
                        break
                    sleep(0.1)
                except exceptions.TransferError:
                    # The debug connection drops across the reset; re-init
                    # the DP and retry.
                    self.flush()
                    self._ap.dp.init()
                    self._ap.dp.power_up_debug()
                    sleep(0.01)
            else:
                raise exceptions.TimeoutError("Timeout waiting for reset")
        self.session.notify(Target.Event.POST_RESET, self)
    def reset_and_halt(self, reset_type=None):
        """Reset, then halt at the reset vector via DEMCR.VC_CORERESET."""
        # LOG.info("reset_and_halt")
        reset_catch_saved_demcr = self.read_memory(CortexM.DEMCR)
        if (reset_catch_saved_demcr & CortexM.DEMCR_VC_CORERESET) == 0:
            self.write_memory(
                CortexM.DEMCR,
                reset_catch_saved_demcr | CortexM.DEMCR_VC_CORERESET)
        self.reset(reset_type)
        sleep(0.1)
        self.halt()
        self.wait_halted()
        # Restore the caller's reset-catch setting.
        self.write_memory(CortexM.DEMCR, reset_catch_saved_demcr)
    def wait_halted(self):
        """Poll until the core halts, tolerating transient transfer errors."""
        with Timeout(5.0) as t_o:
            while t_o.check():
                try:
                    if not self.is_running():
                        break
                except exceptions.TransferError:
                    self.flush()
                sleep(0.01)
            else:
                raise exceptions.TimeoutError("Timeout waiting for target halt")
    def get_state(self):
        """Return the Target.State, re-initializing debug power on a read failure."""
        # LOG.info("s5js100.get_state")
        try:
            dhcsr = self.read_memory(CortexM.DHCSR)
            # LOG.info("s5js100.get_state dhcsr 0x%x", dhcsr)
        except exceptions.TransferError:
            # LOG.info("s5js100.get_state read fail dhcsr..try more")
            self._ap.dp.init()
            self._ap.dp.power_up_debug()
            dhcsr = self.read_memory(CortexM.DHCSR)
            # LOG.info("fail s5js100.get_state dhcsr 0x%x", dhcsr)
        if dhcsr & CortexM.S_RESET_ST:
            # Reset is a special case because the bit is sticky and really means
            # "core was reset since last read of DHCSR". We have to re-read the
            # DHCSR, check if S_RESET_ST is still set and make sure no instructions
            # were executed by checking S_RETIRE_ST.
            newDhcsr = self.read_memory(CortexM.DHCSR)
            if (newDhcsr & CortexM.S_RESET_ST) and not (
                    newDhcsr & CortexM.S_RETIRE_ST):
                return Target.State.RESET
        if dhcsr & CortexM.S_LOCKUP:
            return Target.State.LOCKUP
        elif dhcsr & CortexM.S_SLEEP:
            return Target.State.SLEEPING
        elif dhcsr & CortexM.S_HALT:
            return Target.State.HALTED
        else:
            return Target.State.RUNNING
    def set_breakpoint(self, addr, type=Target.BreakpointType.HW):
        # s5js100 don't support Target.BreakpointType.SW — always force HW.
        return super(
            CortexM_S5JS100,
            self).set_breakpoint(
            addr,
            Target.BreakpointType.HW)
|
from bearlibterminal import terminal
from loguru import logger
from utilities import configUtilities, armourManagement, common, externalfileutilities, input_handlers, itemsHelp, \
jewelleryManagement, spellHelp
from static.data import constants
def display_spell_info_popup(menu_selection, gameworld, player_entity):
    """Show a modal info panel for the selected spell, armour, or jewellery.

    ``menu_selection`` is the menu key the player pressed: spell keys map to
    spellbar slots, armour/jewellery keys map to equipped body locations.
    The popup blocks until the player presses Escape ('quit' action).
    """
    logger.info('Items and spells info panel accessed')
    game_config = configUtilities.load_config()
    # Menu key -> body location of the equipped item.
    armour_map = {"A": "head", "B": "chest", "C": "hands", "D": "legs", "E": "feet"}
    jewellery_map = {"F": "lear", "G": "rear", "H": "lhand", "I": "rhand", "J": "neck"}
    # unicode strings of colours
    unicode_frame_colour = '[font=dungeon][color=SPELLINFO_FRAME_COLOUR]['
    key_colour_string = "[color=DISPLAY_ITEM_EQUIPPED]"
    value_colour_string = "[/color][color=PLAYER_DEBUG]"
    # Entity ids; 0 means "nothing selected" in that category.
    item_entity = 0
    spell_entity = 0
    armour_selection_keys = constants.KEYS_ARMOUR_KEYS
    jewellery_selection_keys = constants.KEYS_JEWELLERY_KEYS
    spell_selection_keys = constants.KEYS_SPELL_KEYS
    # Popup geometry, read from the game config file.
    spell_item_info_start_x = configUtilities.get_config_value_as_integer(configfile=game_config,
                                                                          section='spellInfoPopup',
                                                                          parameter='SP_START_X')
    spell_item_info_start_y = configUtilities.get_config_value_as_integer(configfile=game_config,
                                                                          section='spellInfoPopup',
                                                                          parameter='SP_START_Y')
    spell_item_info_width = configUtilities.get_config_value_as_integer(configfile=game_config,
                                                                        section='spellInfoPopup',
                                                                        parameter='SP_WIDTH')
    spell_item_info_depth = configUtilities.get_config_value_as_integer(configfile=game_config,
                                                                        section='spellInfoPopup',
                                                                        parameter='SP_DEPTH')
    # clear the area under the panel
    terminal.clear_area(spell_item_info_start_x, spell_item_info_start_y, spell_item_info_width,
                        spell_item_info_depth)
    # draw outer frame
    draw_outer_frame(startx=spell_item_info_start_x, starty=spell_item_info_start_y, width=spell_item_info_width,
                     frame_colour=unicode_frame_colour, depth=spell_item_info_depth)
    # display control message
    terminal.printf(x=spell_item_info_start_x + 2, y=(spell_item_info_start_y + spell_item_info_depth) - 2,
                    s='Press Escape to return')
    #
    # DISPLAY SPELL INFORMATION
    #
    # NOTE(review): spells compare str(menu_selection) against the key list
    # while armour/jewellery compare the raw value — confirm menu_selection's
    # type is consistent across the three categories.
    if str(menu_selection) in spell_selection_keys:
        logger.info('Spell selected')
        spell_entity = display_spell_information(gameworld=gameworld, game_config=game_config,
                                                 player_entity=player_entity, menu_selection=menu_selection,
                                                 key_colour=key_colour_string, value_colour=value_colour_string)
    #
    # DISPLAY ARMOUR INFORMATION
    #
    elif menu_selection in armour_selection_keys:
        logger.info('Armour selected')
        item_entity = display_armour_information(gameworld=gameworld, game_config=game_config,
                                                 player_entity=player_entity, bodylocation=armour_map[menu_selection],
                                                 key_colour=key_colour_string, value_colour=value_colour_string,
                                                 frame_colour=unicode_frame_colour)
    #
    # DISPLAY JEWELLERY INFORMATION
    #
    elif menu_selection in jewellery_selection_keys:
        item_entity = display_jewellery_information(gameworld=gameworld, game_config=game_config,
                                                    player_entity=player_entity,
                                                    bodylocation=jewellery_map[menu_selection],
                                                    key_colour=key_colour_string, value_colour=value_colour_string,
                                                    frame_colour=unicode_frame_colour)
    # Only one of the two ids can be non-zero, so the sum is the selection.
    selected_entity = item_entity + spell_entity
    if selected_entity > 0:
        # blit the terminal
        terminal.refresh()
        # Block until the player presses Escape (mapped to the 'quit' action).
        player_not_pressed_a_key = True
        while player_not_pressed_a_key:
            event_to_be_processed, event_action = input_handlers.handle_game_keys()
            if event_to_be_processed == 'keypress':
                logger.info('event action is {}', event_action)
                if event_action == 'quit':
                    player_not_pressed_a_key = False
def draw_fluff_text(x, y, key_colour_string, value_colour_string, width, fluff_text):
    """Render the 'Description...' heading plus the item/spell flavour text."""
    heading = key_colour_string + 'Description...'
    body = value_colour_string + fluff_text.capitalize()
    terminal.printf(x=x, y=y, s=heading)
    terminal.print_(x=x, y=y + 1, width=width, height=5,
                    align=terminal.TK_ALIGN_LEFT, s=body)
def format_cooldown_string(spell_cooldown):
    """Return a human-readable cooldown status for a spell.

    A positive cooldown yields 'Yes (N) turns remaining'; zero or a
    negative value yields 'No'.
    """
    if spell_cooldown <= 0:
        return 'No'
    return 'Yes (' + str(spell_cooldown) + ') turns remaining'
def get_resources_as_string(resource_list):
    """Join resource names into one capitalized, space-separated string.

    Each entry keeps a trailing space (preserved for on-screen layout);
    an empty list yields a single space, matching the original behaviour.

    Args:
        resource_list: list of resource name strings.

    Returns:
        str: e.g. ['life', 'fury'] -> 'Life Fury '.
    """
    # Idiomatic replacement for the original manual counter-indexing loop.
    if not resource_list:
        return ' '
    return ''.join(name.capitalize() + ' ' for name in resource_list)
def get_boons_as_string(boon_list):
    """Join boon names into one capitalized, space-separated string.

    Each entry keeps a trailing space (preserved for on-screen layout);
    an empty list yields 'Nothing', matching the original behaviour.

    Args:
        boon_list: list of boon name strings.

    Returns:
        str: e.g. ['might', 'fury'] -> 'Might Fury '.
    """
    # Idiomatic replacement for the original manual counter-indexing loop.
    if not boon_list:
        return 'Nothing'
    return ''.join(boon.capitalize() + ' ' for boon in boon_list)
def get_condis_as_string(condi_list):
    """Join condition names into one capitalized, space-separated string.

    Each entry keeps a trailing space (preserved for on-screen layout);
    an empty list yields 'Nothing', matching the original behaviour.

    Args:
        condi_list: list of condition name strings.

    Returns:
        str: e.g. ['bleed', 'burn'] -> 'Bleed Burn '.
    """
    # Idiomatic replacement for the original manual counter-indexing loop.
    if not condi_list:
        return 'Nothing'
    return ''.join(condi.capitalize() + ' ' for condi in condi_list)
def draw_horizontal_line_after_portrait(x, y, w, string_colour, horiz_glyph, left_t_glyph, right_t_glyph):
    """Draw a horizontal divider at row *y*, capped with T-junction glyphs."""
    # Fill the row with the horizontal glyph first...
    for column in range(x, x + w):
        terminal.printf(x=column, y=y, s=string_colour + horiz_glyph + ']')
    # ...then overwrite both ends with the junction glyphs.
    terminal.printf(x=x, y=y, s=string_colour + left_t_glyph + ']')
    terminal.printf(x=x + w, y=y, s=string_colour + right_t_glyph + ']')
def draw_outer_frame(startx, width, starty, frame_colour, depth):
    """Draw the single-line frame around the spell/item info popup.

    NOTE(review): only the left vertical edge is drawn here — the right
    edge appears to be handled by an adjacent panel; confirm.
    """
    bottom_t = constants.ASCII_SINGLE_BOTTOM_T_JUNCTION
    top_left = constants.ASCII_SINGLE_TOP_LEFT
    cross = constants.ASCII_SINGLE_CROSS_JUNCTION
    bottom_left = constants.ASCII_SINGLE_BOTTOM_LEFT
    horizontal = constants.ASCII_SINGLE_HORIZONTAL
    vertical = constants.ASCII_SINGLE_VERTICAL
    top_row = starty
    bottom_row = (starty + depth) - 1
    right_col = startx + width
    # top and bottom edges
    for column in range(startx, right_col):
        terminal.printf(x=column, y=top_row, s=frame_colour + horizontal + ']')
        terminal.printf(x=column, y=bottom_row, s=frame_colour + horizontal + ']')
    # left edge
    for row_offset in range(depth - 1):
        terminal.printf(x=startx, y=starty + row_offset, s=frame_colour + vertical + ']')
    # corners / junctions, drawn last so they overwrite the edges
    terminal.printf(x=startx, y=top_row, s=frame_colour + top_left + ']')
    terminal.printf(x=startx, y=bottom_row, s=frame_colour + bottom_left + ']')
    terminal.printf(x=right_col, y=top_row, s=frame_colour + cross + ']')
    terminal.printf(x=right_col, y=bottom_row, s=frame_colour + bottom_t + ']')
def is_tile_string_plural(spell_range):
    """Return ' tile' when *spell_range* is exactly 1, otherwise ' tiles'."""
    return ' tile' if spell_range == 1 else ' tiles'
def draw_portrait(startx, starty, portrait_file):
    """Render an ASCII-art portrait loaded from the portraits folder."""
    filepath = constants.FILE_PORTRAITSFOLDER + portrait_file
    font_string = "[font=portrait]"
    file_content = externalfileutilities.Externalfiles.load_existing_file(filename=filepath)
    # Portrait is drawn row by row, offset 7 cells right and 2 cells down.
    for row_offset, row in enumerate(file_content):
        terminal.printf(x=startx + 7, y=starty + 2 + row_offset, s=font_string + row)
def draw_spell_info(startx, starty, gameworld, spell_entity):
    """Render the 'Embedded Spell Info...' section of an item popup.

    A spell_entity of 0 means the item has no embedded spell; a placeholder
    line is drawn instead of the spell details.
    """
    key_col = "[color=DISPLAY_ITEM_EQUIPPED]"
    val_col = "[/color][color=PLAYER_DEBUG]"
    terminal.printf(x=startx, y=starty + 6, s=key_col + 'Embedded Spell Info...')
    if spell_entity == 0:
        terminal.printf(x=startx, y=starty + 7, s='No embedded spell')
        return
    utils = spellHelp.SpellUtilities
    spell_name = utils.get_spell_name(gameworld=gameworld, spell_entity=spell_entity)
    spell_cooldown = utils.get_spell_cooldown_remaining_turns(gameworld=gameworld, spell_entity=spell_entity)
    spell_range = utils.get_spell_max_range(gameworld=gameworld, spell_entity=spell_entity)
    condi_list = utils.get_all_condis_for_spell(gameworld=gameworld, spell_entity=spell_entity)
    boon_list = utils.get_all_boons_for_spell(gameworld=gameworld, spell_entity=spell_entity)
    terminal.printf(x=startx, y=starty + 7, s=key_col + 'Name:' + val_col + spell_name)
    terminal.printf(x=startx, y=starty + 8, s=key_col + 'Cooldown:' + val_col + str(spell_cooldown))
    terminal.printf(x=startx, y=starty + 9, s=key_col + 'Range:' + val_col + str(spell_range))
    terminal.printf(x=startx, y=starty + 11, s=key_col + 'Effects...')
    terminal.printf(x=startx, y=starty + 13, s=key_col + 'Causes: ' + val_col + get_condis_as_string(condi_list=condi_list))
    terminal.printf(x=startx, y=starty + 14, s=key_col + 'Gives: ' + val_col + get_boons_as_string(boon_list=boon_list))
def display_spell_information(gameworld, menu_selection, player_entity, game_config, key_colour, value_colour):
    """Draw full details for the spell in the selected spellbar slot.

    Args:
        gameworld: the ECS world to query.
        menu_selection: menu key; the spellbar slot is this value minus one
            (the spellbar appears to be 0-indexed — confirm with caller).
        player_entity: entity id of the player.
        game_config: parsed game config used for layout coordinates.
        key_colour / value_colour: colour markup prefixes for labels/values.

    Returns:
        The spell's entity id, or 0 when the slot is empty (nothing drawn).
    """
    spell_key_colour_string = key_colour
    spell_value_colour_string = value_colour
    # Pre-built label strings (label colour + caption + value colour).
    effects_title = spell_key_colour_string + 'Effects...'
    status_effects_condi_list_title = spell_key_colour_string + 'Causes: ' + spell_value_colour_string
    status_effects_boon_title = spell_key_colour_string + 'Gives: ' + spell_value_colour_string
    spell_type_string = spell_key_colour_string + 'Type: ' + spell_value_colour_string
    spell_cooldown_string = spell_key_colour_string + 'On cooldown: ' + spell_value_colour_string
    spell_range_string = spell_key_colour_string + 'Max Range: ' + spell_value_colour_string
    spell_targets_string = spell_key_colour_string + 'No Targets: ' + spell_value_colour_string
    # Layout coordinates from config.
    spell_item_info_item_imp_text_x = configUtilities.get_config_value_as_integer(configfile=game_config,
                                                                                  section='spellInfoPopup',
                                                                                  parameter='SP_IMPORTANT_TEXT_X')
    spell_item_info_start_y = configUtilities.get_config_value_as_integer(configfile=game_config,
                                                                          section='spellInfoPopup',
                                                                          parameter='SP_START_Y')
    spell_item_info_width = configUtilities.get_config_value_as_integer(configfile=game_config,
                                                                        section='spellInfoPopup',
                                                                        parameter='SP_WIDTH')
    spell_entity = spellHelp.SpellUtilities.get_spell_entity_from_spellbar_slot(gameworld=gameworld,
                                                                                slot=menu_selection - 1,
                                                                                player_entity=player_entity)
    if spell_entity > 0:
        spell_name = spellHelp.SpellUtilities.get_spell_name(gameworld=gameworld, spell_entity=spell_entity)
        spell_cooldown = spellHelp.SpellUtilities.get_spell_cooldown_remaining_turns(gameworld=gameworld,
                                                                                    spell_entity=spell_entity)
        spell_type = spellHelp.SpellUtilities.get_spell_type(gameworld=gameworld, spell_entity=spell_entity)
        spell_range = spellHelp.SpellUtilities.get_spell_max_range(gameworld=gameworld, spell_entity=spell_entity)
        spell_description = spellHelp.SpellUtilities.get_spell_description(gameworld=gameworld,
                                                                          spell_entity=spell_entity)
        spell_condi_effects_list = spellHelp.SpellUtilities.get_all_condis_for_spell(gameworld=gameworld,
                                                                                    spell_entity=spell_entity)
        spell_boon_effects_list = spellHelp.SpellUtilities.get_all_boons_for_spell(gameworld=gameworld,
                                                                                  spell_entity=spell_entity)
        spell_resources_list = spellHelp.SpellUtilities.get_all_resources_for_spell(gameworld=gameworld,
                                                                                   spell_entity=spell_entity)
        spell_no_targets = spellHelp.SpellUtilities.get_spell_max_targets(gameworld=gameworld,
                                                                          spell_entity=spell_entity)
        # Centred spell name, then one stat per row moving downwards.
        y_pos = spell_item_info_start_y + 1
        terminal.print_(x=spell_item_info_item_imp_text_x, y=y_pos, width=spell_item_info_width,
                        height=1, align=terminal.TK_ALIGN_CENTER, s=spell_value_colour_string + spell_name)
        y_pos += 2
        terminal.printf(x=spell_item_info_item_imp_text_x, y=y_pos,
                        s=spell_type_string + spell_type)
        cooldown_string = format_cooldown_string(spell_cooldown)
        y_pos += 1
        terminal.printf(x=spell_item_info_item_imp_text_x, y=y_pos,
                        s=spell_cooldown_string + cooldown_string)
        y_pos += 1
        tile_string = is_tile_string_plural(spell_range=spell_range)
        terminal.printf(x=spell_item_info_item_imp_text_x, y=y_pos,
                        s=spell_range_string + str(spell_range) + tile_string)
        y_pos += 1
        terminal.printf(x=spell_item_info_item_imp_text_x, y=y_pos,
                        s=spell_targets_string + str(spell_no_targets))
        y_pos += 2
        terminal.printf(x=spell_item_info_item_imp_text_x, y=y_pos, s=effects_title)
        condi_string = get_condis_as_string(condi_list=spell_condi_effects_list)
        boon_string = get_boons_as_string(boon_list=spell_boon_effects_list)
        resource_string = get_resources_as_string(spell_resources_list)
        y_pos += 1
        terminal.printf(x=spell_item_info_item_imp_text_x, y=y_pos,
                        s=status_effects_condi_list_title + condi_string)
        y_pos += 1
        terminal.printf(x=spell_item_info_item_imp_text_x, y=y_pos,
                        s=status_effects_boon_title + boon_string + resource_string)
        # draw fluff text
        y_pos += 3
        draw_fluff_text(x=spell_item_info_item_imp_text_x, y=y_pos, width=spell_item_info_width,
                        fluff_text=spell_description, key_colour_string=spell_key_colour_string,
                        value_colour_string=spell_value_colour_string)
    return spell_entity
def display_armour_information(gameworld, game_config, player_entity, bodylocation, key_colour, value_colour,
                               frame_colour):
    """Draw the info panel contents for the armour piece at *bodylocation*.

    Returns the armour item's entity id, or 0 when nothing is equipped
    there (in which case nothing is drawn).  ``game_config`` is unused
    here; it is kept for signature parity with the other display helpers.
    """
    armour_key_colour_string = key_colour
    armour_value_colour_string = value_colour
    # unicode strings of colours
    unicode_frame_colour = frame_colour
    armour_defense_string = armour_key_colour_string + 'Defense:' + armour_value_colour_string
    armour_armourset_string = armour_key_colour_string + 'Armourset:' + armour_value_colour_string
    armour_quality_string = armour_key_colour_string + 'Quality:' + armour_value_colour_string
    item_coords = common.CommonUtils.get_item_ui_common_coords()
    spell_item_info_item_imp_text = item_coords[4] + 2
    spell_item_info_horizontal = constants.ASCII_SINGLE_HORIZONTAL
    spell_item_info_left_t_junction = constants.ASCII_SINGLE_LEFT_T_JUNCTION
    spell_item_info_right_t_junction = constants.ASCII_SINGLE_RIGHT_T_JUNCTION
    item_entity = armourManagement.ArmourUtilities.get_armour_entity_from_body_location(gameworld=gameworld,
                                                                                       entity=player_entity,
                                                                                       bodylocation=bodylocation)
    if item_entity > 0:
        logger.debug('Armour entity is {}', item_entity)
        # draw portrait — the portrait file is named after the display name
        item_displayname = itemsHelp.ItemUtilities.get_item_displayname(gameworld=gameworld, entity=item_entity)
        portrait_file = item_displayname + '.txt'
        draw_portrait(startx=item_coords[0], starty=item_coords[2], portrait_file=portrait_file)
        # draw middle horizontal line
        draw_horizontal_line_after_portrait(x=item_coords[1], y=item_coords[4],
                                            w=item_coords[3], string_colour=unicode_frame_colour,
                                            horiz_glyph=spell_item_info_horizontal,
                                            left_t_glyph=spell_item_info_left_t_junction,
                                            right_t_glyph=spell_item_info_right_t_junction)
        # draw armour stuff
        defense_value = armourManagement.ArmourUtilities.get_armour_defense_value(gameworld=gameworld,
                                                                                 entity=item_entity)
        armourset_value = armourManagement.ArmourUtilities.get_armour_set_name(gameworld=gameworld, entity=item_entity)
        quality_value = itemsHelp.ItemUtilities.get_item_quality(gameworld=gameworld, entity=item_entity)
        spell_entity = itemsHelp.ItemUtilities.get_spell_from_item(gameworld=gameworld, item_entity=item_entity)
        armour_description_value = itemsHelp.ItemUtilities.get_item_description(gameworld=gameworld, entity=item_entity)
        terminal.printf(x=item_coords[0], y=spell_item_info_item_imp_text + 1,
                        s=armour_defense_string + str(defense_value))
        terminal.printf(x=item_coords[0], y=spell_item_info_item_imp_text + 3,
                        s=armour_armourset_string + armourset_value)
        terminal.printf(x=item_coords[0], y=spell_item_info_item_imp_text + 4,
                        s=armour_quality_string + quality_value)
        draw_spell_info(startx=item_coords[0], starty=spell_item_info_item_imp_text,
                        gameworld=gameworld, spell_entity=spell_entity)
        # draw fluff text
        draw_fluff_text(x=item_coords[0], y=spell_item_info_item_imp_text + 16,
                        width=item_coords[3], fluff_text=armour_description_value,
                        key_colour_string=armour_key_colour_string, value_colour_string=armour_value_colour_string)
    return item_entity
def display_jewellery_information(gameworld, game_config, player_entity, bodylocation, key_colour, value_colour,
                                  frame_colour):
    """Draw the info panel contents for the jewellery at *bodylocation*.

    Returns the jewellery item's entity id, or 0 when nothing is equipped
    there (in which case nothing is drawn).  ``game_config`` is unused
    here; it is kept for signature parity with the other display helpers.
    """
    jewellery_key_colour_string = key_colour
    jewellery_value_colour_string = value_colour
    # unicode strings of colours
    unicode_frame_colour = frame_colour
    jewellery_bonus_string = jewellery_key_colour_string + 'Bonus to:' + jewellery_value_colour_string
    jewellery_attribute_string = jewellery_key_colour_string + 'Attribute type:' + jewellery_value_colour_string
    jewellery_att_bonus_string = jewellery_key_colour_string + 'Bonus:' + jewellery_value_colour_string
    item_coords = common.CommonUtils.get_item_ui_common_coords()
    spell_item_info_item_imp_text = item_coords[4] + 2
    spell_item_info_horizontal = constants.ASCII_SINGLE_HORIZONTAL
    spell_item_info_left_t_junction = constants.ASCII_SINGLE_LEFT_T_JUNCTION
    spell_item_info_right_t_junction = constants.ASCII_SINGLE_RIGHT_T_JUNCTION
    item_entity = jewelleryManagement.JewelleryUtilities.get_jewellery_entity_from_body_location(gameworld=gameworld,
                                                                                                entity=player_entity,
                                                                                                bodylocation=bodylocation)
    if item_entity > 0:
        # draw portrait
        # NOTE(review): this uses get_item_name while the armour variant uses
        # get_item_displayname for the portrait filename — confirm intended.
        item_displayname = itemsHelp.ItemUtilities.get_item_name(gameworld=gameworld, entity=item_entity)
        portrait_file = item_displayname + '.txt'
        draw_portrait(startx=item_coords[0], starty=item_coords[2], portrait_file=portrait_file)
        # draw middle horizontal line
        draw_horizontal_line_after_portrait(x=item_coords[1], y=item_coords[4],
                                            w=item_coords[3], string_colour=unicode_frame_colour,
                                            horiz_glyph=spell_item_info_horizontal,
                                            left_t_glyph=spell_item_info_left_t_junction,
                                            right_t_glyph=spell_item_info_right_t_junction)
        # draw important text
        jewellery_statbonus = jewelleryManagement.JewelleryUtilities.get_jewellery_stat_bonus(gameworld=gameworld,
                                                                                             jewellery_entity=item_entity)
        spell_entity = itemsHelp.ItemUtilities.get_spell_from_item(gameworld=gameworld, item_entity=item_entity)
        jewellery_description = itemsHelp.ItemUtilities.get_item_description(gameworld=gameworld, entity=item_entity)
        # draw jewellery stuff
        terminal.printf(x=item_coords[0], y=spell_item_info_item_imp_text + 1,
                        s=jewellery_bonus_string + jewellery_statbonus[0])
        terminal.printf(x=item_coords[0], y=spell_item_info_item_imp_text + 2,
                        s=jewellery_attribute_string + 'Primary')
        terminal.printf(x=item_coords[0], y=spell_item_info_item_imp_text + 3,
                        s=jewellery_att_bonus_string + '+' + str(jewellery_statbonus[1]))
        # embedded spell
        draw_spell_info(startx=item_coords[0], starty=spell_item_info_item_imp_text, gameworld=gameworld,
                        spell_entity=spell_entity)
        # draw fluff text
        draw_fluff_text(x=item_coords[0], y=spell_item_info_item_imp_text + 15,
                        width=item_coords[3], fluff_text=jewellery_description,
                        key_colour_string=jewellery_key_colour_string,
                        value_colour_string=jewellery_value_colour_string)
    return item_entity
|
# Copyright 2021 Sony Semiconductors Israel, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
from typing import List, Callable
import numpy as np
from PIL import Image
#: File extensions (lower-case, without the dot) that are treated as images.
FILETYPES = ['jpeg', 'jpg', 'bmp', 'png']


class FolderImageLoader(object):
    """
    Class for images loading, processing and retrieving.
    """

    def __init__(self,
                 folder: str,
                 preprocessing: List[Callable],
                 batch_size: int,
                 file_types: List[str] = FILETYPES):
        """ Initialize a FolderImageLoader object.

        Args:
            folder: Path of a folder with images to load. The path has to exist, and the scan
                must find at least one image.
            preprocessing: List of functions applied (in order) to each image before retrieval.
            batch_size: Number of images to retrieve each sample.
            file_types: File extensions to scan in the folder. Defaults to :data:`FILETYPES`.

        Examples:
            Instantiate a loader that returns 10 random images per sample:

            >>> image_data_loader = FolderImageLoader('path/to/images/directory', preprocessing=[], batch_size=10)
            >>> images = image_data_loader.sample()

            Preprocessing functions and specific file extensions can be passed:

            >>> FolderImageLoader('path/to/images', preprocessing=[lambda x: (x-127.5)/127.5], batch_size=10, file_types=['png'])
        """
        self.folder = folder
        self.image_list = []
        print(f"Starting Scanning Disk: {self.folder}")
        for root, _, files in os.walk(self.folder):
            for file in files:
                # Match on the lower-cased text after the last dot.
                file_type = file.split('.')[-1].lower()
                if file_type in file_types:
                    self.image_list.append(os.path.join(root, file))
        self.n_files = len(self.image_list)
        # Fixed: the message was an f-string with no placeholders.
        assert self.n_files > 0, 'Folder to load can not be empty.'
        print(f"Finished Disk Scanning: Found {self.n_files} files")
        self.preprocessing = preprocessing
        self.batch_size = batch_size

    def _sample(self):
        """
        Read batch_size random images from the scanned image_list, run the
        preprocessing chain on each, and stash the stacked result in
        ``next_batch_data`` for :meth:`sample` to return.
        """
        index = np.random.randint(0, self.n_files, self.batch_size)
        image_list = []
        for i in index:
            file = self.image_list[i]
            img = np.uint8(np.array(Image.open(file).convert('RGB')))
            for p in self.preprocessing:  # preprocess images
                img = p(img)
            image_list.append(img)
        self.next_batch_data = np.stack(image_list, axis=0)

    def sample(self):
        """
        Returns: A sample of batch_size images from the folder that was scanned.
        """
        self._sample()
        data = self.next_batch_data  # get current data
        return data
|
<reponame>marcelo-bn/Banco-de-Dados
from flask import Flask, flash, redirect, url_for, request, session, render_template, jsonify
from flask_bootstrap import Bootstrap
from flask_nav import Nav
from flask_nav.elements import Navbar, View, Subgroup
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import func, desc
from sqlalchemy.ext.automap import automap_base
from datetime import datetime, timedelta, date
import math
# https://fontawesome.com/icons
from flask_fontawesome import FontAwesome
# Salvando senhas de maneira apropriada no banco de dados.
# https://werkzeug.palletsprojects.com/en/1.0.x/utils/#module-werkzeug.security
# Para gerar a senha a ser salva no DB, faça:
# senha = generate_password_hash('<PASSWORD>')
# from werkzeug.security import generate_password_hash, check_password_hash
from forms.pesquisar_paciente import PesquisarPaciente
from forms.cadastrar_paciente import CadastrarPaciente
from forms.calendario import PesquisarCalendario
from forms.vacinacao import VacinarPaciente
# Flask application configuration.
app = Flask(__name__)
# NOTE(review): secret key and DB credentials are hard-coded — move them to
# environment variables/config before deploying.
app.secret_key = "SECRET_KEY"
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://root:marcelomac@localhost:3306/pp02marcelo'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS']=False
db = SQLAlchemy(app)
# Reflect the existing MySQL schema into mapped classes via automap;
# each name below corresponds to a table in the database.
Base = automap_base()
Base.prepare(db.engine, reflect=True)
Pessoa = Base.classes.Pessoa
Vacinacao = Base.classes.Vacinacao
Faixa_Etaria = Base.classes.Faixa_Etaria
Ano = Base.classes.Ano
Vacina_Geral = Base.classes.Vacina_Geral
Cidade = Base.classes.Cidade
Endereco = Base.classes.Endereco
Telefone = Base.classes.Telefone
Vacina_Doenca = Base.classes.Vacina_Doenca
Doenca = Base.classes.Doenca
Intervalo = Base.classes.Intervalo
Vacina = Base.classes.Vacina
Estoque = Base.classes.Estoque
Unidade_Saude = Base.classes.Unidade_Saude
Historico = Base.classes.Historico
Proxima_Dose = Base.classes.Proxima_Dose
Compra = Base.classes.Compra
# UI extensions: Bootstrap styling, FontAwesome icons and the nav bar.
boostrap = Bootstrap(app)  # NOTE(review): name looks like a typo for "bootstrap"
fa = FontAwesome(app)
nav = Nav()
nav.init_app(app)
@nav.navigation()
def meunavbar():
    """Build the top navigation bar shown on every page."""
    menu = Navbar('Sistema de Vacinação')
    menu.items = [
        View('Início', 'inicio'),
        View('Calendário', 'listar_calendario'),
        Subgroup('Vacinar',
                 View('Pesquisar', 'pesquisar_paciente'),
                 View('Cadastrar', 'cadastrar_paciente')),
    ]
    return menu
@app.route('/')
def inicio():
    '''Render the landing page and mark the session as logged in.'''
    session['logged_in'] = True
    return render_template('index.html', title='SICOVAC')
@app.route('/pesquisar', methods=['GET', 'POST'])
def pesquisar_paciente():
    '''Search for a patient by CPF and redirect to their vaccination history.'''
    form = PesquisarPaciente()
    if request.method == 'GET':
        return render_template('pesquisar_paciente.html', title='Pesquisar paciente', form=form)
    if form.validate_on_submit():
        cpf = request.form['cpf']
        paciente = db.session.query(Pessoa).filter(Pessoa.cpf == cpf).first()
        if paciente is not None:
            # Remember the patient for the follow-up views.
            session['cpf'] = paciente.cpf
            session['idPaciente'] = paciente.id
            return redirect(url_for('historico_paciente'))
        else:
            flash('Paciente não encontrado, favor realizar o seu cadastro')
            return render_template('index.html', title='SICOVAC')
@app.route('/cadastrar', methods=['GET', 'POST'])
def cadastrar_paciente():
    '''Register a new patient, creating phone and address rows on demand.

    GET renders the registration form; POST validates it, converts the
    birth date from dd/mm/yyyy (form) to yyyy-mm-dd (database), reuses
    existing Telefone/Endereco rows when possible and inserts the Pessoa.
    '''
    form = CadastrarPaciente()
    cidades = db.session.query(Cidade).all()
    form.cidade.choices = [(c.id, c.nome) for c in cidades]
    if request.method == 'GET':
        return render_template('cadastrar_paciente.html', title='Cadastrar paciente', form=form)
    if form.validate_on_submit():
        cpf = request.form['cpf']
        nome = request.form['nome']
        nascimento = request.form['dataNasc']
        cidade = request.form['cidade']
        rua = request.form['rua']
        telefone = request.form['telefone']
        paciente = db.session.query(Pessoa).filter(Pessoa.cpf == cpf).first()
        # Convert dd/mm/yyyy (form) to yyyy-mm-dd (database). The original
        # local name `format` shadowed the builtin; renamed.
        input_format = '%d/%m/%Y'
        db_format = '%Y-%m-%d'
        dataNasc = datetime.strptime(nascimento, input_format).strftime(db_format)
        if paciente is not None:
            flash('Paciente já possui cadastro')
            return redirect(url_for('inicio'))
        else:
            # Pessoa row being assembled.
            p = Pessoa()
            # Reuse an existing phone row when the number is already known.
            telefone_existente = db.session.query(Telefone).filter(Telefone.numero == telefone).first()
            # func.max() yields None on an empty table; treat that as id 0
            # so `max_id + 1` does not raise TypeError.
            telefone_max_id = db.session.query(func.max(Telefone.id)).scalar() or 0
            if telefone_existente is None:
                t = Telefone()
                t.id = telefone_max_id + 1
                t.numero = telefone
                db.session.add(t)
                p.idTelefone = telefone_max_id + 1
            else:
                p.idTelefone = telefone_existente.id
            # Reuse an existing address row for the same city/street pair.
            rua_existente = db.session.query(Endereco).filter(Endereco.idCidade == cidade, Endereco.rua == rua).first()
            rua_max_id = db.session.query(func.max(Endereco.id)).scalar() or 0
            pessoa_max_id = db.session.query(func.max(Pessoa.id)).scalar() or 0
            if rua_existente is None:
                e = Endereco()
                e.id = rua_max_id + 1
                e.rua = rua
                e.idCidade = cidade
                db.session.add(e)
                p.idEndereco = rua_max_id + 1
            else:
                p.idEndereco = rua_existente.id
            p.id = pessoa_max_id + 1
            p.cpf = cpf
            p.nome = nome
            p.nascimento = dataNasc
            db.session.add(p)
            db.session.commit()
            db.session.close()
            return redirect(url_for('inicio'))
@app.route('/historico')
def historico_paciente():
    '''Show a patient's vaccination history, pending vaccines and next doses.'''
    # Patient record, identified via the session.
    paciente = db.session.query(Pessoa).filter(Pessoa.id == session['idPaciente']).first()
    # Patient's age in months.
    d1 = datetime.strptime(str(paciente.nascimento), "%Y-%m-%d")
    d2 = datetime.strptime(str(datetime.now().strftime('%Y-%m-%d')), "%Y-%m-%d")
    idade_mes = math.floor((abs((d2 - d1).days)/365)*12)
    # Age range matching the patient's age in months, if any.
    faixa_etaria = db.session.query(Faixa_Etaria).filter(Faixa_Etaria.faixa == idade_mes).first()
    # Id of the current calendar year.
    now = datetime.now()
    ano = db.session.query(Ano).filter(Ano.ano == str(now.year)).first()
    session['idAnoAtual'] = ano.id
    # General vaccines still pending for this age range.
    lista_vacinas_geral = []
    if not faixa_etaria:
        print("> Nenhuma faixa etaria.")
    else:
        session['faixaEtariaPaciente'] = faixa_etaria.faixa
        session['idFaixaEtaria'] = faixa_etaria.id
        # Vaccines scheduled for this age range/year (Vacinacao table).
        vacinas = db.session.query(Vacinacao).filter(Vacinacao.idFaixaEtaria == faixa_etaria.id, Vacinacao.idAno == ano.id).all()
        # Vaccines already applied in this age range (Historico table).
        vacinas_ja_aplicadas = db.session.query(Historico).filter(Historico.idPessoa == session['idPaciente'],Historico.idFaixaEtaria == faixa_etaria.id)
        # Keep only general vaccines not yet applied in this age range.
        aux = False
        for v in vacinas:
            vacina_geral = db.session.query(Vacina_Geral).filter(Vacina_Geral.id == v.idVacinaGeral).first()
            for vj in vacinas_ja_aplicadas:
                if vacina_geral.id == vj.idVacinaGeral:
                    aux = True
            if aux:
                print("> Vacina já aplicada para essa faixa etária")
            else:
                lista_vacinas_geral.append(vacina_geral)
            aux = False
    # Work out which dose number each pending general vaccine is at.
    lista_vacinas_geral_num_dose = []
    for i in lista_vacinas_geral:
        # All age ranges of this general vaccine, in ascending order.
        # NOTE(review): the inner loop below reuses the name `i`, shadowing
        # the outer loop variable — harmless as written, but worth renaming.
        faixas_vacina_escolhida = db.session.query(Faixa_Etaria).join(Vacinacao).filter(Vacinacao.idVacinaGeral == i.id, Vacinacao.idAno == ano.id).order_by(Faixa_Etaria.faixa)
        lista_faixas_vacina_escolhida = []
        for i in faixas_vacina_escolhida:  # list of all age ranges of the general vaccine
            lista_faixas_vacina_escolhida.append(i.faixa)
        # Dose number = position of the patient's age range in that list.
        numDoseAtual = lista_faixas_vacina_escolhida.index(session['faixaEtariaPaciente']) + 1
        lista_vacinas_geral_num_dose.append(numDoseAtual)
    # Full application history for the patient.
    historico = db.session.query(Historico).filter(Historico.idPessoa == session['idPaciente'])
    lista_historico = []
    if not historico:
        # NOTE(review): a Query object is always truthy, so this branch
        # never runs — the empty case is covered by the loop not executing.
        print("> Nenhum histórico")
    else:
        for h in historico:
            item = []
            vacina_geral = db.session.query(Vacina_Geral).filter(Vacina_Geral.id == h.idVacinaGeral).first()
            local = db.session.query(Unidade_Saude).filter(Unidade_Saude.id == h.idUnidadeSaude).first()
            vacina = db.session.query(Vacina).filter(Vacina.id == h.idVacina).first()
            item.append(vacina_geral.nome)
            item.append(local.nome)
            item.append(h.data)
            item.append(vacina.nome)
            item.append(h.doseAtual)
            item.append(vacina.nDose)
            lista_historico.append(item)
    # Scheduled next doses for the patient.
    pd = db.session.query(Proxima_Dose).filter(Proxima_Dose.idPessoa == session['idPaciente'])
    # Keep only dates that are still in the future.
    lista_pd_ativas = []
    hoje = date.today()
    for i in pd:
        if i.data > hoje:
            lista_pd_ativas.append(i)
    lista_proximas_vacinas = []
    for l in lista_pd_ativas:
        lista_aux_pd = []
        vacinaGeral = db.session.query(Vacina_Geral).filter(Vacina_Geral.id == l.idVacinaGeral).first()
        lista_aux_pd.append(vacinaGeral.nome)
        lista_aux_pd.append(l.data)
        lista_proximas_vacinas.append(lista_aux_pd)
    return render_template('historico.html', title='HISTÓRICO', paciente=paciente, idade_mes=idade_mes, vacinas_geral=lista_vacinas_geral, historico=lista_historico, proximas=lista_proximas_vacinas, faixas=lista_vacinas_geral_num_dose, dadosvacina=zip(lista_vacinas_geral,lista_vacinas_geral_num_dose))
@app.route('/vacinacao', methods=['GET', 'POST'])
def vacinacao_paciente():
    '''Apply a vaccine dose to the current patient and schedule the next dose.

    GET renders the vaccination form with only in-stock vaccines; POST
    records the dose in Historico, updates the stock and, unless this was
    the final dose, inserts a Proxima_Dose row.
    '''
    # General vaccine selected on the previous screen (querystring).
    session['idVacinaGeral'] = request.args['idVacinaGeral']
    # Concrete vaccines belonging to the selected general vaccine.
    vacinas = db.session.query(Vacina).join(Vacina_Doenca).join(Vacina_Geral).filter(Vacina_Geral.id == request.args['idVacinaGeral'])
    lista_vacinas_disponiveis = []
    for v in vacinas:
        estoque = db.session.query(Estoque).filter(Estoque.idVacina == v.id).first()
        # Offer only vaccines that still have doses in stock (guard against
        # a missing stock row, which previously raised AttributeError).
        if estoque is not None and estoque.dosesTotal > (estoque.vacAplicadas + estoque.vacDescartadas):
            lista_vacinas_disponiveis.append(v)
    # All health units are valid application sites.
    unidades_saude = db.session.query(Unidade_Saude).all()
    form = VacinarPaciente()
    form.vacina.choices = [(v.id, v.nome) for v in lista_vacinas_disponiveis]
    form.unidadeSaude.choices = [(u.id, u.nome) for u in unidades_saude]
    if request.method == 'GET':
        return render_template('vacinacao.html', title='VACINAÇÃO', form=form,)
    if form.validate_on_submit():
        vacina_aplicada = db.session.query(Vacina).filter(Vacina.id == request.form['vacina']).first()
        doseAtual = int(request.form['doseAtual'])
        # Interval (in days) until the next dose of this vaccine.
        intervalo = db.session.query(Intervalo).join(Vacina).filter(Vacina.id == vacina_aplicada.id).first()
        periodo_dias = intervalo.periodo
        # Schedule the next dose only when this is not the final one.
        if doseAtual < vacina_aplicada.nDose:
            proximaDose = (datetime.now() + timedelta(days=periodo_dias)).strftime('%Y-%m-%d')
            session['proximaDose'] = proximaDose
        else:
            session['proximaDose'] = 'ultima'
        dataHoje = date.today().strftime("%Y-%m-%d")
        # Record the application in the history.
        h = Historico()
        h.idVacinaGeral = session['idVacinaGeral']
        h.idFaixaEtaria = session['idFaixaEtaria']
        h.idAno = session['idAnoAtual']
        h.idPessoa = session['idPaciente']
        h.data = dataHoje
        h.idUnidadeSaude = request.form['unidadeSaude']
        h.doseAtual = doseAtual
        h.idVacina = vacina_aplicada.id
        db.session.add(h)
        # Update stock: one more applied dose.
        est = db.session.query(Estoque).filter(Estoque.idVacina == vacina_aplicada.id).first()
        est.vacAplicadas = est.vacAplicadas + 1
        # Insert the next-dose row unless this was the final dose.
        # BUG FIX: the original used `is not 'ultima'`, which compares
        # identity rather than equality and only worked by CPython string
        # interning accident.
        if session['proximaDose'] != 'ultima':
            # func.max() yields None on an empty table; treat as id 0.
            pDose_max_id = db.session.query(func.max(Proxima_Dose.id)).scalar() or 0
            pDose = Proxima_Dose()
            pDose.id = pDose_max_id + 1
            pDose.data = session['proximaDose']
            pDose.idPessoa = session['idPaciente']
            pDose.idVacinaGeral = session['idVacinaGeral']
            db.session.add(pDose)
        db.session.commit()
        db.session.close()
        return redirect(url_for('historico_paciente'))
@app.route('/calendario', methods=['GET', 'POST'])
def listar_calendario():
    """List the vaccines of a selected vaccination calendar (year).

    GET renders the empty search form; a validated POST renders the
    calendar table (vaccine, covered diseases, age ranges) for the
    chosen year.
    """
    form = PesquisarCalendario()
    anos = db.session.query(Ano).order_by(Ano.ano)
    form.calendario.choices = [(a.id, a.ano) for a in anos]
    if request.method == 'GET':
        return render_template('calendario.html', title='CALENDÁRIO', form=form)
    if form.validate_on_submit():
        # General vaccines belonging to the selected calendar year.
        vacinas_calendario = db.session.query(Vacina_Geral).join(Vacinacao).filter(Vacinacao.idAno == request.form['calendario'])
        # Comma-separated disease names covered by each general vaccine.
        doencas_vacinas_lista = []
        for vg in vacinas_calendario:
            doencas_vacina_geral = db.session.query(Doenca).join(Vacina_Doenca).filter(Vacina_Doenca.idVacinaGeral == vg.id)
            doencas_vacinas_lista.append(', '.join(dvg.nome for dvg in doencas_vacina_geral))
        # Human-readable age ranges for each vaccine (stored in months).
        faixa_vacinas_lista = []
        for vg in vacinas_calendario:
            faixas_vacina_geral = db.session.query(Faixa_Etaria).join(Vacinacao).filter(Vacinacao.idVacinaGeral == vg.id).all()
            partes = []
            for i in faixas_vacina_geral:
                if i.faixa > 24:
                    # Above 24 months: display in years plus any leftover months.
                    faixa_ano = int(math.floor(i.faixa / 12))
                    frac, full = math.modf(i.faixa / 12)
                    if frac > 0.0:
                        sobra_mes = int(frac * 12)
                        partes.append(str(faixa_ano) + ' anos e ' + str(sobra_mes) + ' meses')
                    else:
                        partes.append(str(faixa_ano) + ' anos')
                elif (i.faixa == 0) or (i.faixa == 1):
                    partes.append(str(i.faixa) + ' mês')
                else:
                    partes.append(str(i.faixa) + ' meses')
            faixa_vacinas_lista.append(', '.join(partes))
        return render_template('calendario.html', title='SICOVAC', form=form, dados=zip(vacinas_calendario, doencas_vacinas_lista, faixa_vacinas_lista))
    # Bug fix: a POST that fails validation previously fell off the end of the
    # function (view returned None -> HTTP 500); re-render the form instead.
    return render_template('calendario.html', title='CALENDÁRIO', form=form)
@app.errorhandler(404)
def page_not_found(e):
    """Render the custom "page not found" view for HTTP 404 errors."""
    return render_template('404.html'), 404
if __name__ == '__main__':
    # Start the Flask development server.
    # NOTE(review): debug=True enables the interactive debugger and reloader —
    # development only; do not run with this flag in production.
    app.run(debug=True)
|
# Copyright 2017 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import socket
import jsonschema
# Pre-defined JSON-RPC 2.0 error codes
# (http://www.jsonrpc.org/specification#error_object).
CODE_PARSE_ERROR = -32700      # invalid JSON was received by the server
CODE_INVALID_REQUEST = -32600  # the JSON sent is not a valid request object
CODE_UNKNOWN_METHOD = -32601   # the method does not exist / is not available
CODE_INVALID_PARAMS = -32602   # invalid method parameter(s)
CODE_INTERNAL_ERROR = -32603   # internal JSON-RPC error
# JSON Schema (draft-04) describing a valid JSON-RPC 2.0 request: either a
# single request object or a batch (array) of request objects.  Used by
# Proxy.call to validate outgoing requests before transmission.
REQUEST_SCHEMA = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": "A JSON RPC 2.0 request",
    "oneOf": [
        {
            "description": "An individual request",
            "$ref": "#/definitions/request"
        },
        {
            "description": "An array of requests",
            "type": "array",
            "items": {"$ref": "#/definitions/request"}
        }
    ],
    "definitions": {
        "request": {
            "type": "object",
            "required": ["jsonrpc", "method"],
            "properties": {
                "jsonrpc": {"enum": ["2.0"]},
                "method": {
                    "type": "string"
                },
                "id": {
                    "type": ["string", "number", "null"],
                    # "note" is not a JSON Schema keyword; validators ignore
                    # it, so it serves as inline documentation only.
                    "note": [
                        "While allowed, null should be avoided: "\
                        "http://www.jsonrpc.org/specification#id1",
                        "While allowed, a number with a fractional part " \
                        "should be avoided: http://www.jsonrpc.org/specification#id2"
                    ]
                },
                "params": {
                    "type": ["array", "object"]
                }
            }
        }
    }
}
# JSON Schema (draft-04) describing a valid JSON-RPC 2.0 response: a single
# success or error object, or a batch (array) of them.  Used by Proxy.call to
# validate replies before interpreting them.
RESPONSE_SCHEMA = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": "A JSON RPC 2.0 response",
    "oneOf": [
        {"$ref": "#/definitions/success"},
        {"$ref": "#/definitions/error"},
        {
            "type": "array",
            "items": {
                "oneOf": [
                    {"$ref": "#/definitions/success"},
                    {"$ref": "#/definitions/error"}
                ]
            }
        }
    ],
    "definitions": {
        # Fields shared by success and error responses.
        "common": {
            "required": ["id", "jsonrpc"],
            "not": {
                "description": "cannot have result and error at the same time",
                "required": ["result", "error"]
            },
            "type": "object",
            "properties": {
                "id": {
                    "type": ["string", "integer", "null"],
                    # "note" is not a JSON Schema keyword; documentation only.
                    "note": [
                        "spec says a number which should not contain a fractional part",
                        "We choose integer here, but this is unenforceable with some languages"
                    ]
                },
                "jsonrpc": {"enum": ["2.0"]}
            }
        },
        "success": {
            "description": "A success. The result member is then required and can be anything.",
            "allOf": [
                {"$ref": "#/definitions/common"},
                {"required": ["result"]}
            ]
        },
        "error": {
            "allOf" : [
                {"$ref": "#/definitions/common"},
                {
                    "required": ["error"],
                    "properties": {
                        "error": {
                            "type": "object",
                            "required": ["code", "message"],
                            "properties": {
                                "code": {
                                    "type": "integer",
                                    "note": ["unenforceable in some languages"]
                                },
                                "message": {"type": "string"},
                                "data": {
                                    "description": "optional, can be anything"
                                }
                            }
                        }
                    }
                }
            ]
        }
    }
}
class RpcError(Exception):
    """Base class for all JSON-RPC related errors in this module."""
class ServerError(RpcError):
    """Raised when the server reports an error while processing a
    JSON-RPC request.

    Mirrors the JSON-RPC 2.0 error object: a numeric ``code``, a
    human-readable ``message`` and optional ``data``.
    """

    def __init__(self, code, message, data=None):
        # Keep all three fields in args (via Exception) and as attributes.
        super().__init__(code, message, data)
        self.code = code
        self.message = message
        self.data = data
class RequestParseError(ServerError):
    """Raised when the server received invalid JSON (code -32700)."""

    def __init__(self, message, data=None):
        super().__init__(CODE_PARSE_ERROR, message, data=data)
class InvalidRequestError(ServerError):
    """Raised when the request object was not valid JSON-RPC (code -32600)."""

    def __init__(self, message, data=None):
        super().__init__(CODE_INVALID_REQUEST, message, data=data)
class UnknownMethodError(ServerError):
    """Raised when the requested method does not exist (code -32601)."""

    def __init__(self, message, data=None):
        super().__init__(CODE_UNKNOWN_METHOD, message, data=data)
class InvalidParamsError(ServerError):
    """Raised when the method parameters were invalid (code -32602)."""

    def __init__(self, message, data=None):
        super().__init__(CODE_INVALID_PARAMS, message, data=data)
class InternalServerError(ServerError):
    """Raised on an internal JSON-RPC server error (code -32603)."""

    def __init__(self, message, data=None):
        super().__init__(CODE_INTERNAL_ERROR, message, data=data)
class ResponseParseError(RpcError):
    """Raised when the client receives a reply that is not valid JSON."""
class InvalidResponseError(RpcError):
    """Raised when a reply parses as JSON but violates the response schema."""
class EndpointError(RpcError):
    """Raised when the underlying transport fails (wraps the OSError)."""
class Proxy:
    """Client-side proxy for a JSON-RPC 2.0 server.

    Serialises each method call into a JSON-RPC request, sends it through
    the supplied endpoint, and converts the reply into either a return
    value or a raised exception.
    """

    def __init__(self, endpoint):
        # The endpoint must expose communicate(bytes) -> bytes.
        self.endpoint = endpoint
        self._id = 0  # monotonically increasing request id

    def call(self, method, params=None):
        """Invoke *method* on the server and return its ``result``.

        Raises a ``ServerError`` subclass when the server reports an
        error, ``ResponseParseError`` for non-JSON replies and
        ``InvalidResponseError`` for schema-violating replies.
        """
        self._id += 1
        payload = {
            "jsonrpc": "2.0",
            "id": self._id,
            "method": method,
        }
        if params is not None:
            payload["params"] = params
        jsonschema.validate(payload, REQUEST_SCHEMA)
        encoded = json.dumps(payload).encode()
        # Transmit the request and receive the raw reply through the endpoint.
        raw_reply = self.endpoint.communicate(encoded)
        try:
            response = json.loads(raw_reply.decode())
        except json.JSONDecodeError as err:
            raise ResponseParseError from err
        try:
            jsonschema.validate(response, RESPONSE_SCHEMA)
        except jsonschema.ValidationError:
            raise InvalidResponseError
        try:
            return response['result']
        except KeyError:
            error = response['error']
            self._raise_server_error(error.get('code'),
                                     error.get('message'),
                                     error.get('data'))

    @staticmethod
    def _raise_server_error(code, message, data):
        """Map a JSON-RPC error code to its exception class and raise it."""
        specific = {
            CODE_PARSE_ERROR: RequestParseError,
            CODE_INVALID_REQUEST: InvalidRequestError,
            CODE_UNKNOWN_METHOD: UnknownMethodError,
            CODE_INVALID_PARAMS: InvalidParamsError,
            CODE_INTERNAL_ERROR: InternalServerError,
        }.get(code)
        if specific is None:
            # Unknown code: fall back to the generic server error.
            raise ServerError(code, message, data)
        raise specific(message, data=data)
class TcpSocketEndpoint:
    """Endpoint that exchanges one request/reply pair over a TCP socket."""

    def __init__(self, host, port):
        self.host = host
        self.port = port

    def communicate(self, data):
        """Send *data*, half-close the connection, and return the full reply.

        Raises EndpointError on any socket-level failure.
        """
        try:
            with socket.create_connection((self.host, self.port)) as sock:
                sock.sendall(data)
                # Half-close so the server sees EOF and can reply.
                sock.shutdown(socket.SHUT_WR)
                chunks = []
                while True:
                    chunk = sock.recv(1024)
                    if not chunk:
                        break
                    chunks.append(chunk)
                return b''.join(chunks)
        except OSError as err:
            raise EndpointError from err
|
"""Bin data for ocean water mass analysis."""
import sys
script_dir = sys.path[0]
import os
import pdb
import argparse
import logging
import numpy as np
import iris
import iris.coord_categorisation
from iris.experimental.equalise_cubes import equalise_attributes
import cmdline_provenance as cmdprov
from statsmodels.stats.weightstats import DescrStatsW
repo_dir = '/'.join(script_dir.split('/')[:-1])
module_dir = repo_dir + '/modules'
sys.path.append(module_dir)
try:
import water_mass
import general_io as gio
import convenient_universal as uconv
import spatial_weights
except ImportError:
raise ImportError('Script and modules in wrong directories')
# Mapping from MOM (ocean model) variable names to their long descriptions.
# get_weights_data looks variables up here and inspects the description text
# (e.g. for 'vol'/'area') to decide how to read the weights.
mom_vars = {"temp_nonlocal_KPP": "cp*rho*dzt*nonlocal tendency from KPP",
            "temp_vdiffuse_diff_cbt": "vert diffusion of heat due to diff_cbt",
            "mixdownslope_temp": "cp*mixdownslope*rho*dzt*temp",
            "temp_sigma_diff" : "thk wghtd sigma-diffusion heating",
            "temp_vdiffuse_k33": "vert diffusion of heat due to K33 from neutral diffusion",
            "neutral_diffusion_temp": "rho*dzt*cp*explicit neutral diffusion tendency (heating)",
            "temp_tendency": "time tendency for tracer Conservative temperature"}
def construct_cube(outdata_dict, w_cube, t_cube, s_cube, b_cube, years,
                   t_values, t_edges, t_units, s_values, s_edges, s_units,
                   log, mul_ts=False, pct_edges_ts=()):
    """Create the iris cubes for output.

    Args:
      outdata_dict (dict): binned arrays keyed by name (e.g. 'w_tbin')
      w_cube, t_cube, s_cube, b_cube: weights, temperature, salinity and
        basin cubes (used for metadata only)
      years (np.ndarray): year axis values
      t_values, t_edges, t_units: temperature bin centres, edges, units
      s_values, s_edges, s_units: salinity bin centres, edges, units
      log (str): entry for the output history attribute
      mul_ts (bool): also output the weight*T and weight*S cubes
      pct_edges_ts (sequence): [pct_edges_t, pct_edges_s] per-year
        percentile edges; when non-empty the T/S coordinates are treated
        as percentile bins (fixed the mutable [] default to a tuple)

    Returns:
      iris.cube.CubeList
    """
    # Add the global basin to each output array; also recover the basin
    # axis values and flag metadata (same for every key).
    for key, data in outdata_dict.items():
        outdata_dict[key], b_values, flag_values, flag_meanings = uconv.add_globe_basin(data, b_cube)
    year_coord = iris.coords.DimCoord(years,
                                      standard_name=t_cube.coord('year').standard_name,
                                      long_name=t_cube.coord('year').long_name,
                                      var_name=t_cube.coord('year').var_name,
                                      units=t_cube.coord('year').units)
    t_bounds = uconv.get_bounds_list(t_edges)
    t_coord_std_name = t_cube.standard_name
    t_coord_long_name = t_cube.long_name
    t_coord_var_name = t_cube.var_name
    if pct_edges_ts:
        # Percentile binning: rename the coordinate and register the new
        # standard name so iris accepts it.
        t_coord_std_name = t_coord_std_name + '_percentile_bins'
        t_coord_long_name = t_coord_long_name + ' percentile bins'
        t_coord_var_name = t_coord_var_name + '_pct_bins'
        t_coord_units = '%'
        iris.std_names.STD_NAMES[t_coord_std_name] = {'canonical_units': '%'}
    else:
        t_coord_units = t_units
    t_coord = iris.coords.DimCoord(t_values,
                                   standard_name=t_coord_std_name,
                                   long_name=t_coord_long_name,
                                   var_name=t_coord_var_name,
                                   units=t_coord_units,
                                   bounds=t_bounds)
    s_bounds = uconv.get_bounds_list(s_edges)
    s_coord_std_name = s_cube.standard_name
    s_coord_long_name = s_cube.long_name
    s_coord_var_name = s_cube.var_name
    if pct_edges_ts:
        s_coord_std_name = s_coord_std_name + '_percentile_bins'
        s_coord_long_name = s_coord_long_name + ' percentile bins'
        s_coord_var_name = s_coord_var_name + '_pct_bins'
        s_coord_units = '%'
        iris.std_names.STD_NAMES[s_coord_std_name] = {'canonical_units': '%'}
    else:
        s_coord_units = s_units
    s_coord = iris.coords.DimCoord(s_values,
                                   standard_name=s_coord_std_name,
                                   long_name=s_coord_long_name,
                                   var_name=s_coord_var_name,
                                   units=s_coord_units,
                                   bounds=s_bounds)
    # Bug fix: the keyword was misspelled 'attrbutes', which would make
    # DimCoord raise TypeError (unexpected keyword argument).
    b_coord = iris.coords.DimCoord(b_values,
                                   standard_name=b_cube.standard_name,
                                   long_name=b_cube.long_name,
                                   var_name=b_cube.var_name,
                                   units=b_cube.units,
                                   attributes={'flag_values': flag_values,
                                               'flag_meanings': flag_meanings})
    tbin_dim_coords_list = [(year_coord, 0), (t_coord, 1), (b_coord, 2)]
    sbin_dim_coords_list = [(year_coord, 0), (s_coord, 1), (b_coord, 2)]
    tsbin_dim_coords_list = [(year_coord, 0), (s_coord, 1), (t_coord, 2), (b_coord, 3)]
    outcube_list = iris.cube.CubeList([])
    wvar_list = ['w', 'wt', 'ws'] if mul_ts else ['w']
    for wvar in wvar_list:
        # Derive the name of each output variable from the weights cube,
        # prefixed with T or S for the weight*T / weight*S variants.
        std_base_name = w_cube.standard_name
        long_base_name = w_cube.long_name
        var_base_name = w_cube.var_name
        if wvar == 'wt':
            if std_base_name:
                std_base_name = t_cube.standard_name + '_times_' + std_base_name
            long_base_name = t_cube.long_name.strip() + ' times ' + long_base_name
            var_base_name = t_cube.var_name + '_' + var_base_name
        if wvar == 'ws':
            if std_base_name:
                std_base_name = s_cube.standard_name + '_times_' + std_base_name
            long_base_name = s_cube.long_name.strip() + ' times ' + long_base_name
            var_base_name = s_cube.var_name + '_' + var_base_name
        if std_base_name:
            tbin_std_name = std_base_name + '_binned_by_temperature'
            iris.std_names.STD_NAMES[tbin_std_name] = {'canonical_units': str(w_cube.units)}
        else:
            tbin_std_name = None
        tbin_cube = iris.cube.Cube(outdata_dict[wvar + '_tbin'],
                                   standard_name=tbin_std_name,
                                   long_name=long_base_name + ' binned by temperature',
                                   var_name=var_base_name + '_tbin',
                                   units=w_cube.units,
                                   attributes=t_cube.attributes,
                                   dim_coords_and_dims=tbin_dim_coords_list)
        tbin_cube.attributes['history'] = log
        outcube_list.append(tbin_cube)
        if std_base_name:
            sbin_std_name = std_base_name + '_binned_by_salinity'
            iris.std_names.STD_NAMES[sbin_std_name] = {'canonical_units': str(w_cube.units)}
        else:
            sbin_std_name = None
        # NOTE(review): attributes come from t_cube here (not s_cube) —
        # preserved from the original; confirm this is intentional.
        sbin_cube = iris.cube.Cube(outdata_dict[wvar + '_sbin'],
                                   standard_name=sbin_std_name,
                                   long_name=long_base_name + ' binned by salinity',
                                   var_name=var_base_name + '_sbin',
                                   units=w_cube.units,
                                   attributes=t_cube.attributes,
                                   dim_coords_and_dims=sbin_dim_coords_list)
        sbin_cube.attributes['history'] = log
        outcube_list.append(sbin_cube)
        if std_base_name:
            tsbin_std_name = std_base_name + '_binned_by_temperature_and_salinity'
            iris.std_names.STD_NAMES[tsbin_std_name] = {'canonical_units': str(w_cube.units)}
        else:
            tsbin_std_name = None
        tsbin_cube = iris.cube.Cube(outdata_dict[wvar + '_tsbin'],
                                    standard_name=tsbin_std_name,
                                    long_name=long_base_name + ' binned by temperature and salinity',
                                    var_name=var_base_name + '_tsbin',
                                    units=w_cube.units,
                                    attributes=t_cube.attributes,
                                    dim_coords_and_dims=tsbin_dim_coords_list)
        tsbin_cube.attributes['history'] = log
        outcube_list.append(tsbin_cube)
    if pct_edges_ts:
        # Additional cubes recording the per-year T/S values of each
        # percentile edge.
        pct_edges_t, pct_edges_s = pct_edges_ts
        pct_t_coord_std_name = t_cube.standard_name + '_percentile'
        iris.std_names.STD_NAMES[pct_t_coord_std_name] = {'canonical_units': '%'}
        pct_t_coord = iris.coords.DimCoord(t_edges,
                                           standard_name=pct_t_coord_std_name,
                                           long_name=t_cube.long_name + ' percentile',
                                           var_name=t_cube.var_name + '_pct',
                                           units='%')
        pct_edges_t_cube = iris.cube.Cube(pct_edges_t,
                                          standard_name=t_cube.standard_name,
                                          long_name=t_cube.long_name,
                                          var_name=t_cube.var_name,
                                          units=t_units,
                                          attributes=t_cube.attributes,
                                          dim_coords_and_dims=[(year_coord, 0), (pct_t_coord, 1)])
        pct_edges_t_cube.attributes['history'] = log
        outcube_list.append(pct_edges_t_cube)
        pct_s_coord_std_name = s_cube.standard_name + '_percentile'
        iris.std_names.STD_NAMES[pct_s_coord_std_name] = {'canonical_units': '%'}
        pct_s_coord = iris.coords.DimCoord(s_edges,
                                           standard_name=pct_s_coord_std_name,
                                           long_name=s_cube.long_name + ' percentile',
                                           var_name=s_cube.var_name + '_pct',
                                           units='%')
        pct_edges_s_cube = iris.cube.Cube(pct_edges_s,
                                          standard_name=s_cube.standard_name,
                                          long_name=s_cube.long_name,
                                          var_name=s_cube.var_name,
                                          units=s_units,
                                          attributes=s_cube.attributes,
                                          dim_coords_and_dims=[(year_coord, 0), (pct_s_coord, 1)])
        pct_edges_s_cube.attributes['history'] = log
        outcube_list.append(pct_edges_s_cube)
    return outcube_list
def clipping_details(orig_data, clipped_data, bin_edges, var_name):
    """Log how many values were clipped into the first and last bins.

    Sanity-checks that the clipped counts in the outermost bins equal the
    original counts plus the out-of-range values that were clipped in.
    """
    lowest_edge = bin_edges[0]
    second_edge = bin_edges[1]
    highest_edge = bin_edges[-1]
    second_last_edge = bin_edges[-2]
    n_below = np.sum(orig_data < lowest_edge)
    n_first_bin = np.sum(orig_data <= second_edge) - n_below
    n_first_bin_clipped = np.sum(clipped_data <= second_edge) - np.sum(clipped_data < lowest_edge)
    assert n_first_bin_clipped == n_below + n_first_bin
    n_above = np.sum(orig_data > highest_edge)
    n_last_bin = np.sum(orig_data <= highest_edge) - np.sum(orig_data <= second_last_edge)
    n_last_bin_clipped = np.sum(clipped_data <= highest_edge) - np.sum(clipped_data <= second_last_edge)
    assert n_last_bin_clipped == n_above + n_last_bin
    logging.info(f"First {var_name} bin had {n_first_bin} values, clipping added {n_below}")
    logging.info(f"Last {var_name} bin had {n_last_bin} values, clipping added {n_above}")
def bin_data(df, var_list, edge_list, mul_ts=False):
    """Bin the data.

    Args:
      df (pandas.DataFrame) -- Data
      var_list (list) -- Variables for binning axes
      edge_list (list) -- Bin edges for each bin axis variable
      mul_ts (bool) -- Bin weights times T and S too

    Returns the weight histogram, plus the weight*S and weight*T
    histograms when mul_ts is True.
    """
    axis_columns = []
    for axis_var, axis_edges in zip(var_list, edge_list):
        assert axis_var in ['temperature', 'salinity', 'basin']
        # Clip out-of-range values into the first/last bin and report it.
        clipped = np.clip(df[axis_var].values, axis_edges[0], axis_edges[-1])
        clipping_details(df[axis_var].values, clipped, axis_edges, axis_var)
        axis_columns.append(clipped)
    sample = np.array(axis_columns).T
    weights = df['weight'].astype(np.float64).values
    w_dist, edges = np.histogramdd(sample, weights=weights, bins=edge_list)
    # Binning must conserve the total weight (to within rounding).
    np.testing.assert_allclose(weights.sum(), w_dist.sum(), rtol=1e-03)
    if not mul_ts:
        return w_dist
    ws_dist, edges = np.histogramdd(sample, weights=weights * df['salinity'].values, bins=edge_list)
    wt_dist, edges = np.histogramdd(sample, weights=weights * df['temperature'].values, bins=edge_list)
    return w_dist, ws_dist, wt_dist
def get_weights_data(file_list, var, area_file):
    """Read the weights data file/s.

    Static volume/area weights come from a single file; anything else is
    treated as a flux and combined across files.
    """
    w_var = mom_vars.get(var, var)
    if ('vol' in w_var) or ('area' in w_var):
        assert len(file_list) == 1
        w_cube = gio.get_ocean_weights(file_list[0])
        history = w_cube.attributes['history']
    else:
        assert area_file, "Area file needed for flux weights"
        w_cube, history = gio.combine_files(file_list, var, checks=True)
    return w_cube, history
def get_log(inargs, w_history, t_history, s_history, b_cube, a_cube):
    """Assemble the history attribute entry for the output file.

    Collects the history of each available input file and wraps it in a
    new provenance log entry.
    """
    infile_histories = {}
    if w_history:
        infile_histories[inargs.weights_files[0]] = w_history[0]
    if t_history:
        infile_histories[inargs.temperature_files[0]] = t_history[0]
    if s_history:
        infile_histories[inargs.salinity_files[0]] = s_history[0]
    if 'history' in b_cube.attributes:
        infile_histories[inargs.basin_file] = b_cube.attributes['history']
    if a_cube and 'history' in a_cube.attributes:
        infile_histories[inargs.area_file] = a_cube.attributes['history']
    return cmdprov.new_log(infile_history=infile_histories, git_repo=repo_dir)
def get_bin_data(files, var, w_cube):
    """Get binning variable data.

    Reads *var* from *files* and reconciles its shape with the weights
    cube so the two can be paired point-for-point.  When the weights have
    fewer dimensions than the data, the data is reduced to its surface
    level (index 0 of the depth axis).
    """
    cube, history = gio.combine_files(files, var, checks=True)
    w_coord_names = [coord.name() for coord in w_cube.dim_coords]
    coord_names = [coord.name() for coord in cube.dim_coords]
    # The trailing two dimensions (the horizontal grid) must always match.
    assert w_cube.shape[-2:] == cube.shape[-2:]
    if not w_cube.shape == cube.shape:
        if (w_cube.ndim == 3) and (cube.ndim == 4) and (w_coord_names[0] == coord_names[0]):
            #e.g. w_cube is surface flux (time, i, j),
            #cube is full depth temperature (time, depth, i, j)
            # Take the surface level and drop the now-scalar depth coord.
            cube = cube[:, 0, ::]
            cube.remove_coord(coord_names[1])
            assert w_cube.shape == cube.shape
        elif (w_cube.ndim == 2) and (cube.ndim == 4):
            #e.g. w_cube is area (i, j),
            #cube is full depth temperature (time, depth, i, j)
            cube = cube[:, 0, ::]
            cube.remove_coord(coord_names[1])
            # Weights are static, so they match everything but time.
            assert w_cube.shape == cube.shape[1:]
        else:
            #e.g. w_cube is area (i, j),
            #cube is surface temperature (time, i, j)
            #e.g. w_cube is volume (depth, i, j),
            #cube is temperature (time, depth, i, j)
            assert w_cube.shape == cube.shape[1:]
    return cube, history
def weighted_percentiles(data, weights, percentiles):
    """Return the weighted percentiles.

    Args:
      data (np.ndarray): Bin variable (e.g. temperature, salinity)
      weights (np.ndarray): Weights (e.g. cell volume, area)
      percentiles (np.ndarray): Requested percentiles as fractions
        (e.g. 0-1 by 0.01)
    """
    assert percentiles.min() >= 0.0
    assert percentiles.max() <= 1.0
    # A manual sort/cumsum/interp implementation was tried but did not give
    # clean results, so the statsmodels weighted quantile is used instead.
    weighted_stats = DescrStatsW(data=data, weights=weights)
    return weighted_stats.quantile(probs=percentiles, return_pandas=False)
def main(inargs):
    """Run the program.

    Reads the weights, temperature, salinity and basin data; bins the
    weights (and, for spatial weights, weight*T and weight*S) by
    temperature, salinity and temperature-salinity for each year; then
    writes the binned cubes to the output file.
    """
    logging.basicConfig(level=logging.DEBUG)
    # Volume/area weights are static ("spatial"); anything else is treated
    # as a time-varying flux.
    spatial_data = ('vol' in inargs.weights_var) or ('area' in inargs.weights_var)
    flux_data = not spatial_data
    w_cube, w_history = get_weights_data(inargs.weights_files, inargs.weights_var, inargs.area_file)
    t_cube, t_history = get_bin_data(inargs.temperature_files, inargs.temperature_var, w_cube)
    s_cube, s_history = get_bin_data(inargs.salinity_files, inargs.salinity_var, w_cube)
    b_cube = iris.load_cube(inargs.basin_file, 'region')
    if inargs.area_file:
        # An area file is only meaningful for converting per-m2 fluxes.
        assert flux_data
        a_cube = gio.get_ocean_weights(inargs.area_file)
    else:
        assert spatial_data
        a_cube = None
    log = get_log(inargs, w_history, t_history, s_history, b_cube, a_cube)
    b_values, b_edges = uconv.get_basin_details(b_cube)
    if inargs.bin_percentile:
        # Percentile binning: 100 one-percentile bins; the T/S edges are
        # recomputed per year inside the loop below.
        pct_edges = np.arange(0, 1.01, 0.01)
        pct_values = (pct_edges[1:] + pct_edges[:-1]) / 2
        nt_values = ns_values = len(pct_values)
        # NOTE(review): s_bounds is assigned (here and in the else branch)
        # but never used in this function.
        s_bounds = (-0.2, 80)
        pct_cube = a_cube
    else:
        # Fixed-width bins from the command-line bounds/step.
        t_min, t_max = inargs.temperature_bounds
        t_step = inargs.tbin_size
        t_edges = np.arange(t_min, t_max + t_step, t_step)
        t_values = (t_edges[1:] + t_edges[:-1]) / 2
        s_values, s_edges = uconv.salinity_bins()
        s_bounds=(s_edges[0], s_edges[-1])
        nt_values = len(t_values)
        ns_values = len(s_values)
        pct_cube = None
    # Temperature and salinity (and flux weights) must cover the same years.
    iris.coord_categorisation.add_year(t_cube, 'time')
    iris.coord_categorisation.add_year(s_cube, 'time')
    t_years = set(t_cube.coord('year').points)
    s_years = set(s_cube.coord('year').points)
    assert t_years == s_years
    if flux_data:
        iris.coord_categorisation.add_year(w_cube, 'time')
        w_years = set(w_cube.coord('year').points)
        assert w_years == t_years
    years = np.array(list(t_years))
    years.sort()
    # Output arrays: (year, bin, basin) and (year, sbin, tbin, basin).
    w_tbin_outdata = np.ma.zeros([len(years), nt_values, len(b_values)])
    w_sbin_outdata = np.ma.zeros([len(years), ns_values, len(b_values)])
    w_tsbin_outdata = np.ma.zeros([len(years), ns_values, nt_values, len(b_values)])
    if spatial_data:
        # Extra outputs for weight*S and weight*T.
        ws_tbin_outdata = np.ma.zeros([len(years), nt_values, len(b_values)])
        wt_tbin_outdata = np.ma.zeros([len(years), nt_values, len(b_values)])
        ws_sbin_outdata = np.ma.zeros([len(years), ns_values, len(b_values)])
        wt_sbin_outdata = np.ma.zeros([len(years), ns_values, len(b_values)])
        ws_tsbin_outdata = np.ma.zeros([len(years), ns_values, nt_values, len(b_values)])
        wt_tsbin_outdata = np.ma.zeros([len(years), ns_values, nt_values, len(b_values)])
    if inargs.bin_percentile:
        # Per-year record of the T/S value at each percentile edge.
        pct_edges_t = np.ma.zeros([len(years), nt_values + 1])
        pct_edges_s = np.ma.zeros([len(years), ns_values + 1])
    if inargs.bin_clim:
        # Climatological binning: use the monthly climatology (12 time
        # points) for T/S rather than each individual year's data.
        iris.coord_categorisation.add_month(s_cube, 'time')
        s_year_cube = s_cube.aggregated_by(['month'], iris.analysis.MEAN)
        s_year_cube.remove_coord('month')
        s_year_cube.replace_coord(s_cube[0:12, ::].coord('time'))
        iris.coord_categorisation.add_month(t_cube, 'time')
        t_year_cube = t_cube.aggregated_by(['month'], iris.analysis.MEAN)
        t_year_cube.remove_coord('month')
        t_year_cube.replace_coord(t_cube[0:12, ::].coord('time'))
    for year_index, year in enumerate(years):
        print(year)
        year_constraint = iris.Constraint(year=year)
        if not inargs.bin_clim:
            s_year_cube = s_cube.extract(year_constraint)
            t_year_cube = t_cube.extract(year_constraint)
        if flux_data:
            # Convert the per-m2 flux into a total using the area cube.
            w_year_cube = w_cube.extract(year_constraint)
            w_year_cube = spatial_weights.multiply_by_area(w_year_cube, area_cube=a_cube)
        else:
            w_year_cube = w_cube
        df, s_units, t_units = water_mass.create_df(w_year_cube, t_year_cube, s_year_cube, b_cube,
                                                    pct_cube=pct_cube,
                                                    multiply_weights_by_days_in_year_frac=True)
        if inargs.bin_percentile:
            # Recompute this year's T/S bin edges from weighted percentiles.
            weight_var = 'percentile_weights' if pct_cube else 'weight'
            t_edges = weighted_percentiles(df['temperature'].values, df[weight_var].values, pct_edges)
            s_edges = weighted_percentiles(df['salinity'].values, df[weight_var].values, pct_edges)
            pct_edges_t[year_index, :] = t_edges
            pct_edges_s[year_index, :] = s_edges
        if flux_data:
            w_tbin_outdata[year_index, ::] = bin_data(df, ['temperature', 'basin'], [t_edges, b_edges])
            w_sbin_outdata[year_index, ::] = bin_data(df, ['salinity', 'basin'], [s_edges, b_edges])
            w_tsbin_outdata[year_index, ::] = bin_data(df, ['salinity', 'temperature', 'basin'], [s_edges, t_edges, b_edges])
        else:
            tbin_list = bin_data(df, ['temperature', 'basin'], [t_edges, b_edges], mul_ts=True)
            sbin_list = bin_data(df, ['salinity', 'basin'], [s_edges, b_edges], mul_ts=True)
            tsbin_list = bin_data(df, ['salinity', 'temperature', 'basin'], [s_edges, t_edges, b_edges], mul_ts=True)
            w_tbin_outdata[year_index, ::], ws_tbin_outdata[year_index, ::], wt_tbin_outdata[year_index, ::] = tbin_list
            w_sbin_outdata[year_index, ::], ws_sbin_outdata[year_index, ::], wt_sbin_outdata[year_index, ::] = sbin_list
            w_tsbin_outdata[year_index, ::], ws_tsbin_outdata[year_index, ::], wt_tsbin_outdata[year_index, ::] = tsbin_list
    outdata_dict = {}
    outdata_dict['w_tbin'] = np.ma.masked_invalid(w_tbin_outdata)
    outdata_dict['w_sbin'] = np.ma.masked_invalid(w_sbin_outdata)
    outdata_dict['w_tsbin'] = np.ma.masked_invalid(w_tsbin_outdata)
    if spatial_data:
        outdata_dict['ws_tbin'] = np.ma.masked_invalid(ws_tbin_outdata)
        outdata_dict['wt_tbin'] = np.ma.masked_invalid(wt_tbin_outdata)
        outdata_dict['ws_sbin'] = np.ma.masked_invalid(ws_sbin_outdata)
        outdata_dict['wt_sbin'] = np.ma.masked_invalid(wt_sbin_outdata)
        outdata_dict['ws_tsbin'] = np.ma.masked_invalid(ws_tsbin_outdata)
        outdata_dict['wt_tsbin'] = np.ma.masked_invalid(wt_tsbin_outdata)
    if inargs.bin_percentile:
        # Express the output bin axes in percent.
        t_values = s_values = pct_values * 100
        t_edges = s_edges = pct_edges * 100
        pct_edges_ts = [pct_edges_t, pct_edges_s]
    else:
        pct_edges_ts = []
    # NOTE(review): w_year_cube, s_units and t_units carry over from the
    # last loop iteration — assumes at least one year of data.
    outcube_list = construct_cube(outdata_dict, w_year_cube, t_cube, s_cube, b_cube, years,
                                  t_values, t_edges, t_units, s_values, s_edges, s_units,
                                  log, mul_ts=spatial_data, pct_edges_ts=pct_edges_ts)
    equalise_attributes(outcube_list)
    iris.save(outcube_list, inargs.outfile)
if __name__ == '__main__':
    # Command-line interface: positional inputs (weights, basin, output)
    # plus options selecting the binning variables and bin layout.
    parser = argparse.ArgumentParser(description=__doc__,
                                     argument_default=argparse.SUPPRESS,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument("weights_files", type=str, nargs='*', help="volume, area or a flux")
    parser.add_argument("weights_var", type=str, help="weights variable")
    parser.add_argument("basin_file", type=str, help="basin file (from calc_basin.py)")
    parser.add_argument("outfile", type=str, help="output file")
    parser.add_argument("--temperature_files", type=str, nargs='*', help="temperature files for the binning")
    parser.add_argument("--temperature_var", type=str, help="temperature variable")
    parser.add_argument("--salinity_files", type=str, nargs='*', help="salinity files for the binning")
    parser.add_argument("--salinity_var", type=str, help="salinity variable")
    parser.add_argument("--area_file", type=str, default=None, help="For converting m-2 flux to total")
    parser.add_argument("--temperature_bounds", type=float, nargs=2, default=(-6, 50),
                        help='bounds for the temperature (Y) axis')
    # Default temperature bin width: one third of a degree.
    bin_default = 1/3.
    parser.add_argument("--tbin_size", type=float, default=bin_default, help='temperature bin size')
    parser.add_argument("--bin_clim", action="store_true", default=False,
                        help="Use the bin file climatology for binning")
    parser.add_argument("--bin_percentile", action="store_true", default=False,
                        help="Use percentiles for binning")
    args = parser.parse_args()
    main(args)
|
<gh_stars>1-10
# Copyright (c) 2019 American Express Travel Related Services Company, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
""" This module acts as the interface between the user's Keras
code and the distributed tensorflow session which will be used
to train the network.
This module is responsible mainly for graph extraction, transfer
and the issuance of the training call """
from __future__ import print_function
import subprocess
import os
import keras.backend as K
import tensorflow as tf
import boto3
# SSH private key file (looked up under ./aux/) and remote login user used
# for every ssh/scp command issued to the worker VMs.
KEY_NAME = 'easyDist.pem'
USER = 'ec2-user'
class ExecutionEnvironment:
    """Extracts the Keras graph and uses it to set up a distributed
    tensorflow cluster across all the VMs.

    The first IP in resources.txt is treated as the parameter server; the
    remaining IPs are workers.
    """
    def __init__(self, bucket_name, prefix, epochs, batch_size, opt, port="2222", test=False):
        """Read the cluster layout and (unless testing) list the S3 data chunks.

        Args:
            bucket_name: S3 bucket holding the training data chunks.
            prefix: key prefix selecting the data chunks in the bucket.
            epochs, batch_size, opt: training hyper-parameters passed to trainer.py.
            port: TCP port used by every cluster node.
            test: when True, skip S3 and use dummy chunk names.
        """
        self.epochs = epochs
        self.batch_size = batch_size
        self.opt = opt
        self.test = test
        # Bug fix: the file handle was previously never closed; a context
        # manager guarantees closure.
        with open("resources.txt", "r") as worker_ip_file:
            # Extract IP addresses (remove the trailing newline character).
            # NOTE(review): assumes resources.txt has exactly two lines
            # (instance ids, then IPs) — confirm against the provisioning code.
            self.instance_ids, all_ips = [line[:-1].split() for line in worker_ip_file.readlines()]
        self.ps_ip = all_ips[0]
        self.worker_ips = all_ips[1:]
        self.bucket_name = bucket_name
        if test is False:
            self.client = boto3.client('s3')
            self.data_chunks = [a['Key'] for a in
                                self.client.list_objects(Bucket=bucket_name, Prefix=prefix)['Contents']]
        else:
            self.data_chunks = ['dummy_1', 'dummy_2', 'dummy_3', 'dummy_4']
        self.saved = -1    # index of the most recently saved graph
        self.trained = -1  # index of the most recently started training run
        self.create_graph_directory = True
        self.create_run_directory = True
        self.port = port
        self.executions = []
    def fit(self):
        """ Starts training the network after the graph
        transfer and tensorflow setup is complete"""
        print("Creating Execution Scripts on Remote Workers")
        self.create_run_scripts()
        self.save_graph()
        self.transfer_graph()
        self.trained += 1
        execution = Execution(self.trained, self.worker_ips)
        execution.start_training()
        self.executions.append(execution)
    '''
    def status(self, worker, error_file=True, lines=5):
        """Gives the current training status for a single
        worker"""
        self.executions[-1].give_status(worker, error_file, lines)
    '''
    def save_graph(self):
        """Extract the current Keras computational graph and save it under
        graphs/graph<N>; returns the new graph index."""
        self.saved += 1
        if self.create_graph_directory:
            subprocess.call(["mkdir", "graphs"])
            self.create_graph_directory = False
        sess = K.get_session()
        saver = tf.train.Saver()
        logdir_name = "graphs/graph" + str(self.saved)
        tf.train.write_graph(graph_or_graph_def=sess.graph_def,
                             logdir=logdir_name,
                             name='Graph.prototxt')
        saver.save(sess, logdir_name + "/Graph")
        graph = tf.get_default_graph()
        # Listing all placeholders in graph so the user can wire up
        # dataReader.py in the right order.
        placeholders = [op for op in graph.get_operations() if op.type == "Placeholder"]
        op_names = [str(op.name) + ':0' for op in placeholders]
        print('\n\nYour Placeholder Tensors are ', op_names)
        print('Please ensure that this ordering is followed \
when in the dataReader File. (Inputs, Outputs, Weights)')
        print('By Default, the weights are set to 1')
        print('----------------------------------------')
        return self.saved
    def transfer_graph(self, graph_number=-1):
        """Transfers Most Recent Graph to all VMs (or the graph with the
        given index when graph_number >= 0)."""
        if graph_number == -1:
            graph_number = self.saved
        for i in range(len(self.worker_ips)):
            print('Transferring')
            os.system('ssh -i ./aux/%s -o StrictHostKeyChecking=no \
%s@"%s" \'mkdir Graph\'' % (KEY_NAME, USER, self.worker_ips[i]))
            os.system('scp -i ./aux/%s -o StrictHostKeyChecking=no \
dataReader.py %s@"%s":.' % (KEY_NAME, USER, self.worker_ips[i]))
            os.system('scp -i ./aux/%s -o StrictHostKeyChecking=no \
trainer.py %s@"%s":.' % (KEY_NAME, USER, self.worker_ips[i]))
            # os.system returns 0 on success, so report per-worker outcome.
            if os.system('scp -i ./aux/%s -o StrictHostKeyChecking=no \
./graphs/graph"%s"/* %s@"%s":Graph/.'
                         % (KEY_NAME, str(graph_number), USER, self.worker_ips[i])) == 0:
                print('Successfully Transferred Graph to Worker ', i)
            else:
                print('Failed to Transfer Graph to Worker ', i)
    def create_run_scripts(self):
        """ Creates the run scripts for each machines,
        This function also determines which chunks of data
        are mapped to which worker"""
        if self.create_run_directory:
            os.system("mkdir runscripts > /dev/null")
            self.create_run_directory = False
        # Create IP addresses with ports.
        ps_ip_with_port = self.ps_ip + ':' + self.port
        worker_ips_with_port = [ip + ':' + self.port for ip in self.worker_ips]
        # Shared prefix: GPU monitoring plus the trainer.py invocation.
        common_string = """mkdir models > /dev/null
source activate tensorflow_p36
nvidia-smi --query-gpu=timestamp,name,pci.bus_id,driver_version,pstate,\
pcie.link.gen.max,pcie.link.gen.current,temperature.gpu,utilization.gpu,\
utilization.memory --format=csv -l 30 > GPUlog.csv & python trainer.py \
--epochs=%s --batch_size=%s --optimizer=%s --ps_hosts=%s \
--worker_hosts=%s""" % (str(self.epochs),
                        str(self.batch_size), self.opt,
                        ps_ip_with_port,
                        ",".join(worker_ips_with_port))
        ps_string = common_string + """ -j_name ps -t_id 0 --bucket=. --keys=.
zip outs_train.zip ./outs_train/*
zip errs_train.zip ./errs_train/*
zip outs_data.zip ./outs_data/*
zip ps.zip outs_train.zip outs_data.zip errs_train.zip ip* GPUlog.csv"""
        # Bug fix: script files were previously closed manually (or not at
        # all on error); use context managers.
        with open("run.sh", "w") as file_ps:
            file_ps.write(ps_string)
        # Evenly spread the data chunks over the workers.
        # NOTE(review): float division; the int() conversions below keep the
        # slices contiguous and covering all chunks.
        chunks_per_worker = len(self.data_chunks) / len(self.worker_ips)
        for worker, ip_address in enumerate(self.worker_ips):
            start_index = worker * chunks_per_worker
            end_index = min((worker + 1) * chunks_per_worker, len(self.data_chunks))
            worker_string = common_string + """ -j_name worker -t_id %s --bucket=%s --keys=%s
zip -r m_%s_logs.zip ip* GPU*
zip -r m_%s_models.zip log.csv epochLog.csv models/*
rm -r models/""" % (str(worker), self.bucket_name,
                    ",".join(self.data_chunks[int(start_index):int(end_index)]),
                    str(worker), str(worker))
            filename = "runscripts/%s.sh" % (ip_address)
            with open(filename, "w") as run_script:
                run_script.write(worker_string)
            if self.test is False:
                os.system('scp -i ./aux/%s -o StrictHostKeyChecking=no \
./"%s" %s@"%s":run.sh' % (KEY_NAME, filename, USER, ip_address))
class Execution:
    """ This class starts and monitors the actual training by
    issuing the required shell commands"""

    def __init__(self, experiment_number, worker_ips):
        # IPs of the worker VMs participating in this experiment.
        self.worker_ips = worker_ips
        # Numeric id used to locate this experiment's files/scripts.
        self.experiment_number = experiment_number

    def start_training(self):
        """Starts Distributed Tensorflow Training of the graph"""
        os.system('sh ./aux/startTraining.sh %d'%(self.experiment_number))

    # Dead code kept for reference: a commented-out status helper,
    # disabled by wrapping it in a bare string literal.
    '''
    def give_status(self, worker, error_file=True, lines=5):
        """Gives the current training status for a single worker"""
        if worker < 0 or worker >= len(self.worker_ips):
            print('Sorry worker id out of range')
            return
        print('\n*************************************')
        if error_file:
            print('Last %d lines of Error File For Worker %d ' % (lines, worker))
            os.system('tail -"%s" experiments/exp"%s"/errs_train/"%s"' % (str(lines),
                                                                          str(self.experiment_number),
                                                                          str(self.worker_ips[worker])))
            print('----------------------------')
        print('Last %d lines of Output File For Worker %d ' % (lines, worker))
        os.system('tail -"%s" experiments/exp"%s"/outs_train/"%s"' % (str(lines),
                                                                      str(self.experiment_number),
                                                                      str(self.worker_ips[worker])))
        print('\n*************************************')
    '''
|
"""
Show the errors in physics tendencies as a function of precision. Show both
relative and absolute errors (haven't decided on mean_diff or rms_diff). For
relative errors, this can be directly compared to the machine epsilon so add
this line also. Also print out errors as a function of machine epsilon to be
shown in a table.
"""
import warnings
import numpy as np
import matplotlib.pyplot as plt
import iris.exceptions
from iris.analysis import maths
from irise.plot.util import legend, multilabel
from myscripts.statistics import global_mean
from myscripts.models import speedy
from myscripts.projects.ithaca.tendencies import plotdir, load_tendency
# Silence noisy third-party warnings for cleaner script output (intentional).
warnings.filterwarnings('ignore')

# Parametrization schemes to analyse; names must match the keys of
# speedy.physics_schemes and, via filename(), the tendency file names.
schemes = ['All Parametrizations',
           'Condensation', 'Convection', 'Cloud',
           'Short-Wave Radiation', 'Long-Wave Radiation',
           'Surface Fluxes', 'Vertical Diffusion']
def main():
    """Entry point: build and print the error-vs-precision tables."""
    generate_table()
def generate_table():
    """For each variable, plot error vs precision per level band and print a
    LaTeX table with one row per parametrization scheme."""
    for variable in ['Temperature', 'Specific Humidity',
                     'Zonal Velocity', 'Meridional Velocity']:
        print(variable)
        table = {}
        for scheme in schemes:
            # Each row starts with the scheme name; main2 appends one
            # column per level band.
            table[scheme] = scheme
        for label, sigma in [('Boundary Layer', speedy.sigma_levels[0]),
                             ('Lower Troposphere', speedy.sigma_levels[1:4]),
                             ('Upper Troposphere', speedy.sigma_levels[4:6]),
                             ('Stratosphere', speedy.sigma_levels[6:8])]:
            main2(variable, sigma, table)
            #plt.savefig(
            #    plotdir + 'error_vs_precision/' +
            #    '{}_{}.png'.format(label, variable).lower().replace(' ', '_'))
            plt.show()
        for scheme in schemes:
            # Print each row as a LaTeX table line (trailing \\).
            print(table[scheme] + '\\\\')
    return
def main2(variable, sigma, table):
    """Plot tendency errors vs precision for one variable and level band.

    Left panel: error of each scheme relative to its own full-precision
    tendency. Right panel: error relative to the total parametrization
    tendency, with the machine-epsilon reference lines. Also appends the
    error range (in multiples of machine epsilon) to each scheme's row in
    *table*.
    """
    # Create a two by two grid
    fig, axes = plt.subplots(nrows=1, ncols=2, sharey='row',
                             figsize=(16, 5),
                             subplot_kw={'yscale': 'log'})

    # Show the reference machine epsilon
    sbits = np.arange(5, 24)
    machine_error = 2.0 ** -(sbits + 1)

    # Errors with respect to individual parametrization tendency
    plt.axes(axes[0])
    for scheme in schemes:
        plp = speedy.physics_schemes[scheme]
        try:
            fp = load_tendency(variable=variable, scheme=scheme,
                               rp_scheme='all_parametrizations',
                               sigma=sigma, precision=52)
            rp = load_tendency(variable=variable, scheme=scheme,
                               rp_scheme=filename(scheme),
                               sigma=sigma, precision=sbits)
            # Ignore where tendencies are zero
            rp.data = np.ma.masked_where((rp.data - fp.data) == 0, rp.data)
            display_errors(rp, fp, plp)
        except iris.exceptions.ConstraintMismatchError:
            # Missing data for this scheme: skip it rather than abort.
            print('{} cannot be loaded \n'.format(scheme))

    # Errors with respect to total parametrization tendency
    plt.axes(axes[1])
    fp = load_tendency(variable=variable, rp_scheme='all_parametrizations',
                       sigma=sigma, precision=52)
    tendency = global_mean(maths.abs(fp))
    tendency = collapse_sigma(tendency)
    axes[1].axhline(
        tendency.data, linestyle='--', color='k', alpha=0.5)
    axes[1].plot(
        sbits, machine_error * tendency.data, ':k', alpha=0.5)
    for scheme in schemes:
        plp = speedy.physics_schemes[scheme]
        rp = load_tendency(variable=variable, rp_scheme=filename(scheme),
                           sigma=sigma, precision=sbits)
        error = display_errors(rp, fp, plp, label=scheme)
        # Normalise: error as a fraction of the mean tendency, expressed
        # in multiples of the machine epsilon at each precision.
        error = (error/tendency) / machine_error
        table[scheme] += ' & ${:.0f}-{:.0f}\\varepsilon$'.format(error.data.min(), error.data.max())

    # Add dressing to the plot
    multilabel(axes[0], 0, factor=0.01)
    axes[0].set_title('Individual Temperature Tendency')
    axes[0].set_ylabel('Average Tendency Error [{}]'.format(tendency.units))
    axes[0].set_xticks(sbits[::5])

    multilabel(axes[1], 1, factor=0.01)
    axes[1].set_title('Total Temperature Tendency')
    axes[1].set_xticks(sbits[::5])

    fig.text(0.45, 0.01, 'Precision [sbits]')
    legend(ax=axes[1], key=lambda x: speedy.physics_schemes[x[0]].idx, ncol=2)
    plt.subplots_adjust(left=0.08, right=0.98, wspace=0.05)

    return
def filename(scheme):
    """Convert a scheme display name to its snake_case file identifier."""
    to_underscore = str.maketrans({' ': '_', '-': '_'})
    return scheme.translate(to_underscore).lower()
def display_errors(rp, fp, plp, **kwargs):
    """Plot the global-mean absolute error of *rp* relative to *fp*.

    Args:
        rp: reduced-precision tendency cube.
        fp: full-precision reference tendency cube.
        plp: plotting helper for the scheme (provides .plot).
        **kwargs: forwarded to plp.plot (e.g. label=...).

    Returns:
        The collapsed absolute-error cube that was plotted.
    """
    # Calculate absolute error
    abs_error = maths.abs(rp - fp)
    abs_error = global_mean(abs_error)
    abs_error = collapse_sigma(abs_error)
    plp.plot(abs_error, **kwargs)
    return abs_error
def collapse_sigma(cube, coord_name='sigma'):
    """Average *cube* over the sigma coordinate when it has multiple levels;
    single-level cubes are returned unchanged.

    NOTE(review): `iris.analysis` is reachable here only as a side effect
    of the `from iris.analysis import maths` import at the top of the file.
    """
    if len(cube.coord(coord_name).points) > 1:
        return cube.collapsed(coord_name, iris.analysis.MEAN)
    else:
        return cube
# Script entry point.
if __name__ == '__main__':
    main()
|
<reponame>piwaniuk/critic
# -*- mode: python; encoding: utf-8 -*-
#
# Copyright 2017 the Critic contributors, Opera Software ASA
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import api
import jsonapi
@jsonapi.PrimaryResource
class Batches(object):
    """Batches of changes in reviews."""

    name = "batches"
    contexts = (None, "reviews")
    value_class = api.batch.Batch
    exceptions = api.batch.BatchError

    @staticmethod
    def json(value, parameters):
        """{
             "id": integer or null,
             "is_empty": boolean,
             "review": integer,
             "author": integer,
             "comment": integer or null,
             "timestamp": float or null,
             "created_comments": integer[],
             "written_replies": integer[],
             "resolved_issues": integer[],
             "reopened_issues": integer[],
             "morphed_comments": MorphedComment[],
             "reviewed_changes": integer[],
             "unreviewed_changes": integer[],
           }

           MorphedComment {
             "comment": integer,
             "new_type": "issue" or "note",
           }"""

        # Stable ordering: morphed comments sorted by the comment's id.
        morphed_comments = sorted([
            { "comment": comment, "new_type": new_type }
            for comment, new_type in value.morphed_comments.items()
        ], key=lambda morphed_comment: morphed_comment["comment"].id)
        timestamp = jsonapi.v1.timestamp(value.timestamp)
        return parameters.filtered(
            "batches", { "id": value.id,
                         "is_empty": value.is_empty,
                         "review": value.review,
                         "author": value.author,
                         "timestamp": timestamp,
                         "comment": value.comment,
                         "created_comments": jsonapi.sorted_by_id(
                             value.created_comments),
                         "written_replies": jsonapi.sorted_by_id(
                             value.written_replies),
                         "resolved_issues": jsonapi.sorted_by_id(
                             value.resolved_issues),
                         "reopened_issues": jsonapi.sorted_by_id(
                             value.reopened_issues),
                         "morphed_comments": morphed_comments,
                         "reviewed_changes": jsonapi.sorted_by_id(
                             value.reviewed_file_changes),
                         "unreviewed_changes": jsonapi.sorted_by_id(
                             value.unreviewed_file_changes) })

    @staticmethod
    def single(parameters, argument):
        """Retrieve one (or more) batches in reviews.

           BATCH_ID : integer

           Retrieve a batch identified by its unique numeric id."""

        batch = api.batch.fetch(
            parameters.critic, batch_id=jsonapi.numeric_id(argument))

        # When the resource path includes a review, the fetched batch must
        # belong to that review.
        review = jsonapi.deduce("v1/reviews", parameters)
        if review and review != batch.review:
            raise jsonapi.PathError(
                "Batch does not belong to specified review")

        return Batches.setAsContext(parameters, batch)

    @staticmethod
    def multiple(parameters):
        """Retrieve all batches in the system (or review.)

           review : REVIEW_ID : integer

           Retrieve only batches in the specified review. Can only be used if a
           review is not specified in the resource path.

           author : AUTHOR : integer or string

           Retrieve only batches authored by the specified user, identified by
           the user's unique numeric id or user name.

           unpublished : UNPUBLISHED : 'yes'

           Retrieve a single batch representing the current user's unpublished
           changes to a review. Must be combined with `review` and cannot be
           combined with `author`."""

        critic = parameters.critic
        review = jsonapi.deduce("v1/reviews", parameters)
        author = jsonapi.from_parameter("v1/users", "author", parameters)
        unpublished_parameter = parameters.getQueryParameter("unpublished")
        if unpublished_parameter is not None:
            if unpublished_parameter == "yes":
                if author is not None:
                    raise jsonapi.UsageError(
                        "Parameters 'author' and 'unpublished' cannot be "
                        "combined")
                return api.batch.fetchUnpublished(critic, review)
            else:
                raise jsonapi.UsageError(
                    "Invalid 'unpublished' parameter: %r (must be 'yes')"
                    % unpublished_parameter)
        return api.batch.fetchAll(critic, review=review, author=author)

    @staticmethod
    def create(parameters, value, values, data):
        """Create a batch by submitting the current user's changes, with an
        optional accompanying note."""
        critic = parameters.critic
        # NOTE(review): `user` is assigned but never used below — confirm
        # whether submitChanges should act on behalf of this user.
        user = parameters.context.get("users", critic.actual_user)

        # Batches are created via plain POST only; no target value(s).
        if value or values:
            raise jsonapi.UsageError("Invalid POST request")

        converted = jsonapi.convert(
            parameters,
            {
                "review?": api.review.Review,
                "comment?": str,
            },
            data)

        review = jsonapi.deduce("v1/reviews", parameters)

        # The review may come from the path or the payload, but any payload
        # review must agree with the path.
        if not review:
            if "review" not in converted:
                raise jsonapi.UsageError("No review specified")
            review = converted["review"]
        elif "review" in converted and review != converted["review"]:
            raise jsonapi.UsageError("Conflicting reviews specified")

        if "comment" in converted:
            comment_text = converted["comment"].strip()
            if not comment_text:
                raise jsonapi.UsageError("Empty comment specified")
        else:
            comment_text = None

        result = []

        # Callback used by submitChanges to hand back the created batch.
        def collectBatch(batch):
            assert isinstance(batch, api.batch.Batch)
            result.append(batch)

        with api.transaction.Transaction(critic) as transaction:
            modifier = transaction.modifyReview(review)
            if comment_text:
                note = modifier.createComment(comment_type="note",
                                              author=critic.actual_user,
                                              text=comment_text)
            else:
                note = None
            modifier.submitChanges(note, callback=collectBatch)

        assert len(result) == 1
        return result[0], None

    @staticmethod
    def deduce(parameters):
        # Resolve a batch from the request context or the ?batch= query
        # parameter; specifying both is an error.
        batch = parameters.context.get("batches")
        batch_parameter = parameters.getQueryParameter("batch")
        if batch_parameter is not None:
            if batch is not None:
                raise jsonapi.UsageError(
                    "Redundant query parameter: batch=%s" % batch_parameter)
            batch = api.batch.fetch(
                parameters.critic, jsonapi.numeric_id(batch_parameter))
        return batch

    @staticmethod
    def setAsContext(parameters, batch):
        # Register the batch in the request context for nested resources.
        parameters.setContext(Batches.name, batch)
        return batch
|
#!/usr/bin/env python3
#
# Copyright (c) 2019, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import json
import os
import subprocess
import sys
import time
import unittest
import config
import debug
from node import Node
# Non-zero enables pcap-based packet verification (requires pktverify).
PACKET_VERIFICATION = int(os.getenv('PACKET_VERIFICATION', 0))

if PACKET_VERIFICATION:
    from pktverify.addrs import ExtAddr
    from pktverify.packet_verifier import PacketVerifier

# Offset added to simulated node resources so parallel runs don't collide.
PORT_OFFSET = int(os.getenv('PORT_OFFSET', "0"))

DEFAULT_PARAMS = {
    'is_mtd': False,
    'is_bbr': False,
    'mode': 'rsdn',
    'panid': 0xface,
    'whitelist': None,
    'version': '1.1',
}
"""Default configurations when creating nodes."""

EXTENDED_ADDRESS_BASE = 0x166e0a0000000000
"""Extended address base to keep U/L bit 1. The value is borrowed from Thread Test Harness."""
class NcpSupportMixin():
    """Mixin that skips a test case when it cannot run under NCP.

    Subclasses set ``SUPPORT_NCP = False`` to opt out of ncp-sim runs.
    """

    SUPPORT_NCP = True

    def __init__(self, *args, **kwargs):
        node_type = os.getenv('NODE_TYPE', 'sim')
        if node_type == 'ncp-sim' and not self.SUPPORT_NCP:
            # Exit code 77 tells automake's test driver to record a SKIP.
            sys.exit(77)
        super().__init__(*args, **kwargs)
class TestCase(NcpSupportMixin, unittest.TestCase):
    """The base class for all thread certification test cases.

    The `topology` member of sub-class is used to create test topology.
    """

    # Sub-classes must define the network topology as {node_id: params}.
    TOPOLOGY = None

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Only consumed by _collect_test_info_after_setup via time.ctime();
        # NOTE(review): never reassigned in this class — confirm a runner
        # or subclass sets it before setUp.
        self._start_time = None
        # Packet verification runs only when globally enabled AND the
        # concrete test case defines a verify() method.
        self._do_packet_verification = PACKET_VERIFICATION and hasattr(self, 'verify')

    def setUp(self):
        """Create simulator, nodes and apply configurations.
        """
        self._clean_up_tmp()

        self.simulator = config.create_default_simulator()
        self.nodes = {}
        self._initial_topology = initial_topology = {}

        for i, params in self.TOPOLOGY.items():
            if params:
                # Per-node params are merged over the defaults.
                params = dict(DEFAULT_PARAMS, **params)
            else:
                params = DEFAULT_PARAMS.copy()
            initial_topology[i] = params

            self.nodes[i] = Node(
                i,
                params['is_mtd'],
                simulator=self.simulator,
                name=params.get('name'),
                version=params['version'],
                is_bbr=params['is_bbr'],
            )
            self.nodes[i].set_panid(params['panid'])
            self.nodes[i].set_mode(params['mode'])

            # Optional per-node settings, applied only when present.
            if 'partition_id' in params:
                self.nodes[i].set_partition_id(params['partition_id'])
            if 'channel' in params:
                self.nodes[i].set_channel(params['channel'])
            if 'masterkey' in params:
                self.nodes[i].set_masterkey(params['masterkey'])
            if 'network_name' in params:
                self.nodes[i].set_network_name(params['network_name'])

            if 'router_selection_jitter' in params:
                self.nodes[i].set_router_selection_jitter(params['router_selection_jitter'])
            if 'router_upgrade_threshold' in params:
                self.nodes[i].set_router_upgrade_threshold(params['router_upgrade_threshold'])
            if 'router_downgrade_threshold' in params:
                self.nodes[i].set_router_downgrade_threshold(params['router_downgrade_threshold'])

            if 'timeout' in params:
                self.nodes[i].set_timeout(params['timeout'])

            if 'active_dataset' in params:
                self.nodes[i].set_active_dataset(params['active_dataset']['timestamp'],
                                                 panid=params['active_dataset'].get('panid'),
                                                 channel=params['active_dataset'].get('channel'),
                                                 channel_mask=params['active_dataset'].get('channel_mask'),
                                                 master_key=params['active_dataset'].get('master_key'))

            if 'pending_dataset' in params:
                self.nodes[i].set_pending_dataset(params['pending_dataset']['pendingtimestamp'],
                                                  params['pending_dataset']['activetimestamp'],
                                                  panid=params['pending_dataset'].get('panid'),
                                                  channel=params['pending_dataset'].get('channel'))

            if 'key_switch_guardtime' in params:
                self.nodes[i].set_key_switch_guardtime(params['key_switch_guardtime'])
            if 'key_sequence_counter' in params:
                self.nodes[i].set_key_sequence_counter(params['key_sequence_counter'])

            if 'network_id_timeout' in params:
                self.nodes[i].set_network_id_timeout(params['network_id_timeout'])

            if 'context_reuse_delay' in params:
                self.nodes[i].set_context_reuse_delay(params['context_reuse_delay'])

            if 'max_children' in params:
                self.nodes[i].set_max_children(params['max_children'])

        # we have to add whitelist after nodes are all created
        for i, params in initial_topology.items():
            whitelist = params['whitelist']
            if not whitelist:
                continue

            for j in whitelist:
                rssi = None
                if isinstance(j, tuple):
                    # (node_id, rssi) entries also pin the link quality.
                    j, rssi = j
                self.nodes[i].add_whitelist(self.nodes[j].get_addr64(), rssi=rssi)
            self.nodes[i].enable_whitelist()

        self._inspector = debug.Inspector(self)
        self._collect_test_info_after_setup()

    def inspect(self):
        # Drop into the interactive topology inspector.
        self._inspector.inspect()

    def tearDown(self):
        """Destroy nodes and simulator.
        """
        if self._do_packet_verification and os.uname().sysname != "Linux":
            raise NotImplementedError(
                f'{self.testcase_name}: Packet Verification not available on {os.uname().sysname} (Linux only).')

        if self._do_packet_verification:
            # Give the sniffer time to flush the final captured packets.
            time.sleep(3)

        for node in list(self.nodes.values()):
            node.stop()
            node.destroy()

        self.simulator.stop()

        if self._do_packet_verification:
            self._test_info['pcap'] = self._get_pcap_filename()
            test_info_path = self._output_test_info()
            # Location of the Wireshark shared libraries used by pktverify.
            os.environ['LD_LIBRARY_PATH'] = '/tmp/thread-wireshark'
            self._verify_packets(test_info_path)

    def flush_all(self):
        """Flush away all captured messages of all nodes.
        """
        for i in list(self.nodes.keys()):
            self.simulator.get_messages_sent_by(i)

    def flush_nodes(self, nodes):
        """Flush away all captured messages of specified nodes.

        Args:
            nodes (list): nodes whose messages to flush.
        """
        for i in nodes:
            if i in list(self.nodes.keys()):
                self.simulator.get_messages_sent_by(i)

    def _clean_up_tmp(self):
        """
        Clean up node files in tmp directory
        """
        os.system(f"rm -f tmp/{PORT_OFFSET}_*.flash tmp/{PORT_OFFSET}_*.data tmp/{PORT_OFFSET}_*.swap")

    def _verify_packets(self, test_info_path: str):
        # Run the test case's verify() against the recorded capture.
        pv = PacketVerifier(test_info_path)
        pv.add_common_vars()
        self.verify(pv)
        print("Packet verification passed: %s" % test_info_path, file=sys.stderr)

    @property
    def testcase_name(self):
        # Test name derived from the invoked script's filename.
        return os.path.splitext(os.path.basename(sys.argv[0]))[0]

    def collect_ipaddrs(self):
        """Record each node's IP addresses and ML-EID for verification."""
        if not self._do_packet_verification:
            return

        test_info = self._test_info

        for i, node in self.nodes.items():
            ipaddrs = node.get_addrs()
            test_info['ipaddrs'][i] = ipaddrs
            mleid = node.get_mleid()
            test_info['mleids'][i] = mleid

    def collect_rloc16s(self):
        """Record each node's RLOC16 (as '0x%04x' strings) for verification."""
        if not self._do_packet_verification:
            return

        test_info = self._test_info
        test_info['rloc16s'] = {}

        for i, node in self.nodes.items():
            test_info['rloc16s'][i] = '0x%04x' % node.get_addr16()

    def collect_extra_vars(self, **vars):
        """Stash arbitrary named values (string keys) for use in verify()."""
        if not self._do_packet_verification:
            return
        for k in vars.keys():
            assert isinstance(k, str), k
        test_vars = self._test_info.setdefault("extra_vars", {})
        test_vars.update(vars)

    def _collect_test_info_after_setup(self):
        """
        Collect test info after setUp
        """
        if not self._do_packet_verification:
            return

        test_info = self._test_info = {
            'testcase': self.testcase_name,
            'start_time': time.ctime(self._start_time),
            'pcap': '',
            'extaddrs': {},
            'ethaddrs': {},
            'ipaddrs': {},
            'mleids': {},
            'topology': self._initial_topology,
        }

        for i, node in self.nodes.items():
            extaddr = node.get_addr64()
            test_info['extaddrs'][i] = ExtAddr(extaddr).format_octets()

    def _output_test_info(self):
        """
        Output test info to json file after tearDown
        """
        filename = f'{self.testcase_name}.json'
        with open(filename, 'wt') as ofd:
            ofd.write(json.dumps(self._test_info, indent=1, sort_keys=True))
        return filename

    def _get_pcap_filename(self):
        # Capture file name comes from TEST_NAME, or 'current' by default.
        current_pcap = os.getenv('TEST_NAME', 'current') + '.pcap'
        return os.path.abspath(current_pcap)

    def assure_run_ok(self, cmd, shell=False):
        """Run *cmd* and raise CalledProcessError on a nonzero exit code."""
        if not shell and isinstance(cmd, str):
            cmd = cmd.split()

        proc = subprocess.run(cmd, stdout=sys.stdout, stderr=sys.stderr, shell=shell)
        print(">>> %s => %d" % (cmd, proc.returncode), file=sys.stderr)
        proc.check_returncode()
|
from pydub import AudioSegment
import os
import numpy as np
from tqdm import tqdm
from joblib import Parallel, delayed
from python_speech_features import logfbank
import scipy.io.wavfile as wav
import argparse
# Command-line interface: dataset root, which subsets to process, and
# feature-extraction / parallelism options.
parser = argparse.ArgumentParser(description='Librispeech preprocess.')
parser.add_argument('root', metavar='root', type=str,
                    help='file path to LibriSpeech. (e.g. /usr/downloads/LibriSpeech/)')
parser.add_argument('sets', metavar='sets', type=str, nargs='+',
                    help='Datasets to process in LibriSpeech. (e.g. train-clean-100/)')
parser.add_argument('--n_jobs', dest='n_jobs', action='store', default=-2 ,
                    help='number of cpu availible for preprocessing.\n -1: use all cpu, -2: use all cpu but one')
parser.add_argument('--n_filters', dest='n_filters', action='store', default=40 ,
                    help='number of filters for fbank. (Default : 40)')
parser.add_argument('--win_size', dest='win_size', action='store', default=0.01 ,
                    help='window size during feature extraction (Default : 0.01 [10ms])')
parser.add_argument('--char_map', dest='char_map', action='store', default=None ,
                    help='Character2Index mapping file, generated during training data preprocessing. Specify this argument when processing dev/test data.')
paras = parser.parse_args()

# NOTE(review): no type= on the options, so values given on the command
# line arrive as strings; only the defaults carry int/float types — confirm
# downstream code tolerates string values.
root = paras.root
libri_path = paras.sets
n_jobs = paras.n_jobs
n_filters = paras.n_filters
win_size = paras.win_size
char_map_path = paras.char_map
# # flac 2 wav
def flac2wav(f_path):
    """Convert one .flac file to a .wav written alongside the original."""
    wav_path = f_path[:-5] + '.wav'
    AudioSegment.from_file(f_path, "flac").export(wav_path, format="wav")
print('Processing flac2wav...',flush=True)
print(flush=True)

# Collect every .flac under root/<set>/<speaker>/<chapter>/.
file_list = []
for p in libri_path:
    p = root + p
    for sub_p in sorted(os.listdir(p)):
        for sub2_p in sorted(os.listdir(p+sub_p+'/')):
            for file in sorted(os.listdir(p+sub_p+'/'+sub2_p)):
                if '.flac' in file:
                    file_path = p+sub_p+'/'+sub2_p+'/'+file
                    file_list.append(file_path)

# Convert in parallel; `results` is unused (conversion is a side effect).
results = Parallel(n_jobs=n_jobs,backend="threading")(delayed(flac2wav)(i) for i in tqdm(file_list))
print('done')
# # wav 2 log-mel fbank
def wav2logfbank(f_path):
    """Compute log-mel filterbank features for one .wav and save them.

    The output is written next to the input as ``<name>.fb<n_filters>.npy``
    (np.save appends the .npy suffix). Uses the module-level ``win_size``
    and ``n_filters`` settings.
    """
    (rate,sig) = wav.read(f_path)
    fbank_feat = logfbank(sig,rate,winlen=win_size,nfilt=n_filters)
    np.save(f_path[:-3]+'fb'+str(n_filters),fbank_feat)
print('Processing wav2logfbank...',flush=True)
print(flush=True)

# Feature-extract every converted wav; file_list still holds .flac paths,
# so the extension is swapped here.
results = Parallel(n_jobs=n_jobs,backend="threading")(delayed(wav2logfbank)(i[:-4]+'wav') for i in tqdm(file_list))
print('done')

# # log-mel fbank 2 feature
print('Preparing dataset...',flush=True)

file_list = []
text_list = []

for p in libri_path:
    p = root + p
    for sub_p in sorted(os.listdir(p)):
        for sub2_p in sorted(os.listdir(p+sub_p+'/')):
            # Read trans txt
            with open(p+sub_p+'/'+sub2_p+'/'+sub_p+'-'+sub2_p+'.trans.txt','r') as txt_file:
                for line in txt_file:
                    # Drop the utterance id (first token) and trailing newline.
                    text_list.append(' '.join(line[:-1].split(' ')[1:]))
            # Read acoustic feature
            for file in sorted(os.listdir(p+sub_p+'/'+sub2_p)):
                if '.fb'+str(n_filters) in file:
                    file_path = p+sub_p+'/'+sub2_p+'/'+file
                    file_list.append(file_path)

# Load every feature file once just to measure its length in frames.
X = []
for f in file_list:
    X.append(np.load(f))
audio_len = [len(x) for x in X]

# Sort data by signal length (long to short)
file_list = [file_list[idx] for idx in reversed(np.argsort(audio_len))]
text_list = [text_list[idx] for idx in reversed(np.argsort(audio_len))]
if char_map_path:
    # Load char mapping
    char_map = {}
    with open(char_map_path,'r') as f:
        for line in f:
            # Skip the CSV header row.
            if 'idx,char' in line:
                continue
            idx = int(line.split(',')[0])
            char = line[:-1].split(',')[1]
            char_map[char] = idx
else:
    # Building a fresh mapping is only valid for training data.
    assert 'train' in libri_path[0]
    # Create char mapping
    char_map = {}
    char_map['<sos>'] = 0
    char_map['<eos>'] = 1
    char_idx = 2

    # map char to index
    for text in text_list:
        for char in text:
            if char not in char_map:
                char_map[char] = char_idx
                char_idx +=1

    # Reverse mapping
    rev_char_map = {v:k for k,v in char_map.items()}

    # Save mapping
    with open(root+'idx2chap.csv','w') as f:
        f.write('idx,char\n')
        for i in range(len(rev_char_map)):
            f.write(str(i)+','+rev_char_map[i]+'\n')
# text to index sequence
tmp_list = []
for text in text_list:
    tmp = []
    for char in text:
        tmp.append(char_map[char])
    tmp_list.append(tmp)
text_list = tmp_list
del tmp_list

# write dataset
# NOTE(review): file_name is unbound (NameError) if the set name contains
# none of 'train'/'test'/'dev' — confirm callers only pass those sets.
if 'train' in libri_path[0]:
    file_name = 'train.csv'
elif 'test' in libri_path[0]:
    file_name = 'test.csv'
elif 'dev' in libri_path[0]:
    file_name = 'dev.csv'

print('Writing dataset to'+root+file_name+'...',flush=True)

# One row per utterance: index, feature-file path, space-separated label ids.
with open(root+file_name,'w') as f:
    f.write('idx,input,label\n')
    for i in range(len(file_list)):
        f.write(str(i)+',')
        f.write(file_list[i]+',')
        for char in text_list[i]:
            f.write(' '+str(char))
        f.write('\n')
print('done')
|
import calendar
from decimal import Decimal
import datetime
import logging
from django.db.models import F, Q
from django.template.loader import render_to_string
from django.utils.translation import ugettext as _
from corehq.apps.accounting.utils import ensure_domain_instance
from dimagi.utils.decorators.memoized import memoized
from corehq import Domain
from corehq.apps.accounting.exceptions import LineItemError, InvoiceError, InvoiceEmailThrottledError, BillingContactInfoError
from corehq.apps.accounting.models import (
LineItem, FeatureType, Invoice, DefaultProductPlan, Subscriber,
Subscription, BillingAccount, SubscriptionAdjustment,
SubscriptionAdjustmentMethod, BillingRecord,
BillingContactInfo, SoftwarePlanEdition, CreditLine,
)
from corehq.apps.smsbillables.models import SmsBillable
from corehq.apps.users.models import CommCareUser
from dimagi.utils.django.email import send_HTML_email
import settings
logger = logging.getLogger('accounting')
DEFAULT_DAYS_UNTIL_DUE = 30
class DomainInvoiceFactory(object):
"""
This handles all the little details when generating an Invoice.
"""
    def __init__(self, date_start, date_end, domain):
        """
        The Invoice generated will always be for the month preceding the
        invoicing_date.

        For example, if today is July 5, 2014 then the invoice will be from
        June 1, 2014 to June 30, 2014.

        Raises InvoiceError when *domain* cannot be resolved to a Domain.
        """
        self.date_start = date_start
        self.date_end = date_end
        # Accepts either a domain name or a Domain instance.
        self.domain = ensure_domain_instance(domain)
        # Ensures the email-throttle error is logged at most once per run.
        self.logged_throttle_error = False
        if self.domain is None:
            raise InvoiceError("Domain '%s' is not a valid domain on HQ!"
                               % domain)
        self.is_community_invoice = False
    @property
    def subscriber(self):
        # Lazily creates the Subscriber record for this domain if missing.
        return Subscriber.objects.get_or_create(domain=self.domain.name)[0]
    def get_subscriptions(self):
        """Return the domain's subscriptions overlapping the invoice period,
        ordered by start (then end) date.

        Open-ended subscriptions (date_end=None) are included; zero-length
        ones (date_end <= date_start) are excluded."""
        subscriptions = Subscription.objects.filter(
            subscriber=self.subscriber, date_start__lte=self.date_end
        ).filter(Q(date_end=None) | Q(date_end__gt=self.date_start)
        ).filter(Q(date_end=None) | Q(date_end__gt=F('date_start'))
        ).order_by('date_start', 'date_end').all()
        return list(subscriptions)
    def get_community_ranges(self, subscriptions):
        """Return (start, end) date ranges inside the invoicing period that
        no subscription in *subscriptions* covers.

        *subscriptions* is expected in start-date order, as produced by
        get_subscriptions()."""
        community_ranges = []
        if len(subscriptions) == 0:
            community_ranges.append((self.date_start, self.date_end))
        else:
            prev_sub_end = self.date_end
            for ind, sub in enumerate(subscriptions):
                if ind == 0 and sub.date_start > self.date_start:
                    # the first subscription started AFTER the beginning
                    # of the invoicing period
                    community_ranges.append((self.date_start, sub.date_start))
                # Gap between the previous subscription's end and this start.
                if prev_sub_end < self.date_end and sub.date_start > prev_sub_end:
                    community_ranges.append((prev_sub_end, sub.date_start))
                prev_sub_end = sub.date_end
                if (ind == len(subscriptions) - 1
                    and sub.date_end is not None
                    and sub.date_end <= self.date_end
                ):
                    # the last subscription ended BEFORE the end of
                    # the invoicing period
                    # NOTE(review): this range extends one day PAST
                    # self.date_end, unlike every other range produced
                    # here — confirm the +1 day is intentional.
                    community_ranges.append(
                        (sub.date_end, self.date_end + datetime.timedelta(days=1))
                    )
        return community_ranges
    def ensure_full_coverage(self, subscriptions):
        """Fill every gap in the invoicing period with a Community-plan
        subscription so feature charges incurred without a paid plan still
        get invoiced. New subscriptions are appended to *subscriptions*
        in place."""
        plan_version = DefaultProductPlan.get_default_plan_by_domain(
            self.domain, edition=SoftwarePlanEdition.COMMUNITY
        ).plan.get_version()
        if not plan_version.feature_charges_exist_for_domain(self.domain):
            # Nothing chargeable happened outside paid subscriptions.
            return
        community_ranges = self.get_community_ranges(subscriptions)
        if not community_ranges:
            return
        # First check to make sure none of the existing subscriptions is set
        # to do not invoice. Let's be on the safe side and not send a
        # community invoice out, if that's the case.
        do_not_invoice = any([s.do_not_invoice for s in subscriptions])
        account = BillingAccount.get_or_create_account_by_domain(
            self.domain.name, created_by=self.__class__.__name__,
            created_by_invoicing=True)[0]
        if account.date_confirmed_extra_charges is None:
            # The project hasn't agreed to extra charges yet: alert the
            # billing team (for active projects) and mark the generated
            # invoices Do Not Invoice.
            if self.domain.is_active:
                subject = "[%s] Invoice Generation Issue" % self.domain.name
                email_content = render_to_string(
                    'accounting/invoice_error_email.html', {
                        'project': self.domain.name,
                        'error_msg': "This project is incurring charges on their "
                                     "Community subscription, but they haven't "
                                     "agreed to the charges yet. Someone should "
                                     "follow up with this project to see if everything "
                                     "is configured correctly or if communication "
                                     "needs to happen between Dimagi and the project's"
                                     "admins. For now, the invoices generated are "
                                     "marked as Do Not Invoice.",
                    }
                )
                send_HTML_email(
                    subject, settings.BILLING_EMAIL, email_content,
                    email_from="Dimagi Billing Bot <%s>" % settings.DEFAULT_FROM_EMAIL
                )
            do_not_invoice = True
        if not BillingContactInfo.objects.filter(account=account).exists():
            # No contact information exists for this account.
            # This shouldn't happen, but if it does, we can't continue
            # with the invoice generation.
            raise BillingContactInfoError(
                "Project %s has incurred charges, but does not have their "
                "Billing Contact Info filled out. Someone should follow up "
                "on this." % self.domain.name
            )
        for c in community_ranges:
            # create a new community subscription for each
            # date range that the domain did not have a subscription
            community_subscription = Subscription(
                account=account,
                plan_version=plan_version,
                subscriber=self.subscriber,
                date_start=c[0],
                date_end=c[1],
                do_not_invoice=do_not_invoice,
            )
            community_subscription.save()
            subscriptions.append(community_subscription)
def create_invoices(self):
subscriptions = self.get_subscriptions()
self.ensure_full_coverage(subscriptions)
for subscription in subscriptions:
self.create_invoice_for_subscription(subscription)
    def create_invoice_for_subscription(self, subscription):
        """Create, save, and email an Invoice for one subscription.

        The invoice period is the overlap of [self.date_start,
        self.date_end] with the subscription's own active range. Trial
        subscriptions are skipped. Returns the saved Invoice, or None
        for trials.
        """
        if subscription.is_trial:
            # Don't create invoices for trial subscriptions
            logger.info("[BILLING] Skipping invoicing for Subscription "
                        "%s because it's a trial." % subscription.pk)
            return
        if subscription.auto_generate_credits:
            # Pre-load one month of credit per product rate so the
            # charges generated below are offset.
            for product_rate in subscription.plan_version.product_rates.all():
                CreditLine.add_credit(
                    product_rate.monthly_fee,
                    subscription=subscription,
                    product_type=product_rate.product.product_type,
                )
        days_until_due = DEFAULT_DAYS_UNTIL_DUE
        if subscription.date_delay_invoicing is not None:
            # Honor a delayed-invoicing date by pushing the due date out.
            td = subscription.date_delay_invoicing - self.date_end
            days_until_due = max(days_until_due, td.days)
        date_due = self.date_end + datetime.timedelta(days_until_due)
        # Clamp the invoice period to the subscription's active range.
        if subscription.date_start > self.date_start:
            invoice_start = subscription.date_start
        else:
            invoice_start = self.date_start
        if (subscription.date_end is not None
            and subscription.date_end <= self.date_end):
            # Since the Subscription is actually terminated on date_end
            # have the invoice period be until the day before date_end.
            invoice_end = subscription.date_end - datetime.timedelta(days=1)
        else:
            invoice_end = self.date_end
        invoice = Invoice(
            subscription=subscription,
            date_start=invoice_start,
            date_end=invoice_end,
            date_due=date_due,
            is_hidden=subscription.do_not_invoice,
        )
        invoice.save()
        if subscription.subscriptionadjustment_set.count() == 0:
            # record that the subscription was created
            SubscriptionAdjustment.record_adjustment(
                subscription,
                method=SubscriptionAdjustmentMethod.TASK,
                invoice=invoice,
            )
        self.generate_line_items(invoice, subscription)
        invoice.calculate_credit_adjustments()
        invoice.update_balance()
        # Second save: line items / credit adjustments changed the totals.
        invoice.save()
        record = BillingRecord.generate_record(invoice)
        try:
            record.send_email()
        except InvoiceEmailThrottledError as e:
            # Log the throttle error only once per invoicing run.
            if not self.logged_throttle_error:
                logger.error("[BILLING] %s" % e)
                self.logged_throttle_error = True
        return invoice
def generate_line_items(self, invoice, subscription):
for product_rate in subscription.plan_version.product_rates.all():
product_factory = ProductLineItemFactory(subscription, product_rate, invoice)
product_factory.create()
for feature_rate in subscription.plan_version.feature_rates.all():
feature_factory_class = FeatureLineItemFactory.get_factory_by_feature_type(
feature_rate.feature.feature_type
)
feature_factory = feature_factory_class(subscription, feature_rate, invoice)
feature_factory.create()
class LineItemFactory(object):
    """
    This generates a line item based on what type of Feature or Product rate triggers it.
    """
    line_item_details_template = ""  # todo

    def __init__(self, subscription, rate, invoice):
        self.subscription = subscription
        self.rate = rate
        self.invoice = invoice

    @property
    def unit_description(self):
        """
        If this returns None then the unit unit_description, unit_cost, and quantity
        will not show up for the line item in the printed invoice.
        """
        return None

    @property
    def base_description(self):
        """
        If this returns None then the unit base_description and base_cost
        will not show up for the line item in the printed invoice.
        """
        return None

    @property
    def unit_cost(self):
        # Subclasses must define the cost of a single billed unit.
        raise NotImplementedError()

    @property
    def quantity(self):
        # Subclasses must define how many units are billed.
        raise NotImplementedError()

    @property
    @memoized
    def subscribed_domains(self):
        """Return the domains this subscription's subscriber covers:
        every domain of the organization, or the single domain."""
        if self.subscription.subscriber.organization is None and self.subscription.subscriber.domain is None:
            raise LineItemError("No domain or organization could be obtained as the subscriber.")
        if self.subscription.subscriber.organization is not None:
            return Domain.get_by_organization(self.subscription.subscriber.organization)
        return [self.subscription.subscriber.domain]

    @property
    @memoized
    def line_item_details(self):
        # Optional per-line detail rows; subclasses may override.
        return []

    def create(self):
        """Instantiate (without saving) the LineItem for this rate."""
        line_item = LineItem(
            invoice=self.invoice,
            base_description=self.base_description,
            unit_description=self.unit_description,
            unit_cost=self.unit_cost,
            quantity=self.quantity,
        )
        return line_item

    @classmethod
    def get_factory_by_feature_type(cls, feature_type):
        """Map a FeatureType to its LineItemFactory subclass.

        Raises:
            LineItemError: if no factory exists for ``feature_type``.
        """
        try:
            return {
                FeatureType.SMS: SmsLineItemFactory,
                FeatureType.USER: UserLineItemFactory,
            }[feature_type]
        except KeyError:
            # Bug fix: the original message was missing its closing quote.
            raise LineItemError("No line item factory exists for the feature type '%s'" % feature_type)
class ProductLineItemFactory(LineItemFactory):
    """Builds the software-plan (product) line item, prorating the
    monthly fee whenever the invoice does not span a full calendar
    month."""

    def create(self):
        line_item = super(ProductLineItemFactory, self).create()
        line_item.product_rate = self.rate
        if not self.is_prorated:
            line_item.base_cost = self.rate.monthly_fee
        line_item.save()
        return line_item

    @property
    @memoized
    def is_prorated(self):
        # A full month starts on the 1st and ends on the month's last day.
        end = self.invoice.date_end
        month_last_day = calendar.monthrange(end.year, end.month)[1]
        spans_full_month = (self.invoice.date_start.day == 1
                            and end.day == month_last_day)
        return not spans_full_month

    @property
    def base_description(self):
        if self.is_prorated:
            return None
        return _("One month of %(plan_name)s Software Plan." % {
            'plan_name': self.rate.product.name,
        })

    @property
    def unit_description(self):
        if not self.is_prorated:
            return None
        return _("%(num_days)s day%(pluralize)s of %(plan_name)s Software Plan." % {
            'num_days': self.num_prorated_days,
            'pluralize': "" if self.num_prorated_days == 1 else "s",
            'plan_name': self.rate.product.name,
        })

    @property
    def num_prorated_days(self):
        # Inclusive day count within the invoice period.
        return self.invoice.date_end.day - self.invoice.date_start.day + 1

    @property
    def unit_cost(self):
        if not self.is_prorated:
            return Decimal('0.0')
        # Daily rate assumes a 30-day month regardless of calendar length.
        return Decimal("%.2f" % round(self.rate.monthly_fee / 30, 2))

    @property
    def quantity(self):
        return self.num_prorated_days if self.is_prorated else 1
class FeatureLineItemFactory(LineItemFactory):
    """Base factory for feature (usage-based) line items, billed at the
    rate's per-excess-unit fee."""

    def create(self):
        feature_line_item = super(FeatureLineItemFactory, self).create()
        feature_line_item.feature_rate = self.rate
        feature_line_item.save()
        return feature_line_item

    @property
    def unit_cost(self):
        # Each unit over the monthly limit is billed at the excess fee.
        return self.rate.per_excess_fee
class UserLineItemFactory(FeatureLineItemFactory):
    """Bills for active users above the plan's monthly user limit."""

    @property
    def quantity(self):
        return self.num_excess_users

    @property
    def num_excess_users(self):
        # Never negative: zero when usage is within the limit.
        over_limit = self.num_users - self.rate.monthly_limit
        return over_limit if over_limit > 0 else 0

    @property
    @memoized
    def num_users(self):
        # Sum active CommCare users across every subscribed domain.
        return sum(CommCareUser.total_by_domain(domain, is_active=True)
                   for domain in self.subscribed_domains)

    @property
    def unit_description(self):
        if self.num_excess_users > 0:
            return _("Per User fee exceeding monthly limit of "
                     "%(monthly_limit)s users." % {
                         'monthly_limit': self.rate.monthly_limit,
                     })
class SmsLineItemFactory(FeatureLineItemFactory):
    """Bills SMS usage beyond the feature rate's free monthly limit.

    All excess fees are summed into ``unit_cost`` and billed with a
    quantity of 1; ``line_item_details`` carries per-message rows.
    """

    @property
    @memoized
    def unit_cost(self):
        """Total usage + gateway fees for billables past the free limit."""
        total_excess = Decimal('0.0')
        if self.is_within_monthly_limit:
            return total_excess
        sms_count = 0
        for billable in self.sms_billables:
            sms_count += 1
            if sms_count <= self.rate.monthly_limit:
                # don't count fees until the free monthly limit is exceeded
                continue
            if billable.usage_fee:
                total_excess += billable.usage_fee.amount
            if billable.gateway_fee:
                total_excess += billable.gateway_charge
        return Decimal("%.2f" % round(total_excess, 2))

    @property
    @memoized
    def quantity(self):
        # Excess fees are aggregated into unit_cost, so quantity is 1.
        return 1

    @property
    @memoized
    def unit_description(self):
        """Human-readable summary of SMS usage for the invoice line."""
        if self.is_within_monthly_limit:
            return _("%(num_sms)d of %(monthly_limit)d included SMS "
                     "messages") % {
                'num_sms': self.num_sms,
                'monthly_limit': self.rate.monthly_limit,
            }
        if self.rate.monthly_limit == 0:
            # Bug fix: the original format string contained a literal
            # "(plural)s" instead of interpolating %(plural)s, so the
            # 'plural' key was never used.
            return _("%(num_sms)d SMS Message%(plural)s" % {
                'num_sms': self.num_sms,
                'plural': '' if self.num_sms == 1 else 's',
            })
        # Bug fix: the original computed monthly_limit - num_sms, which
        # is negative on this branch (num_sms exceeds the limit), and
        # pluralized on num_extra == 0 instead of num_extra != 1.
        num_extra = self.num_sms - self.rate.monthly_limit
        return _("%(num_extra_sms)d SMS Message%(plural)s beyond "
                 "%(monthly_limit)d messages included." % {
            'num_extra_sms': num_extra,
            'plural': '' if num_extra == 1 else 's',
            'monthly_limit': self.rate.monthly_limit,
        })

    @property
    @memoized
    def sms_billables_queryset(self):
        # Valid billables across all subscribed domains in the invoice
        # period, newest first.
        return SmsBillable.objects.filter(
            domain__in=self.subscribed_domains,
            is_valid=True,
            date_sent__range=[self.invoice.date_start, self.invoice.date_end]
        ).order_by('-date_sent')

    @property
    @memoized
    def sms_billables(self):
        return list(self.sms_billables_queryset)

    @property
    @memoized
    def num_sms(self):
        return self.sms_billables_queryset.count()

    @property
    @memoized
    def is_within_monthly_limit(self):
        return self.num_sms - self.rate.monthly_limit <= 0

    @property
    def line_item_details(self):
        """Per-message rows: [phone_number, direction, gateway, total fee]."""
        details = []
        for billable in self.sms_billables:
            gateway_api = billable.gateway_fee.criteria.backend_api_id if billable.gateway_fee else "custom"
            gateway_fee = billable.gateway_charge
            usage_fee = billable.usage_fee.amount if billable.usage_fee else Decimal('0.0')
            total_fee = gateway_fee + usage_fee
            details.append(
                [billable.phone_number, billable.direction, gateway_api, total_fee]
            )
        return details
|
<reponame>CourtHans/401-ops-challenges
#!/usr/bin/env python3
# Script: 401 Op Challenge Day 7
# Author: <NAME>
# Date of latest revision: 10/13/20
# Purpose: Menu & execution for encryption (directory, file, string)
# Import libraries
from cryptography.fernet import Fernet
import os, math, time, datetime
# Declare variables
dir_count = 0
file_count = 0
# Declare functions
def write_key():
    """Generate a fresh Fernet key, persist it to ``key.key``, and return it."""
    new_key = Fernet.generate_key()
    with open("key.key", "wb") as key_file:
        key_file.write(new_key)
    return new_key
def load_key():
    """Load the Fernet key from ``key.key`` in the current directory.

    Returns:
        bytes: the key, or None when no key file exists yet.
    """
    try:
        # Use a context manager so the file handle is closed promptly
        # (the original leaked an open handle), and catch only the
        # expected "file missing" case instead of a bare except that
        # would also hide real problems such as permission errors.
        with open("key.key", "rb") as key_file:
            return key_file.read()
    except FileNotFoundError:
        return None
# function to write key only if it's not already there
def if_key():
    """Return a Fernet instance, generating and storing a key if absent."""
    key = load_key()
    # Idiom fix: compare against None with `is`, not `==`.
    if key is None:
        key = write_key()
    return Fernet(key)
def encrypt_message():
    """Prompt for a message, encrypt it with the stored key, and print the token."""
    plaintext = input("What message would you like to encrypt? ")
    encoded = plaintext.encode()
    # initialize the Fernet class
    fernet = if_key()
    # encrypt the message
    token = fernet.encrypt(encoded)
    print("Your encrypted message:")
    print(token)
def decrypt_message():
    """Prompt for an encrypted token, decrypt it, and print the plaintext."""
    user_input = input("What message would you like to decrypt? ")
    message_d = str.encode(user_input)
    f = if_key()
    # decrypt the message
    decrypted = f.decrypt(message_d)
    print("Your decrypted message:")
    # Decode the bytes properly instead of slicing the repr() string
    # (str(decrypted)[2:-1]), which mangled non-ASCII characters and
    # showed escape sequences for quotes/backslashes.
    print(decrypted.decode())
def encrypt_file():
    """Prompt for a file path and encrypt that file in place."""
    fernet = if_key()
    filename = input("Please enter the full filepath for the file you wish to encrypt? ")
    with open(filename, "rb") as source:
        plaintext = source.read()
    ciphertext = fernet.encrypt(plaintext)
    with open(filename, "wb") as target:
        target.write(ciphertext)
def decrypt_file():
    """Prompt for a file path and decrypt that file in place."""
    fernet = if_key()
    filename = input("Please enter the full filepath for the file you wish to decrypt? ")
    with open(filename, "rb") as source:
        ciphertext = source.read()
    plaintext = fernet.decrypt(ciphertext)
    with open(filename, "wb") as target:
        target.write(plaintext)
def recurse_encrypt(filename):
    """Encrypt ``filename`` in place using the stored (or new) key."""
    fernet = if_key()
    with open(filename, "rb") as source:
        plaintext = source.read()
    ciphertext = fernet.encrypt(plaintext)
    with open(filename, "wb") as target:
        target.write(ciphertext)
def recurse_decrypt(filename):
    """Decrypt ``filename`` in place using the stored key."""
    fernet = if_key()
    with open(filename, "rb") as source:
        ciphertext = source.read()
    plaintext = fernet.decrypt(ciphertext)
    with open(filename, "wb") as target:
        target.write(plaintext)
def ask_user():
    """Show the mode menu, read the user's selection, and dispatch it.

    NOTE(review): the menu prompt is one string literal continued with
    backslashes, so each continued line's leading whitespace is part of
    the displayed text — kept byte-identical here.
    """
    mode = input("\nWhat would you like to do?\
    \nMode 1 - Encrypt a file\
    \nMode 2 - Decrypt a file\
    \nMode 3 - Encrypt a message\
    \nMode 4 - Decrypt a message\
    \nMode 5 - Encrypt a folder and its contents\
    \nMode 6 - Decrypt a folder and its contents\
    \n\
    \nPlease enter a number: ")
    # Dispatch on the raw string so non-numeric input falls through to
    # the "Invalid selection" branch instead of raising.
    if (mode== "1"):
        encrypt_file()
        print("...file encrypted.")
    elif (mode == "2"):
        decrypt_file()
        print("...file decrypted.")
    elif (mode == "3"):
        encrypt_message()
    elif (mode == "4"):
        decrypt_message()
    elif (mode == "5"):
        print_dirContents_encrypt()
    elif (mode == "6"):
        print_dirContents_decrypt()
    else:
        print("Invalid selection...")
#Traverse & encrypt directory tree
def print_dirContents_encrypt():
    """Walk a user-supplied directory tree, printing and encrypting every file.

    Updates the module-level dir_count / file_count counters and resets
    them to zero when the walk finishes.
    """
    global dir_count
    global file_count
    start_path = input("Please enter the absolute path to the directory you want to encrypt: ")
    for (path, dirs, files) in os.walk(start_path):
        print('Directory: {:s}'.format(path))
        dir_count += 1
        for file in files:
            full_path = os.path.join(path, file)
            fstat = os.stat(full_path)
            size = fstat.st_size
            # Pick a human-readable unit for the size display.
            if size > 1024 * 1024:
                fsize, unit = math.ceil(size / (1024 * 1024)), "MB"
            elif size > 1024:
                fsize, unit = math.ceil(size / 1024), "KB"
            else:
                fsize, unit = size, "B"
            mtime = time.strftime("%X %x", time.gmtime(fstat.st_mtime))
            print('encrypting \t{:15.15s}{:8d} {:2s} {:18s}'.format(file,fsize,unit,mtime))
            file_count += 1
            recurse_encrypt(full_path)
    # Print total files and directory count
    print('\nEncrypted {} files in {} directories.'.format(file_count,dir_count))
    dir_count = 0
    file_count = 0
#Traverse & decrypt directory tree
def print_dirContents_decrypt():
    """Walk a user-supplied directory tree, printing and decrypting every file.

    Updates the module-level dir_count / file_count counters and resets
    them to zero when the walk finishes.
    """
    global dir_count
    global file_count
    start_path = input("Please enter the absolute path to the directory you want to decrypt: ")
    for (path, dirs, files) in os.walk(start_path):
        print('Directory: {:s}'.format(path))
        dir_count += 1
        for file in files:
            full_path = os.path.join(path, file)
            fstat = os.stat(full_path)
            size = fstat.st_size
            # Pick a human-readable unit for the size display.
            if size > 1024 * 1024:
                fsize, unit = math.ceil(size / (1024 * 1024)), "MB"
            elif size > 1024:
                fsize, unit = math.ceil(size / 1024), "KB"
            else:
                fsize, unit = size, "B"
            mtime = time.strftime("%X %x", time.gmtime(fstat.st_mtime))
            print('decrypting \t{:15.15s}{:8d} {:2s} {:18s}'.format(file,fsize,unit,mtime))
            file_count += 1
            recurse_decrypt(full_path)
    # Print total files and directory count
    print('\nDecrypted {} files in {} directories.'.format(file_count,dir_count))
    dir_count = 0
    file_count = 0
# Main
#print_dirContents()
# Interactive loop: keep offering the menu until the user answers "n".
while True:
    ask_user()
    y_n = input ("Try again? y/n ")
    if y_n == "n":
        print("Have a nice day!")
        break
# resource: https://www.opentechguides.com/how-to/article/python/78/directory-file-list.html
# End |
#import time
#
#tic = time.time()
#
import rhinoscriptsyntax as rs
import json
import random
import itertools
import copy
from compas.datastructures import Mesh
from compas.datastructures import Network
from compas.utilities import geometric_key
from compas.topology import depth_first_tree
from compas.geometry import centroid_points
from compas.geometry.algorithms import discrete_coons_patch
from compas.geometry import mesh_cull_duplicate_vertices
from compas.geometry import add_vectors
from compas.geometry import subtract_vectors
from compas.geometry import scale_vector
from compas.geometry import mesh_smooth_area
from compas.geometry import mesh_smooth_centroid
from compas_rhino import get_line_coordinates
from compas_rhino.conduits import LinesConduit
from compas_rhino import MeshArtist
def find_groups(mesh):
    """Group the mesh's edges into opposite-edge "strips".

    Starting from every half-edge, repeatedly hops to the paired
    (opposite) half-edge from make_pairs() until the strip terminates
    (pair is None), leaves the pairing table, or a safety counter
    exceeds the edge count. Returns a list of edge lists, one per strip.
    """
    num_edges = mesh.number_of_edges()
    h_edge_pairs = make_pairs(mesh)
    seen = set()
    groups = []
    for u,v in mesh.edges():
        group = set()
        # Try both orientations of the edge as a strip start.
        for h_edge in [(u,v),(v,u)]:
            start = h_edge
            if start in seen:
                continue
            if start not in h_edge_pairs:
                continue
            # Record both orientations so the reverse walk is skipped.
            group.add((start[0],start[1]))
            group.add((start[1],start[0]))
            count = 0
            while True:
                if h_edge_pairs[start] == None:
                    break
                u,v = h_edge_pairs[start]
                group.add((u,v))
                group.add((v,u))
                # Continue the walk from the opposite orientation.
                start = (v,u)
                if (v,u) not in h_edge_pairs:
                    break
                count += 1
                # Safety valve so a closed strip cannot cycle forever.
                if count > num_edges:
                    break
        if len(group) > 0:
            groups.append(group)
            seen = seen.union(group)
    edge_groups = []
    # Reduce each half-edge group to its canonical edge orientation.
    for group in groups:
        edges = []
        for u,v in group:
            if (u,v) in mesh.edges():
                edges.append((u,v))
        edge_groups.append(edges)
    return edge_groups
def make_pairs(mesh):
    """Map every half-edge of each quad (or flagged triangle) face to the
    opposite half-edge of that face, or None where a strip terminates.
    """
    pairs = {}
    for fkey in mesh.faces():
        h_edges = mesh.face_halfedges(fkey)
        if len(h_edges) == 4:
            # Opposite sides of a quad pair up (i <-> i+2).
            for i in range(4):
                pairs[h_edges[i]] = h_edges[(i + 2) % 4]
        elif len(h_edges) == 3:
            # The 'corner' flag selects which triangle edge ends the strip.
            flag = mesh.get_face_attribute(fkey, 'corner')
            if flag == 0:
                pairs[h_edges[0]] = None
                pairs[h_edges[1]] = h_edges[2]
                pairs[h_edges[2]] = h_edges[1]
            elif flag == 1:
                pairs[h_edges[0]] = h_edges[1]
                pairs[h_edges[1]] = h_edges[0]
                pairs[h_edges[2]] = None
            elif flag == 2:
                pairs[h_edges[0]] = h_edges[2]
                pairs[h_edges[1]] = None
                pairs[h_edges[2]] = h_edges[0]
    return pairs
def find_devisions(mesh, edge_groups, trg_len):
    """Divide each edge strip into equal segments and build one combined
    coons-patch mesh for every face of the coarse ``mesh``.

    For each strip, the average source-curve length picks a shared
    division count targeting ``trg_len`` per segment; every face then
    becomes a discrete coons patch from its boundary point lists.
    Returns a single Mesh with all patches (seam vertices duplicated).
    """
    for edges in edge_groups:
        lengths = 0
        for u,v in edges:
            lengths += mesh.get_edge_attribute((u,v),'length')
        ave_len = lengths / len(edges)
        # One shared division count for the whole strip (at least 1).
        div = max((round(ave_len / trg_len,0),1))
        for u,v in edges:
            crv = mesh.get_edge_attribute((u,v),'guid')
            pts = rs.DivideCurve(crv,div)
            mesh.set_edge_attribute((u,v),'points',pts)
    edges = set(mesh.edges())
    coons_meshes = []
    for fkey in mesh.faces():
        h_edges = mesh.face_halfedges(fkey)
        # arrange point lists in circular order along edge faces
        pts_coon = []
        for h_edge in h_edges:
            pts = mesh.get_edge_attribute(h_edge,'points')[:]
            # Align point order with the half-edge and curve direction.
            if not h_edge in edges:
                pts.reverse()
            if not mesh.get_edge_attribute(h_edge,'dir'):
                pts.reverse()
            pts_coon.append(pts)
        # handle triangles correctly based on user input (flag 0 - 2)
        lengths = [len(pts_coon[0]), len(pts_coon[1])]
        if len(h_edges) == 4:
            ab,bc,dc,ad = pts_coon
        else:
            flag = mesh.get_face_attribute(fkey,'corner')
            if flag == 0:
                ab,bc,dc,ad = pts_coon[0],pts_coon[1],[],pts_coon[2]
            elif flag == 1:
                ab,bc,dc,ad = pts_coon[0],[],pts_coon[1],pts_coon[2]
                lengths = [len(pts_coon[0]), len(pts_coon[2])]
            elif flag == 2:
                ab,bc,dc,ad = pts_coon[0],pts_coon[1],pts_coon[2],[]
        # reverse for coons patch (see parameters)
        dc.reverse()
        ad.reverse()
        vertices, faces = discrete_coons_patch(ab,bc,dc,ad)
        coons_meshes.append((vertices, faces, lengths))
    # join al sub "meshes" of the coons patches in one mesh (with duplicate vertices)
    inc = 0
    mesh = Mesh()
    for coons_mesh in coons_meshes:
        vertices, faces, lengths = coons_mesh
        a, b = lengths
        indices = []
        for i,pt in enumerate(vertices):
            indices.append(i)
            pass
        # Boundary indices of the a-by-b patch grid: first/last columns
        # plus first/last rows.
        indices = indices[::b] + indices[b-1::b]+ indices[:b] + indices[(a-1)*b:]
        indices = set(indices)
        for i,pt in enumerate(vertices):
            if i in indices:
                attr = {'coon_bound' : True}
            else:
                attr = {'coon_bound' : False}
            mesh.add_vertex(i + inc, x=pt[0], y=pt[1], z=pt[2], attr_dict=attr)
        for face in faces:
            # Offset face keys so patches do not collide in the joined mesh.
            face = [key + inc for key in face]
            mesh.add_face(face)
        inc += len(vertices)
    return mesh
def set_tri_corners(mesh):
    """Let the user flag triangle corners interactively in Rhino.

    Places a text dot near each corner of every triangular face; the
    dots the user selects set that face's 'corner' attribute (0-2).
    Returns True when at least one corner was set, None to stop.
    """
    dots = {}
    rs.EnableRedraw(False)
    for fkey in mesh.faces():
        c_pts = mesh.face_coordinates(fkey)
        # reverse oder to make the flags match with the corners
        c_pts.reverse()
        if len(c_pts) != 3:
            continue
        cent = mesh.face_centroid(fkey)
        for i,c_pt in enumerate(c_pts):
            # Dot sits halfway between the centroid and the corner.
            pt = centroid_points([cent,c_pt])
            dot = rs.AddTextDot('', pt)
            rs.TextDotHeight(dot,6)
            dots[str(dot)] = (fkey,i)
    rs.EnableRedraw(True)
    if not dots:
        return None
    dot_ids = dots.keys()
    data = rs.GetObjectsEx(message="Select face dot", filter=0, preselect=False, select=False, objects=dot_ids)
    rs.DeleteObjects(dot_ids)
    if not data:
        return None
    for datum in data:
        dot = datum[0]
        fkey, flag = dots[str(dot)]
        if flag == None:
            return None
        mesh.set_face_attribute(fkey,'corner',flag)
    return True
def lines_from_mesh(mesh):
    """Return the start/end coordinate pair of every edge in ``mesh``."""
    return [mesh.edge_coordinates(*uv) for uv in mesh.edges()]
def group_and_mesh(mesh, trg_len):
    """Group the strip edges of ``mesh`` and build its coons-patch mesh."""
    return find_devisions(mesh, find_groups(mesh), trg_len)
def mesh_cull_duplicate_vertices(mesh, precision='3f'):
    """Cull all duplicate vertices of a mesh and sanitize affected faces.

    NOTE(review): this local definition shadows the compas function of
    the same name imported at the top of the file.

    Parameters
    ----------
    mesh : Mesh
        A mesh object.
    precision (str): Optional.
        A formatting option that specifies the precision of the
        individual numbers in the string (truncation after the decimal point).
        Supported values are any float precision, or decimal integer (``'d'``).
        Default is ``'3f'``.
    """
    geo_keys = {}
    keys_geo = {}
    # Maps each culled key to the surviving key at the same location.
    keys_pointer = {}
    for key in mesh.vertices():
        geo_key = geometric_key(mesh.vertex_coordinates(key), precision)
        if geo_key in geo_keys:
            keys_pointer[key] = geo_keys[geo_key]
        else:
            geo_keys[geo_key] = key
            keys_geo[key] = geo_key
    keys_remain = geo_keys.values()
    keys_del = [key for key in mesh.vertices() if key not in keys_remain]
    # delete vertices
    for key in keys_del:
        del mesh.vertex[key]
    # sanitize affected faces
    new_faces = {}
    for fkey in mesh.faces():
        face = []
        seen = set()
        for key in mesh.face_vertices(fkey):
            if key in keys_pointer:
                # Redirect to the surviving duplicate, skipping repeats.
                pointer = keys_pointer[key]
                if pointer not in seen:
                    face.append(pointer)
                    seen.add(pointer)
            else:
                face.append(key)
        if seen:
            new_faces[fkey] = face
    # Rebuild only faces that referenced a culled vertex.
    for fkey in new_faces:
        mesh.delete_face(fkey)
        mesh.add_face(new_faces[fkey], fkey)
def get_initial_mesh(precision):
    """Build the coarse input mesh from user-selected boundary curves.

    Prompts for boundary curves in Rhino, builds a Mesh from them, lets
    the user delete faces to create openings, and stores the source
    curve guid/length/direction on every edge. Returns ``(mesh, crvs)``
    or None when the curve network is invalid (open ends, disconnected).
    """
    crvs = rs.GetObjects("Select boundary curves", 4, group=True, preselect=False, select=False, objects=None, minimum_count=3, maximum_count=0)
    lines = get_line_coordinates(crvs)
    geo_lines = [(geometric_key(pt_u,precision), geometric_key(pt_v,precision)) for pt_u, pt_v in lines]
    network = Network.from_lines(lines, precision)
    # Leaves mean open curve ends: not a valid closed boundary network.
    if network.leaves():
        return None
    adjacency = {key: network.vertex_neighbours(key) for key in network.vertices()}
    root = network.get_any_vertex()
    ordering, predecessors, paths = depth_first_tree(adjacency, root)
    # A DFS that misses vertices means the network is disconnected.
    if len(ordering) != network.number_of_vertices():
        return None
    mesh = Mesh.from_lines(lines, delete_boundary_face=True, precision=precision)
    rs.EnableRedraw(False)
    dots = {}
    for fkey in mesh.faces():
        cent = mesh.face_centroid(fkey)
        dot = rs.AddTextDot('', cent)
        rs.TextDotHeight(dot,6)
        dots[str(dot)] = fkey
    rs.EnableRedraw(True)
    if not dots:
        return None
    dot_ids = dots.keys()
    data = rs.GetObjectsEx(message="Select face for openings", filter=0, preselect=False, select=False, objects=dot_ids)
    rs.DeleteObjects(dot_ids)
    if data:
        # Delete the faces the user marked as openings.
        for datum in data:
            dot = datum[0]
            fkey = dots[str(dot)]
            mesh.delete_face(fkey)
    geo_edges = []
    # Match every mesh edge back to its source curve to record the guid,
    # curve length, and whether the edge runs along or against the curve.
    for u,v, attr in mesh.edges(True):
        pt_u, pt_v = mesh.edge_coordinates(u,v)
        geo_u, geo_v = geometric_key(pt_u,precision), geometric_key(pt_v,precision)
        for i, geo_l_uv in enumerate(geo_lines):
            geo_l_u, geo_l_v = geo_l_uv[0], geo_l_uv[1]
            if (geo_l_u == geo_u) and (geo_l_v == geo_v):
                attr['dir'] = True
            elif (geo_l_u == geo_v) and (geo_l_v == geo_u):
                attr['dir'] = False
            else: continue
            attr['guid'] = str(crvs[i])
            attr['length'] = rs.CurveLength(crvs[i])
    # initiate flag for corners
    for fkey, attr in mesh.faces(True):
        mesh.set_face_attribute(fkey,'corner',0)
    return mesh, crvs
if __name__ == '__main__':
    #user inputs
    #----------------------------------------
    #----------------------------------------
    precision = '3f'
    # Target edge length for the remeshed coons patches.
    trg_len = 0.75
    mesh, crvs = get_initial_mesh(precision)
    #----------------------------------------
    #----------------------------------------
    # Build the initial coons-patch mesh and its preview lines.
    coons_mesh = group_and_mesh(mesh, trg_len)
    mesh_lines = lines_from_mesh(coons_mesh)
    try:
        conduit = LinesConduit(mesh_lines)
        conduit.Enabled = True
        # Interactive loop: re-mesh each time the user re-flags corners.
        while True:
            if not set_tri_corners(mesh):
                break
            if not trg_len:
                break
            coons_mesh = group_and_mesh(mesh, trg_len)
            mesh_lines = lines_from_mesh(coons_mesh)
            conduit.lines = mesh_lines
            conduit.redraw()
    except Exception as e:
        print(e)
    finally:
        conduit.Enabled = False
        del conduit
    # Merge duplicate vertices at patch seams, then draw the result.
    mesh_cull_duplicate_vertices(coons_mesh, precision)
    #fixed = coons_mesh.vertices_on_boundary()
    #mesh_smooth_area(coons_mesh, fixed=fixed, kmax=25,damping=0.5)
    #mesh_smooth_centroid(coons_mesh, fixed=fixed, kmax=5,damping=0.5)
    artist = MeshArtist(coons_mesh, layer='form_quad')
    artist.draw_edges()
    artist.draw_vertices()
    #artist.draw_faces()
    artist.redraw()
|
<reponame>archieio/tele2-prof
import math
import re
import time
from colorama import Fore
from app.api import Tele2Api
def input_lots(data_left, display_name, min_amount, max_multiplier,
               price_multiplier, lot_type):
    """Interactively collect lots to sell until the remaining balance
    drops below ``min_amount`` or the user submits an empty line.

    Returns a list of lot dicts with name, lot_type, amount and price.
    """
    collected = []
    index = 1
    while data_left >= min_amount:
        raw = input(Fore.WHITE + f'\t{display_name}s lot {index} >>> ')
        if raw == '':
            break
        # Expect "amount" or "amount price" (whitespace-separated ints).
        if not re.match(r'^\s*\d+\s*(\s\d+\s*)?$', raw):
            print(Fore.MAGENTA + '\tIncorrect input format. Try again')
            continue
        tokens = re.sub(r'\s+', ' ', raw.strip()).split(' ')
        lot_data = [int(tok) for tok in tokens]
        amount = lot_data[0]
        if amount < min_amount:
            print(Fore.RED +
                  f'\tOops: {display_name.capitalize()} lot amount must be '
                  f'> {min_amount}')
            continue
        if amount > data_left:
            print(Fore.RED + f'\tOops: You only have {data_left} left')
            continue
        if len(lot_data) == 1:
            # Price omitted: default to the floor price.
            price = math.ceil(amount * price_multiplier)
        else:
            price = lot_data[1]
            # User-supplied price must sit within the allowed band.
            if price < math.ceil(amount * price_multiplier):
                print(Fore.RED +
                      f'\tOops: {display_name.capitalize()} lot price must be >='
                      f' ({price_multiplier} * amount)')
                continue
            if price > max_multiplier * amount:
                print(Fore.RED +
                      f'\tOops: {display_name.capitalize()} lot price must be <='
                      f' ({max_multiplier} * amount)')
                continue
        print(Fore.GREEN +
              f'\t\tOk! Lot {index}: {amount} {display_name[:3]}.'
              f' for {price} rub.')
        data_left -= amount
        print(f'\t\t({data_left} {display_name[:3]}. left)')
        collected.append({
            'name': display_name[:3],
            'lot_type': lot_type,
            'amount': amount,
            'price': price,
        })
        index += 1
    return collected
async def prepare_lots(rests):
    """Build the combined list of minute and gigabyte lots to sell,
    based on the remaining voice/data balances in ``rests``."""
    lots = []
    # Minute lots are only worth listing with at least 50 minutes left.
    if rests['voice'] >= 50:
        print(Fore.YELLOW + '1. Prepare minute lots:')
        lots += input_lots(data_left=rests['voice'], display_name='minute',
                           min_amount=50, max_multiplier=2,
                           price_multiplier=0.8, lot_type='voice')
    if rests['data'] >= 1:
        print(Fore.GREEN + '2. Prepare gigabyte lots:')
        lots += input_lots(data_left=rests['data'], display_name='gigabyte',
                           min_amount=1, max_multiplier=50,
                           price_multiplier=15, lot_type='data')
    return lots
def print_prepared_lots(prepared_lots):
    """Summarize the prepared lots, color-coded by traffic type."""
    count = len(prepared_lots)
    if not count:
        return
    print(Fore.LIGHTMAGENTA_EX +
          f'Ok. You have prepared {count} lot{"s" if count > 1 else ""}:')
    for lot in prepared_lots:
        is_voice = lot['lot_type'] == 'voice'
        color = Fore.YELLOW if is_voice else Fore.GREEN
        print(color + f'\t{lot["amount"]} {lot["name"]} '
                      f'for {lot["price"]} rub')
def prepare_old_lots(old_lots: list):
    """Convert raw lot dicts from the Tele2 API into the internal format.

    Parameters:
        old_lots: API lot objects with 'trafficType', 'volume' and 'cost'.

    Returns:
        list[dict]: lots with 'lot_type', 'amount' and 'price' keys.
    """
    # Comprehension replaces the manual append loop (same output order).
    return [
        {
            'lot_type': lot['trafficType'],
            'amount': lot['volume']['value'],
            'price': lot['cost']['amount'],
        }
        for lot in old_lots
    ]
def get_if_status_is_ok(response):
    """Return True when the API response's meta status is 'OK'."""
    # The comparison is already a bool; the original's
    # `True if ... else False` wrapper was redundant.
    return response['meta']['status'] == 'OK'
def print_lot_listing_status(response):
    """Report the outcome of a single lot-listing attempt."""
    if not get_if_status_is_ok(response):
        print(Fore.RED +
              f'Error during listing... Trying Again')
        return
    data = response['data']
    color = Fore.YELLOW if data['trafficType'] == 'voice' else Fore.GREEN
    amount = data['volume']['value']
    uom = data['volume']['uom']
    cost = data['cost']['amount']
    print(color +
          f'Successful listing {amount} {uom} for {cost} rub.')
async def try_sell_infinite_times(api: Tele2Api, lot: any):
    """Keep submitting ``lot`` to the API until the listing succeeds.

    Retries every 3 seconds on failure and returns once the API
    reports an OK status.
    """
    import asyncio  # local import keeps the module import block untouched

    while True:
        response = await api.sell_lot(lot)
        status_is_ok = get_if_status_is_ok(response)
        print_lot_listing_status(response)
        if status_is_ok:
            break
        # Bug fix: time.sleep() blocks the whole event loop inside an
        # async function; asyncio.sleep yields control while waiting.
        await asyncio.sleep(3)
async def sell_prepared_lots(api: Tele2Api, lots: list):
    """List every prepared lot in order, retrying each until it sells."""
    for prepared in lots:
        await try_sell_infinite_times(api, prepared)
|
<gh_stars>0
################################################################################
# Exploratory Data Analysis
# columns list(sample)
# sample.drop('is_listened', 1)
# yes = sample.loc[sample['is_listened'] == 1]
# no = sample.loc[sample['is_listened'] == 0]
################################################################################
import pandas as pd
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
from preprocess import preprocess_helper as helper
# plt.style.use('seaborn-deep')
# read the sample
# Module-level load: every plotting helper below reads this DataFrame.
sample = helper.preprocess_default('data/train_sample_0.csv')
def age_skip_song():
    """Plot listened vs. skipped counts grouped by user age.

    Reads the module-level ``sample`` DataFrame. NOTE(review): the
    ``result[13:]`` / ``result[:13]`` split assumes exactly 13 distinct
    ages in the data — verify against the data set. ``as_matrix()`` was
    removed in pandas 1.0; modern pandas needs ``to_numpy()``.
    """
    # relationship between age and skip song
    age_listen = sample[['user_age', 'is_listened']]
    ages = age_listen['user_age'].unique()
    ages.sort()
    table = age_listen.groupby(['user_age', 'is_listened']).size()
    table = table.sort_index(level='is_listened')
    result = table.as_matrix()
    # Green bars: listened; red bars (offset 0.5): not listened.
    plt.bar(ages, result[13:], width=0.5, color='g', align='center')
    plt.bar(ages+0.5, result[:13], width=0.5, color='r', align='center')
    plt.legend(('Is Listened', 'Not Listened'), loc='upper right')
    plt.show()
def times():
    """Plot how many plays occur in each hour of the day.

    Reads the module-level ``sample`` DataFrame. NOTE(review):
    ``as_matrix()`` was removed in pandas 1.0; modern pandas needs
    ``to_numpy()``.
    """
    # At which time the people hear the songs
    hour = sample[['hour']]
    hours = hour['hour'].unique()
    hours.sort()
    table = hour.groupby(['hour']).size()
    table = table.sort_index(level='hour')
    result = table.as_matrix()
    plt.bar(hours, result, width=0.5, color='g', align='center')
    plt.legend("People hearing music", loc='upper right')
    plt.xlim(0, 24)
    plt.show()
#---
def histogram(sample, column_name):
    """Plot a bar chart of how often each value of ``column_name`` occurs.

    Parameters:
        sample: pandas DataFrame containing ``column_name``.
        column_name: column whose value counts are plotted.

    NOTE(review): ``as_matrix()`` was removed in pandas 1.0; modern
    pandas needs ``to_numpy()``.
    """
    column = sample[[column_name]]
    values = column[column_name].unique()
    values.sort()
    #print(values)
    table = column.groupby([column_name]).size()
    table = table.sort_index(level=column_name)
    #print(table)
    result = table.as_matrix()
    plt.bar(values, result, width=0.5, color='g', align='center')
    plt.legend("Title", loc='upper right')
    # plt.xlim(values[0], values[-1])
    plt.show()
def draw_platform_family_by_age(id, title):
    """Plot listened vs. not-listened counts by age for one platform family.

    Parameters:
        id: platform_family value to filter on.
        title: chart title.

    Reads the module-level ``sample`` DataFrame. NOTE(review):
    ``as_matrix()`` was removed in pandas 1.0; modern pandas needs
    ``to_numpy()``. ``id`` also shadows the builtin.
    """
    sub_sample = sample[['platform_family','user_age','is_listened']]
    sub_sample = sub_sample[(sub_sample.platform_family == id)]
    ages = sub_sample['user_age'].unique()
    ages.sort()
    table = sub_sample.groupby(['platform_family','user_age', 'is_listened']).size()
    table = table.sort_index(level='is_listened')
    table_matrix = table.as_matrix()
    plt.title(title)
    # bug: when the number of ages is not exactly 13, but with all the data set, less probable.
    plt.bar(ages, table_matrix[13:], width=0.5, color='g', align='center')
    plt.bar(ages+0.5, table_matrix[:13], width=0.5, color='r', align='center')
    plt.legend(('Is Listened', 'Not Listened'), loc='upper right')
    plt.ylabel("Quantity");
    plt.xlabel("Ages");
    plt.show()
# histogram(sample, 'release_year')
# times()
# age_skip_song()
# Render the listened/skipped-by-age chart for each platform family.
# for platform_family 0
draw_platform_family_by_age(0, "Platform Family 0")
# for platform_family 1
draw_platform_family_by_age(1, "Platform Family 1")
# for platform_family 2
draw_platform_family_by_age(2, "Platform Family 2")
|
from vnc_api_test import *
from tcutils.config.vnc_introspect_utils import *
from tcutils.config.svc_mon_introspect_utils import SvcMonInspect
from tcutils.control.cn_introspect_utils import *
from tcutils.agent.vna_introspect_utils import *
from tcutils.collector.opserver_introspect_utils import *
from tcutils.collector.analytics_tests import *
from tcutils.config.kube_manager_introspect_utils import KubeManagerInspect
from vnc_api.vnc_api import *
from tcutils.vdns.dns_introspect_utils import DnsAgentInspect
from tcutils.util import custom_dict, get_plain_uuid
import os
from openstack import OpenstackAuth, OpenstackOrchestrator
from vcenter import VcenterAuth, VcenterOrchestrator
from common.contrail_test_init import ContrailTestInit
from vcenter_gateway import VcenterGatewayOrch
try:
from tcutils.kubernetes.api_client import Client as Kubernetes_client
except ImportError:
pass
try:
from webui.ui_login import UILogin
except ImportError:
pass
class ContrailConnections():
    def __init__(self, inputs=None, logger=None, project_name=None,
        username=None, password=<PASSWORD>, domain_name=None, input_file=None, domain_obj=None,scope='domain'):
        """Set up all introspection/API handles for a Contrail test run.

        NOTE(review): the ``<PASSWORD>`` tokens are redaction artifacts
        from the checked-in copy and are kept byte-identical here; the
        real defaults need to be restored for this to parse.
        """
        self.inputs = inputs or ContrailTestInit(input_file,
                                stack_tenant=project_name)
        self.project_name = project_name or self.inputs.project_name
        self.domain_name = domain_name or self.inputs.domain_name
        self.orch_domain_name = domain_name or self.inputs.domain_name
        # Keystone's 'Default' domain maps to VNC's 'default-domain'.
        if self.orch_domain_name == 'Default':
            self.domain_name = 'default-domain'
        self.scope = scope
        self.username = username or self.inputs.stack_user
        self.password = password or self.inputs.stack_password
        self.logger = logger or self.inputs.logger
        self.nova_h = None
        self.quantum_h = None
        self.vnc_lib_fixture = None
        # Lazily-populated per-node inspect handles, keyed by node.
        self.api_server_inspects = custom_dict(self.get_api_inspect_handle,
            'api_inspect:'+self.project_name+':'+self.username)
        self.dnsagent_inspect = custom_dict(self.get_dns_agent_inspect_handle,
            'dns_inspect')
        self.agent_inspect = custom_dict(self.get_vrouter_agent_inspect_handle,
            'agent_inspect')
        self.ops_inspects = custom_dict(self.get_opserver_inspect_handle,
            'ops_inspect:'+self.project_name+':'+self.username)
        self.cn_inspect = custom_dict(self.get_control_node_inspect_handle,
            'cn_inspect')
        self.k8s_client = self.get_k8s_api_client_handle()
        # ToDo: msenthil/sandipd rest of init needs to be better handled
        self.domain_id = None
        if self.inputs.domain_isolation:
            #get admin auth to list domains and get domain_id
            auth = self.get_auth_h(username = self.inputs.admin_username,
                               password=<PASSWORD>,
                               project_name=self.inputs.admin_tenant,
                               domain_name=self.inputs.admin_domain)
            self.domain_id = auth.get_domain_id(self.domain_name)
        self.auth = self.get_auth_h()
        self.vnc_lib = self.get_vnc_lib_h()
        self.project_id = self.get_project_id()
        # Orchestrator-specific handles (openstack / vcenter / kubernetes).
        if self.inputs.orchestrator == 'openstack':
            if self.inputs.verify_thru_gui():
                self.ui_login = UILogin(self, self.inputs, project_name, username, password)
                self.browser = self.ui_login.browser
                self.browser_openstack = self.ui_login.browser_openstack

            self.orch = OpenstackOrchestrator(inputs=self.inputs,
                                              vnclib=self.vnc_lib,
                                              logger=self.logger,
                                              auth_h=self.auth
                                              )
            self.nova_h = self.orch.get_compute_handler()
            self.quantum_h = self.orch.get_network_handler()
            self.glance_h = self.orch.get_image_handler()
        elif self.inputs.orchestrator == 'vcenter': # vcenter
            self.orch = VcenterOrchestrator(user= self.username,
                                            pwd= <PASSWORD>,
                                            host=self.inputs.auth_ip,
                                            port=self.inputs.auth_port,
                                            dc_name=self.inputs.vcenter_dc,
                                            vnc=self.vnc_lib,
                                            inputs=self.inputs,
                                            logger=self.logger)
        elif self.inputs.orchestrator == 'kubernetes':
            self.orch = None
        if self.inputs.vcenter_gw_setup: # vcenter_gateway
            self.slave_orch = VcenterGatewayOrch(user=self.inputs.vcenter_username,
                                            pwd=<PASSWORD>,
                                            host=self.inputs.vcenter_server,
                                            port=int(self.inputs.vcenter_port),
                                            dc_name=self.inputs.vcenter_dc,
                                            vnc=self.vnc_lib,
                                            inputs=self.inputs,
                                            logger=self.logger)
        self._kube_manager_inspect = None
    # end __init__
def get_project_id(self, project_name=None):
project_name = project_name or self.project_name
auth = self.get_auth_h(project_name=project_name)
if auth:
return auth.get_project_id(project_name or self.project_name,
self.domain_id)
else:
return self.vnc_lib_fixture.project_id if self.vnc_lib_fixture else None
def get_auth_h(self, refresh=False, project_name=None,
username=None, password=None, domain_name=None):
project_name = project_name or self.project_name
username = username or self.username
password = password or <PASSWORD>
attr = '_auth_'+project_name+'_'+username
if not getattr(env, attr, None) or refresh:
if self.inputs.orchestrator == 'openstack':
env[attr] = OpenstackAuth(username, password,
project_name, self.inputs, self.logger,
domain_name=domain_name or self.orch_domain_name,
scope=self.scope)
elif self.inputs.orchestrator == 'vcenter':
env[attr] = VcenterAuth(username, password,
project_name, self.inputs)
# elif self.inputs.orchestrator == 'kubernetes':
# env[attr] = self.get_k8s_api_client_handle()
return env.get(attr)
def get_vnc_lib_h(self, refresh=False):
attr = '_vnc_lib_fixture_' + self.project_name + '_' + self.username
cfgm_ip = self.inputs.api_server_ip or \
self.inputs.contrail_external_vip or self.inputs.cfgm_ip
if not getattr(env, attr, None) or refresh:
if self.inputs.orchestrator == 'openstack' :
domain = self.orch_domain_name
else:
domain = self.domain_name
env[attr] = VncLibFixture(
username=self.username, password=<PASSWORD>,
domain=domain, project_name=self.project_name,
inputs=self.inputs,
cfgm_ip=cfgm_ip,
api_server_port=self.inputs.api_server_port,
auth_server_ip=self.inputs.auth_ip,
orchestrator=self.inputs.orchestrator,
project_id=self.get_project_id(),
certfile = self.inputs.keystonecertfile,
keyfile = self.inputs.keystonekeyfile,
cacert = self.inputs.certbundle,
insecure = self.inputs.insecure,
logger=self.logger)
env[attr].setUp()
self.vnc_lib_fixture = env[attr]
self.vnc_lib = self.vnc_lib_fixture.get_handle()
return self.vnc_lib
def get_api_inspect_handle(self, host):
cfgm_ip = self.inputs.api_server_ip or self.inputs.contrail_external_vip
if cfgm_ip:
host = cfgm_ip
if host not in self.api_server_inspects:
self.api_server_inspects[host] = VNCApiInspect(host,
inputs=self.inputs,
protocol=self.inputs.api_protocol,
logger=self.logger)
return self.api_server_inspects[host]
def get_control_node_inspect_handle(self, host):
if host not in self.cn_inspect:
self.cn_inspect[host] = ControlNodeInspect(host,
self.inputs.bgp_port,
logger=self.logger)
return self.cn_inspect[host]
def get_dns_agent_inspect_handle(self, host):
if host not in self.dnsagent_inspect:
self.dnsagent_inspect[host] = DnsAgentInspect(host,
self.inputs.dns_port,
logger=self.logger)
return self.dnsagent_inspect[host]
def get_vrouter_agent_inspect_handle(self, host):
if host not in self.agent_inspect:
self.agent_inspect[host] = AgentInspect(host,
port=self.inputs.agent_port,
logger=self.logger)
return self.agent_inspect[host]
def get_opserver_inspect_handle(self, host):
#ToDo: WA till scripts are modified to use ip rather than hostname
ip = host if is_v4(host) else self.inputs.get_host_ip(host)
collector_ip = self.inputs.analytics_api_ip or \
self.inputs.contrail_external_vip
if collector_ip:
ip = collector_ip
if ip not in self.ops_inspects:
self.ops_inspects[ip] = VerificationOpsSrv(ip,
port=self.inputs.analytics_api_port,
logger=self.logger,
inputs=self.inputs)
return self.ops_inspects[ip]
def get_k8s_api_client_handle(self):
if self.inputs.orchestrator != 'kubernetes' and self.inputs.slave_orchestrator != 'kubernetes':
return None
if not getattr(self, 'k8s_client', None):
self.k8s_client = Kubernetes_client(self.inputs.kube_config_file,
self.logger)
return self.k8s_client
# end get_k8s_api_client_handle
def get_svc_mon_h(self, refresh=False):
if not getattr(self, '_svc_mon_inspect', None) or refresh:
for cfgm_ip in self.inputs.cfgm_ips:
#contrail-status would increase run time hence netstat approach
cmd = 'netstat -antp | grep :8088 | grep LISTEN'
if 'LISTEN' in self.inputs.run_cmd_on_server(cfgm_ip, cmd, container='svc-monitor'):
self._svc_mon_inspect = SvcMonInspect(cfgm_ip,
logger=self.logger)
break
return self._svc_mon_inspect
def get_kube_manager_h(self, refresh=False):
if not getattr(self, '_kube_manager_inspect', None) or refresh:
for km_ip in self.inputs.kube_manager_ips:
#contrail-status would increase run time hence netstat approach
cmd = 'netstat -antp | grep :8108 | grep LISTEN'
if 'LISTEN' in self.inputs.run_cmd_on_server(km_ip, cmd,
container='contrail-kube-manager'):
self._kube_manager_inspect = KubeManagerInspect(km_ip,
logger=self.logger)
break
return self._kube_manager_inspect
# end get_kube_manager_h
@property
def api_server_inspect(self):
if not getattr(self, '_api_server_inspect', None):
self._api_server_inspect = self.api_server_inspects[
self.inputs.cfgm_ips[0]]
return self._api_server_inspect
@api_server_inspect.setter
def api_server_inspect(self, value):
self._api_server_inspect = value
@property
def ops_inspect(self):
if not getattr(self, '_ops_inspect', None):
self._ops_inspect = self.ops_inspects[self.inputs.collector_ips[0]]
return self._ops_inspect
@ops_inspect.setter
def ops_inspect(self, value):
self._ops_inspect = value
@property
def analytics_obj(self):
if not getattr(self, '_analytics_obj', None):
self._analytics_obj = AnalyticsVerification(self.inputs,
self.cn_inspect, self.agent_inspect,
self.ops_inspects, logger=self.logger)
return self._analytics_obj
@analytics_obj.setter
def analytics_obj(self, value):
self._analytics_obj = value
def update_inspect_handles(self):
self.api_server_inspects.clear()
self.cn_inspect.clear()
self.dnsagent_inspect.clear()
self.agent_inspect.clear()
self.ops_inspects.clear()
self._svc_mon_inspect = None
self._api_server_inspect = None
self._ops_inspect = None
self._analytics_obj = None
self._kube_manager_inspect = None
# end update_inspect_handles
def update_vnc_lib_fixture(self):
self.vnc_lib = self.get_vnc_lib_h(refresh=True)
# end update_vnc_lib_fixture()
def set_vrouter_config_encap(self, encap1=None, encap2=None, encap3=None):
return self.update_vrouter_config_encap(encap1, encap2, encap3, create=True)
# end set_vrouter_config_encap
def update_vrouter_config_encap(self, encap1=None, encap2=None, encap3=None, create=False):
'''Used to change the existing encapsulation priorities to new values'''
if not (encap1 and encap2 and encap3):
return self.delete_vrouter_encap()
try:
# Reading Existing config
current_config = self.vnc_lib.global_vrouter_config_read(
fq_name=['default-global-system-config',
'default-global-vrouter-config'])
except NoIdError as e:
self.logger.exception('No config id found. Creating new one')
if not create:
raise
conf_obj = GlobalVrouterConfig()
self.vnc_lib.global_vrouter_config_create(conf_obj)
encaps_obj = EncapsulationPrioritiesType(
encapsulation=[encap1, encap2, encap3])
confs_obj = GlobalVrouterConfig(encapsulation_priorities=encaps_obj)
result = self.vnc_lib.global_vrouter_config_update(confs_obj)
return result
# end update_vrouter_config_encap
def delete_vrouter_encap(self):
try:
conf_id = self.vnc_lib.get_default_global_vrouter_config_id()
obj = self.vnc_lib.global_vrouter_config_read(id=conf_id)
encap_obj = obj.get_encapsulation_priorities()
if not encap_obj:
return ['', '', '']
encaps = encap_obj.encapsulation
l = len(encaps)
encaps.extend([''] * (3 - l))
obj.set_encapsulation_priorities(None)
self.vnc_lib.global_vrouter_config_update(obj)
return encaps
except NoIdError:
errmsg = "No config id found"
self.logger.info(errmsg)
return (errmsg)
# end delete_vrouter_encap
def read_vrouter_config_encap(self):
result = None
try:
conf_id = self.vnc_lib.get_default_global_vrouter_config_id()
config_parameters = self.vnc_lib.global_vrouter_config_read(id=conf_id)
obj = config_parameters.get_encapsulation_priorities()
if not obj:
return ['', '', '']
else:
return obj.encapsulation
except NoIdError:
errmsg = "No config id found"
self.logger.info(errmsg)
return result
# end read_vrouter_config_encap
def set_vrouter_config_evpn(self, evpn_status=True):
self.obj = self.vnc_lib
# Check if already configured
try:
conf_id = self.obj.get_default_global_vrouter_config_id()
self.obj.global_vrouter_config_delete(id=conf_id)
except Exception:
msg = "No config id found. Configuring new one"
self.logger.info(msg)
pass
if evpn_status == True:
conf_obj = GlobalVrouterConfig(evpn_status=True)
else:
conf_obj = GlobalVrouterConfig(evpn_status=False)
result = self.obj.global_vrouter_config_create(conf_obj)
return result
# end set_vrouter_config_evpn
def update_vrouter_config_evpn(self, evpn_status=True):
self.obj = self.vnc_lib
if evpn_status == True:
conf_obj = GlobalVrouterConfig(evpn_status=True)
else:
conf_obj = GlobalVrouterConfig(evpn_status=False)
result = self.obj.global_vrouter_config_update(conf_obj)
return result
# end update_vrouter_config_evpn
def delete_vrouter_config_evpn(self):
try:
self.obj = self.vnc_lib
conf_id = self.obj.get_default_global_vrouter_config_id()
self.obj.global_vrouter_config_delete(id=conf_id)
except NoIdError:
errmsg = "No config id found"
self.logger.info(errmsg)
# end delete_vrouter_config_evpn
def read_vrouter_config_evpn(self):
result = False
try:
self.obj = self.vnc_lib
conf_id = self.obj.get_default_global_vrouter_config_id()
out = self.obj.global_vrouter_config_read(id=conf_id)
if 'evpn_status' in out.__dict__.keys():
result = out.evpn_status
except NoIdError:
errmsg = "No config id found"
self.logger.info(errmsg)
return result
# end read_vrouter_config_evpn
|
<reponame>GillesArcas/Advent_of_Code<gh_stars>0
import re
from collections import defaultdict
# (input file, expected answer) pairs used as regression checks for part 1.
# The example files are expected next to this script.
EXAMPLES1 = (
    ('22-exemple0.txt', 39),
    ('22-exemple1.txt', 590784),
    ('22-exemple1-short.txt', 590784),
    ('22-exemple2.txt', 474140),
)
# (input file, expected answer) pairs for part 2.
EXAMPLES2 = (
    ('22-exemple0.txt', 39),
    ('22-exemple1-short.txt', 590784),
    ('22-exemple2.txt', 2758514936282235),
)
# The real puzzle input.
INPUT = '22.txt'
def read_list(fn):
    """Parse a reboot-step file into a list of step tuples.

    Each input line looks like ``on x=10..12,y=10..12,z=-1..1``.
    Returns a list of (on, x1, x2, y1, y2, z1, z2) tuples where ``on``
    is 1 for "on" steps and 0 for "off" steps and the bounds are ints.
    """
    # Anchor the keyword to exactly "on" or "off"; the previous pattern
    # o[nf]+ also accepted malformed keywords such as "onn" or "offf".
    pattern = re.compile(
        r'(on|off) x=(-?\d+)\.\.(-?\d+),y=(-?\d+)\.\.(-?\d+),z=(-?\d+)\.\.(-?\d+)')
    steps = list()
    with open(fn) as f:
        for line in f:
            on, x1, x2, y1, y2, z1, z2 = pattern.match(line).groups()
            steps.append((int(on == 'on'), int(x1), int(x2),
                          int(y1), int(y2), int(z1), int(z2)))
    return steps
def code1(steps):
    """Part 1: apply the steps inside the initialization region and
    return how many cubes end up on.

    Steps that are not entirely within [-50, 50] on every axis are
    ignored.
    """
    lit = set()
    for on, x1, x2, y1, y2, z1, z2 in steps:
        in_region = (-50 <= x1 <= x2 <= 50 and
                     -50 <= y1 <= y2 <= 50 and
                     -50 <= z1 <= z2 <= 50)
        if not in_region:
            continue
        for x in range(x1, x2 + 1):
            for y in range(y1, y2 + 1):
                for z in range(z1, z2 + 1):
                    if on:
                        lit.add((x, y, z))
                    else:
                        lit.discard((x, y, z))
    return len(lit)
def inter_seg(x1, x2, x1_, x2_):
    """Split two overlapping 1-D segments [x1, x2] and [x1_, x2_].

    Returns (common, left_over, right_over):
    - common: the shared sub-segment, or None when the segments are
      disjoint (then the two originals are returned unchanged);
    - left_over / right_over: the non-shared pieces before/after the
      common part, None when the corresponding ends coincide.
    """
    if (x1, x2) == (x1_, x2_):
        # Identical segments: everything is common.
        return (x1, x2), None, None
    start_lo, start_hi = min(x1, x1_), max(x1, x1_)
    end_lo, end_hi = min(x2, x2_), max(x2, x2_)
    if start_hi > end_lo:
        # Disjoint: no common part, both segments survive untouched.
        return None, (x1, x2), (x1_, x2_)
    left = None if x1 == x1_ else (start_lo, start_hi - 1)
    right = None if x2 == x2_ else (end_lo + 1, end_hi)
    return (start_hi, end_lo), left, right
def included(coord1, coord2):
    """Return True when box coord1 lies entirely inside box coord2.

    Both arguments are (x1, x2, y1, y2, z1, z2) bound tuples.
    """
    lows1, highs1 = coord1[0::2], coord1[1::2]
    lows2, highs2 = coord2[0::2], coord2[1::2]
    for lo1, hi1, lo2, hi2 in zip(lows1, highs1, lows2, highs2):
        if lo1 < lo2 or hi1 > hi2:
            return False
    return True
def inter_cuboid(cuboid1, cuboid2):
    """Carve the existing cuboid2 against the step cuboid cuboid1.

    Splits each axis of cuboid2 around cuboid1 and returns the set of
    fragments of cuboid2 that do NOT overlap cuboid1.  When the two are
    disjoint, cuboid2 is returned unchanged.  Cuboids are
    (on, x1, x2, y1, y2, z1, z2) tuples.
    """
    state2 = cuboid2[0]
    box1 = cuboid1[1:]
    box2 = cuboid2[1:]
    splits_x = inter_seg(box1[0], box1[1], box2[0], box2[1])
    splits_y = inter_seg(box1[2], box1[3], box2[2], box2[3])
    splits_z = inter_seg(box1[4], box1[5], box2[4], box2[5])
    # No common part on at least one axis: cuboid2 survives whole.
    if splits_x[0] is None or splits_y[0] is None or splits_z[0] is None:
        return {cuboid2}
    segs_x = [seg for seg in splits_x if seg is not None]
    segs_y = [seg for seg in splits_y if seg is not None]
    segs_z = [seg for seg in splits_z if seg is not None]
    fragments = set()
    for seg_x in segs_x:
        for seg_y in segs_y:
            for seg_z in segs_z:
                candidate = (*seg_x, *seg_y, *seg_z)
                # Keep only pieces inside cuboid2 but outside cuboid1.
                if included(candidate, box2) and not included(candidate, box1):
                    fragments.add((state2,) + candidate)
    return fragments
def vol_cuboid(cuboid):
    """Volume of a cuboid with inclusive integer bounds.

    The leading on/off flag is ignored.
    """
    _, x1, x2, y1, y2, z1, z2 = cuboid
    volume = 1
    for lo, hi in ((x1, x2), (y1, y2), (z1, z2)):
        volume *= hi - lo + 1
    return volume
def code2(steps):
    """Part 2: apply all steps over unbounded space and return the
    number of cubes that are on.

    Maintains a set of disjoint cuboids; every new step carves the
    existing cuboids around itself, and "on" steps then add their own
    cuboid.
    """
    active = [steps[0]]
    for step in steps[1:]:
        carved = set()
        for cuboid in active:
            carved.update(inter_cuboid(step, cuboid))
        if step[0]:
            carved.add(step)
        active = carved
    return sum(map(vol_cuboid, active))
def test(n, code, examples, myinput):
    """Check ``code`` against every (file, expected) example pair, then
    print its answer for the real input, prefixed with part number n.
    """
    for fname, expected in examples:
        steps = read_list(fname)
        got = code(steps)
        assert got == expected, (steps, expected, got)
    print(f'{n}>', code(read_list(myinput)))
# Entry point: validate both parts against the examples, then print the
# answers for the puzzle input.
test(1, code1, EXAMPLES1, INPUT)
test(2, code2, EXAMPLES2, INPUT)
|
<gh_stars>10-100
from design_bench.datasets.discrete_dataset import DiscreteDataset
from design_bench.disk_resource import DiskResource, SERVER_URL
"""
chembl-AC50-CHEMBL1741322/chembl-x-0.npy 1190
chembl-ALB-CHEMBL3885882/chembl-x-0.npy 1096
chembl-ALP-CHEMBL3885882/chembl-x-0.npy 1096
chembl-ALT-CHEMBL3885882/chembl-x-0.npy 1096
chembl-AST-CHEMBL3885882/chembl-x-0.npy 1096
chembl-BASOLE-CHEMBL3885882/chembl-x-0.npy 1096
chembl-BILI-CHEMBL3885882/chembl-x-0.npy 1093
chembl-BUN-CHEMBL3885882/chembl-x-0.npy 1096
chembl-CHLORIDE-CHEMBL3885882/chembl-x-0.npy 1096
chembl-CHOL-CHEMBL3885882/chembl-x-0.npy 1096
chembl-CK-CHEMBL3885882/chembl-x-0.npy 1014
chembl-CREAT-CHEMBL3885882/chembl-x-0.npy 1096
chembl-EOSLE-CHEMBL3885882/chembl-x-0.npy 1096
chembl-GI50-CHEMBL1963844/chembl-x-0.npy 5049
chembl-GI50-CHEMBL1963848/chembl-x-0.npy 1900
chembl-GI50-CHEMBL1963854/chembl-x-0.npy 5503
chembl-GI50-CHEMBL1963860/chembl-x-0.npy 4757
chembl-GI50-CHEMBL1963866/chembl-x-0.npy 5068
chembl-GI50-CHEMBL1963868/chembl-x-0.npy 5344
chembl-GI50-CHEMBL1963874/chembl-x-0.npy 5419
chembl-GI50-CHEMBL1963876/chembl-x-0.npy 5377
chembl-GI50-CHEMBL1963880/chembl-x-0.npy 5340
chembl-GI50-CHEMBL1963882/chembl-x-0.npy 3596
chembl-GI50-CHEMBL1963885/chembl-x-0.npy 3578
chembl-GI50-CHEMBL1963887/chembl-x-0.npy 2020
chembl-GI50-CHEMBL1963889/chembl-x-0.npy 1859
chembl-GI50-CHEMBL1963895/chembl-x-0.npy 5167
chembl-GI50-CHEMBL1963900/chembl-x-0.npy 3311
chembl-GI50-CHEMBL1963901/chembl-x-0.npy 5282
chembl-GI50-CHEMBL1963903/chembl-x-0.npy 5344
chembl-GI50-CHEMBL1963911/chembl-x-0.npy 5004
chembl-GI50-CHEMBL1963921/chembl-x-0.npy 3570
chembl-GI50-CHEMBL1963922/chembl-x-0.npy 5438
chembl-GI50-CHEMBL1963929/chembl-x-0.npy 5404
chembl-GI50-CHEMBL1963935/chembl-x-0.npy 1541
chembl-GI50-CHEMBL1963945/chembl-x-0.npy 3327
chembl-GI50-CHEMBL1963953/chembl-x-0.npy 5249
chembl-GI50-CHEMBL1963954/chembl-x-0.npy 1925
chembl-GI50-CHEMBL1963960/chembl-x-0.npy 5379
chembl-GI50-CHEMBL1963961/chembl-x-0.npy 5187
chembl-GI50-CHEMBL1963963/chembl-x-0.npy 1988
chembl-GI50-CHEMBL1963976/chembl-x-0.npy 3484
chembl-GI50-CHEMBL1963981/chembl-x-0.npy 5430
chembl-GI50-CHEMBL1963985/chembl-x-0.npy 5458
chembl-GI50-CHEMBL1963989/chembl-x-0.npy 5332
chembl-GI50-CHEMBL1963990/chembl-x-0.npy 5364
chembl-GI50-CHEMBL1963991/chembl-x-0.npy 4645
chembl-GI50-CHEMBL1963994/chembl-x-0.npy 4968
chembl-GI50-CHEMBL1964004/chembl-x-0.npy 4985
chembl-GI50-CHEMBL1964006/chembl-x-0.npy 5460
chembl-GI50-CHEMBL1964007/chembl-x-0.npy 5377
chembl-GI50-CHEMBL1964009/chembl-x-0.npy 4883
chembl-GI50-CHEMBL1964012/chembl-x-0.npy 5351
chembl-GI50-CHEMBL1964014/chembl-x-0.npy 1561
chembl-GI50-CHEMBL1964017/chembl-x-0.npy 3558
chembl-GI50-CHEMBL1964018/chembl-x-0.npy 5020
chembl-GI50-CHEMBL1964021/chembl-x-0.npy 5356
chembl-GI50-CHEMBL1964025/chembl-x-0.npy 5053
chembl-GI50-CHEMBL1964030/chembl-x-0.npy 5379
chembl-GI50-CHEMBL1964034/chembl-x-0.npy 4679
chembl-GI50-CHEMBL1964037/chembl-x-0.npy 5222
chembl-GI50-CHEMBL1964040/chembl-x-0.npy 5354
chembl-GI50-CHEMBL1964043/chembl-x-0.npy 5271
chembl-GI50-CHEMBL1964045/chembl-x-0.npy 3639
chembl-GI50-CHEMBL1964047/chembl-x-0.npy 5487
chembl-GI50-CHEMBL1964048/chembl-x-0.npy 5460
chembl-GI50-CHEMBL1964049/chembl-x-0.npy 5266
chembl-GI50-CHEMBL1964059/chembl-x-0.npy 5396
chembl-GI50-CHEMBL1964062/chembl-x-0.npy 1905
chembl-GI50-CHEMBL1964063/chembl-x-0.npy 3494
chembl-GI50-CHEMBL1964065/chembl-x-0.npy 4421
chembl-GI50-CHEMBL1964066/chembl-x-0.npy 5309
chembl-GI50-CHEMBL1964072/chembl-x-0.npy 5503
chembl-GI50-CHEMBL1964074/chembl-x-0.npy 2025
chembl-GI50-CHEMBL1964075/chembl-x-0.npy 4893
chembl-GI50-CHEMBL1964077/chembl-x-0.npy 5423
chembl-GI50-CHEMBL1964085/chembl-x-0.npy 5306
chembl-GI50-CHEMBL1964086/chembl-x-0.npy 1659
chembl-GI50-CHEMBL1964087/chembl-x-0.npy 5325
chembl-GI50-CHEMBL1964088/chembl-x-0.npy 4984
chembl-GI50-CHEMBL1964091/chembl-x-0.npy 5150
chembl-GI50-CHEMBL1964092/chembl-x-0.npy 5080
chembl-GI50-CHEMBL1964099/chembl-x-0.npy 3056
chembl-GLUC-CHEMBL3885882/chembl-x-0.npy 1096
chembl-HCT-CHEMBL3885882/chembl-x-0.npy 1096
chembl-HGB-CHEMBL3885882/chembl-x-0.npy 1093
chembl-INHIBITION-CHEMBL4513217/chembl-x-0.npy 4807
chembl-INHIBITION-CHEMBL4513218/chembl-x-0.npy 4815
chembl-INHIBITION-CHEMBL4513219/chembl-x-0.npy 4815
chembl-INHIBITION-CHEMBL4513220/chembl-x-0.npy 4589
chembl-INHIBITION-CHEMBL4513221/chembl-x-0.npy 4815
chembl-Inhibition-CHEMBL3507681/chembl-x-0.npy 4574
chembl-Inhibition-CHEMBL3988443/chembl-x-0.npy 4574
chembl-Inhibition-CHEMBL4296187/chembl-x-0.npy 10334
chembl-Inhibition-CHEMBL4296188/chembl-x-0.npy 9777
chembl-Inhibition-CHEMBL4296802/chembl-x-0.npy 9464
chembl-Inhibition-CHEMBL4495582/chembl-x-0.npy 1433
chembl-Inhibition-CHEMBL4513082/chembl-x-0.npy 1433
chembl-LYMLE-CHEMBL3885882/chembl-x-0.npy 1096
chembl-MCH-CHEMBL3885882/chembl-x-0.npy 1093
chembl-MCHC-CHEMBL3885882/chembl-x-0.npy 1093
chembl-MCV-CHEMBL3885882/chembl-x-0.npy 1096
chembl-MONOLE-CHEMBL3885882/chembl-x-0.npy 1096
chembl-NEUTLE-CHEMBL3885882/chembl-x-0.npy 1096
chembl-PHOS-CHEMBL3885882/chembl-x-0.npy 1096
chembl-PLAT-CHEMBL3885882/chembl-x-0.npy 1096
chembl-POTASSIUM-CHEMBL3885882/chembl-x-0.npy 1096
chembl-PROT-CHEMBL3885882/chembl-x-0.npy 1096
chembl-Potency-CHEMBL1613836/chembl-x-0.npy 4118
chembl-Potency-CHEMBL1613838/chembl-x-0.npy 3004
chembl-Potency-CHEMBL1613842/chembl-x-0.npy 3090
chembl-Potency-CHEMBL1613910/chembl-x-0.npy 1107
chembl-Potency-CHEMBL1613914/chembl-x-0.npy 3668
chembl-Potency-CHEMBL1613918/chembl-x-0.npy 1278
chembl-Potency-CHEMBL1613970/chembl-x-0.npy 1009
chembl-Potency-CHEMBL1614038/chembl-x-0.npy 2568
chembl-Potency-CHEMBL1614076/chembl-x-0.npy 1239
chembl-Potency-CHEMBL1614079/chembl-x-0.npy 1402
chembl-Potency-CHEMBL1614087/chembl-x-0.npy 4682
chembl-Potency-CHEMBL1614146/chembl-x-0.npy 4118
chembl-Potency-CHEMBL1614161/chembl-x-0.npy 2131
chembl-Potency-CHEMBL1614166/chembl-x-0.npy 2027
chembl-Potency-CHEMBL1614174/chembl-x-0.npy 2482
chembl-Potency-CHEMBL1614211/chembl-x-0.npy 3493
chembl-Potency-CHEMBL1614227/chembl-x-0.npy 1200
chembl-Potency-CHEMBL1614236/chembl-x-0.npy 3023
chembl-Potency-CHEMBL1614249/chembl-x-0.npy 3282
chembl-Potency-CHEMBL1614250/chembl-x-0.npy 1111
chembl-Potency-CHEMBL1614257/chembl-x-0.npy 1739
chembl-Potency-CHEMBL1614275/chembl-x-0.npy 1887
chembl-Potency-CHEMBL1614280/chembl-x-0.npy 1240
chembl-Potency-CHEMBL1614281/chembl-x-0.npy 1155
chembl-Potency-CHEMBL1614342/chembl-x-0.npy 2517
chembl-Potency-CHEMBL1614361/chembl-x-0.npy 1531
chembl-Potency-CHEMBL1614364/chembl-x-0.npy 1195
chembl-Potency-CHEMBL1614410/chembl-x-0.npy 1093
chembl-Potency-CHEMBL1614421/chembl-x-0.npy 2639
chembl-Potency-CHEMBL1614441/chembl-x-0.npy 3371
chembl-Potency-CHEMBL1614458/chembl-x-0.npy 6304
chembl-Potency-CHEMBL1614459/chembl-x-0.npy 7096
chembl-Potency-CHEMBL1614530/chembl-x-0.npy 4849
chembl-Potency-CHEMBL1614544/chembl-x-0.npy 2218
chembl-Potency-CHEMBL1737902/chembl-x-0.npy 6480
chembl-Potency-CHEMBL1737991/chembl-x-0.npy 4048
chembl-Potency-CHEMBL1738132/chembl-x-0.npy 2610
chembl-Potency-CHEMBL1738184/chembl-x-0.npy 4994
chembl-Potency-CHEMBL1738312/chembl-x-0.npy 3919
chembl-Potency-CHEMBL1738317/chembl-x-0.npy 3331
chembl-Potency-CHEMBL1738442/chembl-x-0.npy 7331
chembl-Potency-CHEMBL1738588/chembl-x-0.npy 7848
chembl-Potency-CHEMBL1738606/chembl-x-0.npy 1208
chembl-Potency-CHEMBL1794308/chembl-x-0.npy 3654
chembl-Potency-CHEMBL1794311/chembl-x-0.npy 1799
chembl-Potency-CHEMBL1794345/chembl-x-0.npy 13702
chembl-Potency-CHEMBL1794352/chembl-x-0.npy 5877
chembl-Potency-CHEMBL1794359/chembl-x-0.npy 1537
chembl-Potency-CHEMBL1794375/chembl-x-0.npy 6607
chembl-Potency-CHEMBL1794401/chembl-x-0.npy 8322
chembl-Potency-CHEMBL1794424/chembl-x-0.npy 2448
chembl-Potency-CHEMBL1794440/chembl-x-0.npy 1002
chembl-Potency-CHEMBL1794461/chembl-x-0.npy 1596
chembl-Potency-CHEMBL1794483/chembl-x-0.npy 9530
chembl-Potency-CHEMBL1794499/chembl-x-0.npy 1290
chembl-Potency-CHEMBL1794553/chembl-x-0.npy 3546
chembl-Potency-CHEMBL1794580/chembl-x-0.npy 9915
chembl-Potency-CHEMBL1794584/chembl-x-0.npy 3014
chembl-Potency-CHEMBL1794585/chembl-x-0.npy 1200
chembl-Potency-CHEMBL2114713/chembl-x-0.npy 1250
chembl-Potency-CHEMBL2114738/chembl-x-0.npy 1580
chembl-Potency-CHEMBL2114775/chembl-x-0.npy 3350
chembl-Potency-CHEMBL2114780/chembl-x-0.npy 4186
chembl-Potency-CHEMBL2114784/chembl-x-0.npy 2479
chembl-Potency-CHEMBL2114788/chembl-x-0.npy 5101
chembl-Potency-CHEMBL2114807/chembl-x-0.npy 2307
chembl-Potency-CHEMBL2114810/chembl-x-0.npy 7269
chembl-Potency-CHEMBL2114836/chembl-x-0.npy 1881
chembl-Potency-CHEMBL2114843/chembl-x-0.npy 4050
chembl-Potency-CHEMBL2114861/chembl-x-0.npy 1798
chembl-Potency-CHEMBL2114908/chembl-x-0.npy 1139
chembl-Potency-CHEMBL2114913/chembl-x-0.npy 2068
chembl-Potency-CHEMBL2354211/chembl-x-0.npy 1594
chembl-Potency-CHEMBL2354221/chembl-x-0.npy 6549
chembl-Potency-CHEMBL2354254/chembl-x-0.npy 5683
chembl-Potency-CHEMBL2354287/chembl-x-0.npy 3305
chembl-Potency-CHEMBL2354311/chembl-x-0.npy 1538
chembl-Potency-CHEMBL3214953/chembl-x-0.npy 1102
chembl-Potency-CHEMBL3215017/chembl-x-0.npy 1197
chembl-Potency-CHEMBL3215106/chembl-x-0.npy 2289
chembl-Potency-CHEMBL3215181/chembl-x-0.npy 3089
chembl-Potency-CHEMBL3215278/chembl-x-0.npy 2233
chembl-Potency-CHEMBL3562077/chembl-x-0.npy 5146
chembl-RBC-CHEMBL3885882/chembl-x-0.npy 1096
chembl-SODIUM-CHEMBL3885882/chembl-x-0.npy 1096
chembl-WBC-CHEMBL3885882/chembl-x-0.npy 1095
chembl-WEIGHT-CHEMBL3885862/chembl-x-0.npy 3196
chembl-WEIGHT-CHEMBL3885863/chembl-x-0.npy 3360
"""
CHEMBL_FILES = ['chembl-AC50-CHEMBL1741322/chembl-x-0.npy',
'chembl-ALB-CHEMBL3885882/chembl-x-0.npy',
'chembl-ALP-CHEMBL3885882/chembl-x-0.npy',
'chembl-ALT-CHEMBL3885882/chembl-x-0.npy',
'chembl-AST-CHEMBL3885882/chembl-x-0.npy',
'chembl-BASOLE-CHEMBL3885882/chembl-x-0.npy',
'chembl-BILI-CHEMBL3885882/chembl-x-0.npy',
'chembl-BUN-CHEMBL3885882/chembl-x-0.npy',
'chembl-CHLORIDE-CHEMBL3885882/chembl-x-0.npy',
'chembl-CHOL-CHEMBL3885882/chembl-x-0.npy',
'chembl-CK-CHEMBL3885882/chembl-x-0.npy',
'chembl-CREAT-CHEMBL3885882/chembl-x-0.npy',
'chembl-EOSLE-CHEMBL3885882/chembl-x-0.npy',
'chembl-GI50-CHEMBL1963844/chembl-x-0.npy',
'chembl-GI50-CHEMBL1963848/chembl-x-0.npy',
'chembl-GI50-CHEMBL1963854/chembl-x-0.npy',
'chembl-GI50-CHEMBL1963860/chembl-x-0.npy',
'chembl-GI50-CHEMBL1963866/chembl-x-0.npy',
'chembl-GI50-CHEMBL1963868/chembl-x-0.npy',
'chembl-GI50-CHEMBL1963874/chembl-x-0.npy',
'chembl-GI50-CHEMBL1963876/chembl-x-0.npy',
'chembl-GI50-CHEMBL1963880/chembl-x-0.npy',
'chembl-GI50-CHEMBL1963882/chembl-x-0.npy',
'chembl-GI50-CHEMBL1963885/chembl-x-0.npy',
'chembl-GI50-CHEMBL1963887/chembl-x-0.npy',
'chembl-GI50-CHEMBL1963889/chembl-x-0.npy',
'chembl-GI50-CHEMBL1963895/chembl-x-0.npy',
'chembl-GI50-CHEMBL1963900/chembl-x-0.npy',
'chembl-GI50-CHEMBL1963901/chembl-x-0.npy',
'chembl-GI50-CHEMBL1963903/chembl-x-0.npy',
'chembl-GI50-CHEMBL1963911/chembl-x-0.npy',
'chembl-GI50-CHEMBL1963921/chembl-x-0.npy',
'chembl-GI50-CHEMBL1963922/chembl-x-0.npy',
'chembl-GI50-CHEMBL1963929/chembl-x-0.npy',
'chembl-GI50-CHEMBL1963935/chembl-x-0.npy',
'chembl-GI50-CHEMBL1963945/chembl-x-0.npy',
'chembl-GI50-CHEMBL1963953/chembl-x-0.npy',
'chembl-GI50-CHEMBL1963954/chembl-x-0.npy',
'chembl-GI50-CHEMBL1963960/chembl-x-0.npy',
'chembl-GI50-CHEMBL1963961/chembl-x-0.npy',
'chembl-GI50-CHEMBL1963963/chembl-x-0.npy',
'chembl-GI50-CHEMBL1963976/chembl-x-0.npy',
'chembl-GI50-CHEMBL1963981/chembl-x-0.npy',
'chembl-GI50-CHEMBL1963985/chembl-x-0.npy',
'chembl-GI50-CHEMBL1963989/chembl-x-0.npy',
'chembl-GI50-CHEMBL1963990/chembl-x-0.npy',
'chembl-GI50-CHEMBL1963991/chembl-x-0.npy',
'chembl-GI50-CHEMBL1963994/chembl-x-0.npy',
'chembl-GI50-CHEMBL1964004/chembl-x-0.npy',
'chembl-GI50-CHEMBL1964006/chembl-x-0.npy',
'chembl-GI50-CHEMBL1964007/chembl-x-0.npy',
'chembl-GI50-CHEMBL1964009/chembl-x-0.npy',
'chembl-GI50-CHEMBL1964012/chembl-x-0.npy',
'chembl-GI50-CHEMBL1964014/chembl-x-0.npy',
'chembl-GI50-CHEMBL1964017/chembl-x-0.npy',
'chembl-GI50-CHEMBL1964018/chembl-x-0.npy',
'chembl-GI50-CHEMBL1964021/chembl-x-0.npy',
'chembl-GI50-CHEMBL1964025/chembl-x-0.npy',
'chembl-GI50-CHEMBL1964030/chembl-x-0.npy',
'chembl-GI50-CHEMBL1964034/chembl-x-0.npy',
'chembl-GI50-CHEMBL1964037/chembl-x-0.npy',
'chembl-GI50-CHEMBL1964040/chembl-x-0.npy',
'chembl-GI50-CHEMBL1964043/chembl-x-0.npy',
'chembl-GI50-CHEMBL1964045/chembl-x-0.npy',
'chembl-GI50-CHEMBL1964047/chembl-x-0.npy',
'chembl-GI50-CHEMBL1964048/chembl-x-0.npy',
'chembl-GI50-CHEMBL1964049/chembl-x-0.npy',
'chembl-GI50-CHEMBL1964059/chembl-x-0.npy',
'chembl-GI50-CHEMBL1964062/chembl-x-0.npy',
'chembl-GI50-CHEMBL1964063/chembl-x-0.npy',
'chembl-GI50-CHEMBL1964065/chembl-x-0.npy',
'chembl-GI50-CHEMBL1964066/chembl-x-0.npy',
'chembl-GI50-CHEMBL1964072/chembl-x-0.npy',
'chembl-GI50-CHEMBL1964074/chembl-x-0.npy',
'chembl-GI50-CHEMBL1964075/chembl-x-0.npy',
'chembl-GI50-CHEMBL1964077/chembl-x-0.npy',
'chembl-GI50-CHEMBL1964085/chembl-x-0.npy',
'chembl-GI50-CHEMBL1964086/chembl-x-0.npy',
'chembl-GI50-CHEMBL1964087/chembl-x-0.npy',
'chembl-GI50-CHEMBL1964088/chembl-x-0.npy',
'chembl-GI50-CHEMBL1964091/chembl-x-0.npy',
'chembl-GI50-CHEMBL1964092/chembl-x-0.npy',
'chembl-GI50-CHEMBL1964099/chembl-x-0.npy',
'chembl-GLUC-CHEMBL3885882/chembl-x-0.npy',
'chembl-HCT-CHEMBL3885882/chembl-x-0.npy',
'chembl-HGB-CHEMBL3885882/chembl-x-0.npy',
'chembl-INHIBITION-CHEMBL4513217/chembl-x-0.npy',
'chembl-INHIBITION-CHEMBL4513218/chembl-x-0.npy',
'chembl-INHIBITION-CHEMBL4513219/chembl-x-0.npy',
'chembl-INHIBITION-CHEMBL4513220/chembl-x-0.npy',
'chembl-INHIBITION-CHEMBL4513221/chembl-x-0.npy',
'chembl-Inhibition-CHEMBL3507681/chembl-x-0.npy',
'chembl-Inhibition-CHEMBL3988443/chembl-x-0.npy',
'chembl-Inhibition-CHEMBL4296187/chembl-x-0.npy',
'chembl-Inhibition-CHEMBL4296188/chembl-x-0.npy',
'chembl-Inhibition-CHEMBL4296802/chembl-x-0.npy',
'chembl-Inhibition-CHEMBL4495582/chembl-x-0.npy',
'chembl-Inhibition-CHEMBL4513082/chembl-x-0.npy',
'chembl-LYMLE-CHEMBL3885882/chembl-x-0.npy',
'chembl-MCH-CHEMBL3885882/chembl-x-0.npy',
'chembl-MCHC-CHEMBL3885882/chembl-x-0.npy',
'chembl-MCV-CHEMBL3885882/chembl-x-0.npy',
'chembl-MONOLE-CHEMBL3885882/chembl-x-0.npy',
'chembl-NEUTLE-CHEMBL3885882/chembl-x-0.npy',
'chembl-PHOS-CHEMBL3885882/chembl-x-0.npy',
'chembl-PLAT-CHEMBL3885882/chembl-x-0.npy',
'chembl-POTASSIUM-CHEMBL3885882/chembl-x-0.npy',
'chembl-PROT-CHEMBL3885882/chembl-x-0.npy',
'chembl-Potency-CHEMBL1613836/chembl-x-0.npy',
'chembl-Potency-CHEMBL1613838/chembl-x-0.npy',
'chembl-Potency-CHEMBL1613842/chembl-x-0.npy',
'chembl-Potency-CHEMBL1613910/chembl-x-0.npy',
'chembl-Potency-CHEMBL1613914/chembl-x-0.npy',
'chembl-Potency-CHEMBL1613918/chembl-x-0.npy',
'chembl-Potency-CHEMBL1613970/chembl-x-0.npy',
'chembl-Potency-CHEMBL1614038/chembl-x-0.npy',
'chembl-Potency-CHEMBL1614076/chembl-x-0.npy',
'chembl-Potency-CHEMBL1614079/chembl-x-0.npy',
'chembl-Potency-CHEMBL1614087/chembl-x-0.npy',
'chembl-Potency-CHEMBL1614146/chembl-x-0.npy',
'chembl-Potency-CHEMBL1614161/chembl-x-0.npy',
'chembl-Potency-CHEMBL1614166/chembl-x-0.npy',
'chembl-Potency-CHEMBL1614174/chembl-x-0.npy',
'chembl-Potency-CHEMBL1614211/chembl-x-0.npy',
'chembl-Potency-CHEMBL1614227/chembl-x-0.npy',
'chembl-Potency-CHEMBL1614236/chembl-x-0.npy',
'chembl-Potency-CHEMBL1614249/chembl-x-0.npy',
'chembl-Potency-CHEMBL1614250/chembl-x-0.npy',
'chembl-Potency-CHEMBL1614257/chembl-x-0.npy',
'chembl-Potency-CHEMBL1614275/chembl-x-0.npy',
'chembl-Potency-CHEMBL1614280/chembl-x-0.npy',
'chembl-Potency-CHEMBL1614281/chembl-x-0.npy',
'chembl-Potency-CHEMBL1614342/chembl-x-0.npy',
'chembl-Potency-CHEMBL1614361/chembl-x-0.npy',
'chembl-Potency-CHEMBL1614364/chembl-x-0.npy',
'chembl-Potency-CHEMBL1614410/chembl-x-0.npy',
'chembl-Potency-CHEMBL1614421/chembl-x-0.npy',
'chembl-Potency-CHEMBL1614441/chembl-x-0.npy',
'chembl-Potency-CHEMBL1614458/chembl-x-0.npy',
'chembl-Potency-CHEMBL1614459/chembl-x-0.npy',
'chembl-Potency-CHEMBL1614530/chembl-x-0.npy',
'chembl-Potency-CHEMBL1614544/chembl-x-0.npy',
'chembl-Potency-CHEMBL1737902/chembl-x-0.npy',
'chembl-Potency-CHEMBL1737991/chembl-x-0.npy',
'chembl-Potency-CHEMBL1738132/chembl-x-0.npy',
'chembl-Potency-CHEMBL1738184/chembl-x-0.npy',
'chembl-Potency-CHEMBL1738312/chembl-x-0.npy',
'chembl-Potency-CHEMBL1738317/chembl-x-0.npy',
'chembl-Potency-CHEMBL1738442/chembl-x-0.npy',
'chembl-Potency-CHEMBL1738588/chembl-x-0.npy',
'chembl-Potency-CHEMBL1738606/chembl-x-0.npy',
'chembl-Potency-CHEMBL1794308/chembl-x-0.npy',
'chembl-Potency-CHEMBL1794311/chembl-x-0.npy',
'chembl-Potency-CHEMBL1794345/chembl-x-0.npy',
'chembl-Potency-CHEMBL1794352/chembl-x-0.npy',
'chembl-Potency-CHEMBL1794359/chembl-x-0.npy',
'chembl-Potency-CHEMBL1794375/chembl-x-0.npy',
'chembl-Potency-CHEMBL1794401/chembl-x-0.npy',
'chembl-Potency-CHEMBL1794424/chembl-x-0.npy',
'chembl-Potency-CHEMBL1794440/chembl-x-0.npy',
'chembl-Potency-CHEMBL1794461/chembl-x-0.npy',
'chembl-Potency-CHEMBL1794483/chembl-x-0.npy',
'chembl-Potency-CHEMBL1794499/chembl-x-0.npy',
'chembl-Potency-CHEMBL1794553/chembl-x-0.npy',
'chembl-Potency-CHEMBL1794580/chembl-x-0.npy',
'chembl-Potency-CHEMBL1794584/chembl-x-0.npy',
'chembl-Potency-CHEMBL1794585/chembl-x-0.npy',
'chembl-Potency-CHEMBL2114713/chembl-x-0.npy',
'chembl-Potency-CHEMBL2114738/chembl-x-0.npy',
'chembl-Potency-CHEMBL2114775/chembl-x-0.npy',
'chembl-Potency-CHEMBL2114780/chembl-x-0.npy',
'chembl-Potency-CHEMBL2114784/chembl-x-0.npy',
'chembl-Potency-CHEMBL2114788/chembl-x-0.npy',
'chembl-Potency-CHEMBL2114807/chembl-x-0.npy',
'chembl-Potency-CHEMBL2114810/chembl-x-0.npy',
'chembl-Potency-CHEMBL2114836/chembl-x-0.npy',
'chembl-Potency-CHEMBL2114843/chembl-x-0.npy',
'chembl-Potency-CHEMBL2114861/chembl-x-0.npy',
'chembl-Potency-CHEMBL2114908/chembl-x-0.npy',
'chembl-Potency-CHEMBL2114913/chembl-x-0.npy',
'chembl-Potency-CHEMBL2354211/chembl-x-0.npy',
'chembl-Potency-CHEMBL2354221/chembl-x-0.npy',
'chembl-Potency-CHEMBL2354254/chembl-x-0.npy',
'chembl-Potency-CHEMBL2354287/chembl-x-0.npy',
'chembl-Potency-CHEMBL2354311/chembl-x-0.npy',
'chembl-Potency-CHEMBL3214953/chembl-x-0.npy',
'chembl-Potency-CHEMBL3215017/chembl-x-0.npy',
'chembl-Potency-CHEMBL3215106/chembl-x-0.npy',
'chembl-Potency-CHEMBL3215181/chembl-x-0.npy',
'chembl-Potency-CHEMBL3215278/chembl-x-0.npy',
'chembl-Potency-CHEMBL3562077/chembl-x-0.npy',
'chembl-RBC-CHEMBL3885882/chembl-x-0.npy',
'chembl-SODIUM-CHEMBL3885882/chembl-x-0.npy',
'chembl-WBC-CHEMBL3885882/chembl-x-0.npy',
'chembl-WEIGHT-CHEMBL3885862/chembl-x-0.npy',
'chembl-WEIGHT-CHEMBL3885863/chembl-x-0.npy']
class ChEMBLDataset(DiscreteDataset):
    """A molecule design dataset for model-based optimization, where the
    goal is to find a design 'x' that maximizes a prediction 'y':

    max_x { y = f(x) }

    Designs are drawn from a single ChEMBL assay: each 'x' is a SMILES
    string encoded as a sequence of integer tokens over 591 token classes
    (see `num_classes` passed to the base class), and each 'y' is the
    assay measurement named by `standard_type` (e.g. "Potency") for that
    molecule.

    This subclass only provides the shard registration and naming that is
    specific to ChEMBL; all shared attributes and methods (normalization,
    subsampling, relabelling, logits conversion, batch iteration, cloning,
    splitting, etc.) are documented on the `DiscreteDataset` base class.
    """

    # default labels used when plotting this dataset; `name` and `y_name`
    # are overwritten with assay-specific values in __init__
    name = "chembl/chembl"
    y_name = "standard_value"
    x_name = "smiles"

    @staticmethod
    def register_x_shards(assay_chembl_id="CHEMBL1794345",
                          standard_type="Potency"):  # max percentile 53 works well
        """Registers a remote file for download that contains design values
        in a format compatible with the dataset builder class;
        these files are downloaded all at once in the dataset initialization

        Arguments:

        assay_chembl_id: str
            a string identifier that specifies which assay to use for
            model-based optimization, where the goal is to find a design
            value 'x' that maximizes a certain property
        standard_type: str
            a string identifier that specifies which property of the assay
            is being measured for model-based optimization, where the goal is
            to maximize that property

        Returns:

        resources: list of RemoteResource
            a list of RemoteResource objects specific to this dataset, which
            will be automatically downloaded while the dataset is built
            and may serve as shards if the dataset is large

        """

        # keep only the design shards that belong to the requested assay
        return [DiskResource(
            file, is_absolute=False,
            download_target=f"{SERVER_URL}/{file}",
            download_method="direct") for file in CHEMBL_FILES
            if f"{standard_type}-{assay_chembl_id}" in file]

    @staticmethod
    def register_y_shards(assay_chembl_id="CHEMBL1794345",
                          standard_type="Potency"):
        """Registers a remote file for download that contains prediction
        values in a format compatible with the dataset builder class;
        these files are downloaded all at once in the dataset initialization

        Arguments:

        assay_chembl_id: str
            a string identifier that specifies which assay to use for
            model-based optimization, where the goal is to find a design
            value 'x' that maximizes a certain property
        standard_type: str
            a string identifier that specifies which property of the assay
            is being measured for model-based optimization, where the goal is
            to maximize that property

        Returns:

        resources: list of RemoteResource
            a list of RemoteResource objects specific to this dataset, which
            will be automatically downloaded while the dataset is built
            and may serve as shards if the dataset is large

        """

        # prediction shards share names with the design shards except for
        # the "-y-" infix, so derive them from the same file list
        return [DiskResource(
            file.replace("-x-", "-y-"), is_absolute=False,
            download_target=f"{SERVER_URL}/{file.replace('-x-', '-y-')}",
            download_method="direct") for file in CHEMBL_FILES
            if f"{standard_type}-{assay_chembl_id}" in file]

    def __init__(self, assay_chembl_id="CHEMBL1794345",
                 standard_type="Potency",
                 soft_interpolation=0.6, **kwargs):
        """Initialize a model-based optimization dataset and prepare
        that dataset by loading that dataset from disk and modifying
        its distribution

        Arguments:

        assay_chembl_id: str
            a string identifier that specifies which assay to use for
            model-based optimization, where the goal is to find a design
            value 'x' that maximizes a certain property
        standard_type: str
            a string identifier that specifies which property of the assay
            is being measured for model-based optimization, where the goal is
            to maximize that property
        soft_interpolation: float
            a floating point hyper parameter used when converting design values
            from integers to a floating point representation as logits, which
            interpolates between a uniform and dirac distribution
            1.0 -> dirac, 0.0 -> uniform
        **kwargs: dict
            additional keyword arguments which are used to parameterize the
            data set generation process, including which shard files are used
            if multiple sets of data set shard files can be loaded

        """

        # set the names that describe this particular assay's dataset
        self.name = f"chembl-{standard_type}-{assay_chembl_id}/chembl"
        self.y_name = standard_type

        # initialize the dataset using the method in the base class
        super(ChEMBLDataset, self).__init__(
            self.register_x_shards(assay_chembl_id=assay_chembl_id,
                                   standard_type=standard_type),
            self.register_y_shards(assay_chembl_id=assay_chembl_id,
                                   standard_type=standard_type),
            is_logits=False, num_classes=591,
            soft_interpolation=soft_interpolation, **kwargs)
|
import openpnm as op
import numpy as np
class BoundaryTest:
    """Tests for Boundary geometry objects attached to cubic networks."""

    def setup_class(self):
        # Build a 3x3x3 cubic network with boundary pores, then split the
        # top/bottom boundary pores randomly between two Boundary objects.
        self.net = op.network.Cubic(shape=[3, 3, 3])
        self.net.add_boundary_pores()
        internal_pores = self.net.pores(
            labels=['top_boundary', 'bottom_boundary'], mode='not')
        boundary_pores = self.net.pores(
            labels=['top_boundary', 'bottom_boundary'], mode='union')
        split_mask = np.random.random(len(boundary_pores)) < 0.5
        internal_throats = self.net.throats(
            labels=['internal', 'surface'], mode='union')
        throats_a = self.net.find_neighbor_throats(
            pores=boundary_pores[split_mask])
        throats_b = self.net.find_neighbor_throats(
            pores=boundary_pores[~split_mask])
        self.geo = op.geometry.StickAndBall(network=self.net,
                                            pores=internal_pores,
                                            throats=internal_throats)
        self.boun1 = op.geometry.Boundary(network=self.net,
                                          pores=boundary_pores[split_mask],
                                          throats=throats_a)
        self.boun2 = op.geometry.Boundary(network=self.net,
                                          pores=boundary_pores[~split_mask],
                                          throats=throats_b)
        for geom in (self.geo, self.boun1, self.boun2):
            geom.regenerate_models()

    def teardown_class(self):
        # Drop every project from the shared workspace between test classes.
        op.Workspace().clear()

    def test_plot_histogram(self):
        # show_hist should accept both the default and explicit prop lists
        # on either boundary geometry.
        few_props = ['pore.diameter', 'pore.volume', 'throat.length']
        many_props = few_props + ['throat.diameter', 'pore.seed']
        for obj in (self.boun1, self.boun2):
            obj.show_hist()
            obj.show_hist(props=few_props)
            obj.show_hist(props=many_props)

    def test_boundary_with_alg(self):
        # Use a fresh, larger network so the result is independent of the
        # state created in setup_class.
        pn = op.network.Cubic(shape=[5, 5, 5], spacing=2.5e-5)
        pn.add_boundary_pores()
        internal_pores = pn.pores(labels=['*boundary'], mode='not')
        boundary_pores = pn.pores(labels=['*boundary'])
        internal_throats = pn.throats(labels=['*boundary'], mode='not')
        boundary_throats = pn.throats(labels=['*boundary'])
        geo = op.geometry.StickAndBall(network=pn,
                                       pores=internal_pores,
                                       throats=internal_throats)
        boun = op.geometry.Boundary(network=pn, pores=boundary_pores,
                                    throats=boundary_throats)
        geo.regenerate_models()
        boun.regenerate_models()
        air = op.phases.Air(network=pn)
        odiff = op.models.physics.diffusive_conductance.ordinary_diffusion
        phys_air_geo = op.physics.Standard(network=pn, phase=air,
                                           geometry=geo)
        phys_air_boun = op.physics.Standard(network=pn, phase=air,
                                            geometry=boun)
        for phys in (phys_air_geo, phys_air_boun):
            phys.add_model(propname="throat.diffusive_conductance",
                           model=odiff)
            phys.regenerate_models()
        # Neither physics object should report any data health problems.
        for phys in (phys_air_geo, phys_air_boun):
            for check in phys.check_data_health().values():
                assert len(check) == 0
        # Driving transport between the boundary faces must produce
        # positive effective coefficients.
        FD = op.algorithms.FickianDiffusion(network=pn, phase=air)
        FD.set_value_BC(pores=pn.pores('top_boundary'), values=1.0)
        FD.set_value_BC(pores=pn.pores('bottom_boundary'), values=0.0)
        FD.run()
        assert FD.calc_effective_diffusivity() > 0
        SF = op.algorithms.StokesFlow(network=pn, phase=air)
        SF.set_value_BC(pores=pn.pores('top_boundary'), values=1.0)
        SF.set_value_BC(pores=pn.pores('bottom_boundary'), values=0.0)
        SF.run()
        assert SF.calc_effective_permeability() > 0
if __name__ == '__main__':
    # Run every test method manually when invoked as a script.
    t = BoundaryTest()
    self = t  # alias kept for interactive debugging of test bodies
    t.setup_class()
    for name in t.__dir__():
        if name.startswith('test'):
            print('running test: ' + name)
            getattr(t, name)()
|
from assembler import Assembler
from assembler import Form
from fem import DofHandler
from fem import QuadFE
from fem import Basis
from function import Nodal
from gmrf import Covariance
from gmrf import GaussianField
from mesh import Mesh1D
from plot import Plot
from solver import LinearSystem
# Built-in modules
import numpy as np
import scipy.linalg as la
import matplotlib.pyplot as plt
"""
Implement Reduced order Model
-d/dx(q(x,w)d/dx u(x)) = f(x)
u(0) = 1
u(1) = 0
"""
mesh = Mesh1D(resolution=(100,))
mesh.mark_region('left', lambda x: np.abs(x)<1e-10)
mesh.mark_region('right', lambda x: np.abs(x-1)<1e-10)
element = QuadFE(mesh.dim(), 'Q1')
dofhandler = DofHandler(mesh, element)
dofhandler.distribute_dofs()
n = dofhandler.n_dofs()
# =============================================================================
# Random field
# =============================================================================
n_samples = 10
n_train, n_test = 8, 2
i_train = np.arange(n_train)
i_test = np.arange(n_train,n_samples)
cov = Covariance(dofhandler, name='gaussian', parameters={'l':0.1})
cov.compute_eig_decomp()
d,V = cov.get_eig_decomp()
plt.semilogy(d,'.')
plt.show()
log_q = GaussianField(n, K=cov)
log_q.update_support()
qfn = Nodal(dofhandler=dofhandler,
data=np.exp(log_q.sample(n_samples=n_samples)))
plot = Plot()
plot.line(qfn,i_sample=np.arange(n_samples))
# =============================================================================
# Generate Snapshot Set
# =============================================================================
phi = Basis(dofhandler, 'u')
phi_x = Basis(dofhandler, 'ux')
problems = [[Form(kernel=qfn, trial=phi_x, test=phi_x), Form(1, test=phi)],
[Form(1, test=phi, trial=phi)]]
assembler = Assembler(problems, mesh)
assembler.assemble()
A = assembler.af[0]['bilinear'].get_matrix()
b = assembler.af[0]['linear'].get_matrix()
linsys = LinearSystem(phi)
linsys.add_dirichlet_constraint('left',1)
linsys.add_dirichlet_constraint('right',0)
y_snap = Nodal(dofhandler=dofhandler,
data=np.empty((n,n_samples)))
y_data = np.empty((n,n_samples))
for i in range(n_samples):
linsys.set_matrix(A[i].copy())
linsys.set_rhs(b.copy())
linsys.solve_system()
y_data[:,[i]] = linsys.get_solution(as_function=False)
y_snap.set_data(y_data)
plot = Plot()
plot.line(y_snap, i_sample=np.arange(n_samples))
# =============================================================================
# Compute Reduced Order Model
# =============================================================================
M = assembler.af[1]['bilinear'].get_matrix()
y_train = y_data[:,i_train]
y_test = y_data[:,i_test]
U,S,Vt = la.svd(y_train)
x = dofhandler.get_dof_vertices()
m = 8
d = 7
Um = U[:,:m]
plt.plot(x,Um,'k')
# Test functions
i_left = dofhandler.get_region_dofs(entity_flag='left', entity_type='vertex')
B = Um[i_left,:].T
plt.plot(np.tile(x[i_left],B.shape),B,'r.')
plt.show()
Q,R = la.qr(B, mode='full')
psi = Um.dot(Q[:,1:])
plt.plot(x,psi)
plt.show()
rom_tol = 1e-10
rom_error = 1-np.cumsum(S)/np.sum(S)
n_rom = np.sum(rom_error>=rom_tol)
print(n_rom)
Ur = U[:,:n_rom]
Am = np.empty((m,m))
Am[:d,:] = Q[:,1:].T.dot(Um.T.dot(A[0].dot(Um)))
Am[-1,:] = B.ravel()
bm = np.zeros((m,1))
bm[:d,:] = Q[:,1:].T.dot(Um.T.dot(b.toarray()))
bm[-1,:] = 1
c = la.solve(Am,bm)
plt.plot(x,y_data[:,[0]],'k',x,Um.dot(c),'r')
plt.show()
print(Am.shape)
#plt.plot(x,Ur)
#plt.show()
# =============================================================================
# Predict output using ROM
# =============================================================================
u_rom = np.empty((n,n_train))
br = b.T.dot(Ur).T
for i in np.arange(n_train):
Ar = Ur.T.dot(A[i_train[i]].dot(Ur))
cr = la.solve(Ar, br)
u_rom[:,[i]] = Ur.dot(cr)
# =============================================================================
# Compare ROM output with direct numerical simulation
# =============================================================================
#plt.plot(x,u_rom,'k',x,y_data[:,i_train])
#plt.show()
du = np.empty((n,n_train))
for i in range(n_train):
du[:,i] = u_rom[:,i]-y_train[:,i]
#du[:,i] = Ur.dot(Ur.T.dot(u_test[:,i])) - u_test[:,i]
u_error = Nodal(dofhandler=dofhandler, data=du)
#u_error = np.dot(du.T, M.dot(du))
#plot.line(u_error, i_sample=np.arange(0,n_train))
|
<reponame>victoriarspada/woudc-data-registry<gh_stars>1-10
# =================================================================
#
# Terms and Conditions of Use
#
# Unless otherwise noted, computer program source code of this
# distribution # is covered under Crown Copyright, Government of
# Canada, and is distributed under the MIT License.
#
# The Canada wordmark and related graphics associated with this
# distribution are protected under trademark law and copyright law.
# No permission is granted to use them outside the parameters of
# the Government of Canada's corporate identity program. For
# more information, see
# http://www.tbs-sct.gc.ca/fip-pcim/index-eng.asp
#
# Copyright title to all 3rd party software distributed with this
# software is held by the respective copyright holders as noted in
# those files. Users are asked to read the 3rd Party Licenses
# referenced with those assets.
#
# Copyright (c) 2019 Government of Canada
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
import logging
import statistics
from collections import OrderedDict
LOGGER = logging.getLogger(__name__)
DATASETS = ['Broad-band', 'Lidar', 'Multi-band', 'OzoneSonde', 'RocketSonde',
            'Spectral', 'TotalOzone', 'TotalOzoneObs', 'UmkehrN14_1.0',
            'UmkehrN14_2.0']


def get_validator(dataset, reporter):
    """
    Returns a DatasetValidator instance tied to <dataset>.

    If <dataset> is a valid data category but no special validator exists
    for it, returns a base validator that automatically succeeds.

    :param dataset: Name of a WOUDC data type.
    :param reporter: `ReportWriter` instance for error handling and logging.
    :returns: Validator class targetted to that data type.
    """

    # Datasets with checks of their own map to a dedicated validator class;
    # every other recognized dataset falls back to the no-op base validator.
    special_validators = {
        'TotalOzone': TotalOzoneValidator,
        'TotalOzoneObs': TotalOzoneObsValidator,
        'Spectral': SpectralValidator,
        'UmkehrN14': UmkehrValidator,
        'Lidar': LidarValidator,
    }

    if dataset in special_validators:
        return special_validators[dataset](reporter)
    if dataset in DATASETS:
        return DatasetValidator(reporter)
    raise ValueError('Invalid dataset {}'.format(dataset))
class DatasetValidator(object):
    """
    Superclass for Extended CSV validators of dataset-specific tables.

    Contains no checks of its own, so all files successfully validate.
    Is directly useful (without subclassing) for datasets that have no
    errors tied to their tables, and so never have dataset-specific errors.
    """

    def __init__(self, reporter):
        self.reports = reporter
        self.errors = []
        self.warnings = []

    def _add_to_report(self, error_code, line=None, **kwargs):
        """
        Submit a warning or error of code <error_code> to the report
        generator, which was found at line <line> in the input file.
        Uses keyword arguments to detail the warning/error message.

        Returns False iff the error is serious enough to abort parsing.
        """
        message, severe = self.reports.add_message(error_code, line, **kwargs)

        # Severe messages count as errors; everything else is a warning.
        logger_call = LOGGER.error if severe else LOGGER.warning
        bucket = self.errors if severe else self.warnings
        logger_call(message)
        bucket.append(message)

        return not severe

    def check_all(self, extcsv):
        """
        Assess any dataset-specific tables inside <extcsv> for errors.
        Returns True iff no errors were encountered.

        :param extcsv: A parsed Extended CSV file of the appropriate dataset.
        :returns: `bool` of whether the file's dataset-specific tables
                  are error-free.
        """

        # The base validator defines no checks, so every file passes.
        return True
class TotalOzoneValidator(DatasetValidator):
    """
    Dataset-specific validator for TotalOzone files.
    """

    def __init__(self, reporter):
        super(TotalOzoneValidator, self).__init__(reporter)

    def check_all(self, extcsv):
        """
        Assess any dataset-specific tables inside <extcsv> for errors.
        Returns True iff no errors were encountered.

        TotalOzone errors include improper formatting and ordering of
        dates in the #DAILY, #MONTHLY, and both #TIMESTAMP tables, and
        inconsistencies between #MONTHLY and the data it is derived from.

        :param extcsv: A parsed Extended CSV file of TotalOzone data.
        :returns: `bool` of whether the file's dataset-specific tables
                  are error-free.
        """
        LOGGER.info('Beginning TotalOzone-specific checks')
        # check_time_series runs first: the later checks read #DAILY after
        # it has repaired duplicates and ordering.
        time_series_ok = self.check_time_series(extcsv)
        timestamps_ok = self.check_timestamps(extcsv)
        monthly_ok = self.check_monthly(extcsv)
        LOGGER.info('TotalOzone-specific checks complete')
        return all([timestamps_ok, time_series_ok, monthly_ok])

    def check_time_series(self, extcsv):
        """
        Assess the ordering of Dates in the #DAILY table in <extcsv>.
        Returns True iff no errors were found.

        :param extcsv: A parsed Extended CSV file of TotalOzone data.
        :returns: `bool` of whether the ordering of #DAILY Dates is error-free.
        """
        LOGGER.debug('Assessing order of #DAILY.Date column')
        success = True
        timestamp1_date = extcsv.extcsv['TIMESTAMP']['Date']
        # Data rows begin two lines below the #DAILY table marker.
        daily_startline = extcsv.line_num('DAILY') + 2
        dates_encountered = {}
        rows_to_remove = []
        # Transpose the column-oriented table into an iterator of rows.
        daily_columns = zip(*extcsv.extcsv['DAILY'].values())
        is_string = False
        in_order = True
        prev_date = None
        for index, row in enumerate(daily_columns):
            line_num = daily_startline + index
            daily_date = row[0]
            # Error 103: #DAILY date outside the #TIMESTAMP year.
            if daily_date.year != timestamp1_date.year:
                if not self._add_to_report(103, line_num):
                    success = False
            if prev_date and daily_date < prev_date:
                in_order = False
            prev_date = daily_date
            if daily_date not in dates_encountered:
                # Remember the first row seen for each date; used below when
                # rebuilding the table in sorted order.
                dates_encountered[daily_date] = row
            elif row == dates_encountered[daily_date]:
                # Error 104: exact duplicate row; schedule it for removal.
                if not self._add_to_report(104, line_num, date=daily_date):
                    success = False
                rows_to_remove.append(index)
            # Error 105: repeated date with different data; row is kept.
            elif not self._add_to_report(105, line_num, date=daily_date):
                success = False
        # Pop from the end first so earlier indexes remain valid.
        rows_to_remove.reverse()
        # If any Date cell failed to parse (still str/int), comparisons and
        # sorting are meaningless, so report 102 and skip the repair below.
        dateList = extcsv.extcsv['DAILY']['Date']
        for date in dateList:
            if isinstance(date, (str, int)):
                is_string = True
                if not self._add_to_report(102, daily_startline):
                    success = False
                break
        if not is_string:
            # Drop the duplicate rows from every column of #DAILY.
            for index in rows_to_remove:
                for column in extcsv.extcsv['DAILY'].values():
                    column.pop(index)
            if not in_order:
                # Error 102: dates out of order; repair by rebuilding every
                # column from the first row recorded for each date, sorted.
                if not self._add_to_report(102, daily_startline):
                    success = False
                sorted_dates = sorted(extcsv.extcsv['DAILY']['Date'])
                sorted_daily = [dates_encountered[date_]
                                for date_ in sorted_dates]
                for field_num, field in \
                        enumerate(extcsv.extcsv['DAILY'].keys()):
                    column = list(map(lambda row: row[field_num],
                                      sorted_daily))
                    extcsv.extcsv['DAILY'][field] = column
        return success

    def check_timestamps(self, extcsv):
        """
        Assess the two required #TIMESTAMP tables in <extcsv> for errors
        and inconsistencies. Returns True iff no errors were found.

        :param extcsv: A parsed Extended CSV file of TotalOzone data.
        :returns: `bool` of whether the two #TIMESTAMP tables are error-free.
        """
        LOGGER.debug('Assessing #TIMESTAMP tables for similarity')
        success = True
        timestamp1_date = extcsv.extcsv['TIMESTAMP']['Date']
        timestamp1_time = extcsv.extcsv['TIMESTAMP'].get('Time', None)
        daily_dates = extcsv.extcsv['DAILY']['Date']
        timestamp1_startline = extcsv.line_num('TIMESTAMP')
        timestamp1_valueline = timestamp1_startline + 2
        # Error 106: first #TIMESTAMP date must match the first #DAILY date;
        # repaired in place.
        if timestamp1_date != daily_dates[0]:
            if not self._add_to_report(106, timestamp1_valueline):
                success = False
            extcsv.extcsv['TIMESTAMP']['Date'] = daily_dates[0]
        timestamp_count = extcsv.table_count('TIMESTAMP')
        if timestamp_count == 1:
            # Error 109: second #TIMESTAMP table missing; synthesize it from
            # the first table and the final #DAILY date.
            if not self._add_to_report(109):
                success = False
            utcoffset = extcsv.extcsv['TIMESTAMP']['UTCOffset']
            final_date = daily_dates[-1]
            timestamp2 = OrderedDict([
                ('UTCOffset', utcoffset),
                ('Date', final_date),
                ('Time', timestamp1_time)
            ])
            extcsv.extcsv['TIMESTAMP_2'] = timestamp2
        timestamp2_date = extcsv.extcsv['TIMESTAMP_2']['Date']
        timestamp2_time = extcsv.extcsv['TIMESTAMP_2']['Time']
        timestamp2_startline = extcsv.line_num('TIMESTAMP_2')
        # Startline is None when TIMESTAMP_2 was synthesized above.
        timestamp2_valueline = None if timestamp2_startline is None \
            else timestamp2_startline + 2
        # Error 107: second #TIMESTAMP date must match the last #DAILY date;
        # repaired in place.
        if timestamp2_date != daily_dates[-1]:
            if not self._add_to_report(107, timestamp2_valueline):
                success = False
            extcsv.extcsv['TIMESTAMP_2']['Date'] = daily_dates[-1]
        # Error 90: the two #TIMESTAMP Times disagree.
        if timestamp2_time != timestamp1_time:
            if not self._add_to_report(90, timestamp2_valueline):
                success = False
        if timestamp_count > 2:
            # Error 108: extra #TIMESTAMP tables; all beyond the second are
            # removed from the file.
            timestamp3_startline = extcsv.line_num('TIMESTAMP_3')
            if not self._add_to_report(108, timestamp3_startline):
                success = False
            for ind in range(3, timestamp_count + 1):
                table_name = 'TIMESTAMP_' + str(ind)
                extcsv.remove_table(table_name)
        return success

    def check_monthly(self, extcsv):
        """
        Assess the correctness of the #MONTHLY table in <extcsv> in
        comparison with #DAILY. Returns True iff no errors were found.

        :param extcsv: A parsed Extended CSV file of TotalOzone data.
        :returns: `bool` of whether the #MONTHLY table is error-free.
        """
        LOGGER.debug('Assessing correctness of #MONTHLY table')
        success = True
        try:
            template_monthly = self.derive_monthly_from_daily(extcsv)
        except Exception as err:
            # Derivation already reported its own error code; treat the
            # whole check as failed.
            LOGGER.error(err)
            return False
        if 'MONTHLY' not in extcsv.extcsv:
            # Error 110: #MONTHLY table is missing entirely.
            if not self._add_to_report(110):
                success = False
        else:
            present_monthly = extcsv.extcsv['MONTHLY']
            monthly_startline = extcsv.line_num('MONTHLY')
            monthly_valueline = monthly_startline + 2
            for field, derived_val in template_monthly.items():
                # Error 111: field missing; error 112: value disagrees with
                # the statistics derived from #DAILY.
                if field not in present_monthly:
                    if not self._add_to_report(111, monthly_valueline,
                                               field=field):
                        success = False
                elif present_monthly[field] != template_monthly[field]:
                    if not self._add_to_report(112, monthly_valueline,
                                               field=field):
                        success = False
        # Always replace #MONTHLY with the freshly derived values.
        extcsv.extcsv['MONTHLY'] = template_monthly
        return success

    def derive_monthly_from_daily(self, extcsv):
        """
        Attempts to make a #MONTHLY table from the data found in #DAILY,
        and returns it as an OrderedDict if successful.

        If an error is encountered it is reported to the processing logs
        before an exception is raised.

        :param extcsv: A parsed Extended CSV file of TotalOzone data.
        :returns: An OrderedDict representing the derived #MONTHLY table.
        """
        LOGGER.debug('Regenerating #MONTHLY table from data')
        dates_column = extcsv.extcsv['DAILY']['Date']
        ozone_column = extcsv.extcsv['DAILY'].get('ColumnO3', None)
        daily_fieldline = extcsv.line_num('DAILY') + 1
        daily_valueline = daily_fieldline + 1
        # Error 113: #DAILY.ColumnO3 column is absent.
        if not ozone_column:
            self._add_to_report(113, daily_fieldline)
            msg = 'Cannot derive #MONTHLY table: #DAILY.ColumnO3 missing'
            raise Exception(msg)
        # Drop empty/zero cells before computing statistics.
        ozone_column = list(filter(bool, ozone_column))
        # Error 101: the column exists but holds no usable values.
        if len(ozone_column) == 0:
            self._add_to_report(101, daily_valueline)
            msg = 'Cannot derive #MONTHLY table: no ozone data in #DAILY'
            raise Exception(msg)
        first_date = dates_column[0]
        mean_ozone = round(statistics.mean(ozone_column), 1)
        # stdev needs at least two points; a single observation gets 0.
        stddev_ozone = 0 if len(ozone_column) == 1 \
            else round(statistics.stdev(ozone_column), 1)
        ozone_npts = len(ozone_column)
        monthly = OrderedDict([
            ('Date', first_date),
            ('ColumnO3', mean_ozone),
            ('StdDevO3', stddev_ozone),
            ('Npts', ozone_npts)
        ])
        return monthly
class TotalOzoneObsValidator(DatasetValidator):
    """
    Dataset-specific validator for TotalOzoneObs files.
    """

    def __init__(self, reporter):
        super(TotalOzoneObsValidator, self).__init__(reporter)

    def check_all(self, extcsv):
        """
        Assess any dataset-specific tables inside <extcsv> for errors.
        Returns True iff no errors were encountered.

        TotalOzoneObs errors include improper ordering of times in the
        #OBSERVATIONS tables.

        :param extcsv: A parsed Extended CSV file of TotalOzoneObs data.
        :returns: `bool` of whether the file's dataset-specific tables
                  are error-free.
        """
        LOGGER.info('Beginning TotalOzoneObs-specific checks')
        time_series_ok = self.check_time_series(extcsv)
        LOGGER.info('TotalOzoneObs-specific checks complete')
        return time_series_ok

    def check_time_series(self, extcsv):
        """
        Assess the ordering of Times in the #OBSERVATIONS table in <extcsv>.
        Returns True iff no errors were found.

        :param extcsv: A parsed Extended CSV file of TotalOzoneObs data.
        :returns: `bool` of whether the ordering of #OBSERVATIONS.Times
                  is error-free.
        """
        LOGGER.debug('Assessing order of #OBSERVATIONS.Time column')
        success = True
        # Transpose the column-oriented table into an iterator of rows.
        observations = zip(*extcsv.extcsv['OBSERVATIONS'].values())
        # Data rows begin two lines below the #OBSERVATIONS table marker.
        observations_valueline = extcsv.line_num('OBSERVATIONS') + 2
        times_encountered = {}
        rows_to_remove = []
        in_order = True
        prev_time = None
        for index, row in enumerate(observations):
            line_num = observations_valueline + index
            time = row[0]
            # An unparsed Time (str/int) or None cannot be ordered: skip the
            # comparison when the previous value is such, and abort the whole
            # check when the current value is (the table cannot be repaired).
            if isinstance(prev_time, (str, int, type(None))):
                pass
            elif isinstance(time, (str, int, type(None))):
                success = False
                return success
            else:
                if prev_time and time < prev_time:
                    in_order = False
            prev_time = time
            if time not in times_encountered:
                # Remember the first row seen for each time; used below when
                # rebuilding the table in sorted order.
                times_encountered[time] = row
            elif row == times_encountered[time]:
                # Error 115: exact duplicate row; schedule it for removal.
                if not self._add_to_report(115, line_num, time=time):
                    success = False
                rows_to_remove.append(index)
            # Error 116: repeated time with different data; row is kept.
            elif not self._add_to_report(116, line_num, time=time):
                success = False
        # Pop from the end first so earlier indexes remain valid.
        rows_to_remove.reverse()
        for index in rows_to_remove:
            for column in extcsv.extcsv['OBSERVATIONS'].values():
                column.pop(index)
        if not in_order:
            # Error 114: times out of order; repair by rebuilding every
            # column from the first row recorded for each time, sorted.
            if not self._add_to_report(114, observations_valueline):
                success = False
            sorted_times = sorted(extcsv.extcsv['OBSERVATIONS']['Time'])
            sorted_rows = [times_encountered[time] for time in sorted_times]
            for field_num, field in enumerate(extcsv.extcsv['OBSERVATIONS']):
                column = list(map(lambda row: row[field_num], sorted_rows))
                extcsv.extcsv['OBSERVATIONS'][field] = column
        return success
class SpectralValidator(DatasetValidator):
    """
    Dataset-specific validator for Spectral files.
    """
    def __init__(self, reporter):
        # Reporter wiring is handled by the DatasetValidator base class.
        super(SpectralValidator, self).__init__(reporter)
    def check_all(self, extcsv):
        """
        Assess any dataset-specific tables inside <extcsv> for errors.
        Returns True iff no errors were encountered.
        Spectral errors include incorrect groupings of #TIMESTAMP, #GLOBAL,
        and #GLOBAL_SUMMARY tables such that the counts of each differ.
        :param extcsv: A parsed Extended CSV file of Spectral data.
        :returns: `bool` of whether the file's dataset-specific tables
        are error-free.
        """
        LOGGER.info('Beginning Spectral-specific checks')
        result = self.check_groupings(extcsv)
        LOGGER.info('Spectral-specific checks complete')
        return result
    def check_groupings(self, extcsv):
        """
        Assess the numbers of #TIMESTAMP, #GLOBAL, and #GLOBAL_SUMMARY
        tables in the input file <extcsv>. Returns True iff no errors
        were found.
        :param extcsv: A parsed Extended CSV file of Spectral data.
        :returns: `bool` of whether the file is free of table grouping errors.
        """
        LOGGER.debug('Assessing #TIMESTAMP, #GLOBAL, #GLOBAL_SUMMARY'
                     ' table counts')
        # NSF-style files use the #GLOBAL_SUMMARY_NSF variant instead.
        if 'GLOBAL_SUMMARY_NSF' in extcsv.extcsv:
            summary_table = 'GLOBAL_SUMMARY_NSF'
        else:
            summary_table = 'GLOBAL_SUMMARY'
        counts = [
            extcsv.table_count('TIMESTAMP'),
            extcsv.table_count('GLOBAL'),
            extcsv.table_count(summary_table),
        ]
        # All three table counts must agree for a well-grouped file.
        if len(set(counts)) != 1:
            if not self._add_to_report(126, summary_table=summary_table):
                return False
        return True
class LidarValidator(DatasetValidator):
    """
    Dataset-specific validator for Lidar files.
    """
    def __init__(self, reporter):
        # Reporter wiring is handled by the DatasetValidator base class.
        super(LidarValidator, self).__init__(reporter)
    def check_all(self, extcsv):
        """
        Assess any dataset-specific tables inside <extcsv> for errors.
        Returns True iff no errors were encountered.
        Lidar errors include incorrect groupings of #OZONE_PROFILE and
        #OZONE_SUMMARY tables such that the counts of each are different.
        :param extcsv: A parsed Extended CSV file of Lidar data.
        :returns: `bool` of whether the file's dataset-specific tables
        are error-free.
        """
        LOGGER.info('Beginning Lidar-specific checks')
        groupings_ok = self.check_groupings(extcsv)
        LOGGER.info('Lidar-specific checks complete')
        return groupings_ok
    def check_groupings(self, extcsv):
        """
        Assess the numbers of #OZONE_PROFILE and #OZONE_SUMMARY tables
        in the input file <extcsv>. Returns True iff no errors were found.
        :param extcsv: A parsed Extended CSV file of Lidar data.
        :returns: `bool` of whether the file is free of table grouping errors.
        """
        # BUG FIX: log message previously named #GLOBAL_SUMMARY, but this
        # check compares #OZONE_SUMMARY counts (see below and docstring).
        LOGGER.debug('Assessing #OZONE_PROFILE, #OZONE_SUMMARY table counts')
        success = True
        profile_count = extcsv.table_count('OZONE_PROFILE')
        summary_count = extcsv.table_count('OZONE_SUMMARY')
        # Each profile table must be paired with exactly one summary table.
        if profile_count != summary_count:
            if not self._add_to_report(125):
                success = False
        return success
class UmkehrValidator(DatasetValidator):
    """
    Dataset-specific validator for Umkehr files.
    """
    def __init__(self, reporter):
        # Delegate reporter wiring to the shared DatasetValidator base.
        super(UmkehrValidator, self).__init__(reporter)
    def check_all(self, extcsv):
        """
        Assess any dataset-specific tables inside <extcsv> for errors.
        Returns True iff no errors were encountered.
        Umkehr errors include inconsistencies between the two #TIMESTAMP
        tables and improper ordering of dates within their data tables.
        :param extcsv: A parsed Extended CSV file of Umkehr data.
        :returns: `bool` of whether the file's dataset-specific tables
        are error-free.
        """
        LOGGER.info('Beginning Umkehr-specific checks')
        time_series_ok = self.check_time_series(extcsv)
        timestamps_ok = self.check_timestamps(extcsv)
        LOGGER.info('Umkehr-specific checks complete')
        return timestamps_ok and time_series_ok
    def check_time_series(self, extcsv):
        """
        Assess the ordering of dates in the data table (#N14_VALUES or
        #C_PROFILE) in <extcsv>. Returns True iff no errors were found.
        Side effects: exact-duplicate rows are removed in place, and
        (when out of order) every column is rebuilt in Date-sorted order.
        :param extcsv: A parsed Extended CSV file of Umkehr data.
        :returns: `bool` of whether the ordering of observation dates
        is error-free.
        """
        # Level 1.0 files carry data in #N14_VALUES; anything else is
        # treated as level 2 (#C_PROFILE).
        level = extcsv.extcsv['CONTENT']['Level']
        data_table = 'N14_VALUES' if level == 1.0 else 'C_PROFILE'
        LOGGER.debug('Assessing order of #{}.Date column'.format(data_table))
        success = True
        data_table_valueline = extcsv.line_num(data_table) + 2
        dates_encountered = {}
        rows_to_remove = []
        # Transpose the column-oriented table into row tuples; the Date
        # value is the first field of each row.
        columns = zip(*extcsv.extcsv[data_table].values())
        in_order = True
        prev_date = None
        for index, row in enumerate(columns):
            line_num = data_table_valueline + index
            observation_date = row[0]
            if prev_date and observation_date < prev_date:
                in_order = False
            prev_date = observation_date
            if observation_date not in dates_encountered:
                dates_encountered[observation_date] = row
            elif row == dates_encountered[observation_date]:
                # Exact duplicate of an earlier row: report (119) and
                # schedule the row for removal.
                if not self._add_to_report(119, line_num, table=data_table,
                                           date=observation_date):
                    success = False
                rows_to_remove.append(index)
            elif not self._add_to_report(120, line_num, table=data_table,
                                         date=observation_date):
                # Same Date but conflicting data: report (120), keep row.
                success = False
        # Pop duplicates back-to-front so earlier indexes stay valid.
        rows_to_remove.reverse()
        for index in rows_to_remove:
            for column in extcsv.extcsv[data_table].values():
                column.pop(index)
        if not in_order:
            if not self._add_to_report(118, data_table_valueline,
                                       table=data_table):
                success = False
            # Rebuild every column in Date-sorted order, using the row
            # recorded for each date value.
            sorted_dates = sorted(extcsv.extcsv[data_table]['Date'])
            sorted_rows = [dates_encountered[date_] for date_ in sorted_dates]
            for fieldnum, field in enumerate(extcsv.extcsv[data_table].keys()):
                column = list(map(lambda row: row[fieldnum], sorted_rows))
                extcsv.extcsv[data_table][field] = column
        return success
    def check_timestamps(self, extcsv):
        """
        Assess the two required #TIMESTAMP tables in <extcsv> for errors
        and inconsistencies. Returns True iff no errors were found.
        Side effects: mismatched dates are repaired in place, a missing
        second #TIMESTAMP is synthesized, and extra #TIMESTAMP tables
        (third onwards) are removed.
        :param extcsv: A parsed Extended CSV file of Umkehr data.
        :returns: `bool` of whether the two #TIMESTAMP tables are error-free.
        """
        LOGGER.debug('Assessing #TIMESTAMP tables for similarity')
        success = True
        level = extcsv.extcsv['CONTENT']['Level']
        data_table = 'N14_VALUES' if level == 1.0 else 'C_PROFILE'
        timestamp1_date = extcsv.extcsv['TIMESTAMP']['Date']
        timestamp1_time = extcsv.extcsv['TIMESTAMP'].get('Time', None)
        observation_dates = extcsv.extcsv[data_table]['Date']
        timestamp1_startline = extcsv.line_num('TIMESTAMP')
        timestamp1_valueline = timestamp1_startline + 2
        # The first #TIMESTAMP.Date must equal the first observation date;
        # repair it in place when it does not.
        if timestamp1_date != observation_dates[0]:
            if not self._add_to_report(121, timestamp1_valueline,
                                       table=data_table):
                success = False
            extcsv.extcsv['TIMESTAMP']['Date'] = observation_dates[0]
        timestamp_count = extcsv.table_count('TIMESTAMP')
        if timestamp_count == 1:
            # Trailing #TIMESTAMP missing: synthesize it from the first
            # table's UTCOffset/Time plus the final observation date.
            if not self._add_to_report(123, table=data_table):
                success = False
            utcoffset = extcsv.extcsv['TIMESTAMP']['UTCOffset']
            final_date = observation_dates[-1]
            timestamp2 = OrderedDict([
                ('UTCOffset', utcoffset),
                ('Date', final_date),
                ('Time', timestamp1_time)
            ])
            extcsv.extcsv['TIMESTAMP_2'] = timestamp2
        timestamp2_date = extcsv.extcsv['TIMESTAMP_2']['Date']
        timestamp2_time = extcsv.extcsv['TIMESTAMP_2']['Time']
        timestamp2_startline = extcsv.line_num('TIMESTAMP_2')
        # A synthesized table has no source line, hence the None guard.
        timestamp2_valueline = None if timestamp2_startline is None \
            else timestamp2_startline + 2
        if timestamp2_date != observation_dates[-1]:
            if not self._add_to_report(122, timestamp2_valueline,
                                       table=data_table):
                success = False
            extcsv.extcsv['TIMESTAMP_2']['Date'] = observation_dates[-1]
        if timestamp2_time != timestamp1_time:
            if not self._add_to_report(90, timestamp2_valueline):
                success = False
        if timestamp_count > 2:
            # More than two #TIMESTAMP tables: report once (108), then
            # drop every table from the third onward.
            timestamp3_startline = extcsv.line_num('TIMESTAMP_3')
            if not self._add_to_report(108, timestamp3_startline):
                success = False
            for ind in range(3, timestamp_count + 1):
                table_name = 'TIMESTAMP_' + str(ind)
                extcsv.remove_table(table_name)
        return success
|
from __future__ import print_function, division, absolute_import
from copy import copy
import numpy as np
import scipy.optimize
import regreg.api as rr
from regreg.tests.decorators import set_seed_for_test
@set_seed_for_test()
def test_l1prox():
    """
    Verify that the l1 prox in Lagrange form can be solved by a
    primal/dual specification.

    Obviously we would not solve the l1 prox this way in practice; the
    point is to confirm the specification machinery works correctly.
    """
    penalty = rr.l1norm(4, lagrange=0.3)
    w = np.random.standard_normal(4) * 3
    # Reference answer straight from the proximal map.
    expected = penalty.proximal(rr.identity_quadratic(0.5, w, 0, 0))

    # Route 1: solve the same prox as a nonsmooth simple_problem.
    pen_copy = copy(penalty)
    pen_copy.quadratic = rr.identity_quadratic(0.5, w, None, 0.)
    problem = rr.simple_problem.nonsmooth(pen_copy)
    rr.FISTA(problem).fit(tol=1.e-10, min_its=100)
    simple_soln = problem.coefs

    # Route 2: solve via the dual problem.
    pen_copy = copy(penalty)
    pen_copy.quadratic = rr.identity_quadratic(0.5, w, None, 0.)
    problem = rr.dual_problem.fromprimal(pen_copy)
    # Exercise extra API surface purely for coverage.
    problem.latexify()
    problem.quadratic
    problem.solve(return_optimum=True)
    rr.FISTA(problem).fit(tol=1.0e-14, min_its=100)
    dual_soln = problem.primal

    np.testing.assert_allclose(dual_soln + 0.1, expected + 0.1, rtol=1.e-4)
    np.testing.assert_allclose(dual_soln + 0.1, simple_soln + 0.1, rtol=1.e-4)
@set_seed_for_test()
def test_l1prox_bound():
    """
    Verify that the l1 prox in bound form can be solved by a
    primal/dual specification.

    Obviously we would not solve the l1 prox this way in practice; the
    point is to confirm the specification machinery works correctly.
    """
    penalty = rr.l1norm(4, bound=2.)
    w = np.random.standard_normal(4) * 2
    # Reference answer straight from the proximal map.
    expected = penalty.proximal(rr.identity_quadratic(0.5, w, 0, 0))

    # Route 1: nonsmooth simple_problem formulation.
    pen_copy = copy(penalty)
    pen_copy.quadratic = rr.identity_quadratic(0.5, w, None, 0.)
    problem = rr.simple_problem.nonsmooth(pen_copy)
    rr.FISTA(problem).fit(min_its=100)

    # Route 2: dual-problem formulation.
    pen_copy = copy(penalty)
    pen_copy.quadratic = rr.identity_quadratic(0.5, w, None, 0.)
    problem = rr.dual_problem.fromprimal(pen_copy)
    rr.FISTA(problem).fit(min_its=100)
    dual_soln = problem.primal

    np.testing.assert_allclose(dual_soln + 0.1, expected + 0.1, rtol=1.e-4)
@set_seed_for_test()
def test_lasso():
    """
    Verify that a lasso problem posed as a separable_problem is solved
    correctly, cross-checking FISTA against a generic scipy optimizer.
    """
    penalty = rr.l1norm(4, lagrange=2.)
    penalty.quadratic = rr.identity_quadratic(0.5, 0, None, 0.)
    X = np.random.standard_normal((10, 4))
    Y = np.random.standard_normal(10) + 3
    # Squared-error loss 0.5 * ||X beta - Y||^2 via an affine transform.
    loss = rr.quadratic.affine(X, -Y, coef=0.5)
    problem = rr.separable_problem.singleton(penalty, loss)
    solver = rr.FISTA(problem)
    solver.fit(tol=1.0e-14, min_its=100)
    objective = problem.objective
    # Independent reference solution from a derivative-free optimizer.
    reference = scipy.optimize.fmin_powell(
        objective, np.zeros(4), ftol=1.0e-12, xtol=1.e-10)
    print(objective(solver.composite.coefs), objective(reference))
    np.testing.assert_allclose(
        reference + 0.1, solver.composite.coefs + 0.1, rtol=1.e-3)
|
<reponame>justincredble/Circulation
# -*- coding:utf-8 -*-
from app import db
from app.models import User, Library, Log, Permission
from flask import render_template, url_for, flash, redirect, request, abort
from flask.ext.login import login_required, current_user
from . import library
from .forms import SearchForm, EditLibraryForm, AddLibraryForm
from ..decorators import admin_required, permission_required
@library.route('/')
@login_required
def index():
    """Render a paginated, newest-first list of libraries."""
    # NOTE(review): the 'search' query parameter is read but never applied
    # to the query below; only the empty form is rendered -- confirm intent.
    search_word = request.args.get('search', None)
    search_form = SearchForm()
    page = request.args.get('page', 1, type=int)
    pagination = (Library.query
                  .order_by(Library.id.desc())
                  .paginate(page, per_page=8))
    return render_template("library.html",
                           libraries=pagination.items,
                           pagination=pagination,
                           search_form=search_form,
                           title=u"List of Libraries")
@library.route('/<int:library_id>/')
def detail(library_id):
    """Show one library plus a page of its borrow/return logs."""
    the_library = Library.query.get_or_404(library_id)
    # Normalize ?show= to 0 (outstanding loans) or 1 (returned loans).
    show = 1 if request.args.get('show', 0, type=int) != 0 else 0
    page = request.args.get('page', 1, type=int)
    pagination = (the_library.logs.filter_by(returned=show)
                  .order_by(Log.borrow_timestamp.desc())
                  .paginate(page, per_page=5))
    return render_template("library_detail.html", library=the_library,
                           logs=pagination.items, pagination=pagination,
                           title=u"Library Name: " + the_library.name)
@library.route('/<int:library_id>/edit/', methods=['GET', 'POST'])
def edit(library_id):
    """Edit a library's name, address and public flag."""
    # Local renamed from `library`, which shadowed the blueprint of the
    # same name inside this function.
    the_library = Library.query.get_or_404(library_id)
    form = EditLibraryForm()
    if form.validate_on_submit():
        the_library.name = form.name.data
        the_library.address = form.address.data
        the_library.public = form.public.data
        db.session.add(the_library)
        db.session.commit()
        flash(u'Library information saved.', 'success')
        return redirect(url_for('library.detail', library_id=library_id))
    # GET (or failed validation): pre-fill the form with current values.
    form.name.data = the_library.name
    form.address.data = the_library.address
    form.public.data = the_library.public
    return render_template("library_edit.html", form=form, library=the_library, title=u"Edit Library Information")
@library.route('/add/', methods=['GET', 'POST'])
def add():
    """Create a new library owned by the current user.

    On a valid POST, persists the record and redirects to its detail
    page; otherwise re-renders the (empty or invalid) form.
    """
    form = AddLibraryForm()
    if form.validate_on_submit():
        new_library = Library(
            name=form.name.data,
            address=form.address.data,
            public=form.public.data,
            user_id=current_user.id)
        db.session.add(new_library)
        db.session.commit()
        # BUG FIX: flash message previously misspelled "sucessfully".
        flash(u'%s successfully added' % new_library.name, 'success')
        return redirect(url_for('library.detail', library_id=new_library.id))
    return render_template("library_edit.html", form=form, title=u"Add New Library")
@library.route('/<int:library_id>/delete/')
@permission_required(Permission.DELETE_LIBRARY)
def delete(library_id):
    """Soft-delete a library by setting its `hidden` flag.

    The record is never removed from the database; `put_back` can
    restore it.
    """
    the_library = Library.query.get_or_404(library_id)
    the_library.hidden = 1
    db.session.add(the_library)
    db.session.commit()
    # BUG FIX: the flash message was truncated ('Library record.').
    flash(u'Library record deleted.', 'info')
    return redirect(request.args.get('next') or url_for('library.detail', library_id=library_id))
@library.route('/<int:library_id>/put_back/')
@admin_required
def put_back(library_id):
    """Restore a soft-deleted library by clearing its `hidden` flag."""
    the_library = Library.query.get_or_404(library_id)
    the_library.hidden = 0
    db.session.add(the_library)
    db.session.commit()
    flash(u'Library recovered', 'info')
    return redirect(request.args.get('next') or url_for('library.detail', library_id=library_id))
|
<filename>objects.py
import random
import skills
class Object:
    ''' Base map entity (color = 1...6). '''
    def __init__(self, y, x, c, maxyx):
        """Initialize a map object at (y, x) drawn with character c.

        maxyx is the (max_y, max_x) bound of the level.  The reaction
        table maps a disposition key (-1 hostile, 0 neutral, 1 friendly)
        to one of "fight"/"flee"/"ignore"/"like", rolled at random.
        """
        self.name = c
        self.maxyx = maxyx  # y,x
        self.x = x
        self.y = y
        self.direction = [0, 0]
        self.blocking = False
        self.char = c
        self.color = 0
        self.alive = True
        self.action = ""
        self.fight = skills.Skill(3)
        self.max_hp = 10
        self.hp = 10
        self.sens = random.randint(5, 12)
        self.turns = 0
        self.reaction = {-1: None,
                         0: None,
                         1: None}
        self.act = ["fight", "flee", "ignore", "like"]
        for key in self.reaction:
            roll = random.randint(0, 10)
            if roll < 5:
                self.reaction[key] = self.act[0]    # fight (0-4)
            elif roll < 7:
                self.reaction[key] = self.act[1]    # flee (5-6)
            elif roll < 9:
                self.reaction[key] = self.act[2]    # ignore (7-8)
            else:
                self.reaction[key] = self.act[3]    # like (9-10)
        self.state = self.reaction[0]
    def position(self):
        ''' Return the current position as [y, x]. '''
        return [self.y, self.x]
class Character(Object):
    ''' character class '''
    def move(self, level):
        ''' moves the instance and checks for blocking (bool)
        returns old_y and old_x '''
        self.turns += 1
        # Passive regeneration: every 5th turn recover 0.1 hp (capped at
        # max_hp).  Note this makes hp a float after the first tick.
        if self.turns % 5 == 0:
            self.hp = min(self.max_hp, self.hp + 0.1)
        self.old_x = self.x
        self.old_y = self.y
        # Clamp the requested step to the level bounds (maxyx is (y, x)).
        newx = self.x
        newy = self.y
        newx = min(self.maxyx[1], newx + self.direction[1])
        newx = max(0, newx)
        newy = min(self.maxyx[0], newy + self.direction[0])
        newy = max(0, newy)
        foo = level.blocking(newy, newx)
        if foo != None and foo != self:
            # Destination occupied: stay put and attack the occupant.
            self.x = self.old_x
            self.y = self.old_y
            # roll() returns a (hit_flag, damage) pair.
            a, b = self.fight.roll(foo.fight.limit)
            # Being attacked flips the target to its hostile reaction.
            foo.state = foo.reaction[-1]
            if a:
                foo.hp -= b
                self.action = "You hit " + foo.name
                if foo.hp < 1:
                    foo.die(level)
                    foo.alive = False
            else:
                self.action = "missed..."
        else:
            self.x = newx
            self.y = newy
            self.action = ""
    def talk(self, level):
        # Calm nearby characters by switching them to their friendly
        # (key 1) reaction.
        # NOTE(review): range(-3, 3) excludes +3, so the scanned window
        # is asymmetric (-3..2 in both axes) -- confirm this is intended.
        self.action = "@ talks with calming voice"
        for y in range(-3, 3, 1):
            for x in range(-3, 3, 1):
                newx = self.x
                newy = self.y
                newx = min(self.maxyx[1], newx + x)
                newx = max(0, newx)
                newy = min(self.maxyx[0], newy + y)
                newy = max(0, newy)
                foo = level.blocking(newy, newx)
                if foo != None and foo != self:
                    foo.state = foo.reaction[1]
    def die(self, level):
        # If this character dies close to the player (compared via the
        # sum of coordinates) while "like"-ing them, spawn an alien.
        q = level.wheres_waldo()
        p = q[0] + q[1]
        m = self.x + self.y
        if abs(p - m) < 2 and self.state == "like":
            a = SausageMonster(self.y, self.x, "*", self.maxyx)
            a.hp = 1
            a.name = "Sausage monster"
            a.action = "Alien rises from the death!"
            a.color = 5
            level.characters.append(a)
class Monster(Character):
    ''' NPC-class '''
    def follow(self, level):
        """Chase hostiles first, then the player; heal the player when
        adjacent (at the cost of this monster's own hp)."""
        player = level.wheres_waldo()
        p1 = level.characters[0]
        foo = level.hostiles(self.y, self.x)
        if foo != None and abs(foo.y - self.y) < self.sens and abs(foo.x - self.x) < self.sens:
            # A hostile is inside sensing range: step towards it.
            if foo.y < self.y:
                self.direction[0] = -1
            if foo.y > self.y:
                self.direction[0] = 1
            if foo.x < self.x:
                self.direction[1] = -1
            if foo.x > self.x:
                self.direction[1] = 1
        elif abs(player[0] - self.y) < self.sens and abs(player[1] - self.x) < self.sens:
            # Otherwise home in on the player.
            if player[0] < self.y:
                self.direction[0] = -1
            if player[0] > self.y:
                self.direction[0] = 1
            if player[1] < self.x:
                self.direction[1] = -1
            if player[1] > self.x:
                self.direction[1] = 1
            if abs(player[0] - self.y) < 3 and abs(player[1] - self.x) < 3:
                # Close enough to donate hp to the player.
                if p1.hp < p1.max_hp:
                    p1.hp = min(p1.max_hp, p1.hp + 1)
                    self.hp -= 2
                    if self.hp < 1:
                        self.die(level)
                        self.alive = False
                    self.action = self.name + " gives HP for you!"
    def attack(self, level):
        """Step towards the player while within sensing range."""
        player = level.wheres_waldo()
        if abs(player[0] - self.y) < self.sens and abs(player[1] - self.x) < self.sens:
            if player[0] < self.y:
                self.direction[0] = -1
            if player[0] > self.y:
                self.direction[0] = 1
            if player[1] < self.x:
                self.direction[1] = -1
            if player[1] > self.x:
                self.direction[1] = 1
    def flee(self, level):
        """Step away from the player; revert to the default reaction
        once out of sensing range."""
        player = level.wheres_waldo()
        if abs(player[0] - self.y) < self.sens and abs(player[1] - self.x) < self.sens:
            if player[0] < self.y:
                self.direction[0] = 1
            if player[0] > self.y:
                self.direction[0] = -1
            if player[1] < self.x:
                self.direction[1] = 1
            if player[1] > self.x:
                self.direction[1] = -1
        else:
            self.state = self.reaction[0]
            self.direction = [0, 0]
    def move(self, level):
        ''' moves the instance and checks for blocking (bool)
        with added AI, returns old_y and old_x '''
        # Pick a direction (and display color) according to mood.
        if self.state == "fight":
            self.attack(level)
            self.color = 1
        if self.state == "like":
            self.follow(level)
            self.color = 2
        if self.state == "flee":
            self.flee(level)
            self.color = 3
        if self.state == "ignore":
            self.direction[0] = random.randint(-1, 1)
            self.direction[1] = random.randint(-1, 1)
            self.color = 4
        self.old_x = self.x
        self.old_y = self.y
        # Clamp the requested step to the level bounds (maxyx is (y, x)).
        newx = self.x
        newy = self.y
        newx = min(self.maxyx[1], newx + self.direction[1])
        newx = max(0, newx)
        newy = min(self.maxyx[0], newy + self.direction[0])
        newy = max(0, newy)
        foo = level.blocking(newy, newx)
        if foo != None and foo != self:
            self.x = self.old_x
            self.y = self.old_y
            if self.state == "fight" and foo.name != "tile" and foo.char != "*":
                # BUG FIX: roll() returns a (hit, damage) tuple (see
                # Character.move), so `if self.fight.roll(...)` was always
                # truthy and monsters could never miss.  Unpack and test
                # the hit flag instead; damage stays the fixed 2 used here.
                hit, _ = self.fight.roll(foo.fight.limit)
                if hit:
                    foo.state = foo.reaction[-1]
                    foo.hp -= 2
                    self.action = self.name + " hits " + foo.name
                    if foo.hp < 1:
                        foo.die(level)
                        foo.alive = False
                        self.action = "killed"
                else:
                    self.action = self.name + " missed..."
            if self.state == "like" and foo.char != "@":
                # BUG FIX: same always-truthy tuple test as above.
                hit, _ = self.fight.roll(foo.fight.limit)
                if hit:
                    foo.hp -= 2
                    if foo.hp < 1:
                        foo.die(level)
                        foo.alive = False
                        self.action = "killed"
                else:
                    self.action = "missed..."
            if foo.name == "tile":
                self.hp -= 1
                self.action = self.name + " smashes to the wall"
        else:
            self.x = newx
            self.y = newy
class SausageMonster(Monster):
    ''' from a movie I saw... '''
    def die(self, level):
        """On death, split into two fresh sausage monsters."""
        first = SausageMonster(self.y, self.x, "*", self.maxyx)
        first.hp = 1
        first.name = "Sausage monster"
        first.action = "The sausage moster split into two new monsters"
        first.color = 5
        second = SausageMonster(self.y + 1, self.x + 1, "*", self.maxyx)
        second.hp = 1
        second.name = "Sausage monster"
        second.color = 5
        level.characters.append(first)
        level.characters.append(second)
    def move(self, level):
        """Drift one random step; bump into blockers without fighting."""
        self.direction[0] = random.randint(-1, 1)
        self.direction[1] = random.randint(-1, 1)
        self.old_x = self.x
        self.old_y = self.y
        # Clamp the step to the level bounds (maxyx is (y, x)).
        target_x = max(0, min(self.maxyx[1], self.x + self.direction[1]))
        target_y = max(0, min(self.maxyx[0], self.y + self.direction[0]))
        occupant = level.blocking(target_y, target_x)
        if occupant != None and occupant != self:
            self.x = self.old_x
            self.y = self.old_y
        else:
            self.x = target_x
            self.y = target_y
class Tile(Object):
    ''' tile class: a static map cell (wall, tree, water or floor) '''
    def __init__(self, y, x, type_of, maxyx):
        """Create a tile of the given type.

        type_of: 0 = wall '|' (blocking), 1 = tree 'Y' (blocking),
        2 = water '~' (passable), anything else = floor '#' (passable).
        """
        self.name = "tile"
        self.maxyx = maxyx  # y,x
        self.x = x
        self.y = y
        self.blocking = True
        self.color = 0
        self.alive = False
        self.fight = skills.Skill(100)
        self.hp = 100
        self.reaction = {-1: "ignore",
                         0: "ignore",
                         1: "ignore"}
        # BUG FIX: these were independent `if` statements, so the final
        # `else` (paired only with the type_of == 2 test) clobbered walls
        # (0) and trees (1) into non-blocking '#' floor tiles.  An
        # if/elif chain keeps each tile type distinct.
        if type_of == 0:
            self.char = "|"
            self.blocking = True
        elif type_of == 1:
            self.char = "Y"
            self.blocking = True
        elif type_of == 2:
            self.char = "~"
            self.blocking = False
        else:
            self.char = "#"
            self.blocking = False
class Item(Object):
    ''' item class '''
    def __init__(self, y, x, c, color, maxyx):
        """Lightweight pickup: drawn on the map, never blocks or acts."""
        self.maxyx = maxyx  # y,x
        self.alive = False
        self.x = x
        self.y = y
        self.direction = [0, 0]
        self.blocking = False
        self.char = c
        self.color = color
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Setup the data drive with raid, RAM, or mount network drives."""
from __future__ import print_function
import perfzero.utils as utils
import logging
def get_nvme_devices():
  """Returns list paths to nvme devices."""
  cmd = 'sudo lsblk'
  retcode, log = utils.run_command(cmd)
  if retcode:
    raise Exception('"{}" failed with code:{} and log:\n{}'.format(
        cmd, retcode, log))
  # lsblk prints one block device per line; keep only nvme entries.
  return [
      '/dev/' + line.split()[0].strip()
      for line in log.splitlines()
      if line.startswith('nvme')
  ]
def create_drive_from_devices(data_dir, devices):
  """Creates a drive at data_dir based on number of devices passed."""
  retcode, _ = utils.run_command('sudo mountpoint -q {}'.format(data_dir))
  if not retcode:
    # mountpoint exited 0: something is already mounted there.
    logging.info(
        'Skipping drive creation since path {} already exists'.format(data_dir))
    return
  if len(devices) > 1:
    create_drive_raid(data_dir, devices)
  else:
    create_single_drive(data_dir, devices[0])
def create_single_drive(data_dir, device):
  """Creates a data drive out of a single device."""
  # Format, mount and open up permissions in one shot.
  cmds = [
      'sudo mkfs.ext4 -F {}'.format(device),
      'sudo mkdir -p {}'.format(data_dir),
      'sudo mount {} {}'.format(device, data_dir),
      'sudo chmod a+w {}'.format(data_dir),
  ]
  utils.run_commands(cmds)
  logging.info('Created and mounted device {} at {}'.format(device, data_dir))
def create_drive_raid(data_dir, list_of_devices):
  """Creates a raid zero array of nvme drives."""
  # Passing 'yes' because GCE nvme drives are sometimes in an odd state and
  # think they are in another raid; mdadm has no -y option.  (Or the kokoro
  # images were left dirty and that is where the info comes from.)
  cmds = [
      'yes | sudo mdadm --create /dev/md0 --level=0 '
      '--raid-devices={} {}'.format(
          len(list_of_devices), ' '.join(list_of_devices)),
      'sudo mkfs.ext4 -F /dev/md0',
      'sudo mkdir -p {}'.format(data_dir),
      'sudo mount /dev/md0 {}'.format(data_dir),
      'sudo chmod a+w {}'.format(data_dir),
  ]
  utils.run_commands(cmds)
  logging.info('Created and mounted RAID array at {}'.format(data_dir))
def create_ram_disk(data_dir, disk_size):
  """Create a RAM disk."""
  retcode, _ = utils.run_command('sudo mountpoint -q {}'.format(data_dir))
  if not retcode:
    # mountpoint exited 0: the path is already a mount point.
    logging.debug(
        'RAM disk or something else is mounted at {}'.format(data_dir))
    return
  utils.run_commands([
      'sudo mkdir -p {}'.format(data_dir),
      'sudo mount -t tmpfs -o size={}m tmpfs {}'.format(
          disk_size, data_dir),
  ])
  logging.info('Created RAM disk at {}'.format(data_dir))
|
<gh_stars>1-10
try:
import tkinter as tk
from tkinter.filedialog import askopenfilename
class Window(tk.Tk):
def __init__(self):
tk.Tk.__init__(self)
self.title("scicast")
self.path = tk.StringVar()
self.cell_path = tk.StringVar()
self.gene_path = tk.StringVar()
self.gene_label_path = tk.StringVar()
self.exclude_gene_path = tk.StringVar()
self.asset = tk.StringVar()
self.gene_number = tk.IntVar(value=200)
self.depth_number = tk.IntVar(value=20)
self.kmeans_cluster_range = tk.StringVar(value='2,4')
self.color_cells = tk.StringVar()
self.color_genes = tk.StringVar()
self.test_clust_stability = tk.IntVar(value=0)
self.genes_corr = tk.StringVar()
self.annotate_gene_subset = tk.StringVar()
#type or choose gene matrix file
dir_label = tk.Label(self, text="Browse or type path to gene cell matrix file:")
path_entry = tk.Entry(self, textvariable=self.path, width=40)
browse_button = tk.Button(self, text="Browse for gene cell matrix file", command=self.browse)
#type or choose cell group file
cell_label = tk.Label(self, text="Browse or type path to cell group file:")
cell_path_entry = tk.Entry(self, textvariable=self.cell_path, width=40)
cell_browse_button = tk.Button(self, text="Browse for cell group file", command=self.browse_cellp)
#type or choose gene group file
gene_label = tk.Label(self, text="Browse or type path to gene group file:")
gene_path_entry = tk.Entry(self, textvariable=self.gene_path, width=40)
gene_browse_button = tk.Button(self, text="Browse for gene group file", command=self.browse_genep)
#type or choose file of genes to exclude from all analysis
exclude_gene_label = tk.Label(self, text="Browse or type path to file with genes to exclude from analysis (i.e cell cycle):")
exclude_gene_path_entry = tk.Entry(self, textvariable=self.exclude_gene_path, width=40)
exclude_gene_browse_button = tk.Button(self, text="Browse for exclude genes file", command=self.browse_excludeg)
#type or choose file of genes which will be labeled in gene PCA plot
annotate_gene_subset_label = tk.Label(self, text="Annotate only these genes in gene PCA:")
annotate_gene_subset_path_entry = tk.Entry(self, textvariable=self.annotate_gene_subset, width=40)
annotate_gene_subset_browse_button = tk.Button(self, text="Browse gene annotation file", command=self.browse_annotateg)
#define file extensions
self.file_opt = options = {}
options['defaultextension'] = '.txt'
options['filetypes'] = [('all files', '.*'), ('text files', '.txt'),('csv files', '.csv'), ('cufflinks counts', '.count_table'), ('cufflinks fpkm', 'fpkm_table'), ('gene matrix', '.matrix')]
#setup metric menu options
self.metric_menu_var = tk.StringVar()
self.metric_menu_var.set("seuclidean")
metric_menu_label = tk.Label(self, text="Choose Metric:")
metric_option_menu = tk.OptionMenu(self, self.metric_menu_var, 'braycurtis', 'canberra', 'chebyshev', 'cityblock', 'correlation', 'cosine', 'dice', 'euclidean', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule')
#setup method option menu
self.method_menu_var = tk.StringVar()
self.method_menu_var.set("ward")
method_menu_label = tk.Label(self, text="Choose Method:")
method_option_menu = tk.OptionMenu(self, self.method_menu_var, 'single', 'complete', 'average', 'weighted', 'centroid', 'median')
#setup qgraph option menu
self.qgraph_menu_var = tk.StringVar()
self.qgraph_menu_var.set("none")
qgraph_menu_label = tk.Label(self, text="Choose which qgraph networks to generate:")
qgraph_option_menu = tk.OptionMenu(self, self.qgraph_menu_var, 'gene','cell','both')
#setup image format selection menu
self.image_format_menu_var = tk.StringVar()
self.image_format_menu_var.set("pdf")
image_format_menu_label = tk.Label(self, text="Select image format for output files:")
image_format_option_menu = tk.OptionMenu(self, self.image_format_menu_var, 'tif', 'png', 'jpeg')
#setup z-direction option menu
self.zdir_menu_var = tk.IntVar()
self.zdir_menu_var.set(0)
zdir_menu_label = tk.Label(self, text="Choose z:")
zdir_option_menu = tk.OptionMenu(self, self.zdir_menu_var, 1,'None')
self.flags = ["Don't Run Heatmaps","Don't Run Correlation", "Verbose", "Test Significance by Groups (User Defined)", "Test Significance by Unbiased Clusters", "Exclude Cells Not in User Cell Groups", "Add Ellipse", "Add Cell Names to PCA", "Display Only Unique Signifcant Genes", "Run Significance Test for kmeans clusters", "Input Matrix is already log2", "use t-SNE (for kmeans clustering)"]
self.variables = []
asset_label = tk.Label(self, text="Output File Name:")
asset_entry = tk.Entry(self, textvariable=self.asset, width=40)
gene_number_label = tk.Label(self, text="Number of genes to include")
gene_number_entry = tk.Entry(self, textvariable=self.gene_number, width=10)
kmeans_range_label = tk.Label(self, text="Range of cluster for kmeans (inclusive):")
kmeans_range_entry = tk.Entry(self, textvariable=self.kmeans_cluster_range, width=10)
depth_number_label = tk.Label(self, text="Depth at which subclustering will stop")
depth_number_entry = tk.Entry(self, textvariable=self.depth_number, width=10)
color_cells_label = tk.Label(self, text="Provide specific colors and markers for each cell group.")
color_cells_entry = tk.Entry(self, textvariable=self.color_cells, width=20)
color_genes_label = tk.Label(self, text="Provide specific colors and markers for each gene group.")
color_genes_entry = tk.Entry(self, textvariable=self.color_cells, width=20)
test_clust_stability_label = tk.Label(self, text="Number of iterations to test cluster stability with varying gene numbers.")
test_clust_stability_entry = tk.Entry(self, textvariable=self.test_clust_stability, width=10)
genes_corr_label = tk.Label(self, text="Comma seperated list of genes to add to correlation search.")
genes_corr_entry = tk.Entry(self, textvariable=self.genes_corr, width=20)
create_button = tk.Button(self, text="Run scicast", command=self.genAsset)
dir_label.grid(row=1, column=1, columnspan=2, sticky='w')
path_entry.grid(row=2, column=1, columnspan=2, sticky='w')
browse_button.grid(row=3, column=1, columnspan=2, sticky='w')
cell_label.grid(row=4, column=1, columnspan=2, sticky='w')
cell_path_entry.grid(row=5, column=1, columnspan=2, sticky='w')
cell_browse_button.grid(row=6, column=1, columnspan=2, sticky='w')
gene_label.grid(row=7, column=1, columnspan=2, sticky='w')
gene_path_entry.grid(row=8, column=1, columnspan=2, sticky='w')
gene_browse_button.grid(row=9, column=1, columnspan=2, sticky='w')
exclude_gene_label.grid(row=10, column=1, columnspan=2, sticky='w')
exclude_gene_path_entry.grid(row=11, column=1, columnspan=2, sticky='w')
exclude_gene_browse_button.grid(row=12, column=1, columnspan=2, sticky='w')
annotate_gene_subset_label.grid(row=9, column=4, columnspan=2, sticky='w')
annotate_gene_subset_path_entry.grid(row=10, column=4, columnspan=2, sticky='w')
annotate_gene_subset_browse_button.grid(row=11, column=4, columnspan=2, sticky='w')
gene_number_label.grid(row=13, column=1, columnspan=2, sticky='w')
gene_number_entry.grid(row=14, column=1, columnspan=2, sticky='w')
depth_number_label.grid(row=15, column=1, columnspan=2, sticky='w')
depth_number_entry.grid(row=16, column=1, columnspan=2, sticky='w')
for i, flag in enumerate(self.flags):
var = tk.BooleanVar()
tk.Checkbutton(self, text=flag, variable=var).grid(row=1+i, column=3, columnspan=1, sticky='w')
self.variables.append(var)
metric_menu_label.grid(row=2+len(self.flags), column=3, columnspan=1, sticky='w')
metric_option_menu.grid(row=3+len(self.flags), column=3, columnspan=1, sticky='w')
method_menu_label.grid(row=4+len(self.flags), column=3, columnspan=1, sticky='w')
method_option_menu.grid(row=5+len(self.flags), column=3, columnspan=1, sticky='w')
qgraph_menu_label.grid(row=15, column=4, columnspan=1, sticky='w')
qgraph_option_menu.grid(row=16, column=4, columnspan=1, sticky='w')
image_format_menu_label.grid(row=17, column=4, columnspan=1, sticky='e')
image_format_option_menu.grid(row=18, column=4, columnspan=1, sticky='e')
zdir_menu_label.grid(row=17, column=4, columnspan=1, sticky='w')
zdir_option_menu.grid(row=18, column=4, columnspan=1, sticky='w')
kmeans_range_label.grid(row=12, column=4, columnspan=1, sticky='w')
kmeans_range_entry.grid(row=13, column=4, columnspan=1, sticky='w')
color_cells_label.grid(row=1, column=4, columnspan=1, sticky='w')
color_cells_entry.grid(row=2, column=4, columnspan=1, sticky='w')
color_genes_label.grid(row=3, column=4, columnspan=1, sticky='w')
color_genes_entry.grid(row=4, column=4, columnspan=1, sticky='w')
test_clust_stability_label.grid(row=5, column=4, columnspan=1, sticky='w')
test_clust_stability_entry.grid(row=6, column=4, columnspan=1, sticky='w')
genes_corr_label.grid(row=7, column=4, columnspan=1, sticky='w')
genes_corr_entry.grid(row=8, column=4, columnspan=1, sticky='w')
asset_label.grid(row=17, column=1, columnspan=1, sticky='w')
asset_entry.grid(row=18, column=1, columnspan=1, sticky='w')
create_button.grid(row=24, column=2, columnspan=2)
def browse(self):
    """Open a file-picker and store the chosen gene cell matrix path."""
    chosen = askopenfilename(**self.file_opt)
    if not chosen:
        # Dialog cancelled: leave the current path untouched.
        return
    self.path.set(chosen)
def browse_cellp(self):
    """Open a file-picker and store the chosen cell group file path."""
    chosen = askopenfilename(**self.file_opt)
    if not chosen:
        # Dialog cancelled: leave the current path untouched.
        return
    self.cell_path.set(chosen)
def browse_genep(self):
    """Open a file-picker and store the chosen gene group file path."""
    chosen = askopenfilename(**self.file_opt)
    if not chosen:
        # Dialog cancelled: leave the current path untouched.
        return
    self.gene_path.set(chosen)
def browse_excludeg(self):
    """Open a file-picker and store the chosen exclude-genes file path."""
    chosen = askopenfilename(**self.file_opt)
    if not chosen:
        # Dialog cancelled: leave the current path untouched.
        return
    self.exclude_gene_path.set(chosen)
def browse_annotateg(self):
    """Open a file-picker and store the chosen gene-annotation file path."""
    chosen = askopenfilename(**self.file_opt)
    if not chosen:
        # Dialog cancelled: leave the current selection untouched.
        return
    self.annotate_gene_subset.set(chosen)
def genAsset(self):
    """Harvest every widget value into self.all_dict, then close the window.

    Checkbox flags are stored under their display-label keys; all remaining
    options are stored under the fixed keys expected by the scicast runner.
    """
    collected = {}
    # Boolean checkbox flags, keyed by their display label.
    for checkbox_var, label in zip(self.variables, self.flags):
        collected[label] = checkbox_var.get()
    # Named options: scicast option key -> Tk variable holding its value.
    sources = (
        ('filepath', self.path),
        ('output_name', self.asset),
        ('method', self.method_menu_var),
        ('metric', self.metric_menu_var),
        ('gene_number', self.gene_number),
        ('depth', self.depth_number),
        ('cell_file', self.cell_path),
        ('gene_file', self.gene_path),
        ('zdir', self.zdir_menu_var),
        ('qgraph', self.qgraph_menu_var),
        ('image_format', self.image_format_menu_var),
        ('kmeans_cluster_range', self.kmeans_cluster_range),
        ('exclude_genes', self.exclude_gene_path),
        ('color_cells', self.color_cells),
        ('color_genes', self.color_genes),
        ('test_clust_stability', self.test_clust_stability),
        ('genes_corr', self.genes_corr),
        ('annotate_gene_subset', self.annotate_gene_subset),
    )
    for key, tk_var in sources:
        collected[key] = tk_var.get()
    self.all_dict = collected
    self.destroy()
except ImportError:
import Tkinter as tk
import tkFileDialog
class Window(tk.Frame):
    """scicast options window (Python 2 / Tkinter fallback).

    Collects file paths, menu selections, checkbox flags, and free-form
    options from the user; pressing "Run scicast" gathers everything into
    ``self.all_dict`` and closes the window.
    """

    def __init__(self):
        tk.Frame.__init__(self)
        #self.title("scicast")
        # Tk variables backing the entry widgets.
        self.path = tk.StringVar()
        self.cell_path = tk.StringVar()
        self.gene_path = tk.StringVar()
        self.gene_label_path = tk.StringVar()
        self.exclude_gene_path = tk.StringVar()
        self.asset = tk.StringVar()
        self.gene_number = tk.IntVar(value=200)
        self.depth_number = tk.IntVar(value=20)
        self.kmeans_cluster_range = tk.StringVar(value='2,4')
        self.color_cells = tk.StringVar()
        self.color_genes = tk.StringVar()
        self.test_clust_stability = tk.IntVar(value=0)
        self.genes_corr = tk.StringVar()
        self.annotate_gene_subset = tk.StringVar()
        #type or choose gene matrix file
        dir_label = tk.Label(self, text="Browse or type path to gene cell matrix file:")
        path_entry = tk.Entry(self, textvariable=self.path, width=40)
        browse_button = tk.Button(self, text="Browse for gene cell matrix file", command=self.browse)
        #type or choose cell group file
        cell_label = tk.Label(self, text="Browse or type path to cell group file:")
        cell_path_entry = tk.Entry(self, textvariable=self.cell_path, width=40)
        cell_browse_button = tk.Button(self, text="Browse for cell group file", command=self.browse_cellp)
        #type or choose gene group file
        gene_label = tk.Label(self, text="Browse or type path to gene group file:")
        gene_path_entry = tk.Entry(self, textvariable=self.gene_path, width=40)
        gene_browse_button = tk.Button(self, text="Browse for gene group file", command=self.browse_genep)
        #type or choose file of genes to exclude from all analysis
        exclude_gene_label = tk.Label(self, text="Browse or type path to file with genes to exclude from analysis (i.e cell cycle):")
        exclude_gene_path_entry = tk.Entry(self, textvariable=self.exclude_gene_path, width=40)
        exclude_gene_browse_button = tk.Button(self, text="Browse for exclude genes file", command=self.browse_excludeg)
        #type or choose file of genes which will be labeled in gene PCA plot
        annotate_gene_subset_label = tk.Label(self, text="Annotate only these genes in gene PCA:")
        annotate_gene_subset_path_entry = tk.Entry(self, textvariable=self.annotate_gene_subset, width=40)
        annotate_gene_subset_browse_button = tk.Button(self, text="Browse gene annotation file", command=self.browse_annotateg)
        #define file extensions
        self.file_opt = options = {}
        options['defaultextension'] = '.txt'
        options['filetypes'] = [('all files', '.*'), ('text files', '.txt'),('csv files', '.csv')]
        #setup metric menu options
        self.metric_menu_var = tk.StringVar()
        self.metric_menu_var.set("seuclidean")
        metric_menu_label = tk.Label(self, text="Choose Metric:")
        metric_option_menu = tk.OptionMenu(self, self.metric_menu_var, 'braycurtis', 'canberra', 'chebyshev', 'cityblock', 'correlation', 'cosine', 'dice', 'euclidean', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule')
        #setup method option menu
        self.method_menu_var = tk.StringVar()
        self.method_menu_var.set("ward")
        method_menu_label = tk.Label(self, text="Choose Method:")
        method_option_menu = tk.OptionMenu(self, self.method_menu_var, 'single', 'complete', 'average', 'weighted', 'centroid', 'median')
        #setup qgraph option menu
        self.qgraph_menu_var = tk.StringVar()
        self.qgraph_menu_var.set("none")
        qgraph_menu_label = tk.Label(self, text="Choose which qgraph networks to generate:")
        qgraph_option_menu = tk.OptionMenu(self, self.qgraph_menu_var, 'gene','cell','both')
        #setup image format selection menu
        self.image_format_menu_var = tk.StringVar()
        self.image_format_menu_var.set("pdf")
        image_format_menu_label = tk.Label(self, text="Select image format for output files:")
        image_format_option_menu = tk.OptionMenu(self, self.image_format_menu_var, 'tif', 'png', 'jpeg')
        #setup z-direction option menu
        self.zdir_menu_var = tk.IntVar()
        self.zdir_menu_var.set(0)
        zdir_menu_label = tk.Label(self, text="Choose z:")
        zdir_option_menu = tk.OptionMenu(self, self.zdir_menu_var, 1,'None')
        self.flags = ["Don't Run Heatmaps","Don't Run Correlation", "Verbose", "Test Significance by Groups (User Defined)", "Test Significance by Unbiased Clusters", "Exclude Cells Not in User Cell Groups", "Add Ellipse", "Add Cell Names to PCA", "Display Only Unique Signifcant Genes", "Run Significance Test for kmeans clusters", "Input Matrix is already log2", "use t-SNE (for kmeans clustering)"]
        self.variables = []
        asset_label = tk.Label(self, text="Output File Name:")
        asset_entry = tk.Entry(self, textvariable=self.asset, width=40)
        gene_number_label = tk.Label(self, text="Number of genes to include")
        gene_number_entry = tk.Entry(self, textvariable=self.gene_number, width=10)
        kmeans_range_label = tk.Label(self, text="Range of cluster for kmeans (inclusive):")
        kmeans_range_entry = tk.Entry(self, textvariable=self.kmeans_cluster_range, width=10)
        depth_number_label = tk.Label(self, text="Depth at which subclustering will stop")
        depth_number_entry = tk.Entry(self, textvariable=self.depth_number, width=10)
        color_cells_label = tk.Label(self, text="Provide specific colors and markers for each cell group.")
        color_cells_entry = tk.Entry(self, textvariable=self.color_cells, width=20)
        color_genes_label = tk.Label(self, text="Provide specific colors and markers for each gene group.")
        # BUGFIX: this entry was bound to self.color_cells, so the gene-group
        # colors option could never be set from the UI (genAsset reads
        # self.color_genes, which stayed empty).
        color_genes_entry = tk.Entry(self, textvariable=self.color_genes, width=20)
        test_clust_stability_label = tk.Label(self, text="Number of iterations to test cluster stability with varying gene numbers.")
        test_clust_stability_entry = tk.Entry(self, textvariable=self.test_clust_stability, width=10)
        genes_corr_label = tk.Label(self, text="Comma seperated list of genes to add to correlation search.")
        genes_corr_entry = tk.Entry(self, textvariable=self.genes_corr, width=20)
        create_button = tk.Button(self, text="Run scicast", command=self.genAsset)
        # Lay out the widgets: columns 1-2 hold file inputs, column 3 the
        # flag checkboxes and cluster menus, columns 4-5 the remaining options.
        dir_label.grid(row=1, column=1, columnspan=2, sticky='w')
        path_entry.grid(row=2, column=1, columnspan=2, sticky='w')
        browse_button.grid(row=3, column=1, columnspan=2, sticky='w')
        cell_label.grid(row=4, column=1, columnspan=2, sticky='w')
        cell_path_entry.grid(row=5, column=1, columnspan=2, sticky='w')
        cell_browse_button.grid(row=6, column=1, columnspan=2, sticky='w')
        gene_label.grid(row=7, column=1, columnspan=2, sticky='w')
        gene_path_entry.grid(row=8, column=1, columnspan=2, sticky='w')
        gene_browse_button.grid(row=9, column=1, columnspan=2, sticky='w')
        exclude_gene_label.grid(row=10, column=1, columnspan=2, sticky='w')
        exclude_gene_path_entry.grid(row=11, column=1, columnspan=2, sticky='w')
        exclude_gene_browse_button.grid(row=12, column=1, columnspan=2, sticky='w')
        annotate_gene_subset_label.grid(row=9, column=4, columnspan=2, sticky='w')
        annotate_gene_subset_path_entry.grid(row=10, column=4, columnspan=2, sticky='w')
        annotate_gene_subset_browse_button.grid(row=11, column=4, columnspan=2, sticky='w')
        gene_number_label.grid(row=13, column=1, columnspan=2, sticky='w')
        gene_number_entry.grid(row=14, column=1, columnspan=2, sticky='w')
        depth_number_label.grid(row=15, column=1, columnspan=2, sticky='w')
        depth_number_entry.grid(row=16, column=1, columnspan=2, sticky='w')
        # One checkbox per flag, stacked in column 3; BooleanVars are kept in
        # self.variables in the same order as self.flags.
        for i, flag in enumerate(self.flags):
            var = tk.BooleanVar()
            tk.Checkbutton(self, text=flag, variable=var).grid(row=1+i, column=3, columnspan=1, sticky='w')
            self.variables.append(var)
        metric_menu_label.grid(row=2+len(self.flags), column=3, columnspan=1, sticky='w')
        metric_option_menu.grid(row=3+len(self.flags), column=3, columnspan=1, sticky='w')
        method_menu_label.grid(row=4+len(self.flags), column=3, columnspan=1, sticky='w')
        method_option_menu.grid(row=5+len(self.flags), column=3, columnspan=1, sticky='w')
        qgraph_menu_label.grid(row=15, column=4, columnspan=1, sticky='w')
        qgraph_option_menu.grid(row=16, column=4, columnspan=1, sticky='w')
        image_format_menu_label.grid(row=17, column=5, columnspan=1, sticky='w')
        image_format_option_menu.grid(row=18, column=5, columnspan=1, sticky='w')
        zdir_menu_label.grid(row=17, column=4, columnspan=1, sticky='w')
        zdir_option_menu.grid(row=18, column=4, columnspan=1, sticky='w')
        kmeans_range_label.grid(row=12, column=4, columnspan=1, sticky='w')
        kmeans_range_entry.grid(row=13, column=4, columnspan=1, sticky='w')
        color_cells_label.grid(row=1, column=4, columnspan=1, sticky='w')
        color_cells_entry.grid(row=2, column=4, columnspan=1, sticky='w')
        color_genes_label.grid(row=3, column=4, columnspan=1, sticky='w')
        color_genes_entry.grid(row=4, column=4, columnspan=1, sticky='w')
        test_clust_stability_label.grid(row=5, column=4, columnspan=1, sticky='w')
        test_clust_stability_entry.grid(row=6, column=4, columnspan=1, sticky='w')
        genes_corr_label.grid(row=7, column=4, columnspan=1, sticky='w')
        genes_corr_entry.grid(row=8, column=4, columnspan=1, sticky='w')
        asset_label.grid(row=17, column=1, columnspan=1, sticky='w')
        asset_entry.grid(row=18, column=1, columnspan=1, sticky='w')
        create_button.grid(row=24, column=2, columnspan=2)

    def browse(self):
        """Pick the gene cell matrix file and store its path."""
        file_path= tkFileDialog.askopenfilename(**self.file_opt)
        if file_path:
            self.path.set(file_path)

    def browse_cellp(self):
        """Pick the cell group file and store its path."""
        file_path= tkFileDialog.askopenfilename(**self.file_opt)
        if file_path:
            self.cell_path.set(file_path)

    def browse_genep(self):
        """Pick the gene group file and store its path."""
        file_path= tkFileDialog.askopenfilename(**self.file_opt)
        if file_path:
            self.gene_path.set(file_path)

    def browse_excludeg(self):
        """Pick the exclude-genes file and store its path."""
        file_path= tkFileDialog.askopenfilename(**self.file_opt)
        if file_path:
            self.exclude_gene_path.set(file_path)

    def browse_annotateg(self):
        """Pick the gene annotation file and store its path."""
        file_path= tkFileDialog.askopenfilename(**self.file_opt)
        if file_path:
            self.annotate_gene_subset.set(file_path)

    def genAsset(self):
        """Gather every option into self.all_dict and close the window."""
        all_options_dict = {}
        asset_path = self.path.get()
        asset_name = self.asset.get()
        asset_metric_menu_option = self.metric_menu_var.get()
        asset_method_menu_option = self.method_menu_var.get()
        asset_gene_number = self.gene_number.get()
        asset_depth = self.depth_number.get()
        asset_cell_path = self.cell_path.get()
        asset_gene_path = self.gene_path.get()
        asset_zdir = self.zdir_menu_var.get()
        asset_qgraph = self.qgraph_menu_var.get()
        asset_image_format = self.image_format_menu_var.get()
        asset_kmeans_cluster_range = self.kmeans_cluster_range.get()
        asset_exclude_gene_path = self.exclude_gene_path.get()
        asset_color_cells = self.color_cells.get()
        asset_color_genes = self.color_genes.get()
        asset_test_clust_stability = self.test_clust_stability.get()
        asset_genes_corr = self.genes_corr.get()
        asset_annotate_gene_subset = self.annotate_gene_subset.get()
        # Checkbox flags first, keyed by their display label.
        for var, flag in zip(self.variables, self.flags):
            all_options_dict[flag] = var.get()
        all_options_dict['filepath'] = asset_path
        all_options_dict['output_name'] = asset_name
        all_options_dict['method'] = asset_method_menu_option
        all_options_dict['metric'] =asset_metric_menu_option
        all_options_dict['gene_number'] =asset_gene_number
        all_options_dict['depth'] = asset_depth
        all_options_dict['cell_file'] = asset_cell_path
        all_options_dict['gene_file'] = asset_gene_path
        all_options_dict['zdir'] = asset_zdir
        all_options_dict['qgraph'] = asset_qgraph
        all_options_dict['image_format'] = asset_image_format
        all_options_dict['kmeans_cluster_range'] = asset_kmeans_cluster_range
        all_options_dict['exclude_genes'] = asset_exclude_gene_path
        all_options_dict['color_cells'] = asset_color_cells
        all_options_dict['color_genes'] = asset_color_genes
        all_options_dict['test_clust_stability'] = asset_test_clust_stability
        all_options_dict['genes_corr'] = asset_genes_corr
        all_options_dict['annotate_gene_subset'] = asset_annotate_gene_subset
        self.all_dict = all_options_dict
        self.destroy()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__title__ = ''
__author__ = 'HaiFeng'
__mtime__ = '2016/9/17'
"""
from generate.ctp_data_type import *
import os
class Generate:
    """Translate the CTP C++ header ThostFtdcUserApiStruct.h into a Python
    module (py_ctp/ctp_struct.py) of ctypes Structure classes."""

    def __init__(self, dir):
        # Directory holding the CTP header files.
        self.ctp_dir = dir

    def run(self):
        """Main entry: parse the struct header and emit ctp_struct.py."""
        src_path = os.path.join(os.path.abspath(self.ctp_dir), 'ThostFtdcUserApiStruct.h')
        # Raw string: the original non-raw literal only worked because '\h'
        # and '\p' are not escape sequences (DeprecationWarning on 3.6+).
        dst_path = os.path.join(os.path.abspath(r'..\..\hf_py_ctp\py_ctp'), 'ctp_struct.py')
        # 'with' guarantees both handles are closed (the original leaked them).
        with open(src_path, 'r') as fcpp, \
                open(dst_path, 'w', encoding='utf-8') as fpy:
            fpy.write('#!/usr/bin/env python\n')
            fpy.write('#coding:utf-8\n')
            fpy.write('from ctypes import *\n')
            fpy.write('from py_ctp.ctp_enum import *\n')
            fpy.write('\n')
            # fpy.write('structDict = {}\n')
            # fpy.write('\n')
            py_get = ''         # accumulated getXxx() accessor methods
            py_line = ''        # text emitted for the current header line
            py_str = ''         # __str__ format template body
            py_dict = ''        # __dict__ property entries
            py_clone = ''       # clone() attribute-copy lines
            py_str_idx = 0      # positional index into the __str__ template
            py_str_format = ''  # arguments fed to the __str__ template
            for line in fcpp:
                if '///' in line and '\t' not in line:
                    # Comment describing the upcoming struct declaration.
                    remark = line[3:-1]
                    continue
                elif '\t///' in line:
                    # Comment describing the next struct member.
                    remark = line[4:-1]
                    continue
                elif 'struct ' in line:
                    # Struct declaration: begin a new Structure class.
                    content = line.split(' ')
                    name = content[1].replace('\n', '')
                    py_line = 'class %s(Structure):\n' % name
                    py_line += '\t"""%s"""\n' % remark
                    py_line += '\t_fields_ = [\n'
                elif '\t' in line and '///' not in line:
                    # Struct member line of the form "\t<typedef>\t<name>;".
                    content = line.split('\t')
                    typedef = content[1]
                    type_ = typedefDict[typedef]
                    variable = content[2].replace(';\n', "")
                    py_line = '\t\t#%s\n' % remark
                    py_line += '\t\t("%s",%s),\n' % (variable, type_)
                    if type_.find('c_char*') >= 0:
                        # decode must tolerate odd bytes such as 0xa3
                        # (iso-8859-1 garbage), hence explicit GB2312.
                        py_get += "\tdef get{0}(self):\n\t\treturn str(self.{0}, 'GB2312')\n".format(variable)
                        py_str_format += "str(self.{0}, 'GB2312'), ".format(variable)
                        py_str += "{0}=\\'{{{1}}}\\', ".format(variable, py_str_idx)
                        py_dict += "'{0}':{1},".format(variable, "str(self.{0}, 'GB2312')".format(variable))
                    elif type_.find('c_char') >= 0:
                        var_type = typedef[typedef.find('Ftdc') + 4:]
                        py_get += "\tdef get{0}(self):\n\t\treturn {1}(ord(self.{0}))\n".format(variable, var_type)
                        # Emit the enum member's name rather than its raw
                        # value; some fields come back as 0, mapped to ''.
                        py_str_format += "'' if ord(self.{0}) == 0 else {1}(ord(self.{0})).name, ".format(variable, var_type)
                        #py_str_format += "self.{0}, ".format(variable)
                        py_str += "{0}={2}.{{{1}}}, ".format(variable, py_str_idx, var_type)
                        py_dict += "'{0}':{1},".format(variable, "'' if ord(self.{0}) == 0 else {1}(ord(self.{0})).name".format(variable, var_type))
                    else:
                        py_get += "\tdef get{0}(self):\n\t\treturn self.{0}\n".format(variable)
                        py_str_format += "self.{0}, ".format(variable)
                        py_str += "{0}={{{1}}}, ".format(variable, py_str_idx)
                        py_dict += "'{0}':{1},".format(variable, "self.{0}".format(variable))
                    py_clone += "\t\tobj.{0}=self.{0}\n".format(variable)
                    py_str_idx += 1
                    #py_str += '{0}={{self.{0}}}, '.format(variable)
                elif '}' in line:
                    # Struct end: close _fields_ and emit the helper methods.
                    py_line = '\t\t]\n\n'
                    py_line += py_get + '\n'
                    #py_line += "\tdef __str__(self):\n\t\treturn '{0}'.format(self=self)\n\n".format(py_str[0:len(py_str)-2])
                    py_line += "\tdef __str__(self):\n\t\treturn '{0}'.format({1})\n\n".format(py_str[0:len(py_str) - 2], py_str_format[0:len(py_str_format) - 2])
                    py_line += "\t@property\n\tdef __dict__(self):\n\t\treturn {{{0}}}\n\n".format(py_dict[:-1])
                    py_line += "\tdef clone(self):\n\t\tobj={0}()\n{1}\t\treturn obj\n\n".format(name, py_clone)
                elif '{' in line:
                    # Struct body opens: reset the per-struct accumulators.
                    py_line = ''
                    py_get = ''
                    py_str = ''
                    py_dict = ''
                    py_clone = ''
                    py_str_idx = 0
                    py_str_format = ''
                else:
                    # Anything else contributes nothing to the output.
                    py_line = '\n'
                    continue
                fpy.write(py_line)
if __name__ == '__main__':
    # Regenerate ctp_struct.py from the 2016-06-06 CTP header distribution.
    Generate('../ctp_20160606').run()
|
<reponame>bb13135811/Introducing_Python
# File input/output
# fileobj = open(filename, mode)
# (open() returns a file object) (mode indicates the file type)
# Writing a text file with write()
poem = '''There was a young lady named Bright,
Whose speed was far faster than light;
She started one day
In a relative way,
And returned on the previous night.'''
len(poem)
fout = open('relativity', 'wt')
fout.write(poem)  # write() returns the number of characters written
fout.close()
fout = open('relativity', 'wt')
print(poem, file=fout)
fout.close()
# When writing with print you can set the sep and end arguments:
# sep defaults to a space ' '
# end defaults to a newline '\n'
fout = open('relativity', 'wt')
print(poem, file=fout, sep='', end='')  # defaults apply unless something else is passed
fout.close()
fout = open('relativity', 'wt')
size = len(poem)
offset = 0
chunk = 100
while True:
    if offset > size:
        break
    fout.write(poem[offset:offset+chunk])
    offset += chunk
fout.close()
# NOTE(review): 'relativity' already exists at this point, so this bare
# open() in 'x' mode raises FileExistsError before reaching the try below.
fout = open('relativity', 'xt')  # use x to avoid overwriting
# The same, but with an exception handler
try:
    fout = open('relativity', 'xt')
    fout.write('stomp stomp stomp')
except FileExistsError:
    print('relativity already exist')
# Reading text files with read(), readline(), readlines()
# fin.read() reads everything at once, or a given number of characters;
# mind the memory usage
fin = open('relativity', 'rt')  # read() with no argument reads the whole file
poem = fin.read()
fin.close()
len(poem)
# You can cap how much read() returns per call; here 100 characters at a
# time, appending each fragment to the poem string
poem = ''
fin = open('relativity', 'rt')
chunk = 100
while True:
    fragment = fin.read(chunk)
    if not fragment:
        break
    poem += fragment
    print("目前",len(poem),"字")
fin.close()
len(poem)
# fin.readline() reads one line at a time
poem = ''
fin = open('relativity', 'rt')
while True:
    line = fin.readline()
    if not line:
        break
    poem += line
    print('這行',len(line),'字')  # includes the newline character
fin.close()
len(poem)
# Using an iterator, which yields one line at a time
poem = ''
fin = open('relativity', 'rt')
for line in fin:
    poem += line
fin.close()
len(poem)
# fin.readlines() — nicer-looking variant of the iteration above
fin = open('relativity', 'rt')
lines = fin.readlines()
fin.close()
print(len(lines), 'lines read')
for line in lines:
    print(line,end='!')
# Writing a binary file with write()
bdata = bytes(range(0,256))
len(bdata)
fout = open('bfile', 'wb')
fout.write(bdata)
fout.close()
fout = open('bfile', 'wb')
size = len(bdata)
offset = 0
chunk = 100
while True:
    if offset > size:
        break
    fout.write(bdata[offset:offset+chunk])
    offset += chunk
fout.close()
# Reading a binary file with read()
fin = open('bfile', 'rb')
bdata = fin.read()
len(bdata)
fin.close()
# Closing files automatically with 'with'
# form -> expression as variable
with open('relativity', 'wt') as fout:
    fout.write(poem)  # the file closes automatically when this block ends
# Changing position with seek()
# file.tell() reports the current position; seek(offset, origin)
fin = open('bfile', 'rb')
fin.tell()
# Use seek() to jump to one byte before the end of the file
fin.seek(255)
bdata = bytes(range(0, 256))
fin = open('bfile', 'wb')
fin.write(bdata)
fin.close()
fin = open('bfile', 'rb')
data = fin.read()
len(data)
fin.close()
fin = open('bfile', 'rb')
fin.tell()
fin.close()
# NOTE(review): fin was closed on the previous line, so these calls raise
# ValueError ("I/O operation on closed file") — the close() presumably
# belongs after the reads below.
fin.seek(255)  # jump to one byte before the end
bdata = fin.read()  # read through to end of file
len(bdata)  # a single byte
bdata[0]  # this is offset 255 of the 256 bytes
# seek() also returns the current offset -> seek(offset, origin)
# origin = 0 (default): offset bytes from the start
# origin = 1: offset bytes from the current position
# origin = 2: offset bytes back from the end
import os  # the os module defines these values
os.SEEK_SET
os.SEEK_CUR
os.SEEK_END
# Reading the final byte in different ways
fin = open('bfile', 'rb')
fin.seek(-1, 2)  # offset -1 from the end
fin.tell()
bdata = fin.read()
len(bdata)
bdata[0]
fin.close()
fin = open('bfile', 'rb')
fin.seek(254, 0)
fin.tell()
fin.seek(1,1)
fin.tell()
bdata = fin.read()
len(bdata)
bdata[0]
# The functions above suit binary files; in text files, unless the data is
# ASCII, characters occupy varying byte counts so computing offsets is hard
# Structured text files
# CSV
# Common delimiters: 1. ,  2. |  3. \t
# Escaping: if a field contains the delimiter, quote the whole field or
# prefix the delimiter with an escape character
# Files may use different end-of-line characters
# The first line may hold the column names
# Reading and writing a sequence of "rows", each a sequence of fields
import csv
villains = [
    ['Doctor', 'No'],
    ['Rosa', 'Klebb'],
    ['Mister', 'Big'],
    ['Auric', 'Goldfinger'],
    ['Ernst', 'Blofield'],
]
with open('villains', 'wt') as fout:  # write
    csvout = csv.writer(fout)
    csvout.writerows(villains)
import csv
with open('villains', 'rt') as fin:  # read
    cin = csv.reader(fin)
    villains = [row for row in cin]
print(villains)
import csv
with open('villains', 'rt') as fin:
    cin = csv.DictReader(fin, fieldnames=['first', 'last'])  # column names 'first', 'last'
    villains = [row for row in cin]
print(villains)
# Rewriting the CSV file with DictWriter()
import csv
villains = [
    {'first':'Doctor','last':'No'},
    {'first':'Rosa', 'last':'Klebb'},
    {'first':'Mister','last':'Big'},
    {'first':'Auric', 'last':'Goldfinger'},
    {'first':'Ernst', 'last':'Blofield'},
]
with open('villains', 'wt') as fout:
    cout = csv.DictWriter(fout, ['first', 'last'])  # builds a file with a header row
    cout.writeheader()  # header
    cout.writerows(villains)
print(villains)
# Reading the file back
import csv
with open('villains', 'rt') as fin:
    cin = csv.DictReader(fin)  # omitting fieldnames uses the first line's values as column labels
    villains = [row for row in cin]
villains
# XML
# Uses "tags" to delimit data
# 1. tags start with the < character
# 2. whitespace is ignored
# 3. a start tag such as <menu> is normally paired with a matching end tag </menu>
# 4. tags can nest inside other tags
# 5. a start tag may carry optional attributes
# 6. tags can contain "values"
# 7. a tag with no value or children can be written as one tag with a slash
#    before the closing bracket, replacing the start/end pair
# 8. data can be placed wherever convenient
# Parsing XML with ElementTree
import xml.etree.ElementTree as et
tree = et.ElementTree(file='menu.xml')
root = tree.getroot()
root.tag
# Each element carries the following attributes:
# tag: string, the kind of data the element represents
# attrib: dictionary, the element's attached attributes
# text: string, the element's content
# tail: string, the text following the element's closing tag
# plus any child elements
for child in root:
    print('tag:', child.tag, 'attributes:', child.attrib)
    for grandchild in child:
        # NOTE(review): prints child.attrib — probably meant grandchild.attrib
        print('\ttag:', grandchild.tag, 'attrubutes:', child.attrib)
len(root)  # number of menu sections
len(root[0])  # number of breakfast items
# JSON
menu = \
{
    "breakfast": {
        "hours": "7-11",
        "items": {
            "breakfast burritos": "$6.00",
            "pancakes": "$4.00"
        }
    },
    "lunch" : {
        "hours": "11-3",
        "items": {
            "hamburger": "$5.00"
        }
    },
    "dinner": {
        "hours": "3-10",
        "items": {
            "spaghetti": "$8.00"
        }
    }
}
import json
menu_json = json.dumps(menu)  # json.dumps() converts Python data to JSON
menu_json
menu2 = json.loads(menu_json)  # json.loads() converts JSON to a Python object
menu2
import datetime
now = datetime.datetime.utcnow()
now
import json
json.dumps(now)  # NOTE(review): raises TypeError — datetime is not JSON-serializable
# Convert the datetime into something JSON understands, e.g. a string or an epoch value
now_str = str(now)
json.dumps(now_str)
from time import mktime
now_epoch = int(mktime(now.timetuple()))
json.dumps(now_epoch)
# JSON encoding can be customized through "inheritance"
import json
class DTEncoder(json.JSONEncoder):
    """JSON encoder that serializes datetime.datetime values as epoch seconds."""

    def default(self, obj):
        # Anything that is not a datetime falls through to the base class,
        # whose default() raises TypeError for unserializable types.
        if not isinstance(obj, datetime.datetime):
            return json.JSONEncoder.default(self, obj)
        # Convert the (local-time) struct_time to an integer Unix timestamp.
        return int(mktime(obj.timetuple()))
json.dumps(now, cls=DTEncoder)
# Walk a structure and inspect values with isinstance() and type-appropriate
# methods — e.g. for a dict, extract content with keys(), values(), items()
# YAML
# Much like JSON, with keys and values, but handles more data types
import yaml
with open('mcintyre.yaml', 'rt') as fin:
    text = fin.read()
data = yaml.safe_load(text)  # safe_load() converts a YAML string into Python data
data['details']  # for untrusted YAML input, use safe_load() instead of load()
len(data['poems'])
# If the data nests more than one level, chain dict/list/dict lookups —
# here, the title of the second poem
data['poems'][1]['title']
# The configparser parser
import configparser
cfg = configparser.ConfigParser()
cfg.read('settings.cfg')
cfg
cfg['french']
cfg['french']['greeting']
cfg['files']['bin']
# Serializing with pickle
# The pickle module saves and restores objects in a special binary format
import pickle
import datetime
now1 = datetime.datetime.utcnow()
pickled = pickle.dumps(now1)
now2 = pickle.loads(pickled)
now1
now2
# pickle also works on your own classes and objects
import pickle
class Tiny():
    """Minimal class used to demonstrate pickling user-defined objects."""

    def __str__(self):
        # Only str() is customized; repr() keeps the default object form.
        return "tiny"
obj1 = Tiny()
obj1
str(obj1)
pickled = pickle.dumps(obj1)  # dumps() serializes (turns a data structure or object state into a storable format)
pickled
obj2 = pickle.loads(pickled)  # loads() deserializes
obj2
# SQL
# Statements fall into two broad groups:
# DDL (data definition language)
# DML (data manipulation language)
# DB-API
# connect() - connect to a database; accepts user, password, host, etc.
# cursor() - create a cursor object to handle queries
# execute() & executemany() - run one or more SQL commands against the database
# fetchone(), fetchmany(), fetchall() - retrieve execute's results
# SQLite
import sqlite3
conn = sqlite3.connect('enterprise.db')  # creates a database file named enterprise.db
curs = conn.cursor()  # cursor() - create a cursor object to handle queries
curs.execute('''CREATE TABLE zoo
(critter VARCHAR(20) PRIMARY KEY,
count INT,
damages FLOAT)''')  # execute() - run SQL against the database
# critter - variable-length string, the primary key
# count - current integer count of this animal
# damages - monetary losses the animal has caused
# Add some animals
curs.execute('INSERT INTO zoo VALUES("duck", 5, 0.0)')
curs.execute('INSERT INTO zoo VALUES("bear", 2, 1000.0)')
# "Placeholders" - the safer way to insert data
ins = 'INSERT INTO zoo(critter, count, damages) VALUES(?,?,?)'
curs.execute(ins, ('weasel', 1, 2000.0))
curs.execute('SELECT * FROM zoo')  # fetch all the animals
rows = curs.fetchall()
print(rows)
curs.execute('SELECT * FROM ZOO ORDER BY count')
curs.fetchall()
curs.execute('SELECT * FROM ZOO ORDER BY count DESC')  # sort in descending order
curs.fetchall()
curs.execute('''SELECT * FROM zoo WHERE
damages = (SELECT MAX(damages) FROM zoo)''')
curs.fetchall()
# Close open connections and cursors when the work is done
curs.close()
conn.close()
import sqlalchemy as sa
conn = sa.create_engine('sqlite://')
conn.execute('''CREATE TABLE zoo
(critter VARCHAR(20) PRIMARY KEY,
count INT,
damages FLOAT)''')
ins = 'INSERT INTO zoo(critter, count, damages) VALUES (?, ?, ?)'
conn.execute(ins, 'duck', 10, 0.0)
conn.execute(ins, 'bear', 2, 1000.0)
conn.execute(ins, 'weasel', 1, 2000.0)
rows = conn.execute('SELECT * FROM zoo')
print(rows)  # in SQLAlchemy rows is not a list but a ResultProxy, which cannot be printed directly
for row in rows:
    print(row)
# SQL Expression Language
import sqlalchemy as sa
conn = sa.create_engine('sqlite://')
# Define the zoo table with the Expression Language instead of raw SQL
meta = sa.MetaData()
zoo = sa.Table('zoo', meta,
    sa.Column('critter', sa.String, primary_key=True),
    sa.Column('count', sa.Integer),
    sa.Column('damages', sa.Float)
)
meta.create_all(conn)
# Insert data through Expression Language functions
conn.execute(zoo.insert(('bear', 2, 1000.0)))
conn.execute(zoo.insert(('weasel', 1, 2000.0)))
conn.execute(zoo.insert(('duck', 10, 0)))
result = conn.execute(zoo.select())  # select everything from the zoo table object
rows = result.fetchall()
print(rows)
# Object-relational mapper
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
conn = sa.create_engine('sqlite:///zoo.db')
Base = declarative_base()
class Zoo(Base):
    """ORM-mapped row of the 'zoo' table (SQLAlchemy declarative model)."""
    __tablename__ = 'zoo'
    # Mapped columns; 'critter' is the primary key.
    critter = sa.Column('critter', sa.String, primary_key=True)
    count = sa.Column('count', sa.Integer)
    damages = sa.Column('damages', sa.Float)

    def __init__(self, critter, count, damages):
        self.critter = critter
        self.count = count
        self.damages = damages

    def __repr__(self):
        # Debug-friendly representation, e.g. <Zoo(duck, 5, 0.0)>
        return "<Zoo({}, {}, {})>".format(self.critter, self.count, self.damages)
Base.metadata.create_all(conn)  # create the table and the database
# Insert data by building Python objects
first = Zoo('duck', 5, 0.0)
second = Zoo('bear', 2, 1000.0)
third = Zoo('weasel', 1, 2000.0)
first
# Create a session to talk to the database
from sqlalchemy.orm import sessionmaker
Session = sessionmaker(bind=conn)  # bind the engine
session = Session()
# Write the three objects created above to the database
session.add(first)  # add() - add a single object
session.add_all([second, third])  # add_all() - add a list of objects
session.commit()
rows = conn.execute('''SELECT * FROM zoo''')
for row in rows:
    print(row)
# NoSQL data stores
# dbm (key-value store)
# 'r' means read, 'w' means write, 'c' means both, creating the file if missing
import dbm
db = dbm.open('definition', 'c')
# Create key-value pairs by assigning a value to a key
db['mustard'] = 'yellow'
db['ketchup'] = 'red'
db['pesto'] = 'green'
len(db)
db['mustard']
db.close()
db = dbm.open('definition', 'r')
db['mustard']
# Redis (a data-structure server)
# Strings
# Connect to a Redis server host (default localhost) and port (default 6379)
import redis
conn = redis.Redis()
redis.Redis('localhost')
redis.Redis('localhost', 6379)  # all of these produce the same result
# List every key
conn.keys('*')
conn.set('secret', 'ni!')
conn.set('carats', 24)
conn.set('fever', '101.5')
# Fetch values by key
conn.get('secret')
conn.get('carats')
conn.get('fever')
# setnx() sets a value only when the key does not already exist
conn.setnx('secret', 'the new one')  # already exists, so this returns False
conn.get('secret')  # still returns the old value
conn.getset('secret', 'the new one')  # returns the old value, then stores the new one
conn.get('secret')  # the new value took effect
# getrange() fetches a substring
conn.getrange('secret', -3, -1)  # 0=start, -1=end
# setrange() replaces a substring
conn.setrange('secret', 0, 'THE')  # uses a zero-based offset
conn.get('secret')
# mset() sets several keys at once
conn.mset({'pie': 'cherry', 'cordial': 'sherry'})
# mget() fetches several values at once
conn.mget(['fever', 'carats'])
conn.mget(['pie', 'cordial'])
# delete() removes a key
conn.delete('fever')
conn.get('fever')
# incr()/incrbyfloat() increment; decr() decrements
conn.incr('carats')
conn.incr('carats', 100)
conn.decr('carats', 50)
conn.set('fever', '101.5')
conn.incrbyfloat('fever')
conn.incrbyfloat('fever', 0.05)
conn.incrbyfloat('fever', -2.5)  # there is no decrbyfloat(); increment by a negative
# Lists
# Redis lists hold only strings; the first insert creates the list
# lpush() inserts at the head
conn.lpush('zoo', 'bear')
conn.lpush('zoo', 'alligator', 'duck')
# linsert() inserts before/after a given value
conn.linsert('zoo', 'before', 'bear', 'beaver')
conn.linsert('zoo', 'after', 'bear', 'cassowary')
# lset() stores a value at an offset (the list must already exist)
conn.lset('zoo', 2, 'marmoset')
# rpush() inserts at the tail
conn.rpush('zoo', 'yak')
# lindex() fetches the value at an offset
conn.lindex('zoo', 3)
# lrange() fetches the values in an offset range (0 to -1 returns them all)
conn.lrange('zoo', 0, 2)
# ltrim() trims the list, keeping only the values inside the offset range
conn.ltrim('zoo', 1, 4)
# lrange() fetches the values in an offset range (0 to -1 returns them all)
conn.lrange('zoo', 0, -1)
# Hashes
# Much like Python dicts, but they hold only strings, so they nest just one
# level deep — no deeply nested structures
# hmset() sets the do and re fields of the hash 'song'
conn.hmset('song', {'do': 'a dear', 're': 'about a dear'})
# hset() sets a single field value in the hash
conn.hset('song', 'mi', 'a note to follow re')
# hget() fetches one field's value
conn.hget('song', 'mi')
# hmget() fetches several fields' values
conn.hmget('song', 're', 'do')
# hkeys() fetches all the hash's field keys
conn.hkeys('song')
# hvals() fetches all the field values
conn.hvals('song')
# hlen() returns the number of fields
conn.hlen('song')
# hgetall() fetches every field key and value
conn.hgetall('song')
# hsetnx() sets a field only when its key does not yet exist
conn.hsetnx('song', 'fa', 'a new note')
conn.hgetall('song')
# Sets
conn.sadd('zoo', 'duck', 'goat', 'turkey')  # add one or more values to a set
conn.scard('zoo')  # number of values in the set
conn.smembers('zoo')  # all values in the set
conn.srem('zoo', 'turkey')  # remove a value from the set
conn.sadd('better_zoo', 'tiger', 'wolf', 'duck')  # build a second set
conn.sinter('zoo', 'better_zoo')  # intersection
conn.sinterstore('wolf_zoo','zoo', 'better_zoo')  # store the intersection in another set
conn.smembers('wolf_zoo')
conn.sunion('zoo', 'better_zoo')  # union
conn.sunionstore('f_zoo', 'zoo', 'better_zoo')  # store the union in another set
conn.smembers('f_zoo')
conn.sdiff('zoo', 'better_zoo')  # difference
conn.sdiffstore('zoo_sale', 'zoo', 'better_zoo')  # store the difference in another set
conn.smembers('zoo_sale')
# Sorted sets (zset)
# One of Redis's most versatile types: every value carries an associated
# floating-point score, and items can be accessed by value or by score
# Uses:
# leaderboards
# secondary indexes
# time series, using a timestamp as the score
import time
now = time.time()
now
conn.zadd("logins", {'smeagol': now})
conn.zadd("logins", {'sauron': now + (5*60)})
conn.zadd("logins", {'bilbo': now + (2*60*60)})
conn.zadd("logins", {'treebeard': now + (24 * 60 * 60)})
conn.zrank('logins', 'bilbo')
conn.zscore("logins", 'bilbo')
conn.zrange('logins', 0, -1)
conn.zrange('logins', 0, -1, withscores=True)
# Bits
# First create a bit set for each day
days = ['2013-02-25', '2013-02-26', '2013-02-27']
big_spender = 1089
tire_kicker = 40459
late_joiner = 550212
# Suppose two users visit on the first day
conn.setbit(days[0], big_spender, 1)
conn.setbit(days[0], tire_kicker, 1)
# The next day, one of them returns
conn.setbit(days[1], big_spender, 1)
# The day after, the same one returns again and a new user joins
conn.setbit(days[2], big_spender, 1)
conn.setbit(days[2], late_joiner, 1)
# Visitor counts for each of these days
for day in days:
    print(conn.bitcount(day))
# Did a particular user visit on a particular day?
conn.getbit(days[1], tire_kicker)
# How many users visited every day?
conn.bitop('and', 'everyday', *days)
# bitop() -> BITOP(operation, destkey, key [key ...])
# Applies a bitwise operation over one or more keys and stores the result in destkey
conn.bitcount('everyday')
conn.getbit('everyday', big_spender)
conn.bitop('or', 'alldays', *days)
conn.bitcount('alldays')  # how many users visited on any of the three days
# Caching and expiration
# Every Redis key has a time-to-live; expire() says how long to keep a key
import time
key = 'now see it'
conn.set(key, 'not for long')
conn.expire(key, 30)
conn.ttl(key)
conn.get(key)
time.sleep(10)
conn.get(key)
# expireat() expires a key at a given epoch time
# Expiring keys keeps a cache fresh and limits login sessions
"""Performance visualization class"""
import os
from dataclasses import dataclass, field
from typing import Dict, List
import pandas as pd
import seaborn as sns
import scikit_posthocs as sp
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib import pyplot
import matplotlib.pylab as plt
from tqdm import tqdm
from src.data import Data
@dataclass
class VizMetrics(Data):
    """Generates plots to visualize models performance.

    This object generates performance plots to compare different spatial
    cross-validation approaches.

    Attributes
    ----------
    cv_methods : List
        Names of the spatial cross-validation approaches to compare
    index_col : str
        The metrics csv index column name
    fs_method : str
        The feature selection method used
    ml_method : str
        The machine learning method used
    cv_methods_path : List
        Path to each method's metrics.csv (filled by init_methods_path)
    cv_methods_results : Dict
        Loaded metrics DataFrame per method (filled by load_cv_results)
    """

    cv_methods: List = field(default_factory=list)
    index_col: str = None
    fs_method: str = None
    ml_method: str = None
    cv_methods_path: List = field(default_factory=list)
    cv_methods_results: Dict = field(default_factory=dict)

    def init_methods_path(self):
        """Initialize spatial cv folder paths."""
        # self.root_path is provided by the Data base class — TODO confirm
        self.cv_methods_path = [
            os.path.join(
                self.root_path,
                "results",
                method,
                "evaluations",
                self.fs_method,
                self.ml_method,
                "metrics.csv",
            )
            for method in self.cv_methods
        ]

    def load_cv_results(self):
        """Load metric results from each spatial cv being considered."""
        for data_path, method in zip(self.cv_methods_path, self.cv_methods):
            self.cv_methods_results[method] = pd.read_csv(
                data_path, index_col=self.index_col
            )

    def generate_metric_df(self, metric):
        """Return a DataFrame with one column per cv method for *metric*.

        The fold index is taken from the "Optimistic" method and shared by
        all columns, so all loaded results must cover the same folds.
        """
        index_fold = self.cv_methods_results["Optimistic"].index
        metric_df = pd.DataFrame(columns=self.cv_methods, index=index_fold)
        for cv_method, results in self.cv_methods_results.items():
            metric_df[cv_method] = results[metric]
        # string index keeps matplotlib from treating folds as numbers
        metric_df.index = metric_df.index.astype(str)
        return metric_df

    def generate_metric_plot(self):
        """Generate line plots for RMSE, feature count and train size."""
        sns.set(font_scale=2.2)
        sns.set_style("whitegrid", {"axes.grid": False})
        rmse = self.generate_metric_df(metric="RMSE")
        features = self.generate_metric_df(metric="N_FEATURES")
        instances = self.generate_metric_df(metric="TRAIN_SIZE")
        metrics = {"rmse": rmse, "features": features, "instances": instances}
        # self.cur_dir is provided by the Data base class — TODO confirm
        with PdfPages(os.path.join(self.cur_dir, "metrics.pdf")) as pdf_pages:
            for metric_name, metric in tqdm(metrics.items(), desc="Generating plots"):
                fig, fig_ax = pyplot.subplots(figsize=(20, 5))
                plt.xticks(rotation=45)
                fig_ax.set(ylabel="")
                fig_ax.set_title(metric_name.upper())
                sns.lineplot(
                    data=metric,
                    markers=True,
                    err_style="bars",
                    ax=fig_ax,
                    dashes=True,
                    linewidth=5,
                    # first/last methods highlighted; middle ones share a color
                    palette=[
                        "#16b004",
                        "#6e1703",
                        "#6e1703",
                        "#6e1703",
                        "#6e1703",
                        "#6e1703",
                        "#6e1703",
                        "#f8ff0a",
                    ],
                    # palette="Set1"
                )
                fig_ax.legend(
                    bbox_to_anchor=(0.5, -1.0),
                    loc="lower center",
                    ncol=4,
                    borderaxespad=0.0,
                )
                pdf_pages.savefig(fig, bbox_inches="tight")

    def generate_mean_table(self):
        """Write mean (std) of every metric per spatial cv approach to csv."""
        self.logger_info("Generating mean table.")
        columns = self.cv_methods_results["Optimistic"].columns.values.tolist()
        columns_std = [f"{col}_std" for col in columns]
        # BUG FIX: list.sort() sorts in place and returns None, so the original
        # `columns = columns.sort()` discarded the column list entirely.
        columns = sorted(columns + columns_std)
        mean_df = pd.DataFrame(columns=columns, index=self.cv_methods)
        for method, results in self.cv_methods_results.items():
            describe_df = results.describe()
            for col in results.columns:
                mean_df.loc[
                    method, col
                ] = f"{describe_df.loc['mean', col]} ({describe_df.loc['std', col]})"
        mean_df.T.to_csv(os.path.join(self.cur_dir, "mean_metrics.csv"))

    def tukey_post_hoc_test(self, metric):
        """Print the Tukey post-hoc significance matrix for *metric*."""
        metric_df = pd.DataFrame(columns=self.cv_methods)
        for method, results in self.cv_methods_results.items():
            metric_df[method] = results[metric]
        # long format: one row per (method, value) pair, as posthoc_tukey expects
        metric_df = metric_df.melt(var_name="groups", value_name="values")
        test = sp.posthoc_tukey(metric_df, val_col="values", group_col="groups")
        print(test)

    def run(self):
        """Runs the visualization step."""
        self._make_folders(["comparison"])
        self.set_logger_to_crit("matplotlib")
        self.init_methods_path()
        self.load_cv_results()
        self.generate_mean_table()
        self.generate_metric_plot()
        self.tukey_post_hoc_test("RMSE")
|
<reponame>mjclarke94/aiida-lammps
from hashlib import md5
from io import StringIO
from aiida.orm import Data
from aiida.plugins.entry_point import get_entry_point_names, load_entry_point
class EmpiricalPotential(Data):
    """
    Store the empirical potential data
    """

    # entry-point group that concrete potential implementations register under
    entry_name = "lammps.potentials"
    # repository filename holding the potential section of the LAMMPS input
    pot_lines_fname = "potential_lines.txt"

    @classmethod
    def list_types(cls):
        # names of all registered potential types (e.g. Tersoff, EAM, ...)
        return get_entry_point_names(cls.entry_name)

    @classmethod
    def load_type(cls, entry_name):
        # load the implementation class registered under ``entry_name``
        return load_entry_point(cls.entry_name, entry_name)

    def __init__(self, type, data=None, **kwargs):
        """Empirical potential data, used to create LAMMPS input files.

        Parameters
        ----------
        type: str
            the type of potential (should map to a `lammps.potential` entry point)
        data: dict
            data required to create the potential file and input lines
        """
        super(EmpiricalPotential, self).__init__(**kwargs)
        self.set_data(type, data)

    def set_data(self, potential_type, data=None):
        """Store the potential type (e.g. Tersoff, EAM, LJ, ..).

        Validates ``potential_type`` against the registered entry points,
        renders the potential via the registered implementation, and persists
        both its attributes and its file content on this node.

        Raises
        ------
        ValueError
            If ``potential_type`` is None or not a registered type.
        """
        if potential_type is None:
            raise ValueError("'potential_type' must be provided")
        if potential_type not in self.list_types():
            raise ValueError(
                "'potential_type' must be in: {}".format(self.list_types())
            )
        # The registered implementation is instantiated with the user data and
        # is responsible for rendering file content and input lines.
        pot_class = self.load_type(potential_type)(data or {})
        atom_style = pot_class.atom_style
        default_units = pot_class.default_units
        allowed_element_names = pot_class.allowed_element_names
        external_contents = pot_class.get_external_content() or {}
        pot_lines = pot_class.get_input_potential_lines()
        charge_dict = pot_class.charge_dict
        self.set_attribute("potential_type", potential_type)
        self.set_attribute("atom_style", atom_style)
        self.set_attribute("default_units", default_units)
        self.set_attribute("charge_dict", charge_dict)
        # NOTE(review): "kind_map" is initialised from charge_dict rather than a
        # dedicated kind map from pot_class — looks like a copy/paste; verify.
        self.set_attribute("kind_map", charge_dict)
        self.set_attribute(
            "allowed_element_names",
            sorted(allowed_element_names)
            if allowed_element_names
            else allowed_element_names,
        )
        # store potential section of main input file
        # (the md5 attribute allows change detection without re-reading the file)
        self.set_attribute(
            "md5|input_lines", md5(pot_lines.encode("utf-8")).hexdigest()
        )
        self.put_object_from_filelike(StringIO(pot_lines), self.pot_lines_fname)
        # store external files required by the potential
        external_files = []
        for fname, content in external_contents.items():
            self.set_attribute(
                "md5|{}".format(fname.replace(".", "_")),
                md5(content.encode("utf-8")).hexdigest(),
            )
            self.put_object_from_filelike(StringIO(content), fname)
            external_files.append(fname)
        self.set_attribute("external_files", sorted(external_files))
        # delete any previously stored files that are no longer required
        for fname in self.list_object_names():
            if fname not in external_files + [self.pot_lines_fname]:
                self.delete_object(fname)

    @property
    def potential_type(self):
        """Return the stored potential type."""
        return self.get_attribute("potential_type")

    @property
    def atom_style(self):
        """Return lammps atom style."""
        return self.get_attribute("atom_style")

    @property
    def default_units(self):
        """Return lammps default units."""
        return self.get_attribute("default_units")

    @property
    def allowed_element_names(self):
        """Return available atomic symbols."""
        return self.get_attribute("allowed_element_names")

    def get_input_lines(self, kind_symbols=None):
        """Return the command(s) required to setup the potential.

        The placeholder ``{kind_symbols}`` will be replaced,
        with a list of symbols for each kind in the structure.

        e.g.::

            pair_style eam
            pair_coeff * * {kind_symbols}

        get_input_lines(["S", "Cr"])::

            pair_style eam
            pair_coeff * * S Cr

        """
        content = self.get_object_content(self.pot_lines_fname, "r")
        if kind_symbols:
            content = content.replace("{kind_symbols}", " ".join(kind_symbols))
        return content

    def get_external_files(self):
        """Return the mapping of external filenames to content."""
        fmap = {}
        for fname in self.get_attribute("external_files"):
            fmap[fname] = self.get_object_content(fname, "r")
        return fmap

    @property
    def charge_dict(self):
        # per-element charges used by the potential; None if never stored
        return self.get_attribute("charge_dict", default=None)

    @charge_dict.setter
    def charge_dict(self, value):
        return self.set_attribute("charge_dict", value)

    @property
    def kind_map(self):
        # mapping of structure kinds; None if never stored
        return self.get_attribute("kind_map", default=None)

    @kind_map.setter
    def kind_map(self, value):
        return self.set_attribute("kind_map", value)
|
"""
Class for the calculation of photon-ALPs conversion in galaxy clusters
History:
- 06/01/12: created
- 07/18/13: cleaned up
"""
__version__=0.02
__author__="<NAME> // <EMAIL>"
import numpy as np
from math import ceil
import eblstud.ebl.tau_from_model as Tau
from eblstud.misc.constants import *
import logging
import warnings
from numpy.random import rand, seed
from PhotALPsConv.Bturb import Bgaussian as Bgaus
# --- Conversion without absorption, designed to match values in Clusters -------------------------------------------#
from deltas import *
class PhotALPs_ICM(object):
    r"""
    Class for photon ALP conversion in galaxy clusters and the intra cluster medium (ICM)

    Attributes
    ----------
    Lcoh: coherence length / domain size of turbulent B-field in the cluster in kpc
    B: field strength of transverse component of the cluster B-field, in muG
    r_abell: size of cluster filled with the constant B-field in kpc
    g: Photon ALP coupling in 10^{-11} GeV^-1
    m: ALP mass in neV
    n: thermal electron density in the cluster, in 10^{-3} cm^-3
    Nd: number of domains, Lcoh/r_abell
    Psin: random angle in domain n between transverse B field and propagation direction
    T1: Transfer matrix 1 (3x3xNd)-matrix
    T2: Transfer matrix 2 (3x3xNd)-matrix
    T3: Transfer matrix 3 (3x3xNd)-matrix
    Un: Total transfer matrix in all domains (3x3xNd)-matrix
    Dperp: Mixing matrix parameter Delta_perpedicular in n-th domain
    Dpar: Mixing matrix parameter Delta_{||} in n-th domain
    Dag: Mixing matrix parameter Delta_{a\gamma} in n-th domain
    Da: Mixing matrix parameter Delta_{a} in n-th domain
    alph: Mixing angle
    Dosc: Oscillation Delta
    EW1: Eigenvalue 1 of mixing matrix
    EW2: Eigenvalue 2 of mixing matrix
    EW3: Eigenvalue 3 of mixing matrix

    Notes
    -----
    For Photon - ALP mixing theory see e.g. De Angelis et al. 2011 and also Horns et al. 2012
    http://adsabs.harvard.edu/abs/2011PhRvD..84j5030D
    http://adsabs.harvard.edu/abs/2012PhRvD..86g5024H
    """

    def __init__(self, **kwargs):
        """
        init photon axion conversion in intracluster medium

        Parameters
        ----------
        Lcoh: coherence length / domain size of turbulent B-field in the cluster in kpc, default: 10 kpc
        B: field strength of transverse component of the cluster B-field, in muG, default: 1 muG
        r_abell: size of cluster filled with the constant B-field in kpc. default: 1500 * h
        g: Photon ALP coupling in 10^{-11} GeV^-1, default: 1.
        m: ALP mass in neV, default: 1.
        n: thermal electron density in the cluster, in 10^{-3} cm^-3, default: 1.
        Bn_const: boolean, if True n and B are constant all over the cluster
                  if False than B and n are modeled, see notes
        Bgauss: boolean, if True, B field calculated from gaussian turbulence spectrum,
                if False then domain-like structure is assumed.
        kH: float, upper wave number cutoff, should be at at least > 1. / osc. wavelength (default = 200 / (1 kpc))
        kL: float, lower wave number cutoff, should be of same size as the system (default = 1 / (r_abell kpc))
        q: float, power-law turbulence spectrum (default: q = 11/3 is Kolmogorov type spectrum)
        dkType: string, either linear, log, or random. Determine the spacing of the dk intervals
        dkSteps: int, number of dkSteps. For log spacing, number of steps per decade / number of decades ~ 10
                 should be chosen.
        r_core: Core radius for n and B modeling in kpc, default: 200 kpc
        beta: power of n dependence, default: 2/3
        eta: power with what B follows n, see Notes. Typical values: 0.5 <= eta <= 1. default: 1.

        Returns
        -------
        Nothing.

        Notes
        -----
        If Bn_const = False then electron density is modeled according to Carilli & Taylor (2002) Eq. 2:
            n_e(r) = n * (1 - (r/r_core)**2.)**(-3/2*beta)
        with typical values of r_core = 200 kpc and beta = 2/3.
        The magnetic field is supposed to follow n_e(r) with (Feretti et al. 2012, p. 41, section 7.1)
            B(r) = B * (n_e(r)/n) ** eta
        with typical values 1 muG <= B <= 15muG and 0.5 <= eta <= 1
        """
        # --- Set the defaults
        kwargs.setdefault('g', 1.)
        kwargs.setdefault('m', 1.)
        kwargs.setdefault('B', 1.)
        kwargs.setdefault('n', 1.)
        kwargs.setdefault('Lcoh', 10.)
        kwargs.setdefault('r_abell', 100.)
        kwargs.setdefault('r_core', 200.)
        kwargs.setdefault('E_GeV', 1.)
        kwargs.setdefault('B_gauss', False)
        kwargs.setdefault('kL', 0.)
        kwargs.setdefault('kH', 15.)
        kwargs.setdefault('q', -11. / 3.)
        kwargs.setdefault('dkType', 'log')
        kwargs.setdefault('dkSteps', 0)
        kwargs.setdefault('Bn_const', True)
        kwargs.setdefault('beta', 2. / 3.)
        kwargs.setdefault('eta', 1.)
        # --------------------
        self.update_params(**kwargs)
        super(PhotALPs_ICM, self).__init__()
        return

    def update_params(self, new_Bn=True, **kwargs):
        """Update all parameters with new values and initialize all matrices

        kwargs
        ------
        new_Bn: boolean, if True, recalculate B field and electron density
        """
        self.__dict__.update(kwargs)
        if self.B_gauss:
            if not self.kL:
                # lower cutoff defaults to the inverse system size
                self.kL = 1. / self.r_abell
                kwargs['kL'] = self.kL
            # for a gaussian field the domain size is set by the upper cutoff
            self.Lcoh = 1. / self.kH
            kwargs['Lcoh'] = self.Lcoh
            self.bfield = Bgaus(**kwargs)  # init gaussian turbulent field
        self.Nd = int(self.r_abell / self.Lcoh)  # number of domains, no expansion assumed
        self.r = np.linspace(self.Lcoh, self.r_abell + self.Lcoh, int(self.Nd))
        if new_Bn:
            self.new_B_n()
        # BUG FIX: np.complex was a deprecated alias for the builtin `complex`
        # and was removed in NumPy 1.20 — use the builtin directly.
        self.T1 = np.zeros((3, 3, self.Nd), complex)  # Transfer matrices
        self.T2 = np.zeros((3, 3, self.Nd), complex)
        self.T3 = np.zeros((3, 3, self.Nd), complex)
        self.Un = np.zeros((3, 3, self.Nd), complex)
        return

    def new_B_n(self):
        """
        Recalculate Bfield and density, if Kolmogorov turbulence is set to true, new random values for B and Psi are calculated.
        """
        if self.B_gauss:
            Bt = self.bfield.Bgaus(self.r)    # calculate first transverse component
            self.bfield.new_random_numbers()  # new random numbers
            Bu = self.bfield.Bgaus(self.r)    # calculate second transverse component
            self.B = np.sqrt(Bt ** 2. + Bu ** 2.)  # calculate total transverse component
            self.Psin = np.arctan2(Bt, Bu)  # and angle to x2 (t) axis -- use atan2 to get the quadrants right
        if self.Bn_const:
            self.n = self.n * np.ones(int(self.Nd))  # assuming a constant electron density over all domains
            if not self.B_gauss:
                self.B = self.B * np.ones(int(self.Nd))  # assuming a constant B-field over all domains
        else:
            if np.isscalar(self.n):
                n0 = self.n
            else:
                n0 = self.n[0]
            # check for double beta profile
            try:
                if np.isscalar(self.n2):
                    n2 = self.n2
                else:
                    n2 = self.n2[0]
                try:  # check for two different beta values
                    self.beta2
                    self.n = n0 * (np.ones(int(self.Nd)) + self.r**2./self.r_core**2.)**(-1.5 * self.beta) +\
                        n2 * (np.ones(int(self.Nd)) + self.r**2./self.r_core2**2.)**(-1.5 * self.beta2)
                    self.B = self.B * (self.n / (n0 + n2))**self.eta
                # BUG FIX: a missing attribute raises AttributeError, not
                # NameError — the original `except NameError` could never fire,
                # so a profile with n2 but no beta2 silently ignored n2.
                except AttributeError:
                    self.n = np.sqrt(n0**2. * (np.ones(int(self.Nd)) + self.r**2./self.r_core**2.)**(-3. * self.beta) +\
                        n2**2. * (np.ones(int(self.Nd)) + self.r**2./self.r_core2**2.)**(-3. * self.beta))
                    self.B = self.B * (self.n / np.sqrt(n0**2. + n2**2.))**self.eta
            except AttributeError:
                # single-beta profile (no n2 given)
                self.n = n0 * (np.ones(int(self.Nd)) + self.r**2./self.r_core**2.)**(-1.5 * self.beta)
                self.B = self.B * (self.n / n0)**self.eta
        return

    def new_random_psi(self):
        """
        Calculate new random psi values

        Returns
        -------
        Nothing; self.Psin is replaced by Nd fresh angles uniform in [0, 2pi).
        """
        self.Psin = 2. * np.pi * rand(1, int(self.Nd))[0]  # angle between photon propagation on B-field in i-th domain
        return

    def __setDeltas(self):
        """
        Set Deltas of mixing matrix for each domain (all np.arrays, Nd-dim).
        """
        self.Dperp = Delta_pl_kpc(self.n, self.E) + 2.*Delta_QED_kpc(self.B, self.E)
        self.Dpar = Delta_pl_kpc(self.n, self.E) + 3.5*Delta_QED_kpc(self.B, self.E)
        self.Dag = Delta_ag_kpc(self.g, self.B)
        self.Da = Delta_a_kpc(self.m, self.E) * np.ones(int(self.Nd))  # np.ones, so that it is np.array, self.Nd-dim
        self.alph = 0.5 * np.arctan2(2. * self.Dag, (self.Dpar - self.Da))
        self.Dosc = np.sqrt((self.Dpar - self.Da)**2. + 4.*self.Dag**2.)
        return

    def __setEW(self):
        """
        Set Eigenvalues of the mixing matrix (all Nd-dimensional).
        """
        self.__setDeltas()
        self.EW1 = self.Dperp
        self.EW2 = 0.5 * (self.Dpar + self.Da - self.Dosc)
        self.EW3 = 0.5 * (self.Dpar + self.Da + self.Dosc)
        return

    def __setT1n(self):
        """
        Set (symmetric) projector T1 in all domains from the angles Psin.
        """
        c = np.cos(self.Psin)
        s = np.sin(self.Psin)
        self.T1[0, 0, :] = c*c
        self.T1[0, 1, :] = -1. * c*s
        self.T1[1, 0, :] = self.T1[0, 1]
        self.T1[1, 1, :] = s*s
        return

    def __setT2n(self):
        """
        Set (symmetric) projector T2 in all domains from Psin and the mixing angle.
        """
        c = np.cos(self.Psin)
        s = np.sin(self.Psin)
        ca = np.cos(self.alph)
        sa = np.sin(self.alph)
        self.T2[0, 0, :] = s*s*sa*sa
        self.T2[0, 1, :] = s*c*sa*sa
        self.T2[0, 2, :] = -1. * s * sa * ca
        self.T2[1, 0, :] = self.T2[0, 1]
        self.T2[1, 1, :] = c*c*sa*sa
        self.T2[1, 2, :] = -1. * c * ca * sa
        self.T2[2, 0, :] = self.T2[0, 2]
        self.T2[2, 1, :] = self.T2[1, 2]
        self.T2[2, 2, :] = ca * ca
        return

    def __setT3n(self):
        """
        Set (symmetric) projector T3 in all domains from Psin and the mixing angle.
        """
        c = np.cos(self.Psin)
        s = np.sin(self.Psin)
        ca = np.cos(self.alph)
        sa = np.sin(self.alph)
        self.T3[0, 0, :] = s*s*ca*ca
        self.T3[0, 1, :] = s*c*ca*ca
        self.T3[0, 2, :] = s*sa*ca
        self.T3[1, 0, :] = self.T3[0, 1]
        self.T3[1, 1, :] = c*c*ca*ca
        self.T3[1, 2, :] = c * sa * ca
        self.T3[2, 0, :] = self.T3[0, 2]
        self.T3[2, 1, :] = self.T3[1, 2]
        self.T3[2, 2, :] = sa*sa
        return

    def __setUn(self):
        """
        Set Transfer Matrix Un in n-th domain: sum of the three projectors,
        each weighted by the phase of its eigenvalue over one domain length.
        """
        self.Un = np.exp(1.j * self.EW1 * self.Lcoh) * self.T1 + \
            np.exp(1.j * self.EW2 * self.Lcoh) * self.T2 + \
            np.exp(1.j * self.EW3 * self.Lcoh) * self.T3
        return

    def SetDomainN(self):
        """
        Set Transfer matrix in all domains and multiply it

        Returns
        -------
        Transfer matrix as 3x3 complex numpy array

        Raises
        ------
        TypeError
            if the number of domains does not match the number of Psin angles.
        """
        if self.Nd != self.Psin.shape[0]:
            raise TypeError("Number of domains (={0:n}) is not equal to number of angles (={1:n})!".format(self.Nd, self.Psin.shape[0]))
        self.__setEW()
        self.__setT1n()
        self.__setT2n()
        self.__setT3n()
        self.__setUn()  # self.Un contains now all 3x3 matrices in all self.Nd domains
        # do the matrix multiplication, left-multiplying each successive domain
        for i in range(self.Un.shape[2]):
            if not i:
                U = self.Un[:, :, i]
            else:
                U = np.dot(U, self.Un[:, :, i])  # first matrix on the left
        return U
|
<reponame>ak3ra/torchgeo<filename>tests/test_train.py
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
import re
import subprocess
import sys
from pathlib import Path
import pytest
pytestmark = pytest.mark.slow
def test_required_args() -> None:
    """Launching train.py with no configuration at all must abort."""
    completed = subprocess.run([sys.executable, "train.py"], capture_output=True)
    assert completed.returncode != 0
    assert b"MissingMandatoryValue" in completed.stderr
def test_output_file(tmp_path: Path) -> None:
    """A plain file passed as program.output_dir must raise NotADirectoryError."""
    not_a_dir = tmp_path / "output"
    not_a_dir.touch()
    overrides = [
        "experiment.name=test",
        f"program.output_dir={not_a_dir}",
        "experiment.task=test",
    ]
    completed = subprocess.run(
        [sys.executable, "train.py", *overrides], capture_output=True
    )
    assert completed.returncode != 0
    assert b"NotADirectoryError" in completed.stderr
def test_experiment_dir_not_empty(tmp_path: Path) -> None:
    """A non-empty experiment directory must abort with FileExistsError."""
    output_dir = tmp_path / "output"
    experiment_dir = output_dir / "test"
    experiment_dir.mkdir(parents=True)
    (experiment_dir / "foo").touch()
    overrides = [
        "experiment.name=test",
        f"program.output_dir={output_dir}",
        "experiment.task=test",
    ]
    completed = subprocess.run(
        [sys.executable, "train.py", *overrides], capture_output=True
    )
    assert completed.returncode != 0
    assert b"FileExistsError" in completed.stderr
def test_overwrite_experiment_dir(tmp_path: Path) -> None:
    """With program.overwrite=True a dirty experiment dir is reused, with a warning."""
    output_dir = tmp_path / "output"
    experiment_dir = output_dir / "test"
    experiment_dir.mkdir(parents=True)
    (experiment_dir / "foo").touch()
    overrides = [
        "experiment.name=test",
        f"program.output_dir={output_dir}",
        "program.data_dir=" + os.path.join("tests", "data", "cyclone"),
        f"program.log_dir={tmp_path / 'logs'}",
        "experiment.task=cyclone",
        "program.overwrite=True",
        "trainer.fast_dev_run=1",
    ]
    completed = subprocess.run(
        [sys.executable, "train.py", *overrides],
        capture_output=True,
        check=True,
    )
    assert re.search(
        b"The experiment directory, .*, already exists, we might overwrite data in it!",
        completed.stdout,
    )
@pytest.mark.parametrize("task", ["test", "foo"])
def test_invalid_task(task: str, tmp_path: Path) -> None:
    """Unknown experiment.task values must be rejected with a ValueError."""
    overrides = [
        "experiment.name=test",
        f"program.output_dir={tmp_path / 'output'}",
        f"experiment.task={task}",
    ]
    completed = subprocess.run(
        [sys.executable, "train.py", *overrides], capture_output=True
    )
    assert completed.returncode != 0
    assert b"ValueError" in completed.stderr
def test_missing_config_file(tmp_path: Path) -> None:
    """Pointing config_file at a non-existent path must fail loudly."""
    overrides = [
        "experiment.name=test",
        f"program.output_dir={tmp_path / 'output'}",
        "experiment.task=test",
        f"config_file={tmp_path / 'config.yaml'}",
    ]
    completed = subprocess.run(
        [sys.executable, "train.py", *overrides], capture_output=True
    )
    assert completed.returncode != 0
    assert b"FileNotFoundError" in completed.stderr
def test_config_file(tmp_path: Path) -> None:
    """A complete configuration supplied via config_file alone must run."""
    output_dir = tmp_path / "output"
    data_dir = os.path.join("tests", "data", "cyclone")
    log_dir = tmp_path / "logs"
    config_file = tmp_path / "config.yaml"
    config_file.write_text(
        f"""
program:
  output_dir: {output_dir}
  data_dir: {data_dir}
  log_dir: {log_dir}
experiment:
  name: test
  task: cyclone
trainer:
  fast_dev_run: true
"""
    )
    subprocess.run(
        [sys.executable, "train.py", f"config_file={config_file}"], check=True
    )
@pytest.mark.parametrize("task", ["cyclone", "sen12ms", "landcoverai"])
def test_tasks(task: str, tmp_path: Path) -> None:
    """Every supported task must survive a fast_dev_run smoke test."""
    overrides = [
        "experiment.name=test",
        f"program.output_dir={tmp_path / 'output'}",
        "program.data_dir=" + os.path.join("tests", "data", task),
        f"program.log_dir={tmp_path / 'logs'}",
        "trainer.fast_dev_run=1",
        f"experiment.task={task}",
        "program.overwrite=True",
    ]
    subprocess.run([sys.executable, "train.py", *overrides], check=True)
|
<gh_stars>0
#!/usr/bin/env python3
import json
import os, os.path
import subprocess
import tempfile
from FFprobe_output import *
def blackdetect(src, vfilter='blackdetect=d=1/15:picture_black_ratio_th=0.85:pixel_black_th=0.1', encoding='UTF-8'):
    """Run ffprobe's blackdetect filter over *src* and return the parsed flat
    output, or None if ffprobe produced nothing."""
    entries = 'tags=lavfi.black_start,lavfi.black_end,lavfi.black_duration'
    command = [
        'ffprobe', '-v', 'error', '-of', 'flat',
        '-show_entries', entries,
        '-f', 'lavfi', 'movie={},{}[out0]'.format(src, vfilter),
    ]
    print(command)
    proc = subprocess.Popen(command, stdout=subprocess.PIPE)
    out, _ = proc.communicate()
    if not out:
        return None
    return parse_flat(out.decode(encoding).splitlines())
class FFprobe_query:
    """Parsed ``ffprobe`` metadata for a single media file.

    Runs ``ffprobe`` once on construction and keeps its flat-format output
    (parsed by ``parse_flat``) in ``self.flat``.
    """

    def __init__(self, src, encoding='UTF-8'):
        self.src = src
        proc = subprocess.Popen(['ffprobe', '-v', 'error', src, '-of', 'flat', '-show_streams', '-show_format'], stdout=subprocess.PIPE)
        out, _ = proc.communicate()
        self.flat = parse_flat(out.decode(encoding).splitlines())

    def save(self, filename):
        """Dump the parsed metadata to *filename* as JSON."""
        with open(filename, 'w') as fo:
            json.dump(self.flat, fo)

    def get_bit_rates(self, **kwargs):
        """Yield (stream_index, bit_rate) for every stream that reports one."""
        for i, s in self.flat['streams']['stream'].items():
            if 'bit_rate' in s:
                yield i, s['bit_rate']

    @property
    def bit_rate(self):
        """Sum of all per-stream bit rates."""
        return sum(b for i, b in self.get_bit_rates())

    @property
    def video(self):
        """Iterator of (index, stream_dict) over video streams."""
        return self.get_stream('video')

    @property
    def audio(self):
        """Iterator of (index, stream_dict) over audio streams."""
        return self.get_stream('audio')

    def get_stream(self, codec_type=[]):
        """Yield (index, stream_dict) for streams whose codec_type matches."""
        # the mutable default is only iterated, never mutated, so it is safe here
        if isinstance(codec_type, str):
            codec_type = [codec_type]
        for k, d in self.flat['streams']['stream'].items():
            if d['codec_type'] in codec_type:
                yield k, d

    @property
    def dimensions(self):
        """(width, height) of the single video track, or None otherwise."""
        try:
            [(vtn, vt)] = self.video  # one and only one video track
            return vt['width'], vt['height']
        # BUG FIX: was a bare `except:` that also swallowed SystemExit /
        # KeyboardInterrupt; only unpack/missing-key failures mean "no
        # single video track".
        except (ValueError, KeyError):
            return None

    def blackdetect(self, **kwargs):
        """Return the blackdetect report, caching it in memory and on disk."""
        try:
            return self._blackdetect
        except AttributeError:  # BUG FIX: narrowed from bare `except:`
            pass
        assert os.path.exists(self.src)
        try:
            blackdetect_file = self.blackdetect_file
        except AttributeError:  # BUG FIX: narrowed from bare `except:`
            self.blackdetect_file = blackdetect_file = self.src + '.blackdetect'
        if os.path.exists(blackdetect_file):
            with open(blackdetect_file) as fi:
                content = json.load(fi)
        else:
            # cache miss: run the module-level blackdetect() and persist it
            content = blackdetect(self.src, **kwargs)
            with open(blackdetect_file, 'w') as fo:
                if content:
                    json.dump(content, fo)
        self._blackdetect = content
        return content
#
def parse_blackdetect(blackdetect_iterable, mode='times'):
    """Pair up blackdetect frame tags into non-black scene boundaries.

    Parameters
    ----------
    blackdetect_iterable: mapping of frame-number (str) -> {'tags': {...}}
        as produced by the flat-parsed blackdetect report.
    mode: 'times' yields (Decimal black_end, Decimal black_start) pairs,
        'frames' yields (int end_frame, int start_frame) pairs.

    Yields each non-black segment as (segment_start, segment_end); segments
    with a negative boundary are reported and skipped in 'times' mode.
    """
    from decimal import Decimal  # explicit: not guaranteed by the star import

    frame_tags = sorted((int(k), v['tags']) for k, v in blackdetect_iterable.items())
    if 'lavfi_black_end' in frame_tags[0][1]:
        # stream begins inside a black segment
        ends = frame_tags[0::2]
        starts = frame_tags[1::2]
    elif 'lavfi_black_start' in frame_tags[0][1]:
        # stream begins with footage: prepend a synthetic end at t=0
        starts = frame_tags[0::2]
        ends = [(0, {'lavfi_black_end': 0})] + frame_tags[1::2]
    else:
        raise ValueError(frame_tags[0])
    if 'lavfi_black_start' in frame_tags[-1][1]:
        # normal: footage runs until a final black segment starts
        pass
    elif 'lavfi_black_end' in frame_tags[-1][1]:
        # footage runs to EOF: append a synthetic far-future start
        starts.append((1E6, {'lavfi_black_start': 1E6}))  # TODO
    else:
        # BUG FIX: report the element that actually failed the check
        # (the original raised with frame_tags[0] while testing frame_tags[-1])
        raise ValueError(frame_tags[-1])
    if len(ends) != len(starts):
        print("Different sizes:", ends, starts)
    pairs = zip(ends, starts)
    if 'times' == mode:
        for endp, startp in pairs:
            end, start = Decimal(endp[1]['lavfi_black_end']), Decimal(startp[1]['lavfi_black_start'])
            if 0 <= end and 0 <= start:
                yield end, start
            else:
                print(endp, startp, "ignored")
    elif 'frames' == mode:
        for endp, startp in pairs:
            end, start = int(endp[0]), int(startp[0])
            yield end, start
    else:
        raise ValueError(mode)
def make_scenes(filename):
    """Return (start, end) boundary pairs for the non-black scenes of *filename*."""
    probe = FFprobe_query(filename)
    detected_frames = probe.blackdetect()['frames']['frame']
    return parse_blackdetect(detected_frames)
def get_thumbs(filename, from_t=None, to_t=None, output='{filepart}-%08d.PNG'):
    """Dump keyframe thumbnails of *filename*, optionally clipped to [from_t, to_t].

    *output* may reference locals such as {filepart}; the %08d part is expanded
    by ffmpeg into the frame counter. A bare pattern (no path separator) is
    placed in a fresh temporary directory. Returns the output pattern used.
    """
    dirname, basename = os.path.split(filename)
    filepart, ext = os.path.splitext(basename)
    try:
        output = output.format(**locals())
    # BUG FIX: narrowed from a bare `except:`; only format-expansion failures
    # (unknown field / positional placeholder) mean the pattern is unusable.
    except (KeyError, IndexError):
        print("{output} is likely not what you want".format(**locals()))
    if os.path.sep not in output:
        output = os.path.join(tempfile.mkdtemp(), output)
    if from_t and to_t:
        command = ['ffmpeg', '-skip_frame', 'nokey', '-ss', str(from_t), '-i', filename, '-to', str(to_t), '-f', 'image2', output]
    else:
        command = ['ffmpeg', '-skip_frame', 'nokey', '-i', filename, '-f', 'image2', output]
    print(command)
    # BUG FIX: subprocess.check_call() returns 0 (falsy) on success and raises
    # CalledProcessError on failure, so the original `if check_call(...)` printed
    # 'failed' on success and never printed it on failure. Use call() and test
    # the exit status instead.
    if subprocess.call(command) == 0:
        print('succeeded')
    else:
        print('failed')
    return output
#
if __name__ == '__main__':
    # CLI entry point: probe the file given as the first argument, print its
    # blackdetect report, then dump keyframe thumbnails for every detected
    # non-black scene.
    import pprint
    import sys
    args = sys.argv[1:]
    filename = args[0]
    ffp=FFprobe_query(filename)
    print("Blackdetect:")
    pprint.pprint(ffp.blackdetect())
    # cut at the black gaps and extract thumbnails per scene
    pairs = make_scenes(filename)
    for from_t, to_t in pairs:
        get_thumbs(filename, from_t=from_t, to_t=to_t)
|
<filename>galaxy2galaxy/layers/flows.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tensorflow_probability as tfp
import tensorflow_hub as hub
import numpy as np
import collections
import functools
from tensorflow_probability.python.bijectors import affine_scalar
from tensorflow_probability.python.bijectors import bijector as bijector_lib
from tensorflow_probability.python.internal import tensorshape_util
from tensorflow_probability.python.bijectors import bijector
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import tensorshape_util
from galaxy2galaxy.layers.tfp_utils import RationalQuadraticSpline
tfd = tfp.distributions
tfb = tfp.bijectors
__all__ = ['masked_autoregressive_conditional_template',
'ConditionalNeuralSpline',
'conditional_neural_spline_template',
'autoregressive_conditional_neural_spline_template',
'_clip_by_value_preserve_grad']
def masked_autoregressive_conditional_template(hidden_layers,
                                               conditional_tensor,
                                               shift_only=False,
                                               activation=tf.nn.relu,
                                               log_scale_min_clip=-5.,
                                               log_scale_max_clip=3.,
                                               log_scale_clip_gradient=False,
                                               name=None,
                                               *args,  # pylint: disable=keyword-arg-before-vararg
                                               **kwargs):
    """Build a conditional MADE shift/log-scale template.

    Variant of TFP's ``masked_autoregressive_default_template`` in which
    ``conditional_tensor`` is concatenated in front of the input before the
    masked dense stack; the columns belonging to the condition are stripped
    from the network output again so the returned shift (and log_scale) have
    the input's shape.

    Args:
        hidden_layers: iterable of hidden-layer sizes for the masked dense stack.
        conditional_tensor: tensor to condition on; assumed to be rank-2
            [batch, cond_depth] with a statically known last dimension —
            TODO confirm against callers.
        shift_only: if True only a shift is produced (log_scale is None).
        activation: activation for the hidden masked_dense layers.
        log_scale_min_clip / log_scale_max_clip: clipping bounds for log_scale.
        log_scale_clip_gradient: if True use tf.clip_by_value (zero gradient
            outside the bounds); otherwise use the straight-through clip.
        name: optional template name.

    Returns:
        A ``tf.make_template`` callable mapping x -> (shift, log_scale).
    """
    name = name or "masked_autoregressive_default_template"
    with tf.name_scope(name, values=[log_scale_min_clip, log_scale_max_clip]):

        def _fn(x):
            """MADE parameterized via `masked_autoregressive_default_template`."""
            # TODO(b/67594795): Better support of dynamic shape.
            input_shape = (
                np.int32(x.shape.as_list())
                if x.shape.is_fully_defined() else tf.shape(x))
            if len(x.shape) == 1:
                x = x[tf.newaxis, ...]
            # prepend the condition so the masked stack can attend to it
            x = tf.concat([conditional_tensor, x], axis=1)
            cond_depth = conditional_tensor.shape.with_rank_at_least(1)[-1].value
            input_depth = x.shape.with_rank_at_least(1)[-1].value
            if input_depth is None:
                raise NotImplementedError(
                    "Rightmost dimension must be known prior to graph execution.")
            for i, units in enumerate(hidden_layers):
                # exclusive mask on the first layer enforces autoregressivity
                x = tfb.masked_dense(
                    inputs=x,
                    units=units,
                    num_blocks=input_depth,
                    exclusive=True if i == 0 else False,
                    activation=activation,
                    *args,  # pylint: disable=keyword-arg-before-vararg
                    **kwargs)
            # output layer: one (or two, for shift+log_scale) values per input dim
            x = tfb.masked_dense(
                inputs=x,
                units=(1 if shift_only else 2) * input_depth,
                num_blocks=input_depth,
                activation=None,
                *args,  # pylint: disable=keyword-arg-before-vararg
                **kwargs)
            if shift_only:
                # drop the columns that correspond to the condition
                x = x[:, cond_depth:]
                x = tf.reshape(x, shape=input_shape)
                return x, None
            else:
                # two outputs per dim, hence 2*cond_depth condition columns to drop
                x = x[:, 2*cond_depth:]
                x = tf.reshape(x, shape=tf.concat([input_shape, [2]], axis=0))
                shift, log_scale = tf.unstack(x, num=2, axis=-1)
                which_clip = (
                    tf.clip_by_value
                    if log_scale_clip_gradient else _clip_by_value_preserve_grad)
                log_scale = which_clip(log_scale, log_scale_min_clip, log_scale_max_clip)
                return shift, log_scale

        return tf.make_template(name, _fn)
def _clip_by_value_preserve_grad(x, clip_value_min, clip_value_max, name=None):
    """Clip ``x`` to [clip_value_min, clip_value_max] in the forward pass while
    letting the gradient pass through unaltered (straight-through clip)."""
    scope_values = [x, clip_value_min, clip_value_max]
    with tf.name_scope(name, "clip_by_value_preserve_grad", scope_values):
        clipped = tf.clip_by_value(x, clip_value_min, clip_value_max)
        # forward value equals `clipped`; gradient is that of the identity on x
        return x + tf.stop_gradient(clipped - x)
class ConditionalNeuralSpline(tf.Module):
    # Callable tf.Module producing a RationalQuadraticSpline bijector whose
    # bin widths/heights and knot slopes are predicted by a small dense
    # network, optionally conditioned on an extra tensor. Layers are built
    # lazily on the first call so variables are reused across calls.

    def __init__(self, conditional_tensor=None, nbins=32, hidden_layers=[256],
                 activation='relu', name=None):
        """Store hyper-parameters; no variables are created until first call.

        Parameters
        ----------
        conditional_tensor : tensor or None
            If given, concatenated to the input of the parameter network.
        nbins : int
            Number of spline bins.
        hidden_layers : list of int
            Hidden-layer widths of the parameter network.
            NOTE(review): mutable default is shared across instances but only
            iterated, never mutated.
        activation : str or callable
            Activation for the hidden layers.
        name : str or None
            tf.Module name.
        """
        self._nbins = nbins
        self._built = False        # layers created lazily in __call__
        self._bin_widths = None
        self._bin_heights = None
        self._knot_slopes = None
        self._layers= []
        self._activation = activation
        self._hidden_layers = hidden_layers
        self._conditional_tensor = conditional_tensor
        super(ConditionalNeuralSpline, self).__init__(name)

    def __call__(self, x, nunits):
        """Return a RationalQuadraticSpline over `nunits` dimensions given `x`."""
        if not self._built:
            def _bin_positions(x):
                # softmax over bins scaled so every bin has width >= 1e-2
                x = tf.reshape(x, [-1, nunits, self._nbins])
                return tf.math.softmax(x, axis=-1) * (2 - self._nbins * 1e-2) + 1e-2

            def _slopes(x):
                # softplus keeps interior knot slopes strictly positive (>= 1e-2)
                x = tf.reshape(x, [-1, nunits, self._nbins - 1])
                return tf.math.softplus(x) + 1e-2

            for i, units in enumerate(self._hidden_layers):
                self._layers.append(tf.keras.layers.Dense(units, activation=self._activation,
                                                          name='layer_%d'%i))
            self._bin_widths = tf.keras.layers.Dense(
                nunits * self._nbins, activation=_bin_positions, name='w')
            self._bin_heights = tf.keras.layers.Dense(
                nunits * self._nbins, activation=_bin_positions, name='h')
            self._knot_slopes = tf.keras.layers.Dense(
                nunits * (self._nbins - 1), activation=_slopes, name='s')
            self._built = True

        # If provided, we append the condition as an input to the network
        if self._conditional_tensor is not None:
            net = tf.concat([x, self._conditional_tensor], axis=-1)
        else:
            net = x
        # Apply hidden layers
        for layer in self._layers:
            net = layer(net)
        return RationalQuadraticSpline(
            bin_widths=self._bin_widths(net),
            bin_heights=self._bin_heights(net),
            knot_slopes=self._knot_slopes(net))
def conditional_neural_spline_template(conditional_tensor=None,
                                       nbins=32,
                                       hidden_layers=None,
                                       activation=tf.nn.relu,
                                       name=None):
    """Returns a template producing `RationalQuadraticSpline` parameters.

    Args:
        conditional_tensor: optional tensor appended to the network input.
        nbins: number of spline bins.
        hidden_layers: sizes of the hidden dense layers; defaults to [256].
        activation: activation for the hidden layers.
        name: name scope / template name.
    """
    # Fix: replace the mutable default argument `hidden_layers=[256]` (a list
    # shared across all calls) with a None sentinel; behavior is unchanged.
    if hidden_layers is None:
        hidden_layers = [256]
    with tf.name_scope(name):
        def _fn(x, nunits):
            # If provided, we append the condition as an input to the network
            if conditional_tensor is not None:
                net = tf.concat([x, conditional_tensor], axis=-1)
            else:
                net = x
            for i, units in enumerate(hidden_layers):
                net = tf.layers.dense(net, units, activation=activation, name='layer_%d' % i)

            def _bin_positions(x):
                # Softmax over bins, rescaled so widths sum to 2 with a 1e-2 floor.
                x = tf.reshape(x, [-1, nunits, nbins])
                return tf.math.softmax(x, axis=-1) * (2 - nbins * 1e-2) + 1e-2

            def _slopes(x):
                # Softplus keeps knot slopes strictly positive (>= 1e-2).
                x = tf.reshape(x, [-1, nunits, nbins - 1])
                return tf.math.softplus(x) + 1e-2

            bin_widths = tf.layers.dense(net, nunits * nbins, activation=_bin_positions, name='w')
            bin_heights = tf.layers.dense(net, nunits * nbins, activation=_bin_positions, name='h')
            knot_slopes = tf.layers.dense(net, nunits * (nbins - 1), activation=_slopes, name='s')
            return RationalQuadraticSpline(
                bin_widths=bin_widths,
                bin_heights=bin_heights,
                knot_slopes=knot_slopes)
        return tf.make_template(name, _fn)
def autoregressive_conditional_neural_spline_template(conditional_tensor,
                                                      hidden_layers=None,
                                                      nbins=32,
                                                      activation=tf.nn.relu,
                                                      name=None,
                                                      *args,  # pylint: disable=keyword-arg-before-vararg
                                                      **kwargs):
    """Builds an autoregressive (MADE-style) neural-spline parameter template.

    The conditioning tensor is prepended to the input so the masked layers can
    attend to it; the condition's blocks are sliced off the outputs before the
    spline is constructed.

    Args:
        conditional_tensor: tensor prepended to the input along axis 1.
        hidden_layers: sizes of the masked hidden layers; defaults to [256].
        nbins: number of spline bins.
        activation: activation for the hidden masked layers.
        name: name scope / template name.
        *args, **kwargs: forwarded to `tfb.masked_dense`.
    """
    # Fix: replace the mutable default argument `hidden_layers=[256]` (a list
    # shared across all calls) with a None sentinel; behavior is unchanged.
    if hidden_layers is None:
        hidden_layers = [256]
    name = name or "autoregressive_conditional_neural_spline_template"
    with tf.name_scope(name):
        def _fn(x):
            """MADE parameterized via `masked_autoregressive_default_template`."""
            # NOTE: a dead `input_shape` computation (never read) was removed here.
            if len(x.shape) == 1:
                x = x[tf.newaxis, ...]
            # Prepend the condition so the autoregressive masks can see it.
            x = tf.concat([conditional_tensor, x], axis=1)
            cond_depth = conditional_tensor.shape.with_rank_at_least(1)[-1].value
            input_depth = x.shape.with_rank_at_least(1)[-1].value
            if input_depth is None:
                raise NotImplementedError(
                    "Rightmost dimension must be known prior to graph execution.")

            def _bin_positions(x):
                # Softmax over bins, rescaled so widths sum to 2 with a 1e-2 floor.
                x = tf.reshape(x, [-1, input_depth, nbins])
                return tf.math.softmax(x, axis=-1) * (2 - nbins * 1e-2) + 1e-2

            def _slopes(x):
                # Softplus keeps knot slopes strictly positive (>= 1e-2).
                x = tf.reshape(x, [-1, input_depth, nbins - 1])
                return tf.math.softplus(x) + 1e-2

            for i, units in enumerate(hidden_layers):
                x = tfb.masked_dense(
                    inputs=x,
                    units=units,
                    num_blocks=input_depth,
                    # The first layer is exclusive so output i cannot depend on input i.
                    exclusive=True if i == 0 else False,
                    activation=activation,
                    *args,  # pylint: disable=keyword-arg-before-vararg
                    **kwargs)
            bin_widths = tfb.masked_dense(
                inputs=x,
                units=input_depth * nbins,
                num_blocks=input_depth,
                activation=_bin_positions,
                *args,  # pylint: disable=keyword-arg-before-vararg
                **kwargs)
            bin_heights = tfb.masked_dense(
                inputs=x,
                units=input_depth * nbins,
                num_blocks=input_depth,
                activation=_bin_positions,
                *args,  # pylint: disable=keyword-arg-before-vararg
                **kwargs)
            knot_slopes = tfb.masked_dense(
                inputs=x,
                units=input_depth * (nbins - 1),
                num_blocks=input_depth,
                activation=_slopes,
                *args,  # pylint: disable=keyword-arg-before-vararg
                **kwargs)
            # Drop the blocks that correspond to the prepended condition.
            return RationalQuadraticSpline(
                bin_widths=bin_widths[:, cond_depth:],
                bin_heights=bin_heights[:, cond_depth:],
                knot_slopes=knot_slopes[:, cond_depth:])
        return tf.make_template(name, _fn)
|
<filename>demo_gl.py
import caffe
import argparse
import os
import cv2
import numpy as np
import time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import pygame
from pygame.locals import *
from OpenGL.GL import *
from OpenGL.GLU import *
import utils
# Command-line configuration for the VNect demo.
parser = argparse.ArgumentParser()
parser.add_argument('--device', default='gpu')
parser.add_argument('--model_dir', default='/media/tim_ho/HDD1/Projects/VNect-tensorflow/models')
# Fix: numeric options need type=int — without it, any CLI override arrives as
# a string and breaks arithmetic such as `input_size // pool_scale` below.
parser.add_argument('--input_size', type=int, default=368)
parser.add_argument('--num_of_joints', type=int, default=21)
parser.add_argument('--pool_scale', type=int, default=8)
# Fix: with a bare default=False, any CLI value (even "False") was a truthy
# string; parse the text explicitly so `--plot_3d False` actually disables it.
parser.add_argument('--plot_2d', type=lambda s: str(s).lower() in ('1', 'true', 'yes'), default=False)
parser.add_argument('--plot_3d', type=lambda s: str(s).lower() in ('1', 'true', 'yes'), default=False)
args = parser.parse_args()

# BGR colors used when drawing joints/limbs (6 groups cycled over 21 joints).
# NOTE(review): color semantics assumed from usage in utils — confirm there.
joint_color_code = [[139, 53, 255],
                    [0, 56, 255],
                    [43, 140, 237],
                    [37, 168, 36],
                    [147, 147, 0],
                    [70, 17, 145]]

# Limb parents of each joint
limb_parents = [1, 15, 1, 2, 3, 1, 5, 6, 14, 8, 9, 14, 11, 12, 14, 14, 1, 4, 7, 10, 13]

# input scales
scales = [1.0, 0.7]
def demo():
    """Run the VNect webcam demo loop.

    Each iteration: grab a frame, forward it through the Caffe model at every
    scale in `scales`, average the resulting heatmaps, extract 2D/3D joint
    positions, and render them with OpenCV (2D) and OpenGL (3D).
    Runs forever; relies on the pygame/OpenGL window created in `__main__`.
    """
    # Joint buffers reused across frames (updated in place by utils helpers).
    joints_2d = np.zeros(shape=(args.num_of_joints, 2), dtype=np.int32)
    joints_3d = np.zeros(shape=(args.num_of_joints, 3), dtype=np.float32)
    if args.plot_3d:
        plt.ion()
        fig = plt.figure()
        ax = fig.add_subplot(121, projection='3d')
        ax2 = fig.add_subplot(122)
        plt.show()
    # Select the Caffe execution backend.
    if args.device == 'cpu':
        caffe.set_mode_cpu()
    elif args.device == 'gpu':
        caffe.set_mode_gpu()
        caffe.set_device(0)
    else:
        raise ValueError('No such device')
    model_prototxt_path = os.path.join(args.model_dir, 'vnect_net.prototxt')
    model_weight_path = os.path.join(args.model_dir, 'vnect_model.caffemodel')
    # Load model
    model = caffe.Net(model_prototxt_path,
                      model_weight_path,
                      caffe.TEST)
    # Show network structure and shape
    for layer_name in model.params.keys():
        print(layer_name, model.params[layer_name][0].data.shape)
    print('')
    for i in model.blobs.keys():
        print(i, model.blobs[i].data.shape)
    cam = cv2.VideoCapture(0)
    is_tracking = False
    # for img_name in os.listdir('test_imgs'):
    while True:
        # if not is_tracking:
        img_path = 'test_imgs/{}'.format('dance.jpg')
        t1 = time.time()
        input_batch = []
        cam_img = utils.read_square_image('', cam, args.input_size, 'WEBCAM')
        # cam_img = utils.read_square_image(img_path, '', args.input_size, 'IMAGE')
        # cv2.imshow('', cam_img)
        # cv2.waitKey(0)
        orig_size_input = cam_img.astype(np.float32)
        # One resized copy of the frame per scale -> one batch entry each.
        for scale in scales:
            resized_img = utils.resize_pad_img(orig_size_input, scale, args.input_size)
            input_batch.append(resized_img)
        input_batch = np.asarray(input_batch, dtype=np.float32)
        # HWC -> CHW for Caffe's NCHW layout.
        input_batch = np.transpose(input_batch, (0, 3, 1, 2))
        # Scale pixels to [0, 1] then shift by -0.4.
        input_batch /= 255.0
        input_batch -= 0.4
        model.blobs['data'].data[...] = input_batch
        # Forward
        model.forward()
        # Get output data
        x_hm = model.blobs['x_heatmap'].data
        y_hm = model.blobs['y_heatmap'].data
        z_hm = model.blobs['z_heatmap'].data
        hm = model.blobs['heatmap'].data
        # Transpose coordinates back to NHWC.
        x_hm = x_hm.transpose([0, 2, 3, 1])
        y_hm = y_hm.transpose([0, 2, 3, 1])
        z_hm = z_hm.transpose([0, 2, 3, 1])
        hm = hm.transpose([0, 2, 3, 1])
        # Average scale outputs: upsample each scale's maps back to the base
        # heatmap resolution, center-crop, and accumulate.
        hm_size = args.input_size // args.pool_scale
        hm_avg = np.zeros(shape=(hm_size, hm_size, args.num_of_joints))
        x_hm_avg = np.zeros(shape=(hm_size, hm_size, args.num_of_joints))
        y_hm_avg = np.zeros(shape=(hm_size, hm_size, args.num_of_joints))
        z_hm_avg = np.zeros(shape=(hm_size, hm_size, args.num_of_joints))
        for i in range(len(scales)):
            rescale = 1.0 / scales[i]
            scaled_hm = cv2.resize(hm[i, :, :, :], (0, 0), fx=rescale, fy=rescale, interpolation=cv2.INTER_LINEAR)
            scaled_x_hm = cv2.resize(x_hm[i, :, :, :], (0, 0), fx=rescale, fy=rescale, interpolation=cv2.INTER_LINEAR)
            scaled_y_hm = cv2.resize(y_hm[i, :, :, :], (0, 0), fx=rescale, fy=rescale, interpolation=cv2.INTER_LINEAR)
            scaled_z_hm = cv2.resize(z_hm[i, :, :, :], (0, 0), fx=rescale, fy=rescale, interpolation=cv2.INTER_LINEAR)
            mid = [scaled_hm.shape[0] // 2, scaled_hm.shape[1] // 2]
            hm_avg += scaled_hm[mid[0] - hm_size // 2: mid[0] + hm_size // 2,
                                mid[1] - hm_size // 2: mid[1] + hm_size // 2, :]
            x_hm_avg += scaled_x_hm[mid[0] - hm_size // 2: mid[0] + hm_size // 2,
                                    mid[1] - hm_size // 2: mid[1] + hm_size // 2, :]
            y_hm_avg += scaled_y_hm[mid[0] - hm_size // 2: mid[0] + hm_size // 2,
                                    mid[1] - hm_size // 2: mid[1] + hm_size // 2, :]
            z_hm_avg += scaled_z_hm[mid[0] - hm_size // 2: mid[0] + hm_size // 2,
                                    mid[1] - hm_size // 2: mid[1] + hm_size // 2, :]
        hm_avg /= len(scales)
        x_hm_avg /= len(scales)
        y_hm_avg /= len(scales)
        z_hm_avg /= len(scales)
        t2 = time.time()
        # Get 2d joints
        joints_2d = utils.extract_2d_joint_from_heatmap(hm_avg, args.input_size, joints_2d)
        # Get 3d joints
        joints_3d = utils.extract_3d_joints_from_heatmap(joints_2d, x_hm_avg, y_hm_avg, z_hm_avg, args.input_size,
                                                         joints_3d)
        print('Post FPS', 1/(time.time()-t2))
        # Plot 2d location heatmap
        joint_map = np.zeros(shape=(args.input_size, args.input_size, 3))
        for joint_num in range(joints_2d.shape[0]):
            # joints_2d rows are (row, col), so index 1 is x and index 0 is y.
            cv2.circle(joint_map, center=(joints_2d[joint_num][1], joints_2d[joint_num][0]), radius=3,
                       color=(255, 0, 0), thickness=-1)
        # Plot 2d limbs
        limb_img = utils.draw_limbs_2d(cam_img, joints_2d, limb_parents)
        # Plot 3d limbs
        if args.plot_3d:
            ax.clear()
            ax.view_init(azim=0, elev=90)
            ax.set_xlim(-700, 700)
            ax.set_ylim(-800, 800)
            ax.set_zlim(-700, 700)
            ax.set_xlabel('x')
            ax.set_ylabel('y')
            ax.set_zlabel('z')
            utils.draw_limbs_3d(joints_3d, limb_parents, ax)
        # draw heatmap
        # hm_img = utils.draw_predicted_heatmap(hm_avg*200, args.input_size)
        # cv2.imshow('hm', hm_img.astype(np.uint8))
        # cv2.waitKey(0)
        # Render the 3D skeleton in the OpenGL window created in __main__.
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        utils.draw_limb_3d_gl(joints_3d, limb_parents)
        pygame.display.flip()
        pygame.time.wait(1)
        concat_img = np.concatenate((limb_img, joint_map), axis=1)
        # ax2.imshow(concat_img[..., ::-1].astype(np.uint8))
        cv2.imshow('2d', concat_img.astype(np.uint8))
        cv2.waitKey(1)
        # ax2.imshow(concat_img.astype(np.uint8))
        # plt.pause(0.0001)
        # plt.show(block=False)
        print('Forward FPS', 1 / (time.time() - t1))
if __name__ == '__main__':
    # Create the OpenGL-backed pygame window that demo() renders into.
    pygame.init()
    display = (800, 600)
    pygame.display.set_mode(display, DOUBLEBUF | OPENGL)
    # Perspective projection: 70 deg FOV, window aspect ratio, near/far planes.
    gluPerspective(70, (display[0] / display[1]), 0.1, 200.0)
    view_range = 800
    # glOrtho(-view_range, view_range,
    #         -view_range, view_range,
    #         -view_range, view_range)
    # Push the camera back so the skeleton is in front of the near plane.
    glTranslatef(0.0, 0.0, 100)
    demo()
|
<reponame>WshgL/pyuavcan
# Copyright (c) 2021 UAVCAN Consortium
# This software is distributed under the terms of the MIT License.
# Author: <NAME> <<EMAIL>>
from __future__ import annotations
import sys
import random
from typing import Optional, Union
from pathlib import Path
import logging
import pyuavcan
from ._node import Node, NodeInfo
from . import register
from ._transport_factory import make_transport
from ._registry_factory import make_registry
class MissingTransportConfigurationError(register.MissingRegisterError):
    """Raised by :func:`make_node` when no transport instance was given and the
    available registers do not encode a valid transport configuration."""
    pass
class SimpleNode(Node):
    """Minimal concrete :class:`Node`: stores the presentation layer, node-info
    response, and register instance, exposing them as read-only properties."""

    def __init__(
        self,
        presentation: pyuavcan.presentation.Presentation,
        info: NodeInfo,
        registry: register.Registry,
    ) -> None:
        # Attributes are assigned before super().__init__() is invoked.
        # NOTE(review): the base Node initializer presumably reads the
        # properties below, which is why the order matters — confirm
        # against Node.__init__ before reordering.
        self._presentation = presentation
        self._info = info
        self._registry = registry
        super().__init__()

    @property
    def presentation(self) -> pyuavcan.presentation.Presentation:
        return self._presentation

    @property
    def info(self) -> NodeInfo:
        return self._info

    @property
    def registry(self) -> register.Registry:
        return self._registry
def make_node(
    info: NodeInfo,
    registry: Union[None, register.Registry, str, Path] = None,
    *,
    transport: Optional[pyuavcan.transport.Transport] = None,
    reconfigurable_transport: bool = False,
) -> Node:
    """
    Initialize a new node by parsing the configuration encoded in the UAVCAN registers.

    Aside from the registers that encode the transport configuration (which are documented in :func:`make_transport`),
    the following registers are considered (if they don't exist, they are automatically created).
    They are split into groups by application-layer function they configure.

    .. list-table:: General
       :widths: 1 1 9
       :header-rows: 1

       * - Register name
         - Register type
         - Register semantics

       * - ``uavcan.node.unique_id``
         - ``unstructured``
         - The unique-ID of the local node.
           This register is only used if the caller did not set ``unique_id`` in ``info``.
           If not defined, a new random value is generated and stored as immutable
           (therefore, if no persistent register file is used, a new unique-ID is generated at every launch, which
           may be undesirable in some applications, particularly those that require PnP node-ID allocation).

       * - ``uavcan.node.description``
         - ``string``
         - As defined by the UAVCAN Specification, this standard register is intended to store a human-friendly
           description of the node.
           Empty by default and never accessed by the library, since it is intended mostly for remote use.

    .. list-table:: :mod:`pyuavcan.application.diagnostic`
       :widths: 1 1 9
       :header-rows: 1

       * - Register name
         - Register type
         - Register semantics

       * - ``uavcan.diagnostic.severity``
         - ``natural8[1]``
         - If the value is a valid severity level as defined in ``uavcan.diagnostic.Severity``,
           the node will publish its application log records of matching severity level to the standard subject
           ``uavcan.diagnostic.Record`` using :class:`pyuavcan.application.diagnostic.DiagnosticPublisher`.
           This is done by installing a root handler in :mod:`logging`.
           Disabled by default.

       * - ``uavcan.diagnostic.timestamp``
         - ``bit[1]``
         - If true, the published log messages will initialize the synchronized ``timestamp`` field
           from the log record timestamp provided by the :mod:`logging` library.
           This is only safe if the UAVCAN network is known to be synchronized on the same time system as the
           wall clock of the local computer.
           Otherwise, the timestamp is left at zero (which means "unknown" per Specification).
           Disabled by default.

    Additional application-layer functions and their respective registers may be added later.

    :param info:
        Response object to ``uavcan.node.GetInfo``. The following fields will be populated automatically:

        - ``protocol_version`` from :data:`pyuavcan.UAVCAN_SPECIFICATION_VERSION`.
        - If not set by the caller: ``unique_id`` is read from register as specified above.
        - If not set by the caller: ``name`` is constructed from hex-encoded unique-ID like:
          ``anonymous.b0228a49c25ff23a3c39915f81294622``.

    :param registry:
        If this is an instance of :class:`pyuavcan.application.register.Registry`, it is used as-is
        (ownership is taken).
        Otherwise, this is a register file path (or None) that is passed over to
        :func:`pyuavcan.application.make_registry`
        to construct the registry instance for this node.
        This instance will be available under :class:`pyuavcan.application.Node.registry`.

    :param transport:
        If not provided (default), a new transport instance will be initialized based on the available registers using
        :func:`make_transport`.
        If provided, the node will be constructed with this transport instance and take its ownership.
        In the latter case, existence of transport-related registers will NOT be ensured.

    :param reconfigurable_transport:
        If True, the node will be constructed with :mod:`pyuavcan.transport.redundant`,
        which permits runtime reconfiguration.
        If the transport argument is given and it is not a redundant transport, it will be wrapped into one.
        Also see :func:`make_transport`.

    :raises:
        - :class:`pyuavcan.application.register.MissingRegisterError` if a register is expected but cannot be found,
          or if no transport is configured.
        - :class:`pyuavcan.application.register.ValueConversionError` if a register is found but its value
          cannot be converted to the correct type, or if the value of an environment variable for a register
          is invalid or incompatible with the register's type
          (e.g., an environment variable set to ``Hello world`` cannot initialize a register of type ``real64[3]``).
        - Also see :func:`make_transport`.

    .. note::

        Consider extending this factory with a capability to automatically run the node-ID allocation client
        :class:`pyuavcan.application.plug_and_play.Allocatee` if ``uavcan.node.id`` is not set.

        Until this is implemented, to run the allocator one needs to construct the transport manually using
        :func:`make_transport` and :func:`make_registry`,
        then run the allocation client, then invoke this factory again with the above-obtained Registry instance,
        having done ``registry["uavcan.node.id"] = allocated_node_id`` beforehand.

        While tedious, this is not that much of a problem because the PnP protocol is mostly intended for
        hardware nodes rather than software ones.
        A typical software node would normally receive its node-ID at startup (see also Yakut Orchestrator).
    """
    from pyuavcan.transport.redundant import RedundantTransport

    # Accept either a ready-made Registry (ownership taken) or a register-file
    # path / None, which is forwarded to make_registry().
    if not isinstance(registry, register.Registry):
        registry = make_registry(registry)
    assert isinstance(registry, register.Registry)

    def init_transport() -> pyuavcan.transport.Transport:
        # Builds the transport from registers unless one was supplied by the caller.
        assert isinstance(registry, register.Registry)
        if transport is None:
            out = make_transport(registry, reconfigurable=reconfigurable_transport)
            if out is not None:
                return out
            raise MissingTransportConfigurationError(
                "Available registers do not encode a valid transport configuration"
            )
        # Wrap a caller-supplied non-redundant transport if reconfigurability was requested.
        if not isinstance(transport, RedundantTransport) and reconfigurable_transport:
            out = RedundantTransport()
            out.attach_inferior(transport)
            return out
        return transport

    # Populate certain fields of the node info structure automatically and create standard registers.
    info.protocol_version.major, info.protocol_version.minor = pyuavcan.UAVCAN_SPECIFICATION_VERSION
    if info.unique_id.sum() == 0:
        # Caller did not set a unique-ID: read it from the register, creating
        # the register with a fresh random immutable value if it is missing.
        info.unique_id = bytes(  # type: ignore
            registry.setdefault(
                "uavcan.node.unique_id",
                register.Value(unstructured=register.Unstructured(random.getrandbits(128).to_bytes(16, sys.byteorder))),
            )
        )
    registry.setdefault("uavcan.node.description", register.Value(string=register.String()))
    if len(info.name) == 0:
        info.name = "anonymous." + info.unique_id.tobytes().hex()  # type: ignore

    # Construct the node and its application-layer functions.
    node = SimpleNode(pyuavcan.presentation.Presentation(init_transport()), info, registry)
    _make_diagnostic_publisher(node)
    return node
def _make_diagnostic_publisher(node: Node) -> None:
    """Install a DiagnosticPublisher as a root logging handler if the severity
    register maps to a known Python logging level; otherwise do nothing."""
    from .diagnostic import DiagnosticSubscriber, DiagnosticPublisher

    severity = int(
        node.registry.setdefault("uavcan.diagnostic.severity", register.Value(natural8=register.Natural8([0xFF])))
    )
    with_timestamp = bool(
        node.registry.setdefault("uavcan.diagnostic.timestamp", register.Value(bit=register.Bit([False])))
    )
    try:
        log_level = DiagnosticSubscriber.SEVERITY_UAVCAN_TO_PYTHON[severity]
    except LookupError:
        # Severity value does not map to a logging level: feature disabled.
        return
    publisher = DiagnosticPublisher(node, level=log_level)
    publisher.timestamping_enabled = with_timestamp
    logging.root.addHandler(publisher)
    # Remove the handler when the node is closed.
    node.add_lifetime_hooks(None, lambda: logging.root.removeHandler(publisher))
_logger = logging.getLogger(__name__)
|
<reponame>crisbodnar/cwn
import torch
import pytest
import itertools
from data.dummy_complexes import (get_house_complex, get_square_complex, get_pyramid_complex,
get_square_dot_complex, get_kite_complex)
from data.complex import ComplexBatch
from data.dummy_complexes import get_testing_complex_list
from data.data_loading import DataLoader, load_dataset
def validate_double_house(batch):
    """Assert that `batch` carries the exact cochain data expected for a batch
    of two house complexes; the second copy's indices are shifted past the
    first. The reference tensors below are hard-coded ground truth."""
    # --- Expected node (0-cell) data ---
    expected_node_upper = torch.tensor([[0, 1, 0, 3, 1, 2, 2, 3, 2, 4, 3, 4, 5, 6, 5, 8, 6, 7, 7, 8, 7, 9, 8, 9],
                                        [1, 0, 3, 0, 2, 1, 3, 2, 4, 2, 4, 3, 6, 5, 8, 5, 7, 6, 8, 7, 9, 7, 9, 8]], dtype=torch.long)
    expected_node_shared_coboundaries = torch.tensor([0, 0, 3, 3, 1, 1, 2, 2, 5, 5, 4, 4, 6, 6, 9, 9, 7, 7, 8, 8, 11, 11, 10, 10], dtype=torch.long)
    expected_node_x = torch.tensor([[1], [2], [3], [4], [5], [1], [2], [3], [4], [5]], dtype=torch.float)
    expected_node_y = torch.tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=torch.long)
    expected_node_batch = torch.tensor([0, 0, 0, 0, 0, 1, 1, 1, 1, 1], dtype=torch.long)
    # --- Expected edge (1-cell) data ---
    expected_edge_upper = torch.tensor([[2, 4, 2, 5, 4, 5, 8, 10, 8, 11, 10, 11],
                                        [4, 2, 5, 2, 5, 4, 10, 8, 11, 8, 11, 10]], dtype=torch.long)
    expected_edge_shared_coboundaries = torch.tensor([0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1], dtype=torch.long)
    expected_edge_lower = torch.tensor([[0, 1, 0, 3, 1, 2, 1, 5, 2, 3, 2, 4, 2, 5, 3, 4, 4, 5, 6, 7, 6, 9, 7, 8, 7, 11, 8, 9, 8, 10, 8, 11, 9, 10, 10, 11],
                                        [1, 0, 3, 0, 2, 1, 5, 1, 3, 2, 4, 2, 5, 2, 4, 3, 5, 4, 7, 6, 9, 6, 8, 7, 11, 7, 9, 8, 10, 8, 11, 8, 10, 9, 11, 10]],
                                       dtype=torch.long)
    expected_edge_shared_boundaries = torch.tensor([1, 1, 0, 0, 2, 2, 2, 2, 3, 3, 3, 3, 2, 2, 3, 3, 4, 4, 6, 6, 5, 5, 7, 7, 7, 7, 8, 8, 8, 8, 7, 7, 8, 8, 9, 9],
                                                   dtype=torch.long)
    expected_edge_x = torch.tensor([[1], [2], [3], [4], [5], [6], [1], [2], [3], [4], [5], [6]], dtype=torch.float)
    expected_edge_y = torch.tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=torch.long)
    expected_edge_batch = torch.tensor([0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1], dtype=torch.long)
    # --- Expected two-cell (2-cell) data: one 2-cell per complex ---
    expected_two_cell_x = torch.tensor([[1], [1]], dtype=torch.float)
    expected_two_cell_y = torch.tensor([2, 2], dtype=torch.long)
    expected_two_cell_batch = torch.tensor([0, 1], dtype=torch.long)
    # --- Node-level checks ---
    assert torch.equal(expected_node_upper, batch.nodes.upper_index)
    assert torch.equal(expected_node_shared_coboundaries, batch.nodes.shared_coboundaries)
    assert batch.nodes.lower_index is None
    assert batch.nodes.shared_boundaries is None
    assert torch.equal(expected_node_x, batch.nodes.x)
    assert torch.equal(expected_node_y, batch.nodes.y)
    assert torch.equal(expected_node_batch, batch.nodes.batch)
    # --- Edge-level checks ---
    assert torch.equal(expected_edge_upper, batch.edges.upper_index)
    assert torch.equal(expected_edge_shared_coboundaries, batch.edges.shared_coboundaries)
    assert torch.equal(expected_edge_lower, batch.edges.lower_index)
    assert torch.equal(expected_edge_shared_boundaries, batch.edges.shared_boundaries)
    assert torch.equal(expected_edge_x, batch.edges.x)
    assert torch.equal(expected_edge_y, batch.edges.y)
    assert torch.equal(expected_edge_batch, batch.edges.batch)
    # --- Two-cell-level checks: no adjacency at the top dimension ---
    assert batch.two_cells.upper_index is None
    assert batch.two_cells.lower_index is None
    assert batch.two_cells.shared_coboundaries is None
    assert batch.two_cells.shared_boundaries is None
    assert torch.equal(expected_two_cell_x, batch.two_cells.x)
    assert torch.equal(expected_two_cell_y, batch.two_cells.y)
    assert torch.equal(expected_two_cell_batch, batch.two_cells.batch)
def validate_square_dot_and_square(batch):
    """Assert that `batch` carries the exact cochain data expected for a batch
    of a square-with-dot complex followed by a square complex (no 2-cells,
    hence no two-cell checks). The reference tensors are hard-coded."""
    # --- Expected node (0-cell) data; the isolated dot is node 4 (no edges) ---
    expected_node_upper = torch.tensor([[0, 1, 0, 3, 1, 2, 2, 3, 5, 6, 5, 8, 6, 7, 7, 8],
                                        [1, 0, 3, 0, 2, 1, 3, 2, 6, 5, 8, 5, 7, 6, 8, 7]], dtype=torch.long)
    expected_node_shared_coboundaries = torch.tensor([0, 0, 3, 3, 1, 1, 2, 2, 4, 4, 7, 7, 5, 5, 6, 6], dtype=torch.long)
    expected_node_x = torch.tensor([[1], [2], [3], [4], [5], [1], [2], [3], [4]], dtype=torch.float)
    expected_node_y = torch.tensor([0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=torch.long)
    expected_node_batch = torch.tensor([0, 0, 0, 0, 0, 1, 1, 1, 1], dtype=torch.long)
    # --- Expected edge (1-cell) data ---
    expected_edge_lower = torch.tensor([[0, 1, 0, 3, 1, 2, 2, 3, 4, 5, 4, 7, 5, 6, 6, 7],
                                        [1, 0, 3, 0, 2, 1, 3, 2, 5, 4, 7, 4, 6, 5, 7, 6]], dtype=torch.long)
    expected_edge_shared_boundaries = torch.tensor([1, 1, 0, 0, 2, 2, 3, 3, 6, 6, 5, 5, 7, 7, 8, 8],
                                                   dtype=torch.long)
    expected_edge_x = torch.tensor([[1], [2], [3], [4], [1], [2], [3], [4]], dtype=torch.float)
    expected_edge_y = torch.tensor([1, 1, 1, 1, 1, 1, 1, 1,], dtype=torch.long)
    expected_edge_batch = torch.tensor([0, 0, 0, 0, 1, 1, 1, 1], dtype=torch.long)
    # --- Node-level checks ---
    assert torch.equal(expected_node_upper, batch.nodes.upper_index)
    assert torch.equal(expected_node_shared_coboundaries, batch.nodes.shared_coboundaries)
    assert batch.nodes.lower_index is None
    assert batch.nodes.shared_boundaries is None
    assert torch.equal(expected_node_x, batch.nodes.x)
    assert torch.equal(expected_node_y, batch.nodes.y)
    assert torch.equal(expected_node_batch, batch.nodes.batch)
    # --- Edge-level checks: no upper adjacency since there are no 2-cells ---
    assert batch.edges.upper_index is None
    assert batch.edges.shared_coboundaries is None
    assert torch.equal(expected_edge_lower, batch.edges.lower_index)
    assert torch.equal(expected_edge_shared_boundaries, batch.edges.shared_boundaries)
    assert torch.equal(expected_edge_x, batch.edges.x)
    assert torch.equal(expected_edge_y, batch.edges.y)
    assert torch.equal(expected_edge_batch, batch.edges.batch)
def validate_kite_and_house(batch):
    """Assert that `batch` carries the exact cochain data expected for a batch
    of a kite complex followed by a house complex. The house tensors are
    shifted by the kite's cell counts (5 nodes, 6 edges, 2 two-cells)."""
    # --- Expected node (0-cell) data ---
    kite_node_upper = torch.tensor([[0, 1, 0, 2, 1, 2, 1, 3, 2, 3, 3, 4],
                                    [1, 0, 2, 0, 2, 1, 3, 1, 3, 2, 4, 3]], dtype=torch.long)
    shifted_house_node_upper = 5 + torch.tensor([[0, 1, 0, 3, 1, 2, 2, 3, 2, 4, 3, 4],
                                                 [1, 0, 3, 0, 2, 1, 3, 2, 4, 2, 4, 3]], dtype=torch.long)
    expected_node_upper = torch.cat([kite_node_upper, shifted_house_node_upper], 1)
    kite_node_shared_coboundaries = torch.tensor([0, 0, 2, 2, 1, 1, 3, 3, 4, 4, 5, 5], dtype=torch.long)
    shifted_house_node_shared_coboundaries = 6 + torch.tensor([0, 0, 3, 3, 1, 1, 2, 2, 5, 5, 4, 4], dtype=torch.long)
    expected_node_shared_coboundaries = torch.cat([kite_node_shared_coboundaries, shifted_house_node_shared_coboundaries], 0)
    expected_node_x = torch.tensor([[1], [2], [3], [4], [5], [1], [2], [3], [4], [5]], dtype=torch.float)
    expected_node_y = torch.tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=torch.long)
    expected_node_batch = torch.tensor([0, 0, 0, 0, 0, 1, 1, 1, 1, 1], dtype=torch.long)
    # --- Expected edge (1-cell) data ---
    kite_edge_upper = torch.tensor([[0, 1, 0, 2, 1, 2, 1, 3, 1, 4, 3, 4],
                                    [1, 0, 2, 0, 2, 1, 3, 1, 4, 1, 4, 3]], dtype=torch.long)
    shifted_house_edge_upper = 6 + torch.tensor([[2, 4, 2, 5, 4, 5],
                                                 [4, 2, 5, 2, 5, 4]], dtype=torch.long)
    expected_edge_upper = torch.cat([kite_edge_upper, shifted_house_edge_upper], 1)
    kite_edge_shared_coboundaries = torch.tensor([0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1], dtype=torch.long)
    shifted_house_edge_shared_coboundaries = 2 + torch.tensor([0, 0, 0, 0, 0, 0], dtype=torch.long)
    expected_edge_shared_coboundaries = torch.cat([kite_edge_shared_coboundaries, shifted_house_edge_shared_coboundaries], 0)
    kite_edge_lower = torch.tensor([[0, 1, 0, 3, 1, 3, 0, 2, 1, 2, 2, 4, 1, 4, 3, 4, 3, 5, 4, 5],
                                    [1, 0, 3, 0, 3, 1, 2, 0, 2, 1, 4, 2, 4, 1, 4, 3, 5, 3, 5, 4]], dtype=torch.long)
    shifted_house_lower = 6 + torch.tensor([[0, 1, 0, 3, 1, 2, 1, 5, 2, 3, 2, 4, 2, 5, 3, 4, 4, 5],
                                            [1, 0, 3, 0, 2, 1, 5, 1, 3, 2, 4, 2, 5, 2, 4, 3, 5, 4]], dtype=torch.long)
    expected_edge_lower = torch.cat([kite_edge_lower, shifted_house_lower], 1)
    kite_edge_shared_boundaries = torch.tensor([1, 1, 1, 1, 1, 1, 0, 0, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3], dtype=torch.long)
    shifted_house_edge_shared_boundaries = 5 + torch.tensor([1, 1, 0, 0, 2, 2, 2, 2, 3, 3, 3, 3, 2, 2, 3, 3, 4, 4], dtype=torch.long)
    expected_edge_shared_boundaries = torch.cat([kite_edge_shared_boundaries, shifted_house_edge_shared_boundaries], 0)
    expected_edge_x = torch.tensor([[1], [2], [3], [4], [5], [6], [1], [2], [3], [4], [5], [6]], dtype=torch.float)
    expected_edge_y = torch.tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=torch.long)
    expected_edge_batch = torch.tensor([0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1], dtype=torch.long)
    # --- Expected two-cell (2-cell) data: kite's two 2-cells are lower-adjacent ---
    expected_two_cell_lower = torch.tensor([[0, 1],
                                            [1, 0]], dtype=torch.long)
    expected_two_cell_shared_boundaries = torch.tensor([1, 1], dtype=torch.long)
    expected_two_cell_x = torch.tensor([[1], [2], [1]], dtype=torch.float)
    expected_two_cell_y = torch.tensor([2, 2, 2], dtype=torch.long)
    expected_two_cell_batch = torch.tensor([0, 0, 1], dtype=torch.long)
    # --- Node-level checks ---
    assert torch.equal(expected_node_upper, batch.nodes.upper_index)
    assert torch.equal(expected_node_shared_coboundaries, batch.nodes.shared_coboundaries)
    assert batch.nodes.lower_index is None
    assert batch.nodes.shared_boundaries is None
    assert torch.equal(expected_node_x, batch.nodes.x)
    assert torch.equal(expected_node_y, batch.nodes.y)
    assert torch.equal(expected_node_batch, batch.nodes.batch)
    # --- Edge-level checks ---
    assert torch.equal(expected_edge_upper, batch.edges.upper_index)
    assert torch.equal(expected_edge_shared_coboundaries, batch.edges.shared_coboundaries)
    assert torch.equal(expected_edge_lower, batch.edges.lower_index)
    assert torch.equal(expected_edge_shared_boundaries, batch.edges.shared_boundaries)
    assert torch.equal(expected_edge_x, batch.edges.x)
    assert torch.equal(expected_edge_y, batch.edges.y)
    assert torch.equal(expected_edge_batch, batch.edges.batch)
    # --- Two-cell-level checks ---
    assert batch.two_cells.upper_index is None
    assert batch.two_cells.shared_coboundaries is None
    assert torch.equal(expected_two_cell_lower, batch.two_cells.lower_index)
    assert torch.equal(expected_two_cell_shared_boundaries, batch.two_cells.shared_boundaries)
    assert torch.equal(expected_two_cell_x, batch.two_cells.x)
    assert torch.equal(expected_two_cell_y, batch.two_cells.y)
    assert torch.equal(expected_two_cell_batch, batch.two_cells.batch)
def validate_house_and_square(batch):
    """Assert that `batch` carries the exact cochain data expected for a batch
    of a house complex followed by a square complex; only the house
    contributes a 2-cell. The reference tensors are hard-coded."""
    # --- Expected node (0-cell) data ---
    expected_node_upper = torch.tensor([[0, 1, 0, 3, 1, 2, 2, 3, 2, 4, 3, 4, 5, 6, 5, 8, 6, 7, 7, 8],
                                        [1, 0, 3, 0, 2, 1, 3, 2, 4, 2, 4, 3, 6, 5, 8, 5, 7, 6, 8, 7]], dtype=torch.long)
    expected_node_shared_coboundaries = torch.tensor([0, 0, 3, 3, 1, 1, 2, 2, 5, 5, 4, 4, 6, 6, 9, 9, 7, 7, 8, 8], dtype=torch.long)
    expected_node_x = torch.tensor([[1], [2], [3], [4], [5], [1], [2], [3], [4]], dtype=torch.float)
    expected_node_y = torch.tensor([0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=torch.long)
    expected_node_batch = torch.tensor([0, 0, 0, 0, 0, 1, 1, 1, 1], dtype=torch.long)
    # --- Expected edge (1-cell) data; only house edges have upper adjacency ---
    expected_edge_upper = torch.tensor([[2, 4, 2, 5, 4, 5],
                                        [4, 2, 5, 2, 5, 4]], dtype=torch.long)
    expected_edge_shared_coboundaries = torch.tensor([0, 0, 0, 0, 0, 0], dtype=torch.long)
    expected_edge_lower = torch.tensor([[0, 1, 0, 3, 1, 2, 1, 5, 2, 3, 2, 4, 2, 5, 3, 4, 4, 5, 6, 7, 6, 9, 7, 8, 8, 9],
                                        [1, 0, 3, 0, 2, 1, 5, 1, 3, 2, 4, 2, 5, 2, 4, 3, 5, 4, 7, 6, 9, 6, 8, 7, 9, 8]],
                                       dtype=torch.long)
    expected_edge_shared_boundaries = torch.tensor([1, 1, 0, 0, 2, 2, 2, 2, 3, 3, 3, 3, 2, 2, 3, 3, 4, 4, 6, 6, 5, 5, 7, 7, 8, 8],
                                                   dtype=torch.long)
    expected_edge_x = torch.tensor([[1], [2], [3], [4], [5], [6], [1], [2], [3], [4]], dtype=torch.float)
    expected_edge_y = torch.tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1,], dtype=torch.long)
    expected_edge_batch = torch.tensor([0, 0, 0, 0, 0, 0, 1, 1, 1, 1], dtype=torch.long)
    # --- Expected two-cell (2-cell) data: single 2-cell from the house ---
    expected_two_cell_x = torch.tensor([[1]], dtype=torch.float)
    expected_two_cell_y = torch.tensor([2], dtype=torch.long)
    expected_two_cell_batch = torch.tensor([0], dtype=torch.long)
    # --- Node-level checks ---
    assert torch.equal(expected_node_upper, batch.nodes.upper_index)
    assert torch.equal(expected_node_shared_coboundaries, batch.nodes.shared_coboundaries)
    assert batch.nodes.lower_index is None
    assert batch.nodes.shared_boundaries is None
    assert torch.equal(expected_node_x, batch.nodes.x)
    assert torch.equal(expected_node_y, batch.nodes.y)
    assert torch.equal(expected_node_batch, batch.nodes.batch)
    # --- Edge-level checks ---
    assert torch.equal(expected_edge_upper, batch.edges.upper_index)
    assert torch.equal(expected_edge_shared_coboundaries, batch.edges.shared_coboundaries)
    assert torch.equal(expected_edge_lower, batch.edges.lower_index)
    assert torch.equal(expected_edge_shared_boundaries, batch.edges.shared_boundaries)
    assert torch.equal(expected_edge_x, batch.edges.x)
    assert torch.equal(expected_edge_y, batch.edges.y)
    assert torch.equal(expected_edge_batch, batch.edges.batch)
    # --- Two-cell-level checks: a single 2-cell has no adjacencies ---
    assert batch.two_cells.upper_index is None
    assert batch.two_cells.lower_index is None
    assert batch.two_cells.shared_coboundaries is None
    assert batch.two_cells.shared_boundaries is None
    assert torch.equal(expected_two_cell_x, batch.two_cells.x)
    assert torch.equal(expected_two_cell_y, batch.two_cells.y)
    assert torch.equal(expected_two_cell_batch, batch.two_cells.batch)
def validate_house_square_house(batch):
    """Check every cochain of the batch built from [house, square, house].

    The expected tensors are hand-computed for the three concatenated
    complexes: 14 nodes, 16 edges and 2 two-cells in total. Node/edge indices
    of the second and third complex are shifted by the sizes of the previous
    ones, and `*_batch` vectors record which complex each cell came from.
    """
    # Node level: only upper adjacency (through shared edges) exists.
    expected_node_upper = torch.tensor([[0, 1, 0, 3, 1, 2, 2, 3, 2, 4, 3, 4, 5, 6, 5, 8, 6, 7, 7, 8, 9, 10, 9, 12, 10, 11, 11, 12, 11, 13, 12, 13],
                                        [1, 0, 3, 0, 2, 1, 3, 2, 4, 2, 4, 3, 6, 5, 8, 5, 7, 6, 8, 7, 10, 9, 12, 9, 11, 10, 12, 11, 13, 11, 13, 12]],
                                       dtype=torch.long)
    expected_node_shared_coboundaries = torch.tensor([0, 0, 3, 3, 1, 1, 2, 2, 5, 5, 4, 4, 6, 6, 9, 9, 7, 7, 8, 8, 10, 10, 13, 13, 11, 11, 12, 12, 15, 15, 14, 14],
                                                     dtype=torch.long)
    expected_node_x = torch.tensor([[1], [2], [3], [4], [5], [1], [2], [3], [4], [1], [2], [3], [4], [5]], dtype=torch.float)
    expected_node_y = torch.tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=torch.long)
    expected_node_batch = torch.tensor([0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2], dtype=torch.long)
    # Edge level: upper adjacency through the triangles, lower through nodes.
    expected_edge_upper = torch.tensor([[2, 4, 2, 5, 4, 5, 12, 14, 12, 15, 14, 15],
                                        [4, 2, 5, 2, 5, 4, 14, 12, 15, 12, 15, 14]], dtype=torch.long)
    expected_edge_shared_coboundaries = torch.tensor([0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1], dtype=torch.long)
    expected_edge_lower = torch.tensor([[0, 1, 0, 3, 1, 2, 1, 5, 2, 3, 2, 4, 2, 5, 3, 4, 4, 5, 6, 7, 6, 9, 7, 8, 8, 9, 10, 11, 10, 13, 11, 12, 11, 15, 12, 13, 12, 14, 12, 15, 13, 14, 14, 15],
                                        [1, 0, 3, 0, 2, 1, 5, 1, 3, 2, 4, 2, 5, 2, 4, 3, 5, 4, 7, 6, 9, 6, 8, 7, 9, 8, 11, 10, 13, 10, 12, 11, 15, 11, 13, 12, 14, 12, 15, 12, 14, 13, 15, 14]],
                                       dtype=torch.long)
    expected_edge_shared_boundaries = torch.tensor([1, 1, 0, 0, 2, 2, 2, 2, 3, 3, 3, 3, 2, 2, 3, 3, 4, 4, 6, 6, 5, 5, 7, 7, 8, 8, 10, 10, 9, 9, 11, 11, 11, 11, 12, 12, 12, 12, 11, 11, 12, 12, 13, 13],
                                                   dtype=torch.long)
    expected_edge_x = torch.tensor([[1], [2], [3], [4], [5], [6], [1], [2], [3], [4], [1], [2], [3], [4], [5], [6]], dtype=torch.float)
    expected_edge_y = torch.tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=torch.long)
    expected_edge_batch = torch.tensor([0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2], dtype=torch.long)
    # Two-cell level: one triangle per house, none for the square.
    expected_two_cell_x = torch.tensor([[1], [1]], dtype=torch.float)
    expected_two_cell_y = torch.tensor([2, 2], dtype=torch.long)
    expected_two_cell_batch = torch.tensor([0, 2], dtype=torch.long)
    assert torch.equal(expected_node_upper, batch.nodes.upper_index)
    assert torch.equal(expected_node_shared_coboundaries, batch.nodes.shared_coboundaries)
    # Nodes have no lower adjacency (nothing below dimension 0).
    assert batch.nodes.lower_index is None
    assert batch.nodes.shared_boundaries is None
    assert torch.equal(expected_node_x, batch.nodes.x)
    assert torch.equal(expected_node_y, batch.nodes.y)
    assert torch.equal(expected_node_batch, batch.nodes.batch)
    assert torch.equal(expected_edge_upper, batch.edges.upper_index)
    assert torch.equal(expected_edge_shared_coboundaries, batch.edges.shared_coboundaries)
    assert torch.equal(expected_edge_lower, batch.edges.lower_index)
    assert torch.equal(expected_edge_shared_boundaries, batch.edges.shared_boundaries)
    assert torch.equal(expected_edge_x, batch.edges.x)
    assert torch.equal(expected_edge_y, batch.edges.y)
    assert torch.equal(expected_edge_batch, batch.edges.batch)
    # Two-cells are the top dimension: no adjacencies at all in these complexes.
    assert batch.two_cells.upper_index is None
    assert batch.two_cells.lower_index is None
    assert batch.two_cells.shared_coboundaries is None
    assert batch.two_cells.shared_boundaries is None
    assert torch.equal(expected_two_cell_x, batch.two_cells.x)
    assert torch.equal(expected_two_cell_y, batch.two_cells.y)
    assert torch.equal(expected_two_cell_batch, batch.two_cells.batch)
def validate_house_no_batching(batch):
    """Check a batch containing a single house complex.

    Same structure as the other validators but with a single complex, so all
    `*_batch` vectors are zero and no index shifting occurs.
    """
    # Node level.
    expected_node_upper = torch.tensor([[0, 1, 0, 3, 1, 2, 2, 3, 2, 4, 3, 4],
                                        [1, 0, 3, 0, 2, 1, 3, 2, 4, 2, 4, 3]], dtype=torch.long)
    expected_node_shared_coboundaries = torch.tensor([0, 0, 3, 3, 1, 1, 2, 2, 5, 5, 4, 4], dtype=torch.long)
    expected_node_x = torch.tensor([[1], [2], [3], [4], [5]], dtype=torch.float)
    expected_node_y = torch.tensor([0, 0, 0, 0, 0], dtype=torch.long)
    expected_node_batch = torch.tensor([0, 0, 0, 0, 0], dtype=torch.long)
    # Edge level.
    expected_edge_upper = torch.tensor([[2, 4, 2, 5, 4, 5],
                                        [4, 2, 5, 2, 5, 4]], dtype=torch.long)
    expected_edge_shared_coboundaries = torch.tensor([0, 0, 0, 0, 0, 0], dtype=torch.long)
    expected_edge_lower = torch.tensor([[0, 1, 0, 3, 1, 2, 1, 5, 2, 3, 2, 4, 2, 5, 3, 4, 4, 5],
                                        [1, 0, 3, 0, 2, 1, 5, 1, 3, 2, 4, 2, 5, 2, 4, 3, 5, 4]],
                                       dtype=torch.long)
    expected_edge_shared_boundaries = torch.tensor([1, 1, 0, 0, 2, 2, 2, 2, 3, 3, 3, 3, 2, 2, 3, 3, 4, 4],
                                                   dtype=torch.long)
    expected_edge_x = torch.tensor([[1], [2], [3], [4], [5], [6]], dtype=torch.float)
    expected_edge_y = torch.tensor([1, 1, 1, 1, 1, 1], dtype=torch.long)
    expected_edge_batch = torch.tensor([0, 0, 0, 0, 0, 0], dtype=torch.long)
    # Two-cell level: the single triangle of the house roof.
    expected_two_cell_x = torch.tensor([[1]], dtype=torch.float)
    expected_two_cell_y = torch.tensor([2], dtype=torch.long)
    expected_two_cell_batch = torch.tensor([0], dtype=torch.long)
    assert torch.equal(expected_node_upper, batch.nodes.upper_index)
    assert torch.equal(expected_node_shared_coboundaries, batch.nodes.shared_coboundaries)
    assert batch.nodes.lower_index is None
    assert batch.nodes.shared_boundaries is None
    assert torch.equal(expected_node_x, batch.nodes.x)
    assert torch.equal(expected_node_y, batch.nodes.y)
    assert torch.equal(expected_node_batch, batch.nodes.batch)
    assert torch.equal(expected_edge_upper, batch.edges.upper_index)
    assert torch.equal(expected_edge_shared_coboundaries, batch.edges.shared_coboundaries)
    assert torch.equal(expected_edge_lower, batch.edges.lower_index)
    assert torch.equal(expected_edge_shared_boundaries, batch.edges.shared_boundaries)
    assert torch.equal(expected_edge_x, batch.edges.x)
    assert torch.equal(expected_edge_y, batch.edges.y)
    assert torch.equal(expected_edge_batch, batch.edges.batch)
    assert batch.two_cells.upper_index is None
    assert batch.two_cells.lower_index is None
    assert batch.two_cells.shared_coboundaries is None
    assert batch.two_cells.shared_boundaries is None
    assert torch.equal(expected_two_cell_x, batch.two_cells.x)
    assert torch.equal(expected_two_cell_y, batch.two_cells.y)
    assert torch.equal(expected_two_cell_batch, batch.two_cells.batch)
def test_double_house_batching():
    """Batch two identical house complexes and validate the result.

       4         9
      / \       / \
     3---2     8---7
     |   |     |   |
     0---1     5---6

       .         .
      4 5      10 11
     . 2 .     . 8 .
     3   1     9   7
     . 0 .     . 6 .

       .         .
      /0\       /1\
     .---.     .---.
     |   |     |   |
     .---.     .---.
    """
    complexes = [get_house_complex() for _ in range(2)]
    validate_double_house(ComplexBatch.from_complex_list(complexes))
def test_house_and_square_batching():
    """Batch a house complex with a square complex and validate the result.

       4
      / \
     3---2     8---7
     |   |     |   |
     0---1     5---6

       .
      4 5
     . 2 .     . 8 .
     3   1     9   7
     . 0 .     . 6 .

       .
      /0\
     .---.     .---.
     |   |     |   |
     .---.     .---.
    """
    complexes = [get_house_complex(), get_square_complex()]
    validate_house_and_square(ComplexBatch.from_complex_list(complexes))
def test_house_square_house_batching():
    """Batch house + square + house and validate the combined cochains.

       4                   13
      / \                  / \
     3---2     8---7     12--11
     |   |     |   |     |    |
     0---1     5---6     9---10

       .                   .
      4 5                14 15
     . 2 .     . 8 .     . 12.
     3   1     9   7     13  11
     . 0 .     . 6 .     . 10 .

       .                   .
      /0\                 /1\
     .---.     .---.     .---.
     |   |     |   |     |   |
     .---.     .---.     .---.
    """
    complexes = [get_house_complex(), get_square_complex(), get_house_complex()]
    validate_house_square_house(ComplexBatch.from_complex_list(complexes))
def test_square_dot_square_batching():
    """Batch a square-with-isolated-node complex and a square complex.

     3---2       8---7
     |   |       |   |
     0---1   4   5---6

     . 2 .       . 6 .
     3   1       7   5
     . 0 .  .    . 4 .

     .---.       .---.
     |   |       |   |
     .---.  .    .---.
    """
    complexes = [get_square_dot_complex(), get_square_complex()]
    validate_square_dot_and_square(ComplexBatch.from_complex_list(complexes))
def test_kite_house_batching():
    """Batch a kite complex with a house complex and validate the result.

      2---3---4          9
     / \ /              / \
    0---1              8---7
                       |   |
                       5---6

     . 4 . 5 .          .
     2 1 3           10  11
     . 0 .            . 8 .
                      9   7
                      . 6 .

     .---.---.          .
     /0\1/             /2\
     .---.            .---.
                      |   |
                      .---.
    """
    complexes = [get_kite_complex(), get_house_complex()]
    validate_kite_and_house(ComplexBatch.from_complex_list(complexes))
def test_data_loader():
    """DataLoader must yield the expected number and content of batches."""
    data_list_1 = [
        get_house_complex(),
        get_house_complex(),
        get_house_complex(),
        get_square_complex()]
    data_list_2 = [
        get_house_complex(),
        get_square_complex(),
        get_house_complex(),
        get_house_complex()]
    data_list_3 = [
        get_house_complex(),
        get_square_complex(),
        get_pyramid_complex(),
        get_pyramid_complex()]
    data_list_4 = [
        get_square_dot_complex(),
        get_square_complex(),
        get_kite_complex(),
        get_house_complex(),
        get_house_complex()]

    # Loader 1: [house, house] then [house, square].
    batches_1 = list(DataLoader(data_list_1, batch_size=2))
    assert len(batches_1) == 2
    validate_double_house(batches_1[0])
    validate_house_and_square(batches_1[1])

    # Loader 2: [house, square, house] then a lone house.
    batches_2 = list(DataLoader(data_list_2, batch_size=3))
    assert len(batches_2) == 2
    validate_house_square_house(batches_2[0])
    validate_house_no_batching(batches_2[1])

    # Loader 3: only the batch count is checked (pyramids need max_dim=3).
    batches_3 = list(DataLoader(data_list_3, batch_size=3, max_dim=3))
    assert len(batches_3) == 2

    # Loader 4: three batches with known contents.
    batches_4 = list(DataLoader(data_list_4, batch_size=2))
    assert len(batches_4) == 3
    validate_square_dot_and_square(batches_4[0])
    validate_kite_and_house(batches_4[1])
    validate_house_no_batching(batches_4[2])
def test_set_for_features_in_batch():
    """set_xs must install the supplied feature tensors on each cochain."""
    house_1 = get_house_complex()
    house_2 = get_house_complex()
    square = get_square_complex()
    batch = ComplexBatch.from_complex_list([house_1, square, house_2])
    # Fresh features sized for the batch: 14 nodes, 16 edges, 2 two-cells.
    replacement_xs = [
        torch.arange(21, 35, dtype=torch.float).view(14, 1),
        torch.arange(21, 37, dtype=torch.float).view(16, 1),
        torch.arange(21, 23, dtype=torch.float).view(2, 1),
    ]
    batch.set_xs(replacement_xs)
    for dim, expected in enumerate(replacement_xs):
        assert torch.equal(batch.cochains[dim].x, expected)
def test_set_xs_does_not_mutate_dataset():
    """Batches should be copied, so these mutations should not change the dataset"""
    data_list = get_testing_complex_list()
    data_loader = DataLoader(data_list, batch_size=5, max_dim=2)

    def collect_features():
        # One list per dimension; up to dim 3 because pyramids are present.
        per_dim = [[] for _ in range(4)]
        for batch in data_loader:
            for i in range(batch.dimension + 1):
                per_dim[i].append(batch.cochains[i].x)
        return [torch.cat(chunks, dim=0) if len(chunks) > 0 else None
                for chunks in per_dim]

    before = collect_features()
    # Zero out the features of every batch via set_xs.
    for batch in data_loader:
        batch.set_xs([torch.zeros_like(batch.cochains[i].x)
                      for i in range(batch.dimension + 1)])
    after = collect_features()
    # The dataset must be unaffected by the mutation above.
    for post, pre in zip(after, before):
        if post is None:
            assert pre is None
        else:
            assert torch.equal(post, pre)
def test_batching_returns_the_same_features():
    """Batched iteration must expose exactly the same per-dim features as the
    un-batched complexes, for several batch sizes and dimension caps."""
    data_list = get_testing_complex_list()
    # Try multiple parameters. The product iterator is inlined into the `for`
    # so the loop body no longer rebinds the name it iterates over (`params`
    # used to shadow the outer iterator); also drops the stray trailing comma
    # in the unpacking target.
    dims = [1, 2, 3]
    bs = list(range(2, 11))
    for batch_size, batch_max_dim in itertools.product(bs, dims):
        data_loader = DataLoader(data_list, batch_size=batch_size, max_dim=batch_max_dim)
        # Collect features per dimension from the batched loader.
        batched_x = [[] for _ in range(batch_max_dim+1)]
        for batch in data_loader:
            cochain_params = batch.get_all_cochain_params()
            assert len(cochain_params) <= batch_max_dim+1
            for dim in range(len(cochain_params)):
                batched_x[dim].append(cochain_params[dim].x)
        batched_xs = [None for _ in range(batch_max_dim+1)]
        for i in range(batch_max_dim+1):
            if len(batched_x[i]) > 0:
                batched_xs[i] = torch.cat(batched_x[i], dim=0)
        # Collect the same features straight from the un-batched complexes.
        x = [[] for _ in range(batch_max_dim+1)]
        for complex in data_list:
            cochain_params = complex.get_all_cochain_params()
            for dim in range(min(len(cochain_params), batch_max_dim+1)):
                x[dim].append(cochain_params[dim].x)
        xs = [None for _ in range(batch_max_dim+1)]
        for i in range(batch_max_dim+1):
            if len(x[i]) > 0:
                xs[i] = torch.cat(x[i], dim=0)
        # Dimensions absent on either side must be absent on both.
        for i in range(batch_max_dim+1):
            if xs[i] is None or batched_xs[i] is None:
                assert xs[i] == batched_xs[i]
            else:
                assert torch.equal(batched_xs[i], xs[i])
@pytest.mark.data
def test_batching_returns_the_same_features_on_proteins():
    """Batched vs un-batched features must agree on the PROTEINS valid split."""
    dataset = load_dataset('PROTEINS', max_dim=3, fold=0, init_method='mean')
    assert len(dataset) == 1113
    split_idx = dataset.get_idx_split()
    dataset = dataset[split_idx['valid']]
    assert len(dataset) == 111
    batch_max_dim = 3
    data_loader = DataLoader(dataset, batch_size=32, max_dim=batch_max_dim)
    # Gather features per dimension from the batched loader.
    per_dim_batched = [[] for _ in range(batch_max_dim + 1)]
    for batch in data_loader:
        cochain_params = batch.get_all_cochain_params()
        assert len(cochain_params) <= batch_max_dim + 1
        for dim, p in enumerate(cochain_params):
            per_dim_batched[dim].append(p.x)
    batched_xs = [torch.cat(chunks, dim=0) if len(chunks) > 0 else None
                  for chunks in per_dim_batched]
    # Gather the same features complex by complex.
    per_dim = [[] for _ in range(batch_max_dim + 1)]
    for comp in dataset:
        cochain_params = comp.get_all_cochain_params()
        for dim in range(min(len(cochain_params), batch_max_dim + 1)):
            per_dim[dim].append(cochain_params[dim].x)
    xs = [torch.cat(chunks, dim=0) if len(chunks) > 0 else None
          for chunks in per_dim]
    for plain, batched in zip(xs, batched_xs):
        if plain is None or batched is None:
            assert plain == batched
        else:
            assert torch.equal(batched, plain)
@pytest.mark.data
def test_batching_returns_the_same_features_on_ring_proteins():
    """Batched vs un-batched features must agree on ring-lifted PROTEINS."""
    dataset = load_dataset('PROTEINS', max_dim=2, fold=0, init_method='mean',
                           max_ring_size=7)
    assert len(dataset) == 1113
    assert dataset.max_dim == 2
    split_idx = dataset.get_idx_split()
    dataset = dataset[split_idx['valid']]
    assert len(dataset) == 111
    batch_max_dim = 3
    data_loader = DataLoader(dataset, batch_size=32, max_dim=batch_max_dim)
    # Gather features per dimension from the batched loader.
    per_dim_batched = [[] for _ in range(batch_max_dim + 1)]
    for batch in data_loader:
        cochain_params = batch.get_all_cochain_params()
        assert len(cochain_params) <= batch_max_dim + 1
        for dim, p in enumerate(cochain_params):
            per_dim_batched[dim].append(p.x)
    batched_xs = [torch.cat(chunks, dim=0) if len(chunks) > 0 else None
                  for chunks in per_dim_batched]
    # Gather the same features complex by complex.
    per_dim = [[] for _ in range(batch_max_dim + 1)]
    for comp in dataset:
        cochain_params = comp.get_all_cochain_params()
        for dim in range(min(len(cochain_params), batch_max_dim + 1)):
            per_dim[dim].append(cochain_params[dim].x)
    xs = [torch.cat(chunks, dim=0) if len(chunks) > 0 else None
          for chunks in per_dim]
    for plain, batched in zip(xs, batched_xs):
        if plain is None or batched is None:
            assert plain == batched
        else:
            assert torch.equal(batched, plain)
@pytest.mark.data
def test_batching_returns_the_same_up_attr_on_proteins():
    """Batched vs un-batched up_attr tensors must agree on PROTEINS."""
    dataset = load_dataset('PROTEINS', max_dim=3, fold=0, init_method='mean')
    assert len(dataset) == 1113
    split_idx = dataset.get_idx_split()
    dataset = dataset[split_idx['valid']]
    assert len(dataset) == 111
    batch_max_dim = 3
    data_loader = DataLoader(dataset, batch_size=32, max_dim=batch_max_dim)
    # Batched side.
    per_dim_batched = [[] for _ in range(batch_max_dim + 1)]
    for batch in data_loader:
        cochain_params = batch.get_all_cochain_params()
        assert len(cochain_params) <= batch_max_dim + 1
        for dim, p in enumerate(cochain_params):
            if p.kwargs['up_attr'] is not None:
                per_dim_batched[dim].append(p.kwargs['up_attr'])
    batched_xs = [torch.cat(chunks, dim=0) if len(chunks) > 0 else None
                  for chunks in per_dim_batched]
    # Un-batched side.
    per_dim = [[] for _ in range(batch_max_dim + 1)]
    for comp in dataset:
        cochain_params = comp.get_all_cochain_params()
        for dim in range(min(len(cochain_params), batch_max_dim + 1)):
            # TODO: Modify test after merging the top_feature branch
            # Right now, the last level cannot have top features
            if cochain_params[dim].kwargs['up_attr'] is not None and dim < batch_max_dim:
                per_dim[dim].append(cochain_params[dim].kwargs['up_attr'])
    xs = [torch.cat(chunks, dim=0) if len(chunks) > 0 else None
          for chunks in per_dim]
    for plain, batched in zip(xs, batched_xs):
        if plain is None or batched is None:
            assert plain == batched
        else:
            assert torch.equal(plain, batched)
def test_batching_returns_the_same_up_attr():
    """Batched iteration must expose the same up_attr tensors as the
    un-batched complexes, for several batch sizes and dimension caps."""
    data_list = get_testing_complex_list()
    # Try multiple parameters. The product is inlined into the `for` so the
    # loop body no longer rebinds the name it iterates over (`params` used to
    # shadow the outer iterator).
    dims = [1, 2, 3]
    bs = list(range(2, 11))
    for batch_size, batch_max_dim in itertools.product(bs, dims):
        data_loader = DataLoader(data_list, batch_size=batch_size, max_dim=batch_max_dim)
        # Batched
        batched_x = [[] for _ in range(batch_max_dim+1)]
        for batch in data_loader:
            cochain_params = batch.get_all_cochain_params()
            assert len(cochain_params) <= batch_max_dim+1
            for dim in range(len(cochain_params)):
                if cochain_params[dim].kwargs['up_attr'] is not None:
                    batched_x[dim].append(cochain_params[dim].kwargs['up_attr'])
        batched_xs = [None for _ in range(batch_max_dim+1)]
        for i in range(batch_max_dim+1):
            if len(batched_x[i]) > 0:
                batched_xs[i] = torch.cat(batched_x[i], dim=0)
        # Un-batched
        x = [[] for _ in range(batch_max_dim+1)]
        for complex in data_list:
            cochain_params = complex.get_all_cochain_params()
            for dim in range(min(len(cochain_params), batch_max_dim+1)):
                # TODO: Modify test after merging the top_feature branch
                # Right now, the last level cannot have top features
                if cochain_params[dim].kwargs['up_attr'] is not None and dim < batch_max_dim:
                    x[dim].append(cochain_params[dim].kwargs['up_attr'])
        xs = [None for _ in range(batch_max_dim+1)]
        for i in range(batch_max_dim+1):
            if len(x[i]) > 0:
                xs[i] = torch.cat(x[i], dim=0)
        for i in range(batch_max_dim+1):
            if xs[i] is None or batched_xs[i] is None:
                assert xs[i] == batched_xs[i]
            else:
                assert torch.equal(xs[i], batched_xs[i])
@pytest.mark.data
def test_batching_returns_the_same_down_attr_on_proteins():
    """Batched vs un-batched down_attr tensors must agree on PROTEINS."""
    dataset = load_dataset('PROTEINS', max_dim=3, fold=0, init_method='mean')
    assert len(dataset) == 1113
    split_idx = dataset.get_idx_split()
    dataset = dataset[split_idx['valid']]
    assert len(dataset) == 111
    batch_max_dim = 3
    data_loader = DataLoader(dataset, batch_size=32, max_dim=batch_max_dim)
    # Batched side.
    per_dim_batched = [[] for _ in range(batch_max_dim + 1)]
    for batch in data_loader:
        cochain_params = batch.get_all_cochain_params()
        assert len(cochain_params) <= batch_max_dim + 1
        for dim, p in enumerate(cochain_params):
            if p.kwargs['down_attr'] is not None:
                per_dim_batched[dim].append(p.kwargs['down_attr'])
    batched_xs = [torch.cat(chunks, dim=0) if len(chunks) > 0 else None
                  for chunks in per_dim_batched]
    # Un-batched side.
    per_dim = [[] for _ in range(batch_max_dim + 1)]
    for comp in dataset:
        cochain_params = comp.get_all_cochain_params()
        for dim in range(min(len(cochain_params), batch_max_dim + 1)):
            if cochain_params[dim].kwargs['down_attr'] is not None:
                per_dim[dim].append(cochain_params[dim].kwargs['down_attr'])
    xs = [torch.cat(chunks, dim=0) if len(chunks) > 0 else None
          for chunks in per_dim]
    for plain, batched in zip(xs, batched_xs):
        if plain is None or batched is None:
            assert plain == batched
        else:
            assert len(plain) == len(batched)
            assert torch.equal(plain, batched)
def test_batching_returns_the_same_down_attr():
    """Batched iteration must expose the same down_attr tensors as the
    un-batched complexes, for several batch sizes and dimension caps."""
    data_list = get_testing_complex_list()
    # Try multiple parameters. The product is inlined into the `for` so the
    # loop body no longer rebinds the name it iterates over (`params` used to
    # shadow the outer iterator).
    dims = [1, 2, 3]
    bs = list(range(2, 11))
    for batch_size, batch_max_dim in itertools.product(bs, dims):
        data_loader = DataLoader(data_list, batch_size=batch_size, max_dim=batch_max_dim)
        batched_x = [[] for _ in range(batch_max_dim+1)]
        for batch in data_loader:
            cochain_params = batch.get_all_cochain_params()
            assert len(cochain_params) <= batch_max_dim+1
            for dim in range(len(cochain_params)):
                if cochain_params[dim].kwargs['down_attr'] is not None:
                    batched_x[dim].append(cochain_params[dim].kwargs['down_attr'])
        batched_xs = [None for _ in range(batch_max_dim+1)]
        for i in range(batch_max_dim+1):
            if len(batched_x[i]) > 0:
                batched_xs[i] = torch.cat(batched_x[i], dim=0)
        # Un-batched
        x = [[] for _ in range(batch_max_dim+1)]
        for complex in data_list:
            cochain_params = complex.get_all_cochain_params()
            for dim in range(min(len(cochain_params), batch_max_dim+1)):
                if cochain_params[dim].kwargs['down_attr'] is not None:
                    x[dim].append(cochain_params[dim].kwargs['down_attr'])
        xs = [None for _ in range(batch_max_dim+1)]
        for i in range(batch_max_dim+1):
            if len(x[i]) > 0:
                xs[i] = torch.cat(x[i], dim=0)
        for i in range(batch_max_dim+1):
            if xs[i] is None or batched_xs[i] is None:
                assert xs[i] == batched_xs[i]
            else:
                assert len(xs[i]) == len(batched_xs[i])
                assert torch.equal(xs[i], batched_xs[i])
@pytest.mark.data
def test_batching_of_boundary_index_on_proteins():
    """boundary_index must select identical boundary/cell features before and
    after batching on the PROTEINS valid split."""
    dataset = load_dataset('PROTEINS', max_dim=3, fold=0, init_method='mean')
    assert len(dataset) == 1113
    split_idx = dataset.get_idx_split()
    dataset = dataset[split_idx['valid']]
    assert len(dataset) == 111
    batch_max_dim = 3
    data_loader = DataLoader(dataset, batch_size=32, max_dim=batch_max_dim)

    def gather(param_lists):
        # For each dimension, select boundary features (row 0 of
        # boundary_index) and cell features (row 1) and concatenate them.
        boundaries = [[] for _ in range(batch_max_dim + 1)]
        cells = [[] for _ in range(batch_max_dim + 1)]
        for cochain_params in param_lists:
            for dim in range(min(len(cochain_params), batch_max_dim + 1)):
                p = cochain_params[dim]
                if p.kwargs['boundary_attr'] is not None:
                    assert p.boundary_index is not None
                    boundaries[dim].append(
                        torch.index_select(p.kwargs['boundary_attr'], 0, p.boundary_index[0]))
                    cells[dim].append(
                        torch.index_select(p.x, 0, p.boundary_index[1]))

        def concat(chunks):
            return torch.cat(chunks, dim=0) if len(chunks) > 0 else None

        return [concat(c) for c in boundaries], [concat(c) for c in cells]

    # Batched side.
    batched_param_lists = []
    for batch in data_loader:
        cochain_params = batch.get_all_cochain_params()
        assert len(cochain_params) <= batch_max_dim + 1
        batched_param_lists.append(cochain_params)
    batched_boundaries, batched_cells = gather(batched_param_lists)
    # Un-batched side.
    plain_boundaries, plain_cells = gather(
        [comp.get_all_cochain_params() for comp in dataset])
    for plain, batched in zip(plain_boundaries, batched_boundaries):
        if plain is None or batched is None:
            assert plain == batched
        else:
            assert len(plain) == len(batched)
            assert torch.equal(plain, batched)
    for plain, batched in zip(plain_cells, batched_cells):
        if plain is None or batched is None:
            assert plain == batched
        else:
            assert len(plain) == len(batched)
            assert torch.equal(plain, batched)
def test_batching_of_boundary_index():
    """boundary_index must select identical boundary/cell features before and
    after batching, for several batch sizes and dimension caps."""
    data_list = get_testing_complex_list()
    # Try multiple parameters. The product is inlined into the `for` so the
    # loop body no longer rebinds the name it iterates over (`params` used to
    # shadow the outer iterator).
    dims = [1, 2, 3]
    bs = list(range(2, 11))
    for batch_size, batch_max_dim in itertools.product(bs, dims):
        data_loader = DataLoader(data_list, batch_size=batch_size, max_dim=batch_max_dim)
        batched_x_boundaries = [[] for _ in range(batch_max_dim+1)]
        batched_x_cells = [[] for _ in range(batch_max_dim+1)]
        for batch in data_loader:
            cochain_params = batch.get_all_cochain_params()
            assert len(cochain_params) <= batch_max_dim+1
            for dim in range(len(cochain_params)):
                if cochain_params[dim].kwargs['boundary_attr'] is not None:
                    assert cochain_params[dim].boundary_index is not None
                    boundary_attrs = cochain_params[dim].kwargs['boundary_attr']
                    # Row 0 indexes boundary cells, row 1 the cells themselves.
                    batched_x_boundaries[dim].append(
                        torch.index_select(boundary_attrs, 0, cochain_params[dim].boundary_index[0]))
                    batched_x_cells[dim].append(
                        torch.index_select(cochain_params[dim].x, 0, cochain_params[dim].boundary_index[1]))
        batched_xs_boundaries = [None for _ in range(batch_max_dim+1)]
        batched_xs_cells = [None for _ in range(batch_max_dim+1)]
        for i in range(batch_max_dim+1):
            if len(batched_x_boundaries[i]) > 0:
                batched_xs_boundaries[i] = torch.cat(batched_x_boundaries[i], dim=0)
            if len(batched_x_cells[i]) > 0:
                batched_xs_cells[i] = torch.cat(batched_x_cells[i], dim=0)
        # Un-batched
        x_boundaries = [[] for _ in range(batch_max_dim+1)]
        x_cells = [[] for _ in range(batch_max_dim+1)]
        for complex in data_list:
            cochain_params = complex.get_all_cochain_params()
            for dim in range(min(len(cochain_params), batch_max_dim+1)):
                if cochain_params[dim].kwargs['boundary_attr'] is not None:
                    assert cochain_params[dim].boundary_index is not None
                    boundary_attrs = cochain_params[dim].kwargs['boundary_attr']
                    x_boundaries[dim].append(
                        torch.index_select(boundary_attrs, 0, cochain_params[dim].boundary_index[0]))
                    x_cells[dim].append(
                        torch.index_select(cochain_params[dim].x, 0, cochain_params[dim].boundary_index[1]))
        xs_boundaries = [None for _ in range(batch_max_dim+1)]
        xs_cells = [None for _ in range(batch_max_dim+1)]
        for i in range(batch_max_dim+1):
            if len(x_boundaries[i]) > 0:
                xs_boundaries[i] = torch.cat(x_boundaries[i], dim=0)
                xs_cells[i] = torch.cat(x_cells[i], dim=0)
        for i in range(batch_max_dim+1):
            if xs_boundaries[i] is None or batched_xs_boundaries[i] is None:
                assert xs_boundaries[i] == batched_xs_boundaries[i]
            else:
                assert len(xs_boundaries[i]) == len(batched_xs_boundaries[i])
                assert torch.equal(xs_boundaries[i], batched_xs_boundaries[i])
            if xs_cells[i] is None or batched_xs_cells[i] is None:
                assert xs_cells[i] == batched_xs_cells[i]
            else:
                assert len(xs_cells[i]) == len(batched_xs_cells[i])
                assert torch.equal(xs_cells[i], batched_xs_cells[i])
@pytest.mark.data
def test_data_loader_shuffling():
    """A shuffled loader must yield the labels in a different order."""
    dataset = load_dataset('PROTEINS', max_dim=3, fold=0, init_method='mean')
    ordered = torch.cat(
        [data.y for data in DataLoader(dataset, batch_size=32)], dim=0)
    shuffled = torch.cat(
        [data.y for data in DataLoader(dataset, batch_size=32, shuffle=True)], dim=0)
    assert list(ordered.size()) == list(shuffled.size())
    # Same multiset of labels but (with overwhelming probability) a
    # different order.
    assert not torch.equal(ordered, shuffled)
@pytest.mark.data
def test_idx_splitting_works():
    """Slicing by split indices must match fetching each index individually."""
    dataset = load_dataset('PROTEINS', max_dim=3, fold=0, init_method='mean')
    splits = dataset.get_idx_split()
    sliced = torch.cat(
        [data.y for data in dataset[splits["valid"]]], dim=0)
    fetched = torch.cat(
        [dataset.get(i).y for i in splits['valid']], dim=0)
    assert torch.equal(sliced, fetched)
|
from django.contrib import admin
from .models import (
Brand,
Certificate,
Company,
Corporation,
Country,
Product,
ProductPriceInStore,
Rating,
Store,
MainProductCategory,
ProductCategory,
SubProductCategory,
BigTen,
)
class MainProductCategoryAdmin(admin.ModelAdmin):
    """Admin configuration for MainProductCategory."""

    # Changelist columns; every column links to the change page.
    list_display = (
        "name",
        "created",
        "updated",
    )
    list_display_links = (
        "name",
        "created",
        "updated",
    )
    # Enables the admin search box on the name field.
    search_fields = [
        "name",
    ]
class ProductCategoryAdmin(admin.ModelAdmin):
    """Admin configuration for ProductCategory."""

    # Changelist columns; every column links to the change page.
    list_display = (
        "name",
        "created",
        "updated",
    )
    list_display_links = (
        "name",
        "created",
        "updated",
    )
    # Enables the admin search box on the name field.
    search_fields = [
        "name",
    ]
class SubProductCategoryAdmin(admin.ModelAdmin):
    """Admin configuration for SubProductCategory."""

    # Changelist columns; every column links to the change page.
    list_display = (
        "name",
        "created",
        "updated",
    )
    list_display_links = (
        "name",
        "created",
        "updated",
    )
    # Enables the admin search box on the name field.
    search_fields = [
        "name",
    ]
class RatingAdmin(admin.ModelAdmin):
    """Admin for Rating; displays the name of the associated corporation."""

    list_display = ("corporation",)
    list_display_links = ("corporation",)

    def corporation(self, obj):
        """Name of the rated corporation, or None when unset.

        Fix: this was previously decorated with ``@classmethod``, which is not
        the Django convention for ``list_display`` callables and breaks the
        ``corporation.short_description = ...`` assignment below (classmethod
        objects do not accept arbitrary attributes). A plain method is the
        documented form.
        """
        if obj.corporation is not None:
            return obj.corporation.name
        else:
            return None

    corporation.short_description = "Rating of associated Concern"
class CountryAdmin(admin.ModelAdmin):
    """Admin configuration for Country (name + ISO-style code)."""

    list_display = (
        "name",
        "code",
        "created",
        "updated",
    )
    list_display_links = ("name",)
    # Searchable so other admins can use Country in autocomplete_fields.
    search_fields = [
        "name",
        "code",
        "created",
        "updated",
    ]
class RatingInline(admin.StackedInline):
    """Inline editor for the Rating attached to a Corporation."""

    # At most one rating per corporation.
    max_num = 1
    model = Rating
class CorporationAdmin(admin.ModelAdmin):
    """Admin for Corporation with an inline Rating and a derived country-code
    column."""

    list_display = (
        "name",
        "logo",
        "wiki",
        "origin",
        "origin_code",
        "created",
        "updated",
    )
    list_display_links = ("name",)
    search_fields = [
        "name",
    ]
    autocomplete_fields = ("origin",)
    inlines = [
        RatingInline,
    ]

    def origin_code(self, obj):
        """Country code of the corporation's origin, or None when unset."""
        origin = obj.origin
        return origin.code if origin is not None else None
class CompanyAdmin(admin.ModelAdmin):
    """Admin for Company with a derived country-code column."""

    list_display = (
        "name",
        "logo",
        "wiki",
        "corporation",
        "origin",
        "origin_code",
        "created",
        "updated",
    )
    list_display_links = ("name",)
    search_fields = [
        "name",
        "logo",
        "corporation__name",
        "origin",
        "created",
        "updated",
    ]
    autocomplete_fields = ("corporation",)

    def origin_code(self, obj):
        """Country code of the company's origin, or None when unset."""
        origin = obj.origin
        return origin.code if origin is not None else None
class BrandAdmin(admin.ModelAdmin):
    """Admin configuration for Brand."""

    list_display = (
        "name",
        "logo",
        "wiki",
        "company",
        "corporation",
        "created",
        "updated",
    )
    list_display_links = ("name",)
    # Search spans related company/corporation names as well.
    search_fields = [
        "name",
        "logo",
        "wiki",
        "company__name",
        "corporation__name",
        "created",
        "updated",
    ]
    autocomplete_fields = (
        "corporation",
        "company",
    )
class ProdcutPriceInStoreAdmin(admin.ModelAdmin):
    """Admin for ProductPriceInStore (store/product price records)."""

    # NOTE(review): class name is misspelled ("Prodcut"); kept as-is because
    # it is referenced by the admin.site.register() call below.
    list_display = ("store", "product", "price")
    search_fields = ["store__name", "product__name", "price"]
class PriceInline(admin.StackedInline):
    """Inline editor for a product's store price."""

    # At most one price row editable inline.
    max_num = 1
    model = ProductPriceInStore
class CertificateAdmin(admin.ModelAdmin):
    """Admin configuration for Certificate."""

    list_display = (
        "name",
        "wiki",
        "created",
        "updated",
    )
    list_display_links = ("name",)
    search_fields = [
        "name",
        "created",
        "updated",
    ]
    # assumes Certificate has a relation named "product" to Product — TODO
    # confirm against the model definition.
    autocomplete_fields = ("product",)
class ProductAdmin(admin.ModelAdmin):
    """Admin configuration for Product with its inline store price."""

    list_display = (
        "name",
        "state",
        "code",
        "brand",
        "checked_by",
        "scanned_counter",
        "upvote_counter",
        "downvote_counter",
        "data_source",
        "added_by",
        "logo",
        "wiki",
        "scraped_image",
        "created",
        "updated",
    )
    list_display_links = ("name",)
    search_fields = [
        "name",
        "code",
        "brand__name",
        "scanned_counter",
        "created",
        "updated",
        "checked_by__username",
    ]
    # Counters and provenance are maintained by the application, not edited
    # by hand in the admin.
    exclude = ("scanned_counter", "added_by", "upvote_counter", "downvote_counter",)
    autocomplete_fields = ("brand",)
    inlines = [
        PriceInline,
    ]
class StoreAdmin(admin.ModelAdmin):
    """Admin configuration for Store."""

    list_display = (
        "name",
        "country",
    )
    list_display_links = ("name",)
    search_fields = [
        "name",
    ]
    autocomplete_fields = ()
class BigTenAdmin(admin.ModelAdmin):
    """Admin configuration for BigTen."""

    list_display = (
        "name",
    )
    list_display_links = ("name",)
    search_fields = [
        "name",
    ]
    autocomplete_fields = ()
admin.site.site_header = "Goodbuy Database"

# Register every model with its ModelAdmin, in the original order.
_REGISTRATIONS = (
    (Corporation, CorporationAdmin),
    (Company, CompanyAdmin),
    (Brand, BrandAdmin),
    (Product, ProductAdmin),
    (Country, CountryAdmin),
    (Rating, RatingAdmin),
    (MainProductCategory, MainProductCategoryAdmin),
    (ProductCategory, ProductCategoryAdmin),
    (SubProductCategory, SubProductCategoryAdmin),
    (Store, StoreAdmin),
    (ProductPriceInStore, ProdcutPriceInStoreAdmin),
    (Certificate, CertificateAdmin),
    (BigTen, BigTenAdmin),
)
for _model, _model_admin in _REGISTRATIONS:
    admin.site.register(_model, _model_admin)
|
import datetime
import os
import sqlite3
import uuid
from flask import Flask, request, render_template, jsonify
from flask_cors import CORS
from util import Tweet, tweet_factory, datetime_format, send_data
app = Flask(__name__)
CORS(app)  # allow cross-origin requests from the front-end
# Configuration
initial_tweets_count = 100  # tweets sampled from the live stream on first run
tablet_servers = ['http://localhost:5001', 'http://localhost:5002']
tablets_per_server = 2
# Database
database_path = 'tweets.db'
# First run only: bootstrap the local DB with a sample of live tweets.
# NOTE(review): requires Twitter API credentials in the environment and the
# python-twitter package; skipped entirely once tweets.db exists.
if not os.path.exists(database_path):
    import twitter
    twitter_api = twitter.Api(
        consumer_key=os.environ['CONSUMER_KEY'],
        consumer_secret=os.environ['CONSUMER_SECRET'],
        access_token_key=os.environ['ACCESS_TOKEN_KEY'],
        access_token_secret=os.environ['ACCESS_TOKEN_SECRET'])
    stream_sample = twitter_api.GetStreamSample()
    db = sqlite3.connect(database_path)
    db.execute(
        'CREATE TABLE Tweets (id TEXT PRIMARY KEY, user TEXT, created_at TEXT, content TEXT)')
    for _ in range(initial_tweets_count):
        next_tweet = stream_sample.next()
        # The sample stream interleaves deletion notices; skip them.
        while 'delete' in next_tweet:
            next_tweet = stream_sample.next()
        Tweet({'id': uuid.uuid4().hex,
               'user': next_tweet['user']['screen_name'],
               'created_at': next_tweet['created_at'],
               'content': next_tweet['text']}).insert_into(db)
    db.commit()
    db.close()
# check_same_thread=False: Flask may serve requests from multiple threads
# against this single shared connection.
db = sqlite3.connect(database_path, check_same_thread=False)
db.row_factory = tweet_factory
# Initialization
all_tweets = db.execute('SELECT * FROM Tweets').fetchall()
number_of_tablets = len(tablet_servers) * tablets_per_server
# Tweets are range-partitioned by creation time: the [first, last] interval
# is split into `number_of_tablets` equal-width slices, one per tablet.
# assumes Tweet ordering (min/max) is by creation datetime — TODO confirm
# against util.Tweet.
first_datetime = min(all_tweets).get_datetime()
last_datetime = max(all_tweets).get_datetime()
timespan = (last_datetime - first_datetime) / number_of_tablets
# Helper functions
def get_server_index(tablet_index):
    """Map a tablet index to the index of the server that hosts it."""
    server_index, _ = divmod(tablet_index, tablets_per_server)
    return server_index
def get_tablet_index(tweet):
    """Return the index of the tablet whose time slice contains the tweet.

    Tablets partition [first_datetime, last_datetime] into equal time slices;
    tweets at or beyond the end of the range are clamped into the last tablet.
    """
    slice_seconds = timespan.total_seconds()
    if slice_seconds == 0:
        # Fixed: when every tweet shares one timestamp the slice width is
        # zero and the division below raised ZeroDivisionError.
        return 0
    offset = (tweet.get_datetime() - first_datetime).total_seconds()
    return min(int(offset / slice_seconds), number_of_tablets - 1)
def get_server(tweet):
    """Resolve the tablet-server URL responsible for storing *tweet*."""
    tablet = get_tablet_index(tweet)
    return tablet_servers[get_server_index(tablet)]
# Seed each tablet server with the tweets its time range is responsible for.
for tweet in all_tweets:
    send_data(get_server(tweet) + '/master/create/', tweet.to_dict())
@app.route('/', methods=['GET'])
def index():
    """Render the landing page with the queryable time range pre-filled."""
    context = {
        'first_datetime': first_datetime.strftime(datetime_format),
        'last_datetime': last_datetime.strftime(datetime_format),
        'now': datetime.datetime.now().strftime(datetime_format),
        'new_id': uuid.uuid4().hex,
    }
    return render_template('index.html', **context)
@app.route('/create/', methods=['POST'])
def create():
    """Tell the client which tablet server should store a new tweet."""
    probe = Tweet({'created_at': request.form['created_at']})
    return jsonify([get_server(probe)])
@app.route('/read/', methods=['POST'])
def read():
    """List the tablet servers whose slices cover the requested time window."""
    lo = get_server_index(get_tablet_index(Tweet({'created_at': request.form['from']})))
    hi = get_server_index(get_tablet_index(Tweet({'created_at': request.form['to']})))
    return jsonify(tablet_servers[lo:hi + 1])
@app.route('/update/', methods=['POST'])
def update():
    """Return the tablet server that owns the tweet being updated."""
    fields = {key: request.form[key]
              for key in ('id', 'user', 'created_at', 'content')}
    return jsonify([get_server(Tweet(fields))])
@app.route('/delete/', methods=['POST'])
def delete():
    """Return the tablet server that owns the tweet being deleted."""
    payload = Tweet({
        'id': request.form['id'],
        'user': request.form['user'],
        'created_at': request.form['created_at'],
        'content': request.form['content'],
    })
    return jsonify([get_server(payload)])
@app.route('/sync/create/', methods=['POST'])
def sync_create():
    """Mirror a tweet creation into the master's own database."""
    record = {field: request.form[field]
              for field in ('id', 'user', 'created_at', 'content')}
    Tweet(record).insert_into(db)
    db.commit()
    return 'Synced!'
@app.route('/sync/update/', methods=['POST'])
def sync_update():
    """Mirror a tweet update into the master's own database."""
    params = (request.form['user'], request.form['created_at'],
              request.form['content'], request.form['id'])
    db.execute('UPDATE Tweets SET user = ?, created_at = ?, content = ? WHERE id = ?',
               params)
    db.commit()
    return 'Synced!'
@app.route('/sync/delete/', methods=['POST'])
def sync_delete():
    """Mirror a tweet deletion into the master's own database."""
    db.execute('DELETE FROM Tweets WHERE id = ?', (request.form['id'],))
    db.commit()
    return 'Synced!'
|
<filename>tapas/utils/hybridqa_utils.py
# coding=utf-8
# Copyright 2019 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Helper methods for the HybridQA dataset."""
import collections
import dataclasses
import enum
import json
import os
import string
import tempfile
from typing import Any, Iterable, List, Mapping, Text, Optional, Tuple, Set, Union, MutableMapping
import urllib.parse
import zipfile
from absl import logging
import numpy as np
from tapas.protos import annotated_text_pb2
from tapas.protos import interaction_pb2
from tapas.scripts import prediction_utils
from tapas.utils import text_utils
import tensorflow.compat.v1 as tf
# Proto extension handles for reading/writing annotated text on table cells.
_annotated_cell = annotated_text_pb2.AnnotatedText.annotated_cell_ext
_annotation_descriptions = annotated_text_pb2.AnnotationDescription.annotation_descriptions_ext
# Name of the zip archive (and its top-level folder) shipping the tables.
_ZIP_NAME = 'WikiTables-WithLinks'
# Maps every punctuation character to a space, for text normalization.
_TRANSLATION_TABLE = str.maketrans(string.punctuation,
                                   ' ' * len(string.punctuation))
_ARTICLES = ('a', 'an', 'the')  # List from HybridQA/evaluate_script.py
# Cut-offs used for the recall@k cell-selection metrics.
_RECALL_KS = (1, 3, 5)
# A (row, column) cell coordinate.
CoordinateType = Tuple[int, int]
class Split(enum.Enum):
  """Dataset splits shipped with HybridQA; values match the JSON file names."""
  TRAIN = 'train'
  DEV = 'dev'
  TEST = 'test'
class AnswerType(enum.Enum):
  """Buckets questions by where (table cell vs. linked text) and how many
  answer coordinates were found; used for granular metric reporting."""
  ALL = 'all'  # Aggregate bucket covering every question.
  ONE_IN_TABLE = '1_in_table'
  ONE_IN_TEXT = '1_in_text'
  MANY_IN_TABLE = '2+_in_table'
  MANY_IN_TEXT = '2+_in_text'
  TABLE_AND_TEXT = 'table_&_text'  # Answers in both cells and linked text.
  NO_ANSWER = 'no_answer'
def get_hybridqa_references(
    references_path):
  """Returns HybridQA references json as a python dict."""
  # Example file at
  # https://github.com/wenhuchen/HybridQA/blob/master/released_data/dev_reference.json.
  with tf.io.gfile.GFile(references_path, 'r') as fp:
    raw_references = json.load(fp)
  # Answer lists become sets so membership checks are cheap and order-free.
  return {key: set(values) for key, values in raw_references.items()}
def _get_answer_type(question):
  """Classifies a question by where and how many of its answers were found."""
  # alternative_answers[0] holds the table-only coordinates; the remainder of
  # answer.answer_coordinates were matched through linked text.
  in_table = len(question.alternative_answers[0].answer_coordinates)
  in_text = len(question.answer.answer_coordinates) - in_table
  if in_table and in_text:
    return AnswerType.TABLE_AND_TEXT
  if not in_table and not in_text:
    return AnswerType.NO_ANSWER
  if in_table == 1:
    return AnswerType.ONE_IN_TABLE
  if in_text == 1:
    return AnswerType.ONE_IN_TEXT
  return AnswerType.MANY_IN_TABLE if in_table else AnswerType.MANY_IN_TEXT
def get_best_cells(probabilities):
  """Groups token probabilities by cell; returns cells by descending mean score."""
  scores = collections.defaultdict(list)
  for column, row, probability in probabilities:
    scores[(row, column)].append(probability)

  def _mean_score(coordinate):
    return np.array(scores[coordinate]).mean()

  # Stable sort + insertion-ordered dict keeps first-seen order among ties.
  return sorted(scores, key=_mean_score, reverse=True)
@dataclasses.dataclass(frozen=True)
class CellSelectionMetrics:
  """Helper class that holds the metrics when training a cell selection model.

  recall: The fraction of the questions where a correct cell was selected.
  precision: On the average question where cell was selected, the fraction of
    selected cells that is correct.
  non_empty: The fraction of the questions where any cell was selected.
  answer_len: The average number of cells selected.
  coverage: The fraction of the questions where a prediction exists in the file.
  recall_at_1: The fraction of the questions where a correct cell was selected
    if the best cell is picked.
  recall_at_3: The fraction of the questions where a correct cell was selected
    if the best 3 cells are picked.
  recall_at_5: The fraction of the questions where a correct cell was selected
    if the best 5 cells are picked.
  """
  # The recall_at_* fields stay None when the predictions file carried no
  # token probabilities; precision is None when no cell was ever selected.
  recall: float
  precision: Optional[float]
  non_empty: float
  answer_len: float
  coverage: float
  recall_at_1: Optional[float] = None
  recall_at_3: Optional[float] = None
  recall_at_5: Optional[float] = None
def get_cell_selection_metrics(
    interactions_file,
    predictions_file,
):
  """Evaluates cell selection results in HybridQA experiment."""
  questions = {}
  for interaction in prediction_utils.iterate_interactions(interactions_file):
    # Hidden test-set examples carry no answer and are excluded.
    questions.update({
        question.id: question
        for question in interaction.questions
        if question.HasField('answer')
    })
  if not questions:
    return {}
  all_metrics = dict(eval_cell_selection(questions, predictions_file))
  return dataclasses.asdict(all_metrics[AnswerType.ALL])
def eval_cell_selection(
    questions,
    predictions_file,
):
  """Evaluates cell selection results in HybridQA experiment.

  Args:
    questions: A map of Question protos by their respective ids.
    predictions_file: Path to a tsv file with predictions for a checkpoint.

  Yields:
    An AnswerType and its corresponding CellSelectionMetrics instance
  """
  total = collections.Counter()
  total_correct = collections.Counter()
  total_correct_at_k = {k: collections.Counter() for k in _RECALL_KS}
  total_seen = collections.Counter()
  total_non_empty = collections.Counter()
  total_coordinates = collections.Counter()
  sum_precision = collections.defaultdict(float)
  # Fixed: previously only assigned inside the prediction loop, so an empty
  # predictions file raised NameError below, and otherwise the *last* row
  # alone decided whether recall@k was reported.
  has_probabilities = False
  for question in questions.values():
    for answer_type in [AnswerType.ALL, _get_answer_type(question)]:
      total[answer_type] += 1
  for row in prediction_utils.iterate_predictions(predictions_file):
    question = questions.get(row['question_id'])
    if question is None:
      # The dataset lost some examples after an update.
      continue
    gold_coordinates = {(x.row_index, x.column_index)
                        for x in question.answer.answer_coordinates}
    coordinates = prediction_utils.parse_coordinates(row['answer_coordinates'])
    # We only care about finding one correct cell for the downstream model.
    correct_coordinates = len(coordinates & gold_coordinates)
    if 'token_probabilities' in row:
      has_probabilities = True
      best_cells = get_best_cells(json.loads(row['token_probabilities']))
      correct_at_k = {
          k: bool(set(best_cells[:k]) & gold_coordinates) for k in _RECALL_KS
      }
    else:
      correct_at_k = {}
    for answer_type in [AnswerType.ALL, _get_answer_type(question)]:
      total_coordinates[answer_type] += len(coordinates)
      total_correct[answer_type] += bool(correct_coordinates)
      total_seen[answer_type] += 1
      for k, correct in correct_at_k.items():
        total_correct_at_k[k][answer_type] += correct
      if coordinates:
        sum_precision[answer_type] += correct_coordinates / len(coordinates)
        total_non_empty[answer_type] += 1
  for answer_type in AnswerType:
    if total[answer_type]:
      recall_at_k = {
          f'recall_at_{k}': (total_correct_at_k[k][answer_type] /
                             total[answer_type]) if has_probabilities else None
          for k in _RECALL_KS
      }
      yield answer_type, CellSelectionMetrics(
          recall=total_correct[answer_type] / total[answer_type],
          non_empty=total_non_empty[answer_type] / total[answer_type],
          coverage=total_seen[answer_type] / total[answer_type],
          answer_len=total_coordinates[answer_type] / total[answer_type],
          precision=((sum_precision[answer_type] / total_non_empty[answer_type])
                     if total_non_empty[answer_type] else None),
          **recall_at_k,
      )
def _parse_cell(
    cell,
    text,
    links,
    descriptions,
):
  """Parse text and links in a table cell."""
  # OrderedDict.fromkeys deduplicates links while keeping first-seen order.
  for link in collections.OrderedDict.fromkeys(links):
    annotation = cell.Extensions[_annotated_cell].annotations.add()
    annotation.identifier = urllib.parse.unquote(link)
    if annotation.identifier not in descriptions:
      raise ValueError(f'{link} not found in requests')
  cell.text = text
def parse_table(
    json_dict,
    descriptions,
):
  """Converts Table in JSON format to Table proto."""
  table = interaction_pb2.Table()
  table.table_id = str(json_dict['uid'])
  table.document_title = json_dict['title']
  table.document_url = json_dict['url']
  # Header and body cells carry (text, links) pairs in the source JSON.
  for header_text, header_links in json_dict['header']:
    _parse_cell(table.columns.add(), header_text, header_links, descriptions)
  for row_data in json_dict['data']:
    row_proto = table.rows.add()
    for cell_text, cell_links in row_data:
      _parse_cell(row_proto.cells.add(), cell_text, cell_links, descriptions)
  return table
def _normalize(phrase):
  """Lower text and remove punctuation, articles and extra whitespace."""
  words = phrase.translate(_TRANSLATION_TABLE).lower().split()
  kept = (word for word in words if word not in _ARTICLES)
  return ' '.join(kept)
def find_answer_coordinates(
    answer_text, table, desc_map
):
  """Returns coordinates and matched identifiers for an answer text in a table."""
  answer = _normalize(answer_text)
  all_coordinates = set()
  table_only_coordinates = []
  matched_identifiers = collections.Counter()
  for row_index, row in enumerate(table.rows):
    for column_index, cell in enumerate(row.cells):
      coordinate = (row_index, column_index)
      if text_utils.find_all_substrings(answer, _normalize(cell.text)):
        # Direct hit in the cell text itself.
        all_coordinates.add(coordinate)
        table_only_coordinates.append(coordinate)
        continue
      # Otherwise search the linked entity descriptions; the first matching
      # annotation is enough for this cell.
      for annotation in cell.Extensions[_annotated_cell].annotations:
        if text_utils.find_all_substrings(
            answer, _normalize(desc_map[annotation.identifier])):
          all_coordinates.add(coordinate)
          matched_identifiers[annotation.identifier] += 1
          break
  return all_coordinates, table_only_coordinates, matched_identifiers
def find_dataset_coordinates(
    example_json_or_question
):
  """Yields tuples of coordinates from a dataset example.

  Args:
    example_json_or_question: Input example either a json dict of an example or
      a Question proto.
  """
  if isinstance(example_json_or_question, Mapping):
    # JSON example: coordinates live in the optional 'answer-node' entries.
    answer_nodes = example_json_or_question.get('answer-node', [])
    for _, coordinates, _, _ in answer_nodes:
      # The answer text may appear at this cell, or in paragraph linked here.
      yield tuple(coordinates)
  else:  # isinstance of interaction_pb2.Question
    question = example_json_or_question  # type: interaction_pb2.Question
    for coordinate in question.answer.answer_coordinates:
      yield (coordinate.row_index, coordinate.column_index)
def _parse_interaction(
    table,
    descriptions,
    example,
    counters,
):
  """Converts a single example to an interaction with a single question.

  Args:
    table: Table proto for this interaction.
    descriptions: The Wikipedia intro for each entity in the Table annotations.
    example: Question parsed from input JSON file.
    counters: Used for logging events as the interactions are parsed.

  Returns:
    Interaction proto.
  """
  interaction = interaction_pb2.Interaction()
  # We append -0 that corresponds to position annotator field
  interaction.id = example['question_id'] + '-0'
  interaction.table.CopyFrom(table)
  # Copy the entity descriptions into the interaction's extension map.
  desc_map = interaction.Extensions[_annotation_descriptions].descriptions
  for key, value in descriptions.items():
    desc_map[key] = value
  question = interaction.questions.add()
  # We append _0 that corresponds to SQA position field
  question.id = f'{interaction.id}_0'
  question.original_text = example['question']
  # Reference answer for the question. The test set answers are hidden.
  if 'answer-text' in example:
    true_coordinates, table_only_coordinates, matched_identifiers = find_answer_coordinates(
        example['answer-text'], table, desc_map)
    question.answer.answer_texts.append(example['answer-text'])
    # We use this field to store just the table answers
    table_only_answer = question.alternative_answers.add()
    for row_index, column_index in table_only_coordinates:
      table_only_answer.answer_coordinates.add(
          row_index=row_index, column_index=column_index)
    for row_index, column_index in true_coordinates:
      question.answer.answer_coordinates.add(
          row_index=row_index, column_index=column_index)
    # This is used to compare the examples we find against the ones in the data.
    dataset_coordinates = frozenset(find_dataset_coordinates(example))
    # Set comparisons: `>` / `<` are proper superset / subset tests, so the
    # four branches classify our extraction against the dataset's answer
    # nodes (the final `else` covers partial/disjoint overlap).
    if true_coordinates > dataset_coordinates:
      counters['Missing answers in dataset'] += 1
    elif true_coordinates < dataset_coordinates:
      counters['Missing answers in extraction'] += 1
    elif true_coordinates == dataset_coordinates:
      counters['Same answers'] += 1
    else:
      counters['Disjoint answers'] += 1
    counters[f'Answer type is {_get_answer_type(question).value}'] += 1
    if any(count > 1 for count in matched_identifiers.values()):
      counters['Answers in repeated identifier'] += 1
    if len(true_coordinates) > 1:
      counters['Multiple answers'] += 1
  return interaction
def extract_zip_archive(zip_path):
  """Unpacks the zip archive at `zip_path` into a fresh temp directory."""
  target_dir = tempfile.mkdtemp()
  with tf.io.gfile.GFile(zip_path, 'rb') as raw_zip:
    with zipfile.ZipFile(raw_zip) as archive:
      archive.extractall(target_dir)
  return target_dir
def read_json_directory(path):
  """Read all files in directory and load them as JSON."""
  result = {}
  for filename in tf.io.gfile.listdir(path):
    filepath = os.path.join(path, filename)
    with tf.io.gfile.GFile(filepath, 'r') as filehandle:
      parsed = json.load(filehandle)
    key = os.path.splitext(filename)[0]  # Removing the extension '.json'
    if key in result:
      raise ValueError(f'Duplicate id: {key}')
    result[key] = parsed
  return result
def _convert_split(
    split, main_path, table_dict,
    descriptions_dict
):
  """Convert a single JSON file to Interaction protos."""
  # Train/dev ship with answer tracing ('.traced.json'); test does not.
  if split in (Split.TRAIN, Split.DEV):
    filename = f'{split.value}.traced.json'
  else:
    filename = f'{split.value}.json'
  file_path = os.path.join(main_path, filename)
  with tf.io.gfile.GFile(file_path, 'r') as input_file:
    examples = json.load(input_file)
  counters = collections.Counter()
  for example in examples:
    table_id = str(example['table_id'])
    yield _parse_interaction(
        table=table_dict[table_id],
        descriptions=descriptions_dict[table_id],
        example=example,
        counters=counters,
    )
  logging.info('Counters for %s: %s', split.value, counters)
def convert(
    input_dir):
  """Reads examples and table in JSON format and converts to interactions."""
  extracted = extract_zip_archive(os.path.join(input_dir, f'{_ZIP_NAME}.zip'))
  # For each table, the Wikipedia intro of each entity linked to the table.
  descriptions = read_json_directory(
      os.path.join(extracted, _ZIP_NAME, 'request_tok'))
  raw_tables = read_json_directory(
      os.path.join(extracted, _ZIP_NAME, 'tables_tok'))
  tables = {}
  for key, json_map in raw_tables.items():
    tables[key] = parse_table(json_map, descriptions[key])
  main_path = os.path.join(input_dir, 'released_data')
  result = {}
  for split in Split:
    result[split.value] = _convert_split(split, main_path, tables, descriptions)
  return result
|
<filename>hard/312-burst-balloons.py
'''
戳气球
有 n 个气球,编号为0 到 n - 1,每个气球上都标有一个数字,这些数字存在数组 nums 中。
现在要求你戳破所有的气球。戳破第 i 个气球,你可以获得 nums[i - 1] * nums[i] * nums[i + 1] 枚硬币。
这里的 i - 1 和 i + 1 代表和 i 相邻的两个气球的序号。如果 i - 1或 i + 1 超出了数组的边界,那么就当它是一个数字为 1 的气球。
求所能获得硬币的最大数量。
示例 1:
输入:nums = [3,1,5,8]
输出:167
解释:
nums = [3,1,5,8] --> [3,5,8] --> [3,8] --> [8] --> []
coins = 3*1*5 + 3*5*8 + 1*3*8 + 1*8*1 = 167
示例 2:
输入:nums = [1,5]
输出:10
提示:
n == nums.length
1 <= n <= 500
0 <= nums[i] <= 100
'''
from typing import List
'''
思路1,暴力回溯
暴力回溯算法,每层回溯都尝试从数组中挑选1个戳破
时间复杂度:O(n!)
空间复杂度:O(n)
思路2,动态规划
设动态规划数组dp[m][n],意思是区间nums[m...n]能戳破气球能获得的最大硬币数
要想获得区间内能获得的最大硬币数,需要有一个变量k指向最后戳破的气球,k在区间[m..n]之间
状态转移方程为:dp[m][n] = max(dp[m][k]+dp[k][n]+nums[m]*nums[k]*nums[n]),k需要遍历从m到n的位置
时间复杂度:O(n^3)
空间复杂度:O(n^2)
'''
class Solution:
    """Burst Balloons (LeetCode 312): maximize total coins from bursting all balloons."""

    # Approach 1: brute-force backtracking — O(n!) time, O(n) space.
    def maxCoins1(self, nums: List[int]) -> int:
        if len(nums) == 1:
            return nums[0]

        def burst_all(balloons):
            # Two balloons left: try both burst orders, keep the better one.
            if len(balloons) == 2:
                first, second = balloons
                return max(first + first * second, first * second + second)
            best = 0
            for idx, value in enumerate(balloons):
                # Out-of-range neighbours count as virtual balloons of value 1.
                left = balloons[idx - 1] if idx > 0 else 1
                right = balloons[idx + 1] if idx < len(balloons) - 1 else 1
                remaining = balloons[:idx] + balloons[idx + 1:]
                best = max(best, left * value * right + burst_all(remaining))
            return best

        return burst_all(nums)

    # Approach 2: interval dynamic programming — O(n^3) time, O(n^2) space.
    # dp[l][r] = max coins from bursting every balloon strictly inside (l, r),
    # over a padded array with virtual value-1 balloons at both ends. For each
    # interval we enumerate the balloon burst *last*:
    # dp[l][r] = max(dp[l][k] + dp[k][r] + padded[l]*padded[k]*padded[r]).
    def maxCoins(self, nums: List[int]) -> int:
        padded = [1, *nums, 1]
        size = len(padded)
        dp = [[0] * size for _ in range(size)]
        for width in range(2, size):              # distance between interval ends
            for left in range(size - width):      # interval [left, left + width]
                right = left + width
                for last in range(left + 1, right):   # balloon burst last
                    candidate = (dp[left][last] + dp[last][right]
                                 + padded[left] * padded[last] * padded[right])
                    dp[left][right] = max(dp[left][right], candidate)
        return dp[0][size - 1]
# Quick smoke test against the two examples from the problem statement.
solver = Solution()
print(solver.maxCoins([3, 1, 5, 8]))  # expected: 167
print(solver.maxCoins([1, 5]))        # expected: 10
|
'''
Created on 14 feb 2016
@author: ghedinip
'''
from __future__ import absolute_import, division, print_function, unicode_literals
# NOTE(review): __all__ lists "Params", which is not defined in this module's
# visible code — confirm it exists or remove it from the export list.
__all__ = ["DateTime", "Date", "Time", "Boolean", "Period", "Address", "Params", "ALL"]
import json
import dateutil.parser
import re
from datetime import time, date, datetime
# Python 2/3 compatibility shim: Python 3 has no `basestring`, so alias it to
# `str` for the isinstance checks used throughout this module.
try:
    basestring
except NameError:
    basestring = str
class Boolean(object):
    '''
    FHIR Boolean Type.

    Wraps a JSON boolean (or the string spellings "true"/"True") from a FHIR
    payload. Constructing with ``json=None`` yields ``None`` instead of an
    instance, mirroring optional FHIR elements.
    '''
    def __new__(cls, json = None):
        '''
        Return None for an absent element, otherwise a real instance.
        '''
        if json is None:
            return None
        return object.__new__(cls)
    def __init__(self, json = None):
        '''
        Bool Constructor.

        json: a bool, or a string where "true"/"True" map to True and any
        other string maps to False; other types are rejected with a warning.
        '''
        # bool is checked first (bool and str are disjoint, so order does not
        # change behavior, but the bool path then works without the
        # `basestring` shim).
        if isinstance(json, bool):
            self.__bool = json
        elif isinstance(json, basestring):
            self.__bool = json in ("true", "True")
        else:
            print("FHIR Boolean: Bad Parameter")
            self.__bool = None
    @property
    def json(self):
        '''
        The wrapped plain bool, or None when construction failed.
        '''
        return self.__bool
    def __repr__(self):
        # Bug fix: the original returned None for an unset value, which makes
        # repr() raise TypeError — __repr__ must always return a str.
        if self.__bool is None:
            return "None"
        return "True" if self.__bool else "False"
class Date(object):
    '''
    FHIR Date Type

    Wraps a calendar date, parsed either from an ISO date string (``json``)
    or built from explicit year/month/day components. Constructing with no
    arguments yields ``None``, mirroring optional FHIR elements.
    '''
    def __new__(self, json = None, year = None, month = None, day = None):
        '''
        Date Constructor

        Returns None when every argument is absent (so ``Date()`` is None),
        otherwise a real instance.
        '''
        if (json == None) and (year == None) and (month == None) and (day == None):
            return None
        else:
            return object.__new__(self)
    def __init__(self, json = None, year = None, month = None, day = None):
        '''
        Date Constructor

        json: ISO-8601 date string (takes precedence when given).
        year/month/day: individual components; missing ones default to 0.
        '''
        if json != None:
            str_arg = json
            if isinstance(str_arg, basestring):
                self.__date = dateutil.parser.parse(str_arg).date()
                self.year = self.__date.year
                self.month = self.__date.month
                self.day = self.__date.day
            else:
                # NOTE(review): after this branch every field is None, so the
                # `json` property below raises TypeError — confirm callers
                # never serialize a Date built from a bad parameter.
                print("FHIR Date: Bad Parameter")
                print(str(json) + " - " +str(year) + " - " +str(month) + " - " +str(day))
                self.__date = None
                self.year = None
                self.month = None
                self.day = None
        else:
            # NOTE(review): missing components default to 0, but datetime.date
            # requires year/month/day >= 1, so a partially specified Date
            # (e.g. Date(year=2020)) raises ValueError below — confirm intended.
            if year:
                self.year = year
            else:
                self.year = 0
            if month:
                self.month = month
            else:
                self.month = 0
            if day:
                self.day = day
            else:
                self.day = 0
            self.__date = date(self.year, self.month, self.day)
    @property
    def json(self):
        '''
        ISO-8601 string for the date, rebuilt from the public components so
        that external mutation of year/month/day is reflected.
        '''
        self.__date = date(self.year, self.month, self.day)
        return self.__date.isoformat()
    def __repr__(self):
        return self.json
class DateTime(object):
    '''
    FHIR DateTime Type

    Wraps a timestamp, parsed either from an ISO datetime string (``json``)
    or built from explicit components. Constructing with no arguments yields
    ``None``, mirroring optional FHIR elements.
    '''
    def __new__(self, json = None, year = None, month = None, day = None, hour = None, minute = None, second = None, microsec = None, tz = None):
        '''
        DateTime Constructor

        Returns None when every argument is absent, otherwise an instance.
        '''
        if (json == None) and (year == None) and (month == None) and (day == None) and \
           (hour == None) and (minute == None) and (second == None) and (microsec == None) and (tz == None):
            return None
        else:
            return object.__new__(self)
    def __init__(self, json = None, year = None, month = None, day = None, hour = None, minute = None, second = None, microsec = None, tz = None):
        '''
        DateTime Constructor

        json: ISO-8601 datetime string (takes precedence when given).
        year..microsec: individual components; missing ones default to 0.
        tz: tzinfo instance (None yields a naive datetime).
        '''
        if json != None:
            str_arg = json
            if isinstance(str_arg, basestring):
                self.__dt = dateutil.parser.parse(str_arg)
                self.year = self.__dt.date().year
                self.month = self.__dt.date().month
                self.day = self.__dt.date().day
                self.hour = self.__dt.time().hour
                self.minute = self.__dt.time().minute
                self.second = self.__dt.time().second
                self.microsec = self.__dt.time().microsecond
                self.tz = self.__dt.tzinfo
                self.__datetime = datetime(self.year, self.month, self.day, self.hour, self.minute, self.second, self.microsec, self.tz)
            else:
                # NOTE(review): with every field None the `json` property
                # raises TypeError — confirm callers never serialize a
                # DateTime built from a bad parameter.
                print("FHIR DateTime: Bad Parameter")
                self.year = None
                self.month = None
                self.day = None
                self.hour = None
                self.minute = None
                self.second = None
                self.microsec = None
                self.tz = None
                self.__datetime = None
        else:
            # NOTE(review): missing date components default to 0, but
            # datetime() requires year/month/day >= 1, so a partially
            # specified DateTime raises ValueError below — confirm intended.
            if year:
                self.year = year
            else:
                self.year = 0
            if month:
                self.month = month
            else:
                self.month = 0
            if day:
                self.day = day
            else:
                self.day = 0
            if hour:
                self.hour = hour
            else:
                self.hour = 0
            if minute:
                self.minute = minute
            else:
                self.minute = 0
            if second:
                self.second = second
            else:
                self.second = 0
            if microsec:
                self.microsec = microsec
            else:
                self.microsec = 0
            self.tz = tz
            self.__datetime = datetime(self.year, self.month, self.day, self.hour, self.minute, self.second, self.microsec, self.tz)
    @property
    def json(self):
        '''
        ISO-8601 string for the timestamp, rebuilt from the public components
        so that external mutation of the fields is reflected.
        '''
        self.__datetime = datetime(self.year, self.month, self.day, self.hour, self.minute, self.second, self.microsec, self.tz)
        return self.__datetime.isoformat()
    def __repr__(self):
        return self.json
class Time(object):
    '''
    FHIR Time Type.

    Wraps a time of day, parsed either from an ISO time string (``json``) or
    built from explicit components. Constructing with no arguments yields
    ``None``, mirroring optional FHIR elements.
    '''
    def __new__(cls, json = None, hour = None, minute = None, second = None, microsec = None):
        '''
        Return None when every argument is absent, otherwise an instance.
        '''
        if (json is None) and (hour is None) and (minute is None) and \
                (second is None) and (microsec is None):
            return None
        return object.__new__(cls)
    def __init__(self, json = None, hour = None, minute = None, second = None, microsec = None):
        '''
        Time Constructor.

        json: ISO-8601 time string (takes precedence when given).
        hour/minute/second/microsec: individual components; missing ones
        default to 0 (all valid for datetime.time).
        '''
        if json is not None:
            if isinstance(json, basestring):
                parsed = dateutil.parser.parse(json).time()
                self.hour = parsed.hour
                self.minute = parsed.minute
                self.second = parsed.second
                self.microsec = parsed.microsecond
                self.__time = time(self.hour, self.minute, self.second, self.microsec)
            else:
                # Bug fix: this branch printed "FHIR DateTime: Bad Parameter"
                # and assigned self.__datetime — both copy-paste leftovers
                # from the DateTime class.
                print("FHIR Time: Bad Parameter")
                self.hour = None
                self.minute = None
                self.second = None
                self.microsec = None
                self.__time = None
        else:
            self.hour = hour if hour else 0
            self.minute = minute if minute else 0
            self.second = second if second else 0
            self.microsec = microsec if microsec else 0
            self.__time = time(self.hour, self.minute, self.second, self.microsec)
    @property
    def json(self):
        '''
        ISO-8601 string for the time, rebuilt from the public components so
        that external mutation of the fields is reflected.
        '''
        self.__time = time(self.hour, self.minute, self.second, self.microsec)
        return self.__time.isoformat()
    def __repr__(self):
        return self.json
class Period(object):
    '''
    FHIR Period Object

    A time range with optional ``start`` and ``end`` DateTime bounds.
    Constructing with no arguments yields None.
    '''
    def __add(self, key):
        # Fetch an optional key from the source JSON dict (None when absent).
        if key in self.__json:
            #return DateTime(self.__json[key])
            return self.__json[key]
        else:
            return None
    def __new__(self, json = None, start = None, end = None):
        '''
        FHIR Period __new__

        Returns None when both the JSON dict and the explicit bounds are
        absent, otherwise a real instance.
        '''
        if (json == None) and (start == None) and (end == None):
            return None
        else:
            return object.__new__(self)
    def __init__(self, json = None, start = None, end = None):
        '''
        FHIR Period constructor

        json: source dict with optional 'start'/'end' entries (takes
        precedence); otherwise `start`/`end` are used directly.
        '''
        # NOTE(review): both paths pass raw values to DateTime(), whose
        # string branch only accepts strings — passing DateTime instances
        # here hits DateTime's bad-parameter branch; confirm callers pass
        # ISO strings.
        if json:
            self.__json = json
            self.start = DateTime(self.__add('start'))
            self.end = DateTime(self.__add('end'))
        else:
            self.start = DateTime(start)
            self.end = DateTime(end)
            self.__json = self.json
    @property
    def json(self):
        '''
        Rebuild and return the JSON dict from the current bounds; absent
        bounds are omitted.
        '''
        self.__json = {}
        if self.start:
            self.__json['start'] = self.start.json
        if self.end:
            self.__json['end'] = self.end.json
        return self.__json
    def __repr__(self):
        '''
        FHIR Period string Representation
        '''
        return json.dumps(self.json, indent=4, separators=(',', ': '))
class Address(object):
    '''
    FHIR Address object

    A postal address, built either from a FHIR JSON dict or from explicit
    keyword arguments. Constructing with no arguments yields None.
    '''
    def __add(self, key):
        # Fetch an optional scalar key from the source JSON (None when absent).
        if key in self.__json:
            return self.__json[key]
        else:
            return None
    def __add_list(self, key):
        # Fetch an optional list-valued key as a shallow copy (None when absent).
        if key in self.__json:
            list_to_add = []
            for obj in self.__json[key]:
                list_to_add.append(obj)
            return list_to_add
        else:
            return None
    def __get_list(self, var):
        # Serialize a list attribute: plain JSON values (dict/list/str) pass
        # through unchanged; other objects contribute their `.json` form.
        if var == None:
            return None
        else:
            list_obj = []
            for obj in var:
                if isinstance(obj, dict) or isinstance(obj, list) or isinstance(obj, basestring):
                    list_obj.append(obj)
                else:
                    list_obj.append(obj.json)
            return list_obj
    def __new__(self, json = None, use = None, type_ = None, text = None, line = None, city = None,\
                district = None, state = None, postalCode = None, country = None, period = None):
        '''
        FHIR Address __new__

        Returns None when every argument is absent, otherwise an instance.
        '''
        if (json == None) and (use == None) and (type_ == None) and (text == None) and\
           (line == None) and (city == None) and (district == None) and\
           (state == None) and (postalCode == None) and (country == None) and\
           (period == None):
            return None
        else:
            return object.__new__(self)
    def __init__(self, json = None, use = None, type_ = None, text = None, line = None, city = None,\
                 district = None, state = None, postalCode = None, country = None, period = None):
        '''
        FHIR Address constructor

        json: source FHIR dict (takes precedence); otherwise the individual
        keyword arguments populate the fields directly. `type_` is exposed as
        the `type` attribute.
        '''
        if json:
            self.__json = json
            self.use = self.__add('use')
            self.type = self.__add('type')
            self.text = self.__add('text')
            self.line = self.__add_list('line')
            self.city = self.__add('city')
            self.district = self.__add('district')
            self.state = self.__add('state')
            self.postalCode = self.__add('postalCode')
            self.country = self.__add('country')
            self.period = Period(self.__add('period'))
        else:
            self.use = use
            self.type = type_
            self.text = text
            self.line = line
            self.city = city
            self.district = district
            self.state = state
            self.postalCode = postalCode
            self.country = country
            self.period = Period(period)
            self.__json = self.json
    @property
    def json(self):
        '''
        Rebuild and return the JSON dict from the current field values;
        empty/None fields are omitted.
        '''
        self.__json = {}
        if self.use:
            self.__json['use'] = self.use
        if self.type:
            self.__json['type'] = self.type
        if self.text:
            self.__json['text'] = self.text
        if self.line:
            self.__json['line'] = self.__get_list(self.line)
        if self.city:
            self.__json['city'] = self.city
        if self.district:
            self.__json['district'] = self.district
        if self.state:
            self.__json['state'] = self.state
        if self.postalCode:
            self.__json['postalCode'] = self.postalCode
        if self.country:
            self.__json['country'] = self.country
        if self.period:
            self.__json['period'] = self.period.json
        return self.__json
    def __repr__(self):
        '''
        FHIR Address string Representation
        '''
        return json.dumps(self.json, indent=4, separators=(',', ': '))
"""
class HumanName(object):
'''
FHIR HumanName Object
'''
def __add(self, key):
if key in self.__json:
return self.__json[key]
else:
return None
def __add_list(self, key):
if key in self.__json:
list_to_add = []
for obj in self.__json[key]:
list_to_add.append(obj)
return list_to_add
else:
return None
def __get_list(self, var):
if var == None:
return None
else:
list_obj = []
for obj in var:
if isinstance(obj, dict) or isinstance(obj, list) or isinstance(obj, basestring):
list_obj.append(obj)
else:
list_obj.append(obj.json)
return list_obj
def __new__(self, json = None, use = None, text = None, family = None, given = None, \
prefix = None, suffix = None, period = None):
'''
FHIR HumaName __new__
'''
if (json == None) and (use == None) and (text == None) and (family == None) and \
(given == None) and (prefix == None) and (suffix == None) and (period == None):
return None
else:
return object.__new__(self)
def __init__(self, json = None, use = None, text = None, family = None, given = None, \
prefix = None, suffix = None, period = None):
'''
FHIR HumanName constructor
'''
if json:
self.__json = json
self.use = self.__add('use')
self.text = self.__add('text')
self.family = self.__add_list('family')
self.given = self.__add_list('given')
self.prefix = self.__add_list('prefix')
self.suffix = self.__add_list('suffix')
self.period = Period(self.__add('period'))
else:
self.use = use
self.text = text
self.family = family
self.given = given
self.prefix = prefix
self.suffix = suffix
self.period = Period(period)
self.__json = self.json
@property
def json(self):
self.__json = {}
if self.use:
self.__json['use'] = self.use
if self.text:
self.__json['text'] = self.text
if self.family:
self.__json['family'] = self.__get_list(self.family)
if self.given:
self.__json['given'] = self.__get_list(self.given)
if self.prefix:
self.__json['prefix'] = self.__get_list(self.prefix)
if self.suffix:
self.__json['suffix'] = self.__get_list(self.suffix)
if self.period:
self.__json['period'] = self.period.json
return self.__json
def __repr__(self):
'''
FHIR HumanName string Representation
'''
return json.dumps(self.json, indent=4, separators=(',', ': '))
"""
class ALL(object):
'''
search on all Resources Object
Auxiliary Object to deal with queries
'''
class __Oper(object):
    '''
    Oper Object

    Auxiliary object that exposes, per FHIR search-parameter type, the
    query operators valid for that type.  Each operator is a callable
    attached to the instance (via setattr) that takes a value and
    returns a one-entry dict {query_key: rendered_value} ready to be
    merged into a query string.

    Fix: the "LT" operator was previously bound to the "gt" renderer;
    it now correctly renders "lt<value>".
    '''

    # Python 2/3 compatible string type: strings are emitted verbatim,
    # everything else through repr().
    try:
        _STRING_TYPES = basestring  # noqa: F821  (Python 2)
    except NameError:
        _STRING_TYPES = str  # Python 3

    # FHIR comparison prefixes shared by number/date/quantity parameters.
    _COMPARISONS = (('EQ', 'eq'), ('NE', 'ne'), ('GT', 'gt'), ('LT', 'lt'),
                    ('GE', 'ge'), ('LE', 'le'), ('SA', 'sa'), ('EB', 'eb'),
                    ('AP', 'ap'))

    def _render(self, fhir_obj):
        '''Represent a value without quoting strings.'''
        if isinstance(fhir_obj, self._STRING_TYPES):
            return fhir_obj
        return repr(fhir_obj)

    def _prefix_op(self, prefix):
        '''Build an operator rendering {param: "<prefix><value>"}.'''
        def operand(fhir_obj):
            return {self._param: prefix + self._render(fhir_obj)}
        return operand

    def _modifier_op(self, suffix):
        '''Build an operator rendering {param<suffix>: "<value>"}.'''
        def operand(fhir_obj):
            return {self._param + suffix: self._render(fhir_obj)}
        return operand

    def _fixed_op(self, key):
        '''Build an operator with a fixed query key (param ignored).'''
        def operand(fhir_obj):
            return {key: self._render(fhir_obj)}
        return operand

    def _const_op(self, key, literal):
        '''Build a zero-argument operator rendering {key: literal}.'''
        def operand():
            return {key: literal}
        return operand

    def __init__(self, param, value):
        '''
        Oper constructor.

        :param param: name of the search parameter (e.g. "_id").
        :param value: FHIR type of the parameter; it determines which
                      operator attributes are attached to the instance.
        '''
        self._param = param
        ops = {}
        if value in ('number', 'date', 'quantity'):
            ops['Missing'] = self._modifier_op(':missing')
            for name, prefix in self._COMPARISONS:
                ops[name] = self._prefix_op(prefix)
        elif value == 'reference':
            ops['Missing'] = self._modifier_op(':missing')
            ops['EQ'] = self._modifier_op('')
        elif value == 'sort':
            # The "sort" pseudo-type exposes the asc/desc modifiers,
            # which appear as the available operations after "_sort".
            ops['asc'] = self._fixed_op('_sort:asc')
            ops['desc'] = self._fixed_op('_sort:desc')
        elif value in ('count', 'elements', 'contained', 'containedType',
                       'include', 'revinclude'):
            # Result parameters: the query key is simply "_<type>".
            ops['EQ'] = self._fixed_op('_' + value)
        elif value == 'summary':
            # _summary takes one of five literal values, each exposed as
            # a zero-argument operator.
            for name, literal in (('TrueOp', 'true'), ('TextOp', 'text'),
                                  ('DataOp', 'data'), ('CountOp', 'count'),
                                  ('FalseOp', 'false')):
                ops[name] = self._const_op('_summary', literal)
        elif value == 'token':
            for name, suffix in (('EQ', ''), ('Text', ':text'),
                                 ('Not', ':not'), ('Above', ':above'),
                                 ('Below', ':below'), ('In', ':in'),
                                 ('Not_in', ':not-in'),
                                 ('Missing', ':missing')):
                ops[name] = self._modifier_op(suffix)
        elif value == 'string':
            for name, suffix in (('EQ', ''), ('Contains', ':contains'),
                                 ('Exact', ':exact'), ('Text', ':text'),
                                 ('Missing', ':missing')):
                ops[name] = self._modifier_op(suffix)
        elif value == 'text':
            for name, suffix in (('In', ':in'), ('Not_in', ':not-in'),
                                 ('Missing', ':missing'), ('Above', ':above'),
                                 ('Below', ':below')):
                ops[name] = self._modifier_op(suffix)
        elif value == 'uri':
            for name, suffix in (('EQ', ''), ('Above', ':above'),
                                 ('Below', ':below'), ('Missing', ':missing')):
                ops[name] = self._modifier_op(suffix)
        for name, operand in ops.items():
            setattr(self, name, operand)
# Class-level search parameters common to every FHIR resource type.
# Each is an __Oper instance; the second argument is the parameter's
# FHIR type, which determines the operator attributes that __Oper
# attaches to the instance in its constructor.
id_ = __Oper("_id","string")
lastUpdated_ = __Oper("_lastUpdated","date")
tag_ = __Oper("_tag","token")
profile_ = __Oper("_profile","uri")
security_ = __Oper("_security","token")
text_ = __Oper("_text","string")
content_ = __Oper("_content","string")
list_ = __Oper("_list","string")
query_ = __Oper("_query","string")
sort_ = __Oper("_sort","sort")
count_ = __Oper("_count", "count")
elements_ = __Oper("_elements", "elements")
contained_ = __Oper("_contained", "contained")
containedType_ = __Oper("_containedType", "containedType")
include_ = __Oper("_include", "include")
revinclude_ = __Oper("_revinclude", "revinclude")
summary_ = __Oper("_summary", "summary")
# NOTE(review): the constructor appears intentionally empty -- all state
# lives in the class-level attributes above; confirm no per-instance
# setup is expected by callers.
def __init__(self):
    '''
    search_cond Constructor
    '''
class Params(object):
    '''
    Params Object

    Auxiliary object that exposes one Oper attribute per search
    parameter supported by a resource, in addition to the common FHIR
    parameters (_id, _lastUpdated, ...).
    '''

    def _norm(self, str_in):
        '''Normalize a parameter name into a valid attribute name:
        "-" is not legal in Python identifiers, so it becomes "_".'''
        return re.sub("-", "_", str_in)

    def __new__(cls, *args, **kwargs):
        '''Return None instead of an instance when the first positional
        argument (the parameter list) is None, mirroring the FHIR
        __new__ convention used elsewhere in this module.'''
        # Fix: identity comparison with None (was `args[0] == None`) and
        # the conventional `cls` name for the implicit class argument.
        if args[0] is None:
            return None
        return object.__new__(cls)

    def __init__(self, params, resource):
        '''
        Params constructor.

        :param params: iterable of (name, type) pairs for the resource.
        :param resource: resource name forwarded to each Oper so it can
                         build resource-qualified query keys.
        '''
        # Common FHIR search parameters, available on every resource.
        self.id_ = Oper("_id", "string")
        self.lastUpdated_ = Oper("_lastUpdated", "date")
        self.tag_ = Oper("_tag", "token")
        self.profile_ = Oper("_profile", "uri")
        self.security_ = Oper("_security", "token")
        self.text_ = Oper("_text", "string")
        self.content_ = Oper("_content", "string")
        self.list_ = Oper("_list", "string")
        self.query_ = Oper("_query", "string")
        # Resource-specific parameters, attached under a normalized name.
        for name, kind in params:
            setattr(self, self._norm(name), Oper(name, kind, resource))
class Oper(object):
    '''
    Oper Object

    Auxiliary object that exposes, per FHIR search-parameter type, the
    query operators valid for that type.  Each operator is a callable
    attached to the instance (via setattr) taking (value, full=None) and
    returning a one-entry dict {query_key: rendered_value}; when `full`
    is truthy the key is qualified with the resource name as
    "<resource>.<param>".

    Fix: the "LT" operator was previously bound to the "gt" renderer;
    it now correctly renders "lt<value>".
    '''

    # Python 2/3 compatible string type: strings are emitted verbatim,
    # everything else through repr().
    try:
        _STRING_TYPES = basestring  # noqa: F821  (Python 2)
    except NameError:
        _STRING_TYPES = str  # Python 3

    # FHIR comparison prefixes shared by number/date/quantity parameters.
    _COMPARISONS = (('EQ', 'eq'), ('NE', 'ne'), ('GT', 'gt'), ('LT', 'lt'),
                    ('GE', 'ge'), ('LE', 'le'), ('SA', 'sa'), ('EB', 'eb'),
                    ('AP', 'ap'))

    def _render(self, fhir_obj):
        '''Represent a value without quoting strings.'''
        if isinstance(fhir_obj, self._STRING_TYPES):
            return fhir_obj
        return repr(fhir_obj)

    def _key(self, full, suffix=''):
        '''Query key: resource-qualified when `full` is truthy.'''
        base = self._res_param if full else self._param
        return base + suffix

    def _prefix_op(self, prefix):
        '''Build an operator rendering {key: "<prefix><value>"}.'''
        def operand(fhir_obj, full=None):
            return {self._key(full): prefix + self._render(fhir_obj)}
        return operand

    def _modifier_op(self, suffix):
        '''Build an operator rendering {key<suffix>: "<value>"}.'''
        def operand(fhir_obj, full=None):
            return {self._key(full, suffix): self._render(fhir_obj)}
        return operand

    def _fixed_op(self, key):
        '''Build an operator with a fixed query key (param and resource
        are ignored; `full` is accepted for interface uniformity).'''
        def operand(fhir_obj, full=None):
            return {key: self._render(fhir_obj)}
        return operand

    def __init__(self, param, value, resource=None):
        '''
        Oper constructor.

        :param param: name of the search parameter (e.g. "name").
        :param value: FHIR type of the parameter; it determines which
                      operator attributes are attached to the instance.
        :param resource: optional resource name used to build the
                         resource-qualified key for `full` lookups.
        '''
        self._param = param
        self._res_param = resource + "." + param if resource else param
        ops = {}
        if value in ('number', 'date', 'quantity'):
            ops['Missing'] = self._modifier_op(':missing')
            for name, prefix in self._COMPARISONS:
                ops[name] = self._prefix_op(prefix)
        elif value == 'reference':
            ops['Missing'] = self._modifier_op(':missing')
            ops['EQ'] = self._modifier_op('')
        elif value == 'sort':
            # The "sort" pseudo-type exposes the asc/desc modifiers,
            # which appear as the available operations after "_sort".
            ops['asc'] = self._fixed_op('_sort:asc')
            ops['desc'] = self._fixed_op('_sort:desc')
        elif value in ('count', 'elements', 'contained', 'containedType',
                       'summary', 'include', 'revinclude'):
            # Result parameters: the query key is simply "_<type>".
            ops['EQ'] = self._fixed_op('_' + value)
        elif value == 'token':
            for name, suffix in (('EQ', ''), ('Text', ':text'),
                                 ('Not', ':not'), ('Above', ':above'),
                                 ('Below', ':below'), ('In', ':in'),
                                 ('Not_in', ':not-in'),
                                 ('Missing', ':missing')):
                ops[name] = self._modifier_op(suffix)
        elif value == 'string':
            for name, suffix in (('EQ', ''), ('Contains', ':contains'),
                                 ('Exact', ':exact'), ('Text', ':text'),
                                 ('Missing', ':missing')):
                ops[name] = self._modifier_op(suffix)
        elif value == 'text':
            for name, suffix in (('In', ':in'), ('Not_in', ':not-in'),
                                 ('Missing', ':missing'), ('Above', ':above'),
                                 ('Below', ':below')):
                ops[name] = self._modifier_op(suffix)
        elif value == 'uri':
            for name, suffix in (('EQ', ''), ('Above', ':above'),
                                 ('Below', ':below'), ('Missing', ':missing')):
                ops[name] = self._modifier_op(suffix)
        for name, operand in ops.items():
            setattr(self, name, operand)

    def __repr__(self):
        return self._param
|
import torch
import torch.nn.functional as F
from ...utils import box_utils, loss_utils
from .point_head_template import PointHeadTemplate
class VoxelSegHead(PointHeadTemplate):
    """
    A simple point-based segmentation head, used for PV-RCNN keypoint segmentation.
    Reference Paper: https://arxiv.org/abs/1912.13192
    PV-RCNN: Point-Voxel Feature Set Abstraction for 3D Object Detection

    Predicts a per-voxel class from point/voxel features with a small FC
    classifier, and provides a class-frequency-balanced cross-entropy
    loss plus per-class IoU evaluation helpers.
    """

    def __init__(self, num_class, input_channels, model_cfg, **kwargs):
        super().__init__(model_cfg=model_cfg,
                         num_class=num_class)
        # Per-point classifier: input_channels -> CLS_FC -> num_class logits.
        self.cls_layers = self.make_fc_layers(
            fc_cfg=self.model_cfg.CLS_FC,
            input_channels=input_channels,
            output_channels=num_class
        )
        self.build_losses(self.model_cfg.LOSS_CONFIG)

    def build_losses(self, losses_cfg):
        """Register the classification loss and select the regression
        loss named by losses_cfg.LOSS_REG (defaults to smooth-l1)."""
        self.add_module(
            'cls_loss_func',
            loss_utils.WeightedCrossEntropyLoss()
        )
        reg_loss_type = losses_cfg.get('LOSS_REG', None)
        if reg_loss_type == 'smooth-l1':
            self.reg_loss_func = F.smooth_l1_loss
        elif reg_loss_type == 'l1':
            self.reg_loss_func = F.l1_loss
        elif reg_loss_type == 'WeightedSmoothL1Loss':
            self.reg_loss_func = loss_utils.WeightedSmoothL1Loss(
                code_weights=losses_cfg.LOSS_WEIGHTS.get('code_weights', None)
            )
        else:
            # Unknown or missing LOSS_REG falls back to smooth-l1.
            self.reg_loss_func = F.smooth_l1_loss

    def get_cls_layer_loss(self, tb_dict=None):
        """Class-balanced segmentation loss over all voxels.

        Labels < 0 are ignored.  Each labelled voxel is weighted by the
        inverse count of its ground-truth class (count clamped to >= 20)
        so rare classes contribute as much as frequent ones.

        Returns:
            (loss, tb_dict) where tb_dict also records per-class counts.
        """
        point_cls_labels = self.forward_ret_dict['voxel_seg_gt_labels'].view(-1).long()
        point_cls_preds = self.forward_ret_dict['voxel_seg_pred_logits'].view(-1, self.num_class)
        # Ground-truth population of each class, used both for the
        # inverse-frequency weighting and for tensorboard logging.
        cls_count = point_cls_preds.new_zeros(self.num_class)
        for i in range(self.num_class):
            cls_count[i] = (point_cls_labels == i).float().sum()
        positives = (point_cls_labels >= 0)
        positive_labels = point_cls_labels[positives]
        cls_weights = (1.0 * positives).float()
        pos_normalizer = torch.zeros_like(positives.float())
        pos_normalizer[positives] = cls_count[positive_labels]
        # min=20.0 caps the weight given to classes with very few points.
        cls_weights /= torch.clamp(pos_normalizer, min=20.0)
        # One-hot targets; negative labels are remapped to index 0, but
        # their weight is zero so they do not affect the loss.
        one_hot_targets = point_cls_preds.new_zeros(*list(point_cls_labels.shape), self.num_class)
        one_hot_targets.scatter_(-1, (point_cls_labels * (point_cls_labels >= 0).long()).unsqueeze(dim=-1).long(), 1.0)
        cls_loss_src = self.cls_loss_func(point_cls_preds.unsqueeze(0),
                                          one_hot_targets.unsqueeze(0),
                                          weights=cls_weights).squeeze(0)
        point_loss_cls = cls_loss_src.sum()
        loss_weights_dict = self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS
        point_loss_cls = point_loss_cls * loss_weights_dict['voxel_cls_weight']
        if tb_dict is None:
            tb_dict = {}
        tb_dict.update({
            'voxel_seg_loss_cls': point_loss_cls.item(),
        })
        for i in range(self.num_class):
            tb_dict.update({
                f'point_seg_cls{i}_num': cls_count[i].item(),
            })
        return point_loss_cls, tb_dict

    def get_loss(self, tb_dict=None):
        """Total head loss (classification only for this head)."""
        tb_dict = {} if tb_dict is None else tb_dict
        point_loss_cls, tb_dict_1 = self.get_cls_layer_loss()
        point_loss = point_loss_cls
        tb_dict.update(tb_dict_1)
        return point_loss, tb_dict

    def get_per_class_iou(self, batch_dict):
        """Per-class IoU aggregated over the whole batch.

        Fix: accumulate with out-of-place additions -- the previous
        in-place `ups += ...` mutated the tensors stored inside
        pred_dicts[0], corrupting the per-sample results.
        """
        pred_dicts = self.get_evaluation_results(batch_dict)
        ups = pred_dicts[0]['ups'].clone()
        downs = pred_dicts[0]['downs'].clone()
        for p in pred_dicts[1:]:
            ups = ups + p['ups']
            downs = downs + p['downs']
        iou = ups / downs.clamp(min=1.0)
        return iou

    def get_evaluation_results(self, batch_dict):
        """Per-sample intersection ('ups') and union ('downs') counts
        between predicted and ground-truth labels, ignoring labels < 0.

        NOTE(review): assumes batch_dict['voxel_seg_labels'] is aligned
        row-for-row with batch_dict['point_coords'] -- confirm upstream.
        """
        pred_logits = self.forward_ret_dict['voxel_seg_pred_logits']
        pred_scores = torch.sigmoid(pred_logits)
        point_coords = batch_dict['point_coords']
        pred_dicts = []
        for i in range(batch_dict['batch_size']):
            bs_mask = point_coords[:, 0] == i  # column 0 holds the batch index
            pred_confidences, pred_labels = pred_scores[bs_mask].max(-1)
            gt_labels = batch_dict['voxel_seg_labels'][bs_mask]
            valid_mask = (gt_labels >= 0)
            pred_labels = pred_labels[valid_mask]
            gt_labels = gt_labels[valid_mask]
            ups = pred_labels.new_zeros(self.num_class)
            downs = pred_labels.new_zeros(self.num_class)
            for cls in range(self.num_class):
                pred_mask = pred_labels == cls
                gt_mask = gt_labels == cls
                ups[cls] = (pred_mask & gt_mask).sum()
                downs[cls] = (pred_mask | gt_mask).sum()
            record_dict = dict(ups=ups, downs=downs)
            pred_dicts.append(record_dict)
        return pred_dicts

    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size:
                point_features: (N1 + N2 + N3 + ..., C) or (B, N, C)
                point_features_before_fusion: (N1 + N2 + N3 + ..., C)
                point_coords: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
                point_labels (optional): (N1 + N2 + N3 + ...)
                gt_boxes (optional): (B, M, 8)
        Returns:
            batch_dict:
                point_cls_scores: (N1 + N2 + N3 + ..., 1)
                point_part_offset: (N1 + N2 + N3 + ..., 3)
        """
        point_features = batch_dict[self.point_feature_key]
        point_pred_logits = self.cls_layers(point_features)  # (total_points, num_class)
        ret_dict = {
            'voxel_seg_pred_logits': point_pred_logits,
        }
        # Per-voxel confidence and argmax label from the sigmoid scores.
        point_pred_scores = torch.sigmoid(point_pred_logits)
        batch_dict['voxel_seg_pred_confidences'], batch_dict['voxel_seg_pred_labels'] = point_pred_scores.max(dim=-1)
        if self.training:
            # Ground truth is only needed to compute the training loss.
            ret_dict['voxel_seg_gt_labels'] = batch_dict['voxel_seg_labels']
        batch_dict.update(ret_dict)
        self.forward_ret_dict = ret_dict
        return batch_dict
|
""" Tests for datamegh.util.object util. """
import pytest
from datamegh.util.object import (
linearize,
delinearize,
merge,
dict_to_list,
list_to_dict,
without_attr,
with_only,
)
def test_dict_to_list_returns_list_when_valid_arguments_is_dict():
    """dict_to_list() maps each key/value pair to a {name, value} record."""
    source = {"foo": "bar", "just": "test", "hello": "world"}
    converted = dict_to_list(source)
    assert converted == [
        {"name": "foo", "value": "bar"},
        {"name": "just", "value": "test"},
        {"name": "hello", "value": "world"},
    ]
def test_dict_to_list_return_dict_with_custom_keys():
    """dict_to_list() honours custom key/value field names."""
    sizes = {"test101.txt": 23, "test201.txt": 24}
    expected = [
        {"key": "test101.txt", "size": 23},
        {"key": "test201.txt", "size": 24},
    ]
    assert dict_to_list(sizes, "key", "size") == expected
def test_dict_to_list_returns_empty_list_when_argument_is_empty_dict():
    """An empty dictionary converts to an empty list."""
    assert dict_to_list({}) == []
def test_dict_to_list_raises_exception_when_argument_is_invalid():
    """A non-dict argument raises AttributeError with a helpful message."""
    with pytest.raises(AttributeError) as excinfo:
        dict_to_list(1)
    expected_msg = "Argument must be a dictionary, invalid argument received '1'."
    assert excinfo.value.args[0] == expected_msg
def test_list_to_dict_returns_dict_when_valid_arguments_is_list():
    """list_to_dict() folds {name, value} records back into a dict."""
    records = [
        {"name": "mode", "value": 16877},
        {"name": "size", "value": 64},
        {"name": "name", "value": "timesheet"},
        {"name": "is_file", "value": False},
    ]
    expected = {"mode": 16877, "size": 64, "name": "timesheet", "is_file": False}
    assert list_to_dict(records) == expected
def test_list_to_dict_return_dict_with_custom_keys():
    """list_to_dict() honours custom key/value field names."""
    records = [{"key": "test101.txt", "size": 23}, {"key": "test201.txt", "size": 24}]
    expected = {"test101.txt": 23, "test201.txt": 24}
    assert list_to_dict(records, "key", "size") == expected
def test_list_to_dict_returns_empty_dict_when_argument_is_empty_list():
    """An empty list converts to an empty dictionary."""
    assert list_to_dict([]) == {}
def test_list_to_dict_raises_exception_when_argument_is_invalid():
    """A non-list argument raises AttributeError with a helpful message."""
    with pytest.raises(AttributeError) as excinfo:
        list_to_dict(1)
    expected_msg = "Argument must be a list, invalid argument received '1'."
    assert excinfo.value.args[0] == expected_msg
def test_linearize_1():
    """A dictionary that is already flat comes back unchanged."""
    flat = {"foo": "bar", "just": "test", "message": "Hello World!"}
    assert linearize(flat) == flat
def test_linearize_2():
    """A dictionary that is already linearized comes back unchanged."""
    linearized = {
        "foo.bar": "Foo Bar",
        "just.test": "Just Test",
        "just.a.simple.message": "Hello World!",
        "array[0]": "First",
        "array[1]": "Second",
    }
    assert linearize(linearized) == linearized
def test_linearize_3():
    """Nested dicts and lists are flattened into dotted keys."""
    nested = {
        "foo": {"bar": "Foo Bar"},
        "just": {"test": "Just Test", "a": {"simple": {"message": "Hello World!"}}},
        "array": ["First", "Second"],
    }
    flattened = linearize(nested)
    assert flattened == {
        "foo.bar": "Foo Bar",
        "just.test": "Just Test",
        "just.a.simple.message": "Hello World!",
        "array.0": "First",
        "array.1": "Second",
    }
def test_delinearize_1():
    """A plain flat dictionary comes back unchanged."""
    flat = {"foo": "bar", "just": "test", "message": "Hello World!"}
    assert delinearize(flat) == flat
def test_delinearize_2():
    """delinearize() rejects input that is already nested."""
    nested = {
        "foo": {"bar": "Foo Bar"},
        "just": {"test": "Just Test", "a": "b"},
        "array": ["First", "Second"],
    }
    with pytest.raises(AssertionError) as excinfo:
        delinearize(nested)
    assert excinfo.value.args[0] == "provided dict is not flat"
def test_delinearize_3():
    """Dotted keys are expanded back into nested dicts and lists."""
    linearized = {
        "foo.bar": "Foo Bar",
        "just.test": "Just Test",
        "just.a.simple.message": "Hello World!",
        "array.0": "First",
        "array.1": "Second",
    }
    nested = delinearize(linearized)
    assert nested == {
        "foo": {"bar": "Foo Bar"},
        "just": {"test": "Just Test", "a": {"simple": {"message": "Hello World!"}}},
        "array": ["First", "Second"],
    }
def test_delinearize_4():
    """Numeric dotted keys are expanded into a list."""
    linearized = {"array.0": "Test 1", "array.1": "Test 2", "array.2": "Test 3"}
    assert delinearize(linearized) == {"array": ["Test 1", "Test 2", "Test 3"]}
def test_merge_v0():
    """merge() combines dictionaries recursively when keys do not conflict."""
    base = {"key1": "value1", "key2": {"key3": "value3", "key4": "value4"}}
    override = {"keyA": "valueA", "key2": {"keyB": "valueB", "keyC": {"foo": "bar"}}}
    assert merge(base, override) == {
        "key1": "value1",
        "key2": {
            "keyB": "valueB",
            "key3": "value3",
            "key4": "value4",
            "keyC": {"foo": "bar"},
        },
        "keyA": "valueA",
    }
def test_merge_v1():
    """On conflicting keys the second dictionary wins during merge."""
    base = {"key1": "value1", "key2": {"key3": "value3", "key4": "value4"}}
    override = {"key1": "valueA", "key2": {"keyB": "valueB"}}
    assert merge(base, override) == {
        "key1": "valueA",
        "key2": {"keyB": "valueB", "key3": "value3", "key4": "value4"},
    }
def test_merge_v2():
    """Overriding values win even when the second value is None."""
    base = {
        "foo": "bar",
        "bar": "foo",
        "baz": "Baz",
        "test": {
            "attr1": "Foo Bar",
            "attr2": "Hello World!",
            "attr3": ["value1", "value2"],
        },
    }
    override = {"foo": None, "bar": "Foo", "test": {"attr2": None, "attr3": ["1", "2"]}}
    expected = {
        "foo": None,
        "bar": "Foo",
        "baz": "Baz",
        "test": {"attr1": "Foo Bar", "attr2": None, "attr3": ["1", "2"]},
    }
    assert merge(base, override) == expected
def test_with_only_returns_dictionary_when_both_src_and_attrs_are_valid_arguments():
    """with_only() keeps exactly the requested attributes."""
    source = {"key1": "value1", "key2": "value2"}
    assert with_only(source, ["key1"]) == {"key1": "value1"}
def test_with_only_returns_empty_dictionary_when_attrs_is_empty():
    """An empty attrs list selects nothing."""
    assert with_only({"key1": "value1", "key2": "value2"}, []) == {}
def test_with_only_raises_attribute_error_when_first_argument_is_not_dictionary():
    """A non-dict src raises AttributeError with a helpful message."""
    with pytest.raises(AttributeError) as excinfo:
        with_only(1, [])
    expected_msg = "First argument must be a dictionary, invalid argument received '1'."
    assert excinfo.value.args[0] == expected_msg
def test_with_only_raises_attribute_error_when_second_argument_is_not_list():
    """A non-list attrs raises AttributeError with a helpful message."""
    with pytest.raises(AttributeError) as excinfo:
        with_only({}, 1)
    expected_msg = "Second argument must be a list, invalid argument received '1'."
    assert excinfo.value.args[0] == expected_msg
def test_with_only_raises_type_error_when_first_argument_is_not_dictionary():
    """Calling with_only() with no arguments raises TypeError."""
    with pytest.raises(TypeError) as excinfo:
        with_only()
    expected_msg = "with_only() missing 2 required positional arguments: 'src' and 'attrs'"
    assert excinfo.value.args[0] == expected_msg
def test_without_attr_returns_list_without_the_attr():
    """without_attr() drops the listed attributes."""
    source = {"Name": "Geeks", "Gender": "Male"}
    assert without_attr(source, ["Name"]) == {"Gender": "Male"}
def test_without_attr_return_list_without_the_attr_for_nested_dict():
    """without_attr() drops attributes at any nesting depth."""
    source = {
        "Name": "Geeks",
        "Gender": "Male",
        "Age": "55",
        "Address": {"Street": "Charkhal", "District": "Kathmandu"},
    }
    expected = {"Name": "Geeks", "Gender": "Male", "Address": {"Street": "Charkhal"}}
    assert without_attr(source, ["District", "Age"]) == expected
|
<filename>mmdet/models/backbones/ssd_vgg.py
import warnings
import torch.nn as nn
from mmcv.cnn import VGG
from mmcv.runner import BaseModule
from ..builder import BACKBONES
from ..necks import ssd_neck
@BACKBONES.register_module()
class SSDVGG(VGG, BaseModule):
    """VGG Backbone network for single-shot-detection.

    Args:
        depth (int): Depth of vgg, from {11, 13, 16, 19}.
        with_last_pool (bool): Whether to add a pooling layer at the last
            of the model
        ceil_mode (bool): When True, will use `ceil` instead of `floor`
            to compute the output shape.
        out_indices (Sequence[int]): Output from which stages.
        out_feature_indices (Sequence[int]): Output from which feature map.
        pretrained (str, optional): model pretrained path. Default: None
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None
        input_size (int, optional): Deprecated argument.
            Width and height of input, from {300, 512}.
        l2_norm_scale (float, optional): Deprecated argument.
            L2 normalization layer init scale.

    Example:
        >>> self = SSDVGG(input_size=300, depth=11)
        >>> self.eval()
        >>> inputs = torch.rand(1, 3, 300, 300)
        >>> level_outputs = self.forward(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        (1, 1024, 19, 19)
        (1, 512, 10, 10)
        (1, 256, 5, 5)
        (1, 256, 3, 3)
        (1, 256, 1, 1)
    """

    # Channel layout of the extra SSD feature layers keyed by input size.
    # NOTE(review): this table is not consumed inside this class --
    # presumably read by the neck/head that builds the extra layers;
    # confirm before removing.  'S' appears to mark special (stride)
    # entries in the original SSD extra-layer spec -- TODO confirm.
    extra_setting = {
        300: (256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256),
        512: (256, 'S', 512, 128, 'S', 256, 128, 'S', 256, 128, 'S', 256, 128),
    }

    def __init__(self,
                 depth,
                 with_last_pool=False,
                 ceil_mode=True,
                 out_indices=(3, 4),
                 out_feature_indices=(22, 34),
                 pretrained=None,
                 init_cfg=None,
                 input_size=None,
                 l2_norm_scale=None):
        # TODO: in_channels for mmcv.VGG
        super(SSDVGG, self).__init__(
            depth,
            with_last_pool=with_last_pool,
            ceil_mode=ceil_mode,
            out_indices=out_indices)
        # Extend the plain VGG features with the SSD-specific tail:
        # a stride-1 pooling layer, a dilated 3x3 conv (512 -> 1024) and
        # a 1x1 conv (1024 -> 1024), each followed by ReLU.  Modules are
        # appended under their running index so indexing stays uniform.
        self.features.add_module(
            str(len(self.features)),
            nn.MaxPool2d(kernel_size=3, stride=1, padding=1))
        self.features.add_module(
            str(len(self.features)),
            nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6))
        self.features.add_module(
            str(len(self.features)), nn.ReLU(inplace=True))
        self.features.add_module(
            str(len(self.features)), nn.Conv2d(1024, 1024, kernel_size=1))
        self.features.add_module(
            str(len(self.features)), nn.ReLU(inplace=True))
        self.out_feature_indices = out_feature_indices

        # `pretrained` and `init_cfg` are mutually exclusive ways to
        # configure weight initialization.
        assert not (init_cfg and pretrained), \
            'init_cfg and pretrained cannot be setting at the same time'
        if isinstance(pretrained, str):
            warnings.warn('DeprecationWarning: pretrained is deprecated, '
                          'please use "init_cfg" instead')
            self.init_cfg = [dict(type='Pretrained', checkpoint=pretrained)]
            # NOTE(review): as ordered in the original source, the `elif`
            # below forces these deprecation checks into this branch, so
            # they only fire when a string `pretrained` is given --
            # confirm this is intended.
            if input_size is not None:
                warnings.warn('DeprecationWarning: input_size is deprecated')
            if l2_norm_scale is not None:
                warnings.warn('DeprecationWarning: l2_norm_scale in VGG is '
                              'deprecated, it has been moved to SSDNeck.')
        elif pretrained is None:
            if init_cfg is None:
                # Default initialization when neither pretrained weights
                # nor an explicit init_cfg are supplied.
                self.init_cfg = [
                    dict(type='Kaiming', layer='Conv2d'),
                    dict(type='Constant', val=1, layer='BatchNorm2d'),
                    dict(type='Normal', std=0.01, layer='Linear'),
                ]
        else:
            raise TypeError('pretrained must be a str or None')

    def init_weights(self, pretrained=None):
        # Deliberately skips VGG in the MRO so the next class's
        # init_weights runs instead (presumably BaseModule's
        # init_cfg-driven initialization -- confirm against mmcv).
        # `pretrained` is accepted for API compatibility but unused.
        super(VGG, self).init_weights()

    def forward(self, x):
        """Forward function."""
        outs = []
        for i, layer in enumerate(self.features):
            x = layer(x)
            if i in self.out_feature_indices:
                outs.append(x)
        # Return a bare tensor for a single output, a tuple otherwise.
        if len(outs) == 1:
            return outs[0]
        else:
            return tuple(outs)
class L2Norm(ssd_neck.L2Norm):
    """Deprecated alias kept for backward compatibility.

    Use ``L2Norm`` from ``mmdet/models/necks/ssd_neck.py`` instead.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        warnings.warn('DeprecationWarning: L2Norm in ssd_vgg.py '
                      'is deprecated, please use L2Norm in '
                      'mmdet/models/necks/ssd_neck.py instead')
|
<gh_stars>0
# Copyright (c) 2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
This module provides implementations to load documentation information from
an identifier as it is specified in the `pydocmd.yml:generate` configuration
key. A loader basically takes care of loading the documentation content for
that name, but is not supposed to apply preprocessing.
"""
from __future__ import print_function
from .imp import import_object_with_scope
import collections
import inspect
import types
import uuid
from yapf.yapflib.yapf_api import FormatCode
# All callable flavors that should be documented as plain functions.
function_types = (types.FunctionType, types.LambdaType, types.MethodType,
                  types.BuiltinFunctionType, types.BuiltinMethodType)
if hasattr(types, 'UnboundMethodType'):
    # Python 2 only: unbound methods are a distinct type.
    function_types += (types.UnboundMethodType,)

# Sentinel distinguishing "argument omitted" from an explicit None.
NotSet = object()
def trim(docstring):
    """Dedent a docstring, PEP 257-style.

    The first line is stripped of leading whitespace; every following line is
    dedented by the *minimum* indentation found among the non-empty remaining
    lines, preserving their relative indentation. Returns '' for falsy input.
    """
    if not docstring:
        return ''
    lines = [x.rstrip() for x in docstring.split('\n')]
    lines[0] = lines[0].lstrip()
    # Find the smallest indentation over all non-empty continuation lines.
    # (The previous version used the FIRST indented line's indent, which
    # over-dedented any later line indented less than the first one.)
    indent = None
    for line in lines[1:]:
        if not line:
            continue
        delta = len(line) - len(line.lstrip())
        if indent is None or delta < indent:
            indent = delta
    if indent:
        lines[1:] = [line[indent:] for line in lines[1:]]
    return '\n'.join(lines)
class PythonLoader(object):
    """
    Loads documentation by importing absolute identifiers with
    #import_object_with_scope().
    """

    def __init__(self, config):
        self.config = config

    def load_section(self, section):
        """
        Loads the contents of a #Section. The `section.identifier` is the name
        of the object that we need to load.

        # Arguments
        section (Section): The section to load. Fill the `section.title` and
          `section.content` values. Optionally, `section.loader_context` can
          be filled with custom arbitrary data to reference at a later point.
        """
        assert section.identifier is not None
        obj, scope = import_object_with_scope(section.identifier)

        # Fall back to the last dotted component when the object itself has
        # no __name__.
        _, _, tail = section.identifier.rpartition('.')
        section.title = getattr(obj, '__name__', tail)
        section.content = trim(get_docstring(obj))
        section.loader_context = {'obj': obj, 'scope': scope}

        # Prepend the signature of callables as a fenced code block.
        if callable(obj):
            owner = scope if inspect.isclass(scope) else None
            sig = get_function_signature(obj, owner)
            section.content = '```python\n{}\n```\n'.format(sig.strip()) + section.content
def get_docstring(function):
    """Return the docstring of *function*, or '' when it has none.

    static/class methods are unwrapped to their underlying function; callable
    objects without a ``__name__`` contribute their ``__call__`` docstring.
    """
    if isinstance(function, (staticmethod, classmethod)):
        doc = function.__func__.__doc__
    elif hasattr(function, '__name__') or isinstance(function, property):
        doc = function.__doc__
    elif hasattr(function, '__call__'):
        doc = function.__call__.__doc__
    elif isinstance(function, type):
        doc = function.__doc__
    else:
        doc = None
    return doc or ''
def get_full_arg_spec(func):
    """Return an argspec mapping for *func* with a stable set of keys.

    On Python 3 this reflects #inspect.getfullargspec(); on Python 2 the
    result of #inspect.getargspec() is adapted to the same shape (with empty
    keyword-only/annotation entries).
    """
    if hasattr(inspect, 'getfullargspec'):
        spec = inspect.getfullargspec(func)
        # Iterate the namedtuple's declared fields only; dir() would also
        # leak the namedtuple helper methods count() and index() into the
        # resulting mapping.
        return {k: getattr(spec, k) for k in spec._fields}
    spec = inspect.getargspec(func)
    return {
        'args': spec.args,
        'varargs': spec.varargs,
        'varkw': spec.keywords,
        'defaults': spec.defaults,
        'kwonlyargs': [],
        'kwonlydefaults': [],
        'annotations': {}
    }
class Parameter(object):
    """A single formal parameter of a callable, for signature rendering."""

    # Parameter kinds.
    POS = 'POS'
    KWONLY = 'KWONLY'
    VARARGS_POS = 'VARARGS_POS'
    VARARGS_KW = 'VARARGS_KW'

    # Wrapper distinguishing "no annotation/default" (None) from an actual
    # value of None.
    Value = collections.namedtuple('Value', 'value')

    def __init__(self, kind, name, annotation=None, default=None):
        self.kind = kind
        self.name = name
        self.annotation = annotation
        self.default = default

    def __repr__(self):
        return 'Parameter(kind={}, name={!r}, annotation={!r}, default={!r})'\
            .format(self.kind, self.name, self.annotation, self.default)

    def __str__(self):
        pieces = [self.name]
        if self.annotation:
            pieces.append(': ' + repr(self.annotation.value))
        if self.default:
            pieces.append(' = ' + repr(self.default.value))
        text = ''.join(pieces)
        if self.kind == self.VARARGS_POS:
            return '*' + text
        if self.kind == self.VARARGS_KW:
            return '**' + text
        return text

    def replace(self, name=NotSet, annotation=NotSet,
                default=NotSet, kind=NotSet):
        """Return a copy with the given attributes overridden."""
        pick = lambda new, old: old if new is NotSet else new
        return Parameter(
            pick(kind, self.kind),
            pick(name, self.name),
            pick(annotation, self.annotation),
            pick(default, self.default))
def get_paramaters_from_arg_spec(argspec, strip_self=False):
    """Convert an argspec mapping (see #get_full_arg_spec()) to #Parameter s.

    Applies positional defaults, keyword-only defaults and annotations, and
    optionally strips a leading ``self`` parameter.
    """
    args = [Parameter(Parameter.POS, x) for x in argspec['args'][:]]

    # Positional defaults align with the *tail* of the positional args.
    if argspec['defaults']:
        offset = len(args) - len(argspec['defaults'])
        for i, default in enumerate(argspec['defaults']):
            args[i + offset] = args[i + offset].replace(default=Parameter.Value(default))

    if argspec['varargs']:
        args.append(Parameter(Parameter.VARARGS_POS, argspec['varargs']))

    # Keyword-only parameters, with their defaults. (kwonlydefaults was
    # previously ignored, so keyword-only defaults never rendered; note that
    # getfullargspec reports it as None when there are none.)
    kwonlydefaults = argspec.get('kwonlydefaults') or {}
    for name in argspec['kwonlyargs']:
        param = Parameter(Parameter.KWONLY, name)
        if name in kwonlydefaults:
            param = param.replace(default=Parameter.Value(kwonlydefaults[name]))
        args.append(param)

    if argspec['varkw']:
        args.append(Parameter(Parameter.VARARGS_KW, argspec['varkw']))

    if strip_self and args and args[0].name == 'self':
        args.pop(0)

    # Attach annotations by parameter name.
    for i, param in enumerate(args):
        if param.name in argspec['annotations']:
            annotation = argspec['annotations'][param.name]
            args[i] = param.replace(annotation=Parameter.Value(annotation))
    return args
def format_parameters_list(parameters):
    """Join parameters with commas, inserting a lone ``*`` before the first
    keyword-only parameter (PEP 3102 syntax)."""
    rendered = []
    marker_emitted = False
    for param in parameters:
        if not marker_emitted and param.kind == Parameter.KWONLY:
            marker_emitted = True
            rendered.append('*')
        rendered.append(str(param))
    return ', '.join(rendered)
def get_function_signature(
    function,
    owner_class=None,
    show_module=False,
    pretty=True,
):
    """Render *function*'s signature as a string, e.g. ``Cls.meth(a, b=1)``.

    With ``pretty=True`` the line is re-flowed through yapf (pep8 style);
    annotations and defaults are temporarily swapped for placeholder
    identifiers so the intermediate string is valid Python syntax.
    """
    # Get base name.
    name_parts = []
    if show_module:
        name_parts.append(function.__module__)
    if owner_class:
        name_parts.append(owner_class.__name__)
    if hasattr(function, '__name__'):
        name_parts.append(function.__name__)
    else:
        # Callable object without __name__: document its __call__ instead.
        name_parts.append(type(function).__name__)
        name_parts.append('__call__')
        function = function.__call__
    name = '.'.join(name_parts)

    try:
        argspec = get_full_arg_spec(function)
    except TypeError:
        # Builtins without introspectable signatures.
        parameters = []
    else:
        parameters = get_paramaters_from_arg_spec(argspec, strip_self=owner_class)

    # Prettify annotations.
    class repr_str(str):
        # A str whose repr() is itself, so type names render unquoted.
        def __repr__(self):
            return self

    def prettify(val):  # type: (Parameter.Value) -> Parameter.Value
        if isinstance(val.value, type):
            val = Parameter.Value(repr_str(val.value.__name__))
        return val

    for i, param in enumerate(parameters):
        if param.annotation:
            param = param.replace(annotation=prettify(param.annotation))
        if param.default:
            param = param.replace(default=prettify(param.default))
        parameters[i] = param

    if pretty:
        # Replace annotations and defaults with placeholders that are valid syntax.
        supplements = {}
        counter = [0]

        def _add_supplement(value):
            # Pad the placeholder to the final repr's length so yapf's
            # line-breaking decisions match the eventual output.
            annotation_id = '_{}'.format(counter[0])
            annotation_id += '_' * (len(repr(value)) - len(annotation_id))
            supplements[annotation_id] = value
            counter[0] += 1
            return repr_str(annotation_id)

        for i, param in enumerate(parameters):
            if param.annotation:
                param = param.replace(
                    annotation=Parameter.Value(_add_supplement(param.annotation.value)))
            if param.default:
                param = param.replace(
                    default=Parameter.Value(_add_supplement(param.default.value)))
            parameters[i] = param

        # Use a placeholder in pretty mode as the generated *name* may also
        # sometimes not be valid syntax (eg. if it includes the class name).
        name_placeholder = '_PH_' + '_' * (len(name)-2)

    sig = (name_placeholder if pretty else name) + '(' + format_parameters_list(parameters) + ')'
    if pretty:
        # Wrap as a def so yapf accepts it, then strip the wrapper back off.
        sig, _ = FormatCode('def ' + sig + ': pass', style_config='pep8')
        sig = sig[4:].rpartition(':')[0]

        # Replace the annotation and default placeholders with the actual values.
        for placeholder, annotation in supplements.items():
            sig = sig.replace(placeholder, repr(annotation))

        # Replace the placeholder and fix indents.
        sig = sig.replace(name_placeholder, name)
        delta = len(name_placeholder) - len(name)
        lines = sig.split('\n')
        for i, line in enumerate(lines[1:], 1):
            indent = len(line) - len(line.lstrip()) - delta - 4  # 4 for "def "
            if indent <= 0 and line.strip() != ')':
                indent = 4
            lines[i] = ' ' * indent + line.lstrip()
        sig = '\n'.join(lines)

    return sig
|
from django.db import models
from django.contrib.auth.models import Group, User
from django.utils import timezone
# Create your models here.
# 個別のビデオを表現するクラス
# Represents a single video.
class Media(models.Model):
    name = models.TextField()                        # video title
    order = models.IntegerField(default= 0)          # display order
    vid = models.CharField(max_length= 10)           # video ID on Vimeo
    lecturer = models.CharField(max_length= 60)      # lecturer name
    theme = models.CharField(max_length=40)          # theme
    thumb_url = models.CharField(max_length=128, default='', blank=True)  # thumbnail URL (set automatically)
    duration = models.IntegerField(default= 0)       # video length in seconds
    enabled = models.BooleanField(default=True)      # whether the video exists / is playable
    viewCount = models.IntegerField(default= 0)      # number of views
    likeCount = models.IntegerField(default= 0)      # number of likes

    def __str__(self):
        # e.g. "[01]Title:Lecturer(12view)"
        return "["+str(self.order).zfill(2)+"]"+self.name+":"+self.lecturer+"("+str(self.viewCount)+"view)"
# 講義一式を表現するクラス
# Represents a complete course (a set of lecture videos).
class Course(models.Model):
    name = models.TextField()                                   # course name
    order = models.IntegerField(default= 0)                     # display order
    mlist = models.ManyToManyField(Media)                       # list of Media
    group = models.ForeignKey(Group, on_delete=models.PROTECT)  # corresponding auth Group

    def __str__(self):
        return self.name+ ":MediaCount "+str(len(self.mlist.all()))
# A redeemable keyword that grants course/group access (see setTicket below).
class Ticket(models.Model):
    ticketName = models.CharField(max_length=40)             # ticket name
    ticketGroup = models.CharField(max_length=20)            # Group this ticket grants
    ticketKeyword = models.CharField(max_length=20)          # redemption keyword
    ticketType = models.CharField(max_length=20,default='')  # ticket type ('' = legacy group, 'limit' = time-limited)
    ticketUntil = models.DateTimeField(default=timezone.now) # expiry date
    ticketCourse = models.ForeignKey(Course, default=1,blank=True, on_delete=models.PROTECT)  # course made viewable
    ticketCount = models.IntegerField(default= 0)            # number of redemptions (informational only)

    def __str__(self):
        return self.ticketName+":Key:"+self.ticketKeyword
# ビデオの視聴状況を使うためのデータ (再生が始まったタイミングで作成され, ページ遷移前に保存 )
# これが作成されたタイミングで Media の Viewカウントを追加
# Per-viewing state of a video (created when playback starts, saved before
# leaving the page). Creating one also bumps the Media view count.
class MediaViewCount(models.Model):
    # media = models.OneToOneField(Media, on_delete = models.PROTECT)  # one-to-one attempt -- did not work
    media = models.ForeignKey(Media, default=0, on_delete=models.PROTECT)  # the viewed Media
    currentTime = models.IntegerField(default= 0)        # last playback position reached
    is_like = models.BooleanField(default=False)         # whether the user liked it
    totalViewSec = models.IntegerField(default=0)        # estimated total seconds watched
    view_speed = models.FloatField(default=1.0)          # playback speed
    viewstart_time = models.DateTimeField(default=timezone.now)  # first viewing time
    lastview_time = models.DateTimeField(default=timezone.now)   # last viewing time

    def __str__(self):
        # Guard against division by zero (Media.duration defaults to 0) and
        # against records not yet linked to any UserProfile -- both
        # previously crashed this repr.
        duration = self.media.duration or 1
        profiles = self.userprofile_set.all()
        owner = str(profiles[0]) if profiles else '?'
        return self.media.name+":"+owner+"["+str(int(100*self.currentTime/duration))+"%/"+str(int(100*self.totalViewSec/duration))+"%] last view:"+str(self.lastview_time)[:19]
# ユーザ プロフィール
# User profile.
class UserProfile(models.Model):
    user = models.OneToOneField(User, on_delete = models.CASCADE, related_name = "profile")  # associated user
    affi = models.CharField(max_length=60)        # affiliation / department
    position = models.CharField(max_length = 40)  # position / school year
    zip = models.CharField(max_length = 10)       # postal code
    city = models.CharField(max_length = 30)      # prefecture / city
    # Viewing records for videos.
    viewcount = models.ManyToManyField(MediaViewCount)  # MediaViewCount records for watched videos
    # Ticket list.
    tickets = models.ManyToManyField(Ticket)      # tickets currently held
    regist_date = models.DateTimeField(default=timezone.now)     # registration date
    lastlogin_date = models.DateTimeField(default=timezone.now)  # last login date

    def __str__(self):
        return self.user.username+":"+self.user.first_name+" "+self.user.last_name+"["+str(len(self.viewcount.all()))+"video] last visit:"+str(self.lastlogin_date)[:19]
# 特定のユーザからのチケット登録 OKならGroup登録してTrue
def setTicket(user,keyw):
if not user.is_authenticated:
return False
tickets = Ticket.objects.filter(ticketKeyword__exact=keyw)
print("Filter Ticket",keyw,tickets)
if len(tickets) == 0 :
# print("No ticket")
return False
else:
tk = tickets[0]
# ここで、 従来のグループか、新チケットかを確認
# 2種類のチケットの存在を意識すべき
if len(tk.ticketType)==0: # 旧チケット
gp = Group.objects.filter(name__exact=tk.ticketGroup)
# print("find Group!", gp)
if len(gp) > 0:
user.groups.add(gp[0])
tk.ticketCount += 1
tk.save()
user.save()
return True
elif tk.ticketType == "limit":
# 期限つきチケット(UP直接)
up = user.profile
up.tickets.add(tk)
tk.ticketCount += 1
tk.save()
up.save()
return True
return False
|
#!/usr/bin/python3
import praw
from colorama import Fore

import topSecretInfo

# Reddit API credentials and target subreddit, loaded from an untracked
# module so they are not committed to the repository.
# Variables. Don't forget to change before publishing!!!
clientID = topSecretInfo.clientID
clientSecret = topSecretInfo.clientSecret
username = topSecretInfo.username
password = topSecretInfo.password
subName = topSecretInfo.subName
userAgent = topSecretInfo.userAgent
# Message Replies
muckReply = "Will you please shut the fuck up?"
goodBotReply = "Thanks!"
badBotReply = "F. Can you tell me what i did wrong?"
replyBody = "\n\n ^(I'm just a stupid bot (also a real person but idk)) ^(trying to fix some of this sub's problems. [Here is my source code](https://github.com/AwptiK/AntiMuckBot))"

# Stuff...
# Every letter permutation of "muck", plus one phrase. Two pairs of entries
# previously lost their separating comma ("umck," "cmuk" and "mcuk," "ucmk"),
# which made Python concatenate the adjacent string literals into single
# bogus entries; the commas are restored here.
bannedWords = ["muck", "umck", "cmuk", "mcuk", "ucmk", "cumk", "kumc", "ukmc", "mkuc", "kmuc", "umkc", "mukc",
               "mcku", "cmku", "kmcu", "mkcu", "ckmu", "kcmu", "kcum", "ckum", "ukcm", "kucm", "cukm", "uckm",
               "not giving a shit"]
logfileName = "AlreadyRepliedToComments.txt"
characterWhitelist = set('abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ')
# Add the comment ID to a text file just in case it somehow wants to reply to it again. Which should NEVER happen
# Add the comment ID to a text file just in case it somehow wants to reply to it again. Which should NEVER happen
def AddCommentIDToList(comment_id):
    # Context manager guarantees the handle is closed even if write() raises;
    # the previous open/close pair leaked the handle on error.
    with open(logfileName, "a") as file:
        file.write(comment_id)
        file.write("\n")
def process_comment(_comment, depth=0):
    """Generate comment bodies and depths.

    Yields ``(body, depth)`` for ``_comment`` and, recursively, for its
    reply tree. Side effect: once a reply chain reaches relative depth 2,
    the offending reply is answered with a canned message and the remaining
    siblings at that level are skipped.
    """
    yield _comment.body, depth
    for _reply in _comment.replies:
        # depth += 1
        if depth == 2:
            # Deep enough: answer and stop descending at this level.
            _reply.reply("Shut the fuck up")
            print("Mucker has been shut up")
            break
        yield from process_comment(_reply, depth + 1)
def get_post_comments(post, more_limit=32):
    """Get a list of (body, depth) pairs for the comments in the post."""
    # Expand "load more comments" stubs up to the given limit, then flatten
    # every top-level comment's reply tree.
    post.comments.replace_more(limit=more_limit)
    return [pair
            for top_level in post.comments
            for pair in process_comment(top_level)]
# Constantly run this shit
# Main bot loop: (re)connect on every iteration, stream submissions, and
# restart on any error. Ctrl+C exits cleanly.
while True:
    # Try to shut a mucker up
    try:
        # TryToShutMucker()
        # TryToShutChain()
        reddit = praw.Reddit(
            client_id=clientID,
            client_secret=clientSecret,  # Connect to Reddit #
            password=password,
            user_agent=userAgent,
            username=username,
        )
        subreddit = reddit.subreddit(subName)
        print(Fore.YELLOW + reddit.user.me().__str__())
        print(Fore.LIGHTBLUE_EX + "Connected to subreddit of name: " + Fore.YELLOW + subreddit.display_name)
        # for submission in subreddit.stream.submissions():
        #     for comment in submission.comments:
        #         comment.replies.replace_more(limit=4)
        #         if comment.replies[1] is not None:
        #             reply = comment.replies[1]
        #             reply.reply("Shut the fuck up please")
        #             print("Mucker has been shut up")
        #             break
        #         else:
        #             break
        # Walk each new submission's comment tree; replying happens as a
        # side effect inside process_comment (via get_post_comments).
        for submission in subreddit.stream.submissions():
            comments = get_post_comments(post=submission, more_limit=10)
            print(comments)
            # for comment in comments:
            #     if comment == ('muck', 3):
            #         print("Mucker has been shut up")
    # If the user presses Control and C, exit the program/stop the bot
    except KeyboardInterrupt:  # Ctrl + C exits
        print(Fore.RED + "Exiting...")
        break
    # if error, tell error
    except Exception as error:  # What did you do idiot?
        print(Fore.RED + "Well...fuck, something done fucked up")
        print(Fore.RED + "Error is: " + error.__str__())
        print(Fore.RED + "Trying to restart...")
|
<reponame>kjarkko/Heron<filename>application/chats/views.py
from application import app, db, login_required
from application.chats.models import Chat
from application.chats.forms import ChatForm, AddUserForm
from application.chatusers.models import ChatUser
from application.messages.models import Message
from application.messages.forms import MessageForm
from application.users.models import User
from flask import request, redirect, render_template, url_for, jsonify, abort
from flask_login import current_user, login_manager
@app.route("/chats/<chat_id>/management/add", methods=["POST"])
@login_required()
def chats_add_user(chat_id):
    """Add the user named in the form to the chat (chat admins only)."""
    if not _admin_of(current_user.id, chat_id):
        abort(403)
    form = AddUserForm(request.form)
    if form.validate():
        # NOTE(review): User.get(...) returning None would raise here --
        # presumably the form validator guarantees the user exists; confirm.
        user_id = User.get(form.name.data).id
        if ChatUser.find(user_id, chat_id) is None:
            ChatUser.create(user_id, chat_id)
    # Invalid forms fall through silently and just redirect back.
    return redirect(url_for('chats_management', chat_id=chat_id))
@app.route("/chats/new", methods=["GET", "POST"])
@login_required("ANY")
def chats_create():
    """Show the chat-creation form; on a valid submit, create the chat and
    make the current user its moderator."""
    form = ChatForm()
    if not form.validate_on_submit():
        # GET request or failed validation: (re)render the form.
        return render_template("/chats/new.html", form=form)
    chat = Chat.create(form.name.data)
    ChatUser.create(current_user.id, chat.id, True)
    return redirect("/chats/" + str(chat.id))
@app.route("/chats/all")
@login_required()
def chats_all():
    """List every chat in the system (site admins only)."""
    if current_user.is_admin():
        return render_template(
            "chats/all.html",
            chats=Chat.all()
        )
    abort(403)

# Expose the current user's chat list to all templates under `chats_all`.
app.jinja_env.globals.update(chats_all=Chat.find_by_user)
@app.route("/chats/post/<chat_id>", methods=["POST"])
@login_required()
def chats_post(chat_id):
    """Post a new message to the chat (members only)."""
    if not _member_of(current_user.id, chat_id):
        abort(403)
    form = MessageForm(request.form)
    # NOTE(review): the form is not validated before use, so empty messages
    # are stored as-is -- confirm whether that is intended.
    Message.create(
        ChatUser.find(current_user.id, chat_id).id,
        form.text.data
    )
    return redirect("/chats/" + chat_id)
@app.route("/chats/<chat_id>/", methods=["GET"])
@login_required()
def chats_view(chat_id):
    """Render a chat with its message history and a posting form."""
    if not _member_of(current_user.id, chat_id):
        abort(403)
    chat = Chat.get(chat_id)
    if not chat:
        # Chat was deleted (or never existed); fall back to the listing.
        return redirect(url_for('chats_all'))
    context = {
        "chat": chat,
        "messages": Message.find_all_in_chat(chat_id),
        "form": MessageForm(),
    }
    return render_template("chats/view.html", **context)
@app.route("/chats/_m/")
@login_required()
def chats_get_messages():
    """AJAX endpoint: return the chat's rendered message list as JSON."""
    chat_id = request.args.get('chat_id', 0, type=int)
    if not _member_of(current_user.id, chat_id):
        abort(403)
    return jsonify(
        messages=render_template(
            'messages/messages.html',
            messages=Message.find_all_in_chat(chat_id)
        )
    )
@app.route("/chats/<chat_id>/management")
@login_required()
def chats_management(chat_id):
    """Render the chat's member-management page (members only)."""
    if not _member_of(current_user.id, chat_id):
        abort(403)
    members = User.find_members(chat_id)
    return render_template(
        "chats/management.html",
        users=members,
        chat=Chat.get(chat_id),
        form=AddUserForm()
    )
@app.route("/chats/delete/<chat_id>", methods=["POST"])
@login_required()
def chats_delete(chat_id):
    """Delete a chat entirely (site admins only)."""
    if not current_user.is_admin():
        abort(403)
    Chat.delete(chat_id)
    return redirect(url_for('chats_all'))
@app.route("/chats/<chat_id>/management/delete/<user_id>", methods=["POST"])
@login_required()
def chats_delete_user(chat_id, user_id):
    """Remove a user from the chat (chat admins only)."""
    if not _admin_of(current_user.id, chat_id):
        abort(403)
    cu = ChatUser.find(user_id, chat_id)
    if cu is not None:
        ChatUser.delete(cu)
    return redirect(url_for('chats_management', chat_id=chat_id))
def _member_of(user_id, chat_id):
    """True if the user belongs to the chat."""
    membership = ChatUser.find(user_id, chat_id)
    return membership is not None
def _admin_of(user_id, chat_id):
    """True if the user is a moderator of the chat.

    Returns False (instead of raising AttributeError on None, as before)
    when the user is not a member of the chat at all.
    """
    cu = ChatUser.find(user_id, chat_id)
    return cu is not None and cu.is_moderator()
|
<filename>histogram.py<gh_stars>1-10
import copy
import rangelist
from globalconstants import VERTLINE
from plainenglish import Labels
from indexclass import Index
from generalutilities import abridge, show_list
import nformat
from indexutilities import index_reduce
from rangelist import split_up_range
labels = Labels()
def formkeys(entry_temp):
    """ combines format key and transpose keys """
    # NOTE(review): transpose_keys is neither defined nor imported in this
    # module's visible code -- calling this would raise NameError unless it
    # is provided elsewhere; confirm.
    return nformat.format_keys(transpose_keys(entry_temp))
class histogram:
    """Frequency mapping (word/key/tag -> set of note indexes or keys) for a
    notebook, loaded from a SQLite cursor and rendered as a concordance.

    NOTE(review): class name is lowercase, contrary to PEP 8; kept because
    callers elsewhere instantiate ``histogram``.
    """

    def __init__(self,displayobject=None,
                 for_indexes=True,
                 db_connection_cursor=None,
                 notebookname=''):
        # Mapping from entry (word/key/tag) to a set of note indexes (or,
        # for tags, a set of keys).
        self.histo_dict = {}
        self.displayobject = displayobject
        # When True, values are note indexes; affects rendering in show().
        self.for_indexes=for_indexes
        self.database_mode = False
        self.db_connection_cursor = db_connection_cursor
        self.notebookname = notebookname

    def load_dictionary(self,entrydictionary=None,
                        flag="w",
                        histo_word_dict = None,
                        histo_key_dict = None,
                        histo_tag_dict = None,
                        projects=None,
                        func=lambda x,y:x,
                        truncatespecs=''): #idnta index date number text all
        """Populate self.histo_dict, either from *entrydictionary* or by
        querying the database; caches and returns the three dictionaries.

        # flag 'w' for words
        # flag 'k' for keys
        # flag 't' for tags
        # flag 'n' forces a rebuild even when a cached dict exists
        """
        self.histo_word_dict = histo_word_dict
        self.histo_key_dict = histo_key_dict
        self.histo_tag_dict = histo_tag_dict
        print('TRUNCATING: ',truncatespecs)

        def trunckey (x):
            # Collapse a 'prefix@suffix' key down to 'prefix@' according to
            # the characters in truncatespecs.
            if '@' not in x:
                return x
            else:
                for spec in truncatespecs:
                    if spec == 'p' and x.split('@')[0] in projects:
                        return x.split('@')[0]+'@'
                    elif spec == 'i' and '@_' in x: #For index sequences
                        return x.split('@_')[0]+'@'
                    elif spec == 'd' and '@#' in x: #For date sequences
                        return x.split('@#')[0]+'@' #For numeric
                    elif ((spec == 'n' and x.split('@')[1].replace('.','').isnumeric()) or
                          (spec in ['t'] and not x.split('@')[1].replace('.','').isnumeric()) or
                          (spec in ['a'])): #For others
                        return x.split('@')[0]+'@'
                return x

        if entrydictionary:
            # Caller supplied a ready-made mapping; deep-copy to decouple.
            self.histo_dict = copy.deepcopy(entrydictionary)
        else:
            if 'w' in flag:
                # Build (or reuse) word -> indexes from word_to_indexes.
                if not self.histo_word_dict or 'n' in flag:
                    self.displayobject.noteprint(('ATTENTION',
                                                  'Making temporary word dictionary!'))
                    value_tuple = (self.notebookname,)
                    self.db_connection_cursor.execute("SELECT word "
                                                      +"FROM word_to_indexes "
                                                      +"WHERE notebook=?;",
                                                      value_tuple)
                    fetched = self.db_connection_cursor.fetchall()
                    for word in fetched:
                        value_tuple = (self.notebookname,word[0],)
                        self.db_connection_cursor.execute("SELECT note_index "
                                                          +"FROM word_to_indexes "
                                                          +"WHERE notebook=? and word=?;",
                                                          value_tuple)
                        fetched = self.db_connection_cursor.fetchall()
                        if fetched:
                            indexes = {index[0].strip() for index in fetched}
                            self.histo_dict[word[0]] = indexes
                    self.displayobject.noteprint(('ATTENTION','Word dictionary finished!'))
                    self.histo_word_dict = copy.deepcopy(self.histo_dict)
                else:
                    self.displayobject.noteprint(('Using word dictionary'))
                    self.histo_dict = self.histo_word_dict
            if 'k' in flag:
                # Build (or reuse) key -> indexes from keys_to_indexes;
                # keys are filtered through func and collapsed via trunckey.
                if not self.histo_key_dict or 'n' in flag:
                    self.displayobject.noteprint(('ATTENTION',
                                                  'Making temporary key dictionary!'))
                    value_tuple = (self.notebookname,)
                    self.db_connection_cursor.execute("SELECT keyword"
                                                      +" FROM keys_to_indexes"
                                                      +" WHERE notebook=?;",
                                                      value_tuple)
                    fetched = self.db_connection_cursor.fetchall()
                    for key in func([x[0] for x in fetched],projects):
                        value_tuple = (self.notebookname,key,)
                        self.db_connection_cursor.execute("SELECT note_index "
                                                          +"FROM keys_to_indexes "
                                                          +"WHERE notebook=? and keyword=?;",
                                                          value_tuple)
                        fetched = self.db_connection_cursor.fetchall()
                        if fetched:
                            indexes = {index[0].strip() for index in fetched}
                            self.histo_dict[trunckey(key)] = indexes
                    self.displayobject.noteprint(('ATTENTION','Key dictionary finished!'))
                    self.histo_key_dict = copy.deepcopy(self.histo_dict)
                else:
                    self.displayobject.noteprint(('Using Existing Key Dictionary'))
                    self.histo_dict = self.histo_key_dict
            if 't' in flag:
                # Build (or reuse) tag -> keys from tags_to_keys.
                if not self.histo_tag_dict or 'n' in flag:
                    self.displayobject.noteprint(('ATTENTION',
                                                  'Making temporary tag dictionary!'))
                    value_tuple = (self.notebookname,)
                    self.db_connection_cursor.execute("SELECT tag"
                                                      +" FROM tags_to_keys"
                                                      +" WHERE notebook=?;",value_tuple)
                    fetched = self.db_connection_cursor.fetchall()
                    for tag in fetched:
                        value_tuple = (self.notebookname,tag[0],)
                        self.db_connection_cursor.execute("SELECT keyword "
                                                          +"FROM tags_to_keys"
                                                          +" WHERE notebook=? and tag=?;",
                                                          value_tuple)
                        fetched = self.db_connection_cursor.fetchall()
                        if fetched:
                            keys = {key[0].strip() for key in fetched}
                            self.histo_dict[tag[0]] = keys
                    self.displayobject.noteprint(('ATTENTION','Tag dictionary finished!'))
                    self.histo_tag_dict = copy.deepcopy(self.histo_dict)
                else:
                    self.displayobject.noteprint(('Using existing tag dctionary'))
                    self.histo_dict = self.histo_tag_dict
        return self.histo_word_dict, self.histo_key_dict, self.histo_tag_dict

    def contract(self,entrylist):
        """Restrict every entry's value set to *entrylist*, dropping entries
        that become empty."""
        if entrylist:
            entryset = set(entrylist)
            for key in list(self.histo_dict.keys()):
                self.histo_dict[key] = self.histo_dict[key].intersection(entryset)
                if not self.histo_dict[key]:
                    del self.histo_dict[key]

    def implode (self,entrylist):
        """Keep only the entries whose key appears in *entrylist*."""
        for key in list(self.histo_dict):
            if key not in entrylist:
                del self.histo_dict[key]

    def show (self):
        """Render the mapping as a sorted concordance via the display object."""
        def dict_format(x_temp):
            """formats output of the list of search results"""
            if self.for_indexes:
                # Collapse index sets into compact ranges.
                shown_indexes = rangelist.range_find([Index(a_temp)
                                                      for a_temp in x_temp[1]],
                                                     reduce=True)
            else:
                shown_indexes = formkeys({abridge(index_reduce(x_temp),
                                                  maxlength=20)
                                          for x_temp in x_temp[1]})
            if len(shown_indexes) < 20:
                return (abridge(x_temp[0],maxlength=20)
                        +VERTLINE
                        +shown_indexes)
            # Long range strings are wrapped over continuation lines.
            returnlist = []
            sp_temp = split_up_range(shown_indexes,seg_length=3)
            returnlist.append(abridge(x_temp[0],maxlength=20)
                              +VERTLINE+sp_temp[0])
            for s_temp in sp_temp[1:]:
                returnlist.append(VERTLINE+s_temp)
            return returnlist

        list_to_show = []
        for key in sorted(self.histo_dict):
            list_to_show.append((key,self.histo_dict[key]))
        show_list(list_to_show,
                  labels.CONCORDANCE,
                  0, 30,
                  func=dict_format,
                  present=True,
                  display=self.displayobject)
|
# pylint: disable=C0330,W1401
from urllib.parse import urljoin
from requests import Request
import requests
from .server import Settings
from .utils import purge_document
class Client:
"""Allows to easily perform read and write operations against a remote
RESTful web service which is powered by the Eve_ REST framework.
This class wraps the Requests_ library, specifically the
:class:`requests.Session` class which, amongst other features, leverages
:obj:`urllib3`’s connection pooling. So if you’re making several requests to the
same host, the underlying TCP connection will be reused, which can result
in a significant performance increase.
Basic Usage::
>>> from eve_requests import Client, Settings
>>> settings = Settings('https://myapi.com/)
>>> client = Client(settings)
>>> client.post('contacts', {"name": "<NAME>"}, auth=('user', 'pw'))
<Response [201]>
.. _Eve:
http://python-eve.org/
.. _Requests:
http://python-requests.org/
"""
def __init__(self, settings=None):
#: Instance of :class:`requests.Session` used internally to perform
#: HTTP requests.
self.session = requests.Session()
if settings:
#: Remote service settings. Make sure these are properly set before
#: invoking any of the read and write methods.
#: Defaults to a new instance of :class:`Settings`.
self.settings = settings
else:
self.settings = Settings()
def post(self, endpoint, payload, **kwargs):
"""Sends a POST request.
:param endpoint: Target endpoint relative to the base URL of the
remote service.
:param payload: JSON data to send as the body of the request.
:param \*\*kwargs: Optional arguments that :obj:`requests.Request`
takes.
:returns: The :class:`requests.Response` object, which contains a
server’s response to an HTTP request.
:raises ValueError: If :any:`settings` is not set.
"""
req = self._build_post_request(endpoint, payload, **kwargs)
return self._prepare_and_send_request(req)
def put(self, endpoint, payload, unique_id=None, etag=None, **kwargs):
"""Sends a PUT request.
:param endpoint: Target endpoint relative to the base URL of the
remote service.
:param payload: JSON data to send as the body of the request. If the
JSON contains any :any:`Settings.meta_fields`, these will be
stripped before the request is sent over to the remote service.
:param unique_id: Optional id of the document being replaced on the
remote service. If omitted, the id will be inferred from the
payload.
:param etag: Optional document ETag. If omitted, the ETag will be
inferred from the payload.
:param \*\*kwargs: Optional arguments that :obj:`requests.Request`
takes.
:returns: The :class:`requests.Response` object, which contains a
server’s response to an HTTP request.
:raises ValueError: If the unique id is missing.
:raises ValueError: If ETag is missing and :any:`Settings.if_match` is
enabled.
:raises ValueError: If :any:`settings` is not set.
"""
req = self._build_put_request(endpoint, payload, unique_id, etag, **kwargs)
return self._prepare_and_send_request(req)
def patch(self, endpoint, payload, unique_id=None, etag=None, **kwargs):
"""Sends a PATCH request.
:param endpoint: Target endpoint relative to the base URL of the
remote service.
:param payload: JSON data to send as the body of the request. If the
JSON contains any :any:`Settings.meta_fields`, these will be
stripped before the request is sent over to the remote service.
:param unique_id: Optional id of the document being updated on the
remote service. If omitted, the id will be inferred from the
payload.
:param etag: Optional document ETag. If omitted, the ETag will be
inferred from the payload.
:param \*\*kwargs: Optional arguments that :obj:`requests.Request`
takes.
:returns: The :class:`requests.Response` object, which contains a
server’s response to an HTTP request.
:raises ValueError: If the unique id is missing.
:raises ValueError: If ETag is missing and :any:`Settings.if_match` is
enabled.
:raises ValueError: If :any:`settings` is not set.
"""
req = self._build_patch_request(endpoint, payload, unique_id, etag, **kwargs)
return self._prepare_and_send_request(req)
def delete(self, endpoint, etag, unique_id, payload=None, **kwargs):
"""Sends a DELETE request.
:param endpoint: Target endpoint relative to the base URL of the
remote service.
:param unique_id: Optional id of the document being deleted on the
remote service. If omitted, the id will be inferred from the
payload.
:param etag: Optional document ETag. If omitted, the ETag will be
inferred from the payload.
:param payload: Optional JSON data used to infer document id and ETag
when they are not provided as arguments.
:param \*\*kwargs: Optional arguments that :obj:`requests.Request`
takes.
:returns: The :class:`requests.Response` object, which contains a
server’s response to an HTTP request.
:raises ValueError: If the unique id is missing.
:raises ValueError: If ETag is missing and :any:`Settings.if_match`
is enabled.
:raises ValueError: If :any:`settings` is not set.
"""
req = self._build_delete_request(endpoint, payload, etag, unique_id, **kwargs)
return self._prepare_and_send_request(req)
def get(self, endpoint, etag=None, unique_id=None, payload=None, **kwargs):
"""Sends a GET request.
:param endpoint: Target endpoint relative to the base URL of the
remote service.
:param etag: Optional document ETag. If present, a `If-None-Match`
header with the etag will be included with the request.
:param unique_id: Optional id of the document being retrieved from the
remote service.
:param payload: Optional JSON data used to infer document id and ETag
when they are not provided as arguments.
:param \*\*kwargs: Optional arguments that :obj:`requests.Request`
takes.
:returns: The :class:`requests.Response` object, which contains a
server’s response to an HTTP request.
:raises ValueError: If ETag is missing and :any:`Settings.if_match` is
enabled.
:raises ValueError: If :any:`settings` is not set.
"""
req = self._build_get_request(endpoint, etag, unique_id, payload, **kwargs)
return self._prepare_and_send_request(req)
    def _build_post_request(self, endpoint, payload, **kwargs):
        # POST targets the bare endpoint (no document id); payload goes out
        # as the JSON body.
        self.__validate()
        url = self._resolve_url(endpoint)
        return Client.__build_request("POST", url, json=payload, **kwargs)
def _build_put_request(
self, endpoint, payload, unique_id=None, etag=None, **kwargs
):
self.__validate()
url = self._resolve_url(endpoint, payload, unique_id, id_required=True)
headers = self._resolve_ifmatch_header(payload, etag)
json = purge_document(payload)
return Client.__build_request("PUT", url, json=json, headers=headers, **kwargs)
def _build_patch_request(
self, endpoint, payload, unique_id=None, etag=None, **kwargs
):
self.__validate()
url = self._resolve_url(endpoint, payload, unique_id, id_required=True)
headers = self._resolve_ifmatch_header(payload, etag)
json = purge_document(payload)
return Client.__build_request(
"PATCH", url, json=json, headers=headers, **kwargs
)
def _build_delete_request(
self, endpoint, payload=None, unique_id=None, etag=None, **kwargs
):
self.__validate()
url = self._resolve_url(
endpoint, payload=payload, unique_id=unique_id, id_required=True
)
headers = self._resolve_ifmatch_header(payload=payload, etag=etag)
return Client.__build_request("DELETE", url, headers=headers, **kwargs)
def _build_get_request(
self, endpoint, etag=None, unique_id=None, payload=None, **kwargs
):
self.__validate()
url = self._resolve_url(endpoint, payload=payload, unique_id=unique_id)
if payload or etag:
headers = self._resolve_if_none_match_header(etag=etag, payload=payload)
else:
headers = None
return Client.__build_request("GET", url, headers=headers, **kwargs)
def _resolve_url(self, endpoint, payload=None, unique_id=None, id_required=False):
if unique_id:
endpoint = "/".join([endpoint, unique_id])
elif payload and self.settings.id_field in payload:
endpoint = "/".join([endpoint, payload[self.settings.id_field]])
else:
if id_required:
raise ValueError("Unique id is required")
return urljoin(self.settings.base_url, endpoint)
def _resolve_if_none_match_header(self, payload=None, etag=None):
return_value = self._resolve_etag(payload, etag)
return {"If-None-Match": return_value} if return_value else None
def _resolve_ifmatch_header(self, payload=None, etag=None):
return_value = self._resolve_etag(payload, etag)
return {"If-Match": return_value} if return_value else None
def _resolve_etag(self, payload=None, etag=None):
if not self.settings.if_match:
return None
if etag:
return etag
if payload and self.settings.etag in payload:
return payload[self.settings.etag]
raise ValueError("ETag is required")
def _prepare_and_send_request(self, request):
request = self.session.prepare_request(request)
return self.session.send(request)
def __validate(self):
if not self.settings:
raise ValueError("Settings are required")
    @classmethod
    def __build_request(cls, method, url, json=None, headers=None, **kwargs):
        """Create an unprepared :obj:`requests.Request` with the given HTTP
        method, absolute url, optional JSON body and headers; any extra
        keyword arguments pass straight through to the Request constructor."""
        return Request(method, url, json=json, headers=headers, **kwargs)
|
# Level-3 land-cover taxonomy: numeric class code -> human-readable name.
# "n/a" is the sentinel key used for missing data.
level3_labels = {
    "n/a": "No data",
    111: "Cultivated Terrestrial Vegetation",
    112: "Natural Terrestrial Vegetation",
    123: "Cultivated Aquatic Vegetation",
    124: "Natural Aquatic Vegetation",
    215: "Artificial Surface",
    216: "Natural Bare Surface",
    220: "Water"
}
# Level-4 land-cover taxonomy: numeric class code -> full descriptive label
# (level-3 class refined by lifeform, cover fraction and water regime).
# Codes are sparse/non-contiguous; "n/a" is the missing-data sentinel.
level4_labels = {
    "n/a": "No data",
    1: 'Cultivated Terrestrial Vegetated',
    3: 'Cultivated Terrestrial Vegetated: Herbaceous',
    4: 'Cultivated Terrestrial Vegetated: Closed (> 65 %)',
    5: 'Cultivated Terrestrial Vegetated: Open (40 to 65 %)',
    6: 'Cultivated Terrestrial Vegetated: Open (15 to 40 %)',
    7: 'Cultivated Terrestrial Vegetated: Sparse (4 to 15 %)',
    8: 'Cultivated Terrestrial Vegetated: Scattered (1 to 4 %)',
    14: 'Cultivated Terrestrial Vegetated: Herbaceous Closed (> 65 %)',
    15: 'Cultivated Terrestrial Vegetated: Herbaceous Open (40 to 65 %)',
    16: 'Cultivated Terrestrial Vegetated: Herbaceous Open (15 to 40 %)',
    17: 'Cultivated Terrestrial Vegetated: Herbaceous Sparse (4 to 15 %)',
    18: 'Cultivated Terrestrial Vegetated: Herbaceous Scattered (1 to 4 %)',
    19: 'Natural Terrestrial Vegetated',
    20: 'Natural Terrestrial Vegetated: Woody',
    21: 'Natural Terrestrial Vegetated: Herbaceous',
    22: 'Natural Terrestrial Vegetated: Closed (> 65 %)',
    23: 'Natural Terrestrial Vegetated: Open (40 to 65 %)',
    24: 'Natural Terrestrial Vegetated: Open (15 to 40 %)',
    25: 'Natural Terrestrial Vegetated: Sparse (4 to 15 %)',
    26: 'Natural Terrestrial Vegetated: Scattered (1 to 4 %)',
    27: 'Natural Terrestrial Vegetated: Woody Closed (> 65 %)',
    28: 'Natural Terrestrial Vegetated: Woody Open (40 to 65 %)',
    29: 'Natural Terrestrial Vegetated: Woody Open (15 to 40 %)',
    30: 'Natural Terrestrial Vegetated: Woody Sparse (4 to 15 %)',
    31: 'Natural Terrestrial Vegetated: Woody Scattered (1 to 4 %)',
    32: 'Natural Terrestrial Vegetated: Herbaceous Closed (> 65 %)',
    33: 'Natural Terrestrial Vegetated: Herbaceous Open (40 to 65 %)',
    34: 'Natural Terrestrial Vegetated: Herbaceous Open (15 to 40 %)',
    35: 'Natural Terrestrial Vegetated: Herbaceous Sparse (4 to 15 %)',
    36: 'Natural Terrestrial Vegetated: Herbaceous Scattered (1 to 4 %)',
    55: 'Natural Aquatic Vegetated',
    56: 'Natural Aquatic Vegetated: Woody',
    57: 'Natural Aquatic Vegetated: Herbaceous',
    58: 'Natural Aquatic Vegetated: Closed (> 65 %)',
    59: 'Natural Aquatic Vegetated: Open (40 to 65 %)',
    60: 'Natural Aquatic Vegetated: Open (15 to 40 %)',
    61: 'Natural Aquatic Vegetated: Sparse (4 to 15 %)',
    62: 'Natural Aquatic Vegetated: Scattered (1 to 4 %)',
    63: 'Natural Aquatic Vegetated: Woody Closed (> 65 %)',
    64: 'Natural Aquatic Vegetated: Woody Closed (> 65 %) Water > 3 months (semi-) permanent',
    65: 'Natural Aquatic Vegetated: Woody Closed (> 65 %) Water < 3 months (temporary or seasonal)',
    66: 'Natural Aquatic Vegetated: Woody Open (40 to 65 %)',
    67: 'Natural Aquatic Vegetated: Woody Open (40 to 65 %) Water > 3 months (semi-) permanent',
    68: 'Natural Aquatic Vegetated: Woody Open (40 to 65 %) Water < 3 months (temporary or seasonal)',
    69: 'Natural Aquatic Vegetated: Woody Open (15 to 40 %)',
    70: 'Natural Aquatic Vegetated: Woody Open (15 to 40 %) Water > 3 months (semi-) permanent',
    71: 'Natural Aquatic Vegetated: Woody Open (15 to 40 %) Water < 3 months (temporary or seasonal)',
    72: 'Natural Aquatic Vegetated: Woody Sparse (4 to 15 %)',
    73: 'Natural Aquatic Vegetated: Woody Sparse (4 to 15 %) Water > 3 months (semi-) permanent',
    74: 'Natural Aquatic Vegetated: Woody Sparse (4 to 15 %) Water < 3 months (temporary or seasonal)',
    75: 'Natural Aquatic Vegetated: Woody Scattered (1 to 4 %)',
    76: 'Natural Aquatic Vegetated: Woody Scattered (1 to 4 %) Water > 3 months (semi-) permanent',
    77: 'Natural Aquatic Vegetated: Woody Scattered (1 to 4 %) Water < 3 months (temporary or seasonal)',
    78: 'Natural Aquatic Vegetated: Herbaceous Closed (> 65 %)',
    79: 'Natural Aquatic Vegetated: Herbaceous Closed (> 65 %) Water > 3 months (semi-) permanent',
    80: 'Natural Aquatic Vegetated: Herbaceous Closed (> 65 %) Water < 3 months (temporary or seasonal)',
    81: 'Natural Aquatic Vegetated: Herbaceous Open (40 to 65 %)',
    82: 'Natural Aquatic Vegetated: Herbaceous Open (40 to 65 %) Water > 3 months (semi-) permanent',
    83: 'Natural Aquatic Vegetated: Herbaceous Open (40 to 65 %) Water < 3 months (temporary or seasonal)',
    84: 'Natural Aquatic Vegetated: Herbaceous Open (15 to 40 %)',
    85: 'Natural Aquatic Vegetated: Herbaceous Open (15 to 40 %) Water > 3 months (semi-) permanent',
    86: 'Natural Aquatic Vegetated: Herbaceous Open (15 to 40 %) Water < 3 months (temporary or seasonal)',
    87: 'Natural Aquatic Vegetated: Herbaceous Sparse (4 to 15 %)',
    88: 'Natural Aquatic Vegetated: Herbaceous Sparse (4 to 15 %) Water > 3 months (semi-) permanent',
    89: 'Natural Aquatic Vegetated: Herbaceous Sparse (4 to 15 %) Water < 3 months (temporary or seasonal)',
    90: 'Natural Aquatic Vegetated: Herbaceous Scattered (1 to 4 %)',
    91: 'Natural Aquatic Vegetated: Herbaceous Scattered (1 to 4 %) Water > 3 months (semi-) permanent',
    92: 'Natural Aquatic Vegetated: Herbaceous Scattered (1 to 4 %) Water < 3 months (temporary or seasonal)',
    93: 'Artificial Surface',
    94: 'Natural Surface',
    95: 'Natural Surface: Sparsely vegetated',
    96: 'Natural Surface: Very sparsely vegetated',
    97: 'Natural Surface: Bare areas, unvegetated',
    98: 'Water',
    # NOTE(review): codes 98 and 99 both map to 'Water' -- looks intentional
    # (two source codes collapsing to one label) but worth confirming.
    99: 'Water',
    100: 'Water: Tidal area',
    101: 'Water: Perennial (> 9 months)',
    102: 'Water: Non-perennial (7 to 9 months)',
    103: 'Water: Non-perennial (4 to 6 months)',
    104: 'Water: Non-perennial (1 to 3 months)',
}
# Environmental-descriptor lookup tables: numeric code -> label.
# "n/a" is the missing-data sentinel in each table.

# Dominant lifeform of the vegetated classes.
lifeform_labels = {
    "n/a": "No data",
    1: "Woody",
    2: "Herbaceous",
}
# Canopy-cover fraction bands.
cover_labels = {
    "n/a": "No data",
    10: "Closed (> 65 %)",
    12: "Open (40 to 65 %)",
    13: "Open (15 to 40 %)",
    15: "Sparse (4 to 15 %)",
    16: "Scattered (1 to 4 %)",
}
# Water seasonality for aquatic vegetated classes.
# NOTE(review): "permenant" is a typo carried in the label value itself;
# left unchanged here because downstream code may match on the exact string.
water_seasonality_labels = {
    "n/a": "No data",
    1: "Water > 3 months (semi-) permenant",
    2: "Water < 3 months (temporary or seasonal)",
}
# Months-per-year persistence bands for open water.
water_persistence_labels = {
    "n/a": "No data",
    1: "Perennial (> 9 months)",
    7: "Non-perennial (7 to 9 months)",
    8: "Non-perennial (4 to 6 months)",
    9: "Non-perennial (1 to 3 months)",
}
# Bare-surface vegetation gradation.
bare_grad_labels = {
    "n/a": "No data",
    10: "Sparsely vegetated",
    12: "Very sparsely vegetated",
    15: "Bare areas, unvegetated",
}
def class_labels(data):
    """Translate the numeric level-3/level-4 codes in *data* into text labels.

    Raises KeyError if either code is missing from its lookup table.
    """
    level3 = level3_labels[data["level3"]]
    level4 = level4_labels[data["level4"]]
    return {"level3_label": level3, "level4_label": level4}
def env_descriptor_labels(data):
    """Translate every environmental-descriptor code in *data* into its label.

    Raises KeyError if any code is absent from its lookup table.
    """
    mapping = (
        ("lifeform_label", lifeform_labels, "lifeform"),
        ("canopyco_label", cover_labels, "canopy_cover"),
        ("watersea_label", water_seasonality_labels, "water_seasonality"),
        ("waterper_label", water_persistence_labels, "water_persistence"),
        ("baregrad_label", bare_grad_labels, "bare_gradation"),
    )
    return {out_key: table[data[in_key]] for out_key, table, in_key in mapping}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial, reduce
from models.attention import PAM_Module, CAM_Module
import pdb
def norm(planes, mode='bn', groups=16):
    """Return a normalization layer for *planes* channels.

    ``'bn'`` -> BatchNorm3d (momentum 0.95, eps 1e-3); ``'gn'`` -> GroupNorm
    with *groups* groups; any other mode -> identity (empty Sequential).
    """
    if mode == 'gn':
        return nn.GroupNorm(groups, planes)
    if mode == 'bn':
        return nn.BatchNorm3d(planes, momentum=0.95, eps=1e-03)
    return nn.Sequential()
class CBR(nn.Module):
    """Conv3d -> norm -> ReLU with 'same'-style padding for odd kernels.

    :param nIn: input channels
    :param nOut: output channels
    :param kSize: kernel size per axis; an int is expanded to a cubic kernel.
        (The original indexed ``kSize[0]`` directly and crashed with a
        TypeError for call sites passing an int, e.g. ``CBR(nIn, nOut, 3, 2)``
        in ``DownSample``.)
    :param stride: conv stride
    :param dilation: conv dilation (also scales the padding)
    """

    def __init__(self, nIn, nOut, kSize=(3, 3, 3), stride=1, dilation=1):
        super(CBR, self).__init__()
        if isinstance(kSize, int):
            kSize = (kSize, kSize, kSize)
        # (k - 1) // 2 * dilation keeps the spatial size at stride 1 for odd k.
        padding = tuple(int((k - 1) / 2) * dilation for k in kSize)
        self.conv = nn.Conv3d(nIn, nOut, kSize, stride=stride, padding=padding,
                              bias=False, dilation=dilation)
        self.bn = norm(nOut)
        self.act = nn.ReLU(True)

    def forward(self, input):
        output = self.conv(input)
        output = self.bn(output)
        output = self.act(output)
        return output
class CB(nn.Module):
    """Conv3d -> norm with no activation (second half of a residual pair).

    Accepts an int kernel size (expanded to cubic) for consistency with CBR;
    the original crashed indexing an int ``kSize``.
    """

    def __init__(self, nIn, nOut, kSize=(3, 3, 3), stride=1, dilation=1):
        super(CB, self).__init__()
        if isinstance(kSize, int):
            kSize = (kSize, kSize, kSize)
        padding = tuple(int((k - 1) / 2) * dilation for k in kSize)
        self.conv = nn.Conv3d(nIn, nOut, kSize, stride=stride, padding=padding,
                              bias=False, dilation=dilation)
        self.bn = norm(nOut)

    def forward(self, input):
        output = self.conv(input)
        output = self.bn(output)
        return output
class C(nn.Module):
    """Bare Conv3d with 'same'-style padding (no norm, no activation).

    Accepts an int kernel size (expanded to cubic); the original crashed
    with a TypeError when indexing an int ``kSize``.
    """

    def __init__(self, nIn, nOut, kSize=(3, 3, 3), stride=1, dilation=1):
        super(C, self).__init__()
        if isinstance(kSize, int):
            kSize = (kSize, kSize, kSize)
        padding = tuple(int((k - 1) / 2) * dilation for k in kSize)
        self.conv = nn.Conv3d(nIn, nOut, kSize, stride=stride, padding=padding,
                              bias=False, dilation=dilation)

    def forward(self, input):
        return self.conv(input)
class BR(nn.Module):
    """Normalization followed by in-place ReLU (no convolution)."""

    def __init__(self, nIn):
        super(BR, self).__init__()
        self.bn = norm(nIn)
        self.act = nn.ReLU(True)

    def forward(self, input):
        normalized = self.bn(input)
        return self.act(normalized)
class BasicBlock(nn.Module):
    """Residual block: CBR -> CB plus a projection shortcut when the channel
    count or stride changes, with ReLU applied after the sum.

    NOTE(review): ``prob`` is accepted but never used; kept for interface
    compatibility with callers.
    """

    expansion = 1

    def __init__(self, nIn, nOut, kernel_size=(3, 3, 3), prob=0.03, stride=1, dilation=1):
        super(BasicBlock, self).__init__()
        self.c1 = CBR(nIn, nOut, kernel_size, stride, dilation)
        self.c2 = CB(nOut, nOut, kernel_size, 1, dilation)
        self.act = nn.ReLU(True)
        needs_projection = nIn != nOut or stride != 1
        if needs_projection:
            # 1x1x1 strided conv aligns the identity path with the main path.
            self.downsample = nn.Sequential(
                nn.Conv3d(nIn, nOut, kernel_size=1, stride=stride, bias=False),
                norm(nOut)
            )
        else:
            self.downsample = None

    def forward(self, input):
        residual = input if self.downsample is None else self.downsample(input)
        out = self.c2(self.c1(input))
        return self.act(out + residual)
class DownSample(nn.Module):
    """Halve each spatial dimension, mapping nIn -> nOut channels.

    ``pool='conv'`` uses a stride-2 CBR; ``pool='max'`` uses 2x max-pooling,
    followed by a 1x1x1 CBR when the channel count changes.

    Fixes two bugs in the original: (1) the conv path passed int kernel
    sizes into CBR, which indexed them and raised TypeError; (2) with
    ``pool='conv'`` and ``nIn != nOut`` the local ``pool`` was still the
    string ``'conv'`` when wrapped into ``nn.Sequential``.
    """

    def __init__(self, nIn, nOut, pool='max'):
        super(DownSample, self).__init__()
        if pool == 'conv':
            # Strided conv already maps nIn -> nOut; nothing more to add.
            self.pool = CBR(nIn, nOut, (3, 3, 3), 2)
        else:
            down = nn.MaxPool3d(kernel_size=2, stride=2)
            if nIn != nOut:
                # Max-pool keeps channels; add a 1x1x1 CBR to change them.
                down = nn.Sequential(down, CBR(nIn, nOut, (1, 1, 1), 1))
            self.pool = down

    def forward(self, input):
        output = self.pool(input)
        return output
class Upsample(nn.Module):
    """2x trilinear upsampling followed by a CBR block (nIn -> nOut)."""

    def __init__(self, nIn, nOut):
        super(Upsample, self).__init__()
        self.conv = CBR(nIn, nOut)

    def forward(self, x):
        # F.upsample is deprecated (and removed in recent PyTorch);
        # F.interpolate with align_corners=False reproduces the old default.
        p = F.interpolate(x, scale_factor=2, mode='trilinear', align_corners=False)
        return self.conv(p)
class UNet(nn.Module):
    """3-D U-Net for volumetric segmentation.

    Four encoder stages (widths k, 2k, 4k, 8k) joined by 2x max-pooling, an
    8k bottleneck, then a transposed-convolution decoder that concatenates
    the matching encoder feature map before each CBR fusion.

    :param segClasses: number of foreground classes; the head outputs
        ``segClasses + 1`` channels (presumably +1 for background -- confirm
        against the loss function).
    :param k: base channel width.
    """

    def __init__(self, segClasses=2, k=64):
        super(UNet, self).__init__()
        # Encode
        self.layer1 = CBR(nIn=1, nOut=k, kSize=(3, 3, 3), stride=1, dilation=1)
        self.pool1 = DownSample(nIn=k, nOut=k, pool='max')
        self.layer2 = CBR(nIn=k, nOut=2 * k, kSize=(3, 3, 3), stride=1, dilation=1)
        self.pool2 = DownSample(nIn=2 * k, nOut=2 * k, pool='max')
        self.layer3_1 = CBR(nIn=2 * k, nOut=4 * k, kSize=(3, 3, 3), stride=1, dilation=1)
        self.layer3_2 = CBR(nIn=4 * k, nOut=4 * k, kSize=(3, 3, 3), stride=1, dilation=1)
        self.pool3 = DownSample(nIn=4 * k, nOut=4 * k, pool='max')
        self.layer4_1 = CBR(nIn=4 * k, nOut=8 * k, kSize=(3, 3, 3), stride=1, dilation=1)
        self.layer4_2 = CBR(nIn=8 * k, nOut=8 * k, kSize=(3, 3, 3), stride=1, dilation=1)
        self.pool4 = DownSample(nIn=8 * k, nOut=8 * k, pool='max')
        self.layer5_1 = CBR(nIn=8 * k, nOut=8 * k, kSize=(3, 3, 3), stride=1, dilation=1)
        self.layer5_2 = CBR(nIn=8 * k, nOut=8 * k, kSize=(3, 3, 3), stride=1, dilation=1)
        # Decode: each classN_1 is a stride-2 transposed conv (2x upsample);
        # each classN_2 fuses it with the encoder skip (hence the summed
        # input channel counts below).
        self.class1_1 = nn.Sequential(
            nn.ConvTranspose3d(8 * k, 8 * k, kernel_size=(3, 3, 3), stride=2, padding=1, output_padding=1),
            norm(8 * k),
            nn.ReLU())
        self.class1_2 = CBR(8 * k + 8 * k, 4 * k, kSize=(3, 3, 3), stride=1, dilation=1)
        self.class2_1 = nn.Sequential(
            nn.ConvTranspose3d(4 * k, 4 * k, kernel_size=(3, 3, 3), stride=2, padding=1, output_padding=1),
            norm(4 * k),
            nn.ReLU())
        self.class2_2 = CBR(4 * k + 4 * k, 2 * k, kSize=(3, 3, 3), stride=1, dilation=1)
        self.class3_1 = nn.Sequential(
            nn.ConvTranspose3d(2 * k, 2 * k, kernel_size=(3, 3, 3), stride=2, padding=1, output_padding=1),
            norm(2 * k),
            nn.ReLU())
        self.class3_2 = CBR(2 * k + 2 * k, k, kSize=(3, 3, 3), stride=1, dilation=1)
        self.class4_1 = nn.Sequential(
            nn.ConvTranspose3d(k, k, kernel_size=(3, 3, 3), stride=2, padding=1, output_padding=1),
            norm(k),
            nn.ReLU())
        self.class4_2 = CBR(k + k, int(k / 2), kSize=(3, 3, 3), stride=1, dilation=1)
        # 1x1x1 head to per-voxel class logits.
        self.class5 = nn.Conv3d(int(k / 2), segClasses + 1, kernel_size=1, bias=False)

    def forward(self, x):
        # Encoder: keep the pre-pool activations for the skip connections.
        output1_1 = self.layer1(x)
        output1_2 = self.pool1(output1_1)
        output2_1 = self.layer2(output1_2)
        output2_2 = self.pool2(output2_1)
        output3_1 = self.layer3_2(self.layer3_1(output2_2))
        output3_2 = self.pool3(output3_1)
        output4_1 = self.layer4_2(self.layer4_1(output3_2))
        output4_2 = self.pool4(output4_1)
        # Bottleneck.
        output5 = self.layer5_2(self.layer5_1(output4_2))
        # Decoder: upsample, concatenate the skip along channels, fuse.
        deconv1_1 = self.class1_1(output5)
        deconv1_2 = self.class1_2(torch.cat([deconv1_1, output4_1], 1))
        deconv2_1 = self.class2_1(deconv1_2)
        deconv2_2 = self.class2_2(torch.cat([deconv2_1, output3_1], 1))
        deconv3_1 = self.class3_1(deconv2_2)
        deconv3_2 = self.class3_2(torch.cat([deconv3_1, output2_1], 1))
        deconv4_1 = self.class4_1(deconv3_2)
        deconv4_2 = self.class4_2(torch.cat([deconv4_1, output1_1], 1))
        output = self.class5(deconv4_2)
        return output
class DANetHead(nn.Module):
    """Dual-attention head: parallel position (PAM) and channel (CAM)
    attention branches whose features are summed and projected.

    NOTE(review): ``sa_output``/``sc_output`` (conv6/conv7) are computed in
    ``forward`` but never returned -- in the reference DANet they are
    auxiliary outputs; here they are dead computation kept for weight
    compatibility. Confirm before removing.
    """

    def __init__(self, in_channels, out_channels):
        super(DANetHead, self).__init__()
        # Both branches first reduce to in_channels // 4.
        inter_channels = in_channels // 4
        self.conv5a = nn.Sequential(nn.Conv3d(in_channels, inter_channels, 3, padding=1, bias=False),
                                    norm(inter_channels),
                                    nn.ReLU())
        self.conv5c = nn.Sequential(nn.Conv3d(in_channels, inter_channels, 3, padding=1, bias=False),
                                    norm(inter_channels),
                                    nn.ReLU())
        self.sa = PAM_Module(inter_channels)  # spatial/position attention
        self.sc = CAM_Module(inter_channels)  # channel attention
        self.conv51 = nn.Sequential(nn.Conv3d(inter_channels, inter_channels, 3, padding=1, bias=False),
                                    norm(inter_channels),
                                    nn.ReLU())
        self.conv52 = nn.Sequential(nn.Conv3d(inter_channels, inter_channels, 3, padding=1, bias=False),
                                    norm(inter_channels),
                                    nn.ReLU())
        self.conv6 = nn.Sequential(nn.Dropout3d(0.05, False), nn.Conv3d(inter_channels, out_channels, 1),
                                   nn.ReLU())
        self.conv7 = nn.Sequential(nn.Dropout3d(0.05, False), nn.Conv3d(inter_channels, out_channels, 1),
                                   nn.ReLU())
        self.conv8 = nn.Sequential(nn.Dropout3d(0.05, False), nn.Conv3d(inter_channels, out_channels, 1),
                                   nn.ReLU())

    def forward(self, x):
        # Position-attention branch.
        feat1 = self.conv5a(x)
        sa_feat = self.sa(feat1)
        sa_conv = self.conv51(sa_feat)
        sa_output = self.conv6(sa_conv)  # unused (see class docstring)
        # Channel-attention branch.
        feat2 = self.conv5c(x)
        sc_feat = self.sc(feat2)
        sc_conv = self.conv52(sc_feat)
        sc_output = self.conv7(sc_conv)  # unused (see class docstring)
        # Fuse the two branches and project to out_channels.
        feat_sum = sa_conv + sc_conv
        sasc_output = self.conv8(feat_sum)
        return sasc_output
class DAResNet3d(nn.Module):
    """3-D residual encoder-decoder with a dual-attention (DANet) bottleneck.

    Returns a tuple ``(out, fmap)``: per-voxel class logits and a k-channel
    feature map, both trilinearly resized to the input's spatial size.

    :param classes: number of output classes for the segmentation head.
    :param k: base channel width; encoder widths are k, 2k, 4k, 8k.
    """

    def __init__(self, classes=2, k=16):
        super(DAResNet3d, self).__init__()
        # Stem: stride-2 conv halves each spatial dim up front.
        self.layer0 = nn.Sequential(OrderedDict([
            ('conv1', nn.Conv3d(1, k, kernel_size=3, stride=2, padding=1, bias=False)),
            ('bn1', norm(k)),
            ('relu1', nn.ReLU(inplace=True)),
            ('conv2', nn.Conv3d(k, k, kernel_size=3, stride=1, padding=1, bias=False)),
            ('bn2', norm(k)),
            ('relu2', nn.ReLU(inplace=True))]
        ))
        self.inplanes = k
        # Residual stages; layers 2/3 use (3,3,1) kernels and layer 3 keeps
        # the 3rd axis unstrided -- presumably for anisotropic volumes.
        self.layer1 = self._make_layer(BasicBlock, k, 3, kernel_size=(3, 3, 3), stride=1)
        self.layer2 = self._make_layer(BasicBlock, 2 * k, 3, kernel_size=(3, 3, 1), stride=2)
        self.layer3 = self._make_layer(BasicBlock, 4 * k, 3, kernel_size=(3, 3, 1), stride=(2, 2, 1))
        self.layer4 = self._make_layer(BasicBlock, 8 * k, 3, kernel_size=(3, 3, 3), stride=(2, 2, 2))
        # Dual-attention bottleneck on the deepest features.
        self.class4 = DANetHead(8 * k, 8 * k)
        # Decoder: transposed convs mirror the encoder strides; each classN
        # fuses the upsampled map with the matching encoder output.
        self.up3 = nn.Sequential(
            nn.ConvTranspose3d(8 * k, 8 * k, kernel_size=(2, 2, 2), stride=(2, 2, 2)),
            norm(8 * k),
            nn.ReLU(inplace=False)
        )
        self.class3 = nn.Sequential(
            CBR(4 * k + 8 * k, 4 * k, (3, 3, 1))
        )
        self.up2 = nn.Sequential(
            nn.ConvTranspose3d(4 * k, 4 * k, kernel_size=(2, 2, 1), stride=(2, 2, 1)),
            norm(4 * k),
            nn.ReLU(inplace=False)
        )
        self.class2 = nn.Sequential(
            CBR(2 * k + 4 * k, 2 * k, (3, 3, 3)),
        )
        self.up1 = nn.Sequential(
            nn.ConvTranspose3d(2 * k, 2 * k, kernel_size=2, stride=2),
            norm(2 * k),
            nn.ReLU(inplace=False)
        )
        # Two parallel heads over the same fused features.
        self.class1 = nn.Sequential(
            CBR(k + 2 * k, 2 * k),
            nn.Conv3d(2 * k, classes, kernel_size=1, bias=False),
        )
        self.map = nn.Sequential(
            CBR(k + 2 * k, 2 * k),
            nn.Conv3d(2 * k, k, kernel_size=1, bias=False),
        )
        self._init_weight()

    def forward(self, x):
        x_size = x.size()
        x = self.layer0(x)
        x1 = self.layer1(x)
        x2 = self.layer2(x1)
        x3 = self.layer3(x2)
        x4 = self.class4(self.layer4(x3))
        # Decoder with skip concatenations.
        x = self.class3(torch.cat([self.up3(x4), x3], 1))
        x = self.class2(torch.cat([self.up2(x), x2], 1))
        x = torch.cat([self.up1(x), x1], 1)
        out = self.class1(x)
        fmap = self.map(x)
        # Resize both outputs back to the input's spatial dimensions
        # (the stem halved them, so this is a 2x trilinear upsample).
        out = F.interpolate(out, x_size[2:], mode='trilinear', align_corners=True)
        fmap = F.interpolate(fmap, x_size[2:], mode='trilinear', align_corners=True)
        return out, fmap

    def _make_layer(self, block, planes, blocks, kernel_size=(3, 3, 3), stride=1, dilation=1):
        # First block carries the stride/channel change; the rest keep shape.
        layers = []
        layers.append(block(self.inplanes, planes, kernel_size=kernel_size, stride=stride, dilation=dilation))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, kernel_size=kernel_size))
        return nn.Sequential(*layers)

    def _init_weight(self):
        # Kaiming init for convs; unit-gain affine init for batch norms.
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                torch.nn.init.kaiming_normal_(m.weight, mode='fan_in')
            elif isinstance(m, nn.BatchNorm3d):
                torch.nn.init.constant_(m.weight, 1)
                torch.nn.init.constant_(m.bias, 0)
class DAResUNet(nn.Module):
    """Residual U-Net with a dual-attention bottleneck.

    Encoder: three max-pool stages of BasicBlocks (widths 2k, 4k, 8k, the
    last with dilations 1/2/4); decoder: plain trilinear upsampling with
    skip concatenations fused by BasicBlocks. Returns per-voxel logits at
    the input resolution.
    """

    def __init__(self, segClasses=2, k=16):
        super(DAResUNet, self).__init__()
        self.layer0 = CBR(1, k, (7, 7, 7), 1)  # large receptive-field stem
        # Decoder fusers are declared classN to mirror the encoder layerN
        # they consume via skip connection (input channels = skip + upsampled).
        self.class0 = nn.Sequential(
            BasicBlock(k + 2 * k, 2 * k),
            nn.Conv3d(2 * k, segClasses, kernel_size=1, bias=False)
        )
        self.pool1 = DownSample(k, k, 'max')
        self.layer1 = nn.Sequential(
            BasicBlock(k, 2 * k),
            BasicBlock(2 * k, 2 * k)
        )
        self.class1 = nn.Sequential(
            BasicBlock(2 * k + 4 * k, 4 * k),
            CBR(4 * k, 2 * k, (1, 1, 1))
        )
        self.pool2 = DownSample(2 * k, 2 * k, 'max')
        self.layer2 = nn.Sequential(
            BasicBlock(2 * k, 4 * k),
            BasicBlock(4 * k, 4 * k)
        )
        self.class2 = nn.Sequential(
            BasicBlock(4 * k + 8 * k, 8 * k),
            CBR(8 * k, 4 * k, (1, 1, 1))
        )
        self.pool3 = DownSample(4 * k, 4 * k, 'max')
        # Growing dilations widen the receptive field without more pooling.
        self.layer3 = nn.Sequential(
            BasicBlock(4 * k, 8 * k, dilation=1),
            BasicBlock(8 * k, 8 * k, dilation=2),
            BasicBlock(8 * k, 8 * k, dilation=4)
        )
        self.class3 = DANetHead(8 * k, 8 * k)  # dual-attention bottleneck
        # NOTE(review): no align_corners given -- newer torch warns and
        # defaults to False; confirm intended interpolation behavior.
        self.up1 = nn.Upsample(scale_factor=2, mode='trilinear')
        self.up2 = nn.Upsample(scale_factor=2, mode='trilinear')
        self.up3 = nn.Upsample(scale_factor=2, mode='trilinear')
        self._init_weight()

    def _init_weight(self):
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                # NOTE(review): fan computed from only two kernel dims
                # (kernel_size[2] omitted) -- a 2-D-style He init applied to
                # 3-D convs; likely an oversight but preserved here.
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                # torch.nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm3d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def forward(self, x):
        # Encoder; keep each stage's output for the decoder skips.
        output0 = self.layer0(x)
        output1_0 = self.pool1(output0)
        output1 = self.layer1(output1_0)
        output2_0 = self.pool2(output1)
        output2 = self.layer2(output2_0)
        output3_0 = self.pool3(output2)
        output3 = self.layer3(output3_0)
        # Attention bottleneck, then upsample + skip-fuse back to full size.
        output = self.class3(output3)
        output = self.up3(output)
        output = self.class2(torch.cat([output2, output], 1))
        output = self.up2(output)
        output = self.class1(torch.cat([output1, output], 1))
        output = self.up1(output)
        output = self.class0(torch.cat([output0, output], 1))
        return output
|
import torch
import numpy as np
import torch.nn.functional as F
# Additive smoothing term used to avoid division by zero in IoU ratios.
SMOOTH = 1e-6
# Names of the 9 foreground classes; index i corresponds to class id i + 1
# (class id 0 is background and is ignored by the metrics below).
classes = ["car", "motorcycle", "bus", "bicycle", "truck", "pedestrian", "other_vehicle", "animal", "emergency_vehicle"]
def iou_pytorch(pred, target, n_classes=9, print_table=True):
    """Per-class intersection-over-union for dense predictions (PyTorch).

    :param pred: predicted labels (any shape); values are rounded to the
        nearest integer class id.
    :param target: ground-truth labels with the same number of elements.
    :param n_classes: total class count; class 0 (background) is skipped.
    :param print_table: when True, print the class names and the IoU table.
    :returns: ``(ious, mean_iou)`` -- an ndarray of length ``n_classes - 1``
        (NaN for classes absent from prediction and truth) and its NaN-aware
        mean.
    """
    ious = []
    # Cast to float32 before round() (the old torch.Tensor(...) did the same
    # implicitly, and round() is undefined for integer tensors on old torch).
    # No .cuda(): the metric is device-agnostic and must run on CPU-only hosts.
    pred = torch.as_tensor(pred).float().round().view(-1)
    target = torch.as_tensor(target).view(-1)
    for cls in range(1, n_classes):  # class "0" is ignored
        pred_inds = pred == cls
        target_inds = target == cls
        # .item() replaces the removed 0-dim indexing idiom `tensor.data[0]`.
        intersection = (pred_inds & target_inds).long().sum().item()
        union = pred_inds.long().sum().item() + target_inds.long().sum().item() - intersection
        if union == 0:
            ious.append(float('nan'))  # class absent everywhere: exclude from mean
        else:
            ious.append(float(intersection) / float(max(union, 1)))
    if print_table:
        print(f'classes: {classes}')
        print(f'ious: {ious}, mean {np.nanmean(ious)}')
    # nanmean (not mean): a single absent class must not poison the average,
    # matching the value printed above.
    return np.array(ious), np.nanmean(ious)
def precision_at(threshold, iou):
    """Count true/false positives and false negatives from a pairwise IoU
    matrix thresholded at *threshold*.

    NOTE(review): the axis semantics assume one axis indexes ground-truth
    objects and the other predictions -- confirm orientation with the caller.
    """
    matches = iou >= threshold
    hits_per_row = np.sum(matches, axis=1)
    hits_per_col = np.sum(matches, axis=0)
    tp = np.sum(hits_per_row == 1)   # rows matched exactly once
    fp = np.sum(hits_per_col == 0)   # columns with no match
    fn = np.sum(hits_per_row == 0)   # rows with no match
    return tp, fp, fn
def binarize_predictions(prediction, num_classes=9):
    """One-hot encode a 2-D map of predicted class values.

    Values are rounded to the nearest integer; value ``v`` sets channel
    ``v - 1``, so 0 (background) maps to the all-zero vector.

    :param prediction: 2-D array-like of predicted class values.
    :param num_classes: number of foreground channels. The original read an
        undefined global ``NUM_CLASSES`` (NameError at runtime); it is now a
        parameter defaulting to this module's 9 classes.
    :returns: uint8 array of shape ``prediction.shape + (num_classes,)``.
    """
    output = np.rint(prediction).astype(np.uint8)
    output_one_hot = (output[:, :, None] - 1 == np.arange(num_classes)[None, None, :]).astype(np.uint8)
    return output_one_hot
def iou_numpy(outputs: np.array, labels: np.array, classes = classes, num_classes = 9, print_table = False):
    """Multiclass IoU, NumPy version.

    Class ids run from 1 to *num_classes* inclusive; 0 (background) is
    ignored. Returns ``(ious, nan_mean)`` with NaN for classes absent from
    both prediction and ground truth.
    """
    smooth = 1e-6  # additive smoothing against 0/0
    ious = []
    outputs = np.rint(outputs).astype(np.uint8)
    for cls_id in range(1, num_classes + 1):
        predicted = outputs == cls_id
        actual = labels == cls_id
        overlap = (predicted * actual).sum()
        union = predicted.sum() + actual.sum() - overlap
        if union == 0:
            # Class missing from both maps: excluded from evaluation.
            ious.append(float('nan'))
        else:
            ious.append((overlap + smooth) / (union + smooth))
    if print_table:
        print(f'classes: {classes}')
        print(f'ious: {ious}, mean {np.nanmean(ious)}')
    return ious, np.nanmean(ious)
def test_iou_pytorch():
    """Smoke-test iou_pytorch on a small hand-made example.

    Fixes from the original: `a_rounded.dtype(torch.int8)` raised TypeError
    (torch.dtype objects are not callable); the result was unpacked into
    `ios` but printed as `ious` (NameError); and the unconditional `.cuda()`
    broke CPU-only machines.
    """
    a = np.array([[1.1, 1.4, 5.3, 0],
                  [2.1, 4.6, 2.3, 0],
                  [0, 0, 0, 0],
                  [0, 0, 0, 0]])
    b = np.array([[1, 1, 5, 4],
                  [2, 2, 1, 1],
                  [7, 3, 3, 3],
                  [0, 0, 0, 0]])
    a_rounded = torch.as_tensor(a).round().to(torch.int8)
    print(f'a {a}\n b {b} \n a_rounded {a_rounded}, a_rounded.dtype {a_rounded.dtype}')
    ious, iou = iou_pytorch(a, b)
    print(f"IoUs: {ious} mean IoU: {iou}")
def test_iou_multiclass():
A = torch.tensor([
[[1.1, 1.2, 2.3, 2.1],
[1, 1, 2, 3],
[1, 1, 3, 3]]
])
B = torch.tensor([
[[1, 1, 2, 2],
[1, 1, 2, 2],
[1, 3, 3, 2]]
])
A = torch.Tensor(A).cuda().round().type(torch.int8)
A_oh = F.one_hot(A)
B_oh = F.one_hot(B)
print(f'a {A_oh},\n b {A_oh}')
int_AB = A_oh & B_oh
union_AB = A_oh | B_oh
iou = int_AB.sum(1).sum(1).type(torch.float32) / union_AB.sum(1).sum(1).type(torch.float32)
print(iou[:, 1:])
def test_iou_sample():
    """Smoke-test iou_numpy on a small hand-made example.

    Fixes the original's final line, which called the undefined name
    ``rint(...)`` instead of ``print(...)`` (NameError).
    """
    a = np.array([[1.1, 1.4, 5.3, 0],
                  [2.1, 4.6, 2.3, 0],
                  [0, 0, 0, 0],
                  [0, 0, 0, 0]])
    b = np.array([[1, 1, 5, 4],
                  [2, 2, 1, 1],
                  [7, 3, 3, 3],
                  [0, 0, 0, 0]])
    print(f'a {a},\n b {b}')
    a = np.rint(a).astype(np.uint8)
    ious, iou = iou_numpy(a, b)
    print(f"IoUs: {ious} mean IoU: {iou}")
def main():
    # Run the three demo/smoke tests in sequence.
    test_iou_sample()
    test_iou_multiclass()
    test_iou_pytorch()
if __name__ == "__main__":
main()
|
<filename>mockredis/tests/test_script.py
from unittest import TestCase, skipUnless
from hashlib import sha1
from mockredis.exceptions import RedisError
from mockredis.redis import MockRedis
from mockredis.tests.test_constants import (
LIST1, LIST2,
VAL1, VAL2, VAL3, VAL4,
LPOP_SCRIPT
)
def has_lua():
    """Return True when the `lua` binding needed for scripting tests is importable."""
    try:
        import lua  # noqa
    except ImportError:
        return False
    return True
@skipUnless(has_lua(), "mockredispy was not installed with lua support")
class TestScript(TestCase):
    """
    Tests for MockRedis scripting operations.

    All assertions use assertEqual: the assertEquals alias is deprecated
    and was removed in Python 3.12 (the file already used assertEqual in
    some tests, so this also restores consistency).
    """

    def setUp(self):
        self.redis = MockRedis()
        # NOTE(review): sha1() requires bytes on Python 3 -- assumes
        # LPOP_SCRIPT is a bytes literal; confirm in test_constants.
        self.LPOP_SCRIPT_SHA = sha1(LPOP_SCRIPT).hexdigest()

    def test_register_script_lpush(self):
        # lpush two values
        script_content = "redis.call('LPUSH', KEYS[1], ARGV[1], ARGV[2])"
        script = self.redis.register_script(script_content)
        script(keys=[LIST1], args=[VAL1, VAL2])

        # validate insertion
        self.assertEqual([VAL2, VAL1], self.redis.lrange(LIST1, 0, -1))

    def test_register_script_lpop(self):
        self.redis.lpush(LIST1, VAL2, VAL1)

        # lpop one value
        script_content = "return redis.call('LPOP', KEYS[1])"
        script = self.redis.register_script(script_content)
        list_item = script(keys=[LIST1])

        # validate lpop
        self.assertEqual(VAL1, list_item)
        self.assertEqual([VAL2], self.redis.lrange(LIST1, 0, -1))

    def test_register_script_rpoplpush(self):
        self.redis.lpush(LIST1, VAL2, VAL1)
        self.redis.lpush(LIST2, VAL4, VAL3)

        # rpoplpush
        script_content = "redis.call('RPOPLPUSH', KEYS[1], KEYS[2])"
        script = self.redis.register_script(script_content)
        script(keys=[LIST1, LIST2])

        # validate rpoplpush
        self.assertEqual([VAL1], self.redis.lrange(LIST1, 0, -1))
        self.assertEqual([VAL2, VAL3, VAL4], self.redis.lrange(LIST2, 0, -1))

    def test_register_script_rpop_lpush(self):
        self.redis.lpush(LIST1, VAL2, VAL1)
        self.redis.lpush(LIST2, VAL4, VAL3)

        # rpop from LIST1 and lpush the same value to LIST2
        script_content = """
        local tmp_item = redis.call('RPOP', KEYS[1])
        redis.call('LPUSH', KEYS[2], tmp_item)
        """
        script = self.redis.register_script(script_content)
        script(keys=[LIST1, LIST2])

        # validate rpop and then lpush
        self.assertEqual([VAL1], self.redis.lrange(LIST1, 0, -1))
        self.assertEqual([VAL2, VAL3, VAL4], self.redis.lrange(LIST2, 0, -1))

    def test_register_script_client(self):
        # lpush two values in LIST1 in first instance of redis
        self.redis.lpush(LIST1, VAL2, VAL1)

        # create script on first instance of redis
        script_content = LPOP_SCRIPT
        script = self.redis.register_script(script_content)

        # lpush two values in LIST1 in redis2 (second instance of redis)
        redis2 = MockRedis()
        redis2.lpush(LIST1, VAL4, VAL3)

        # execute LPOP script on redis2 instance
        list_item = script(keys=[LIST1], client=redis2)

        # validate lpop from LIST1 in redis2; the first instance is untouched
        self.assertEqual(VAL3, list_item)
        self.assertEqual([VAL4], redis2.lrange(LIST1, 0, -1))
        self.assertEqual([VAL1, VAL2], self.redis.lrange(LIST1, 0, -1))

    def test_eval_lpush(self):
        # lpush two values
        script_content = "redis.call('LPUSH', KEYS[1], ARGV[1], ARGV[2])"
        self.redis.eval(script_content, 1, LIST1, VAL1, VAL2)

        # validate insertion
        self.assertEqual([VAL2, VAL1], self.redis.lrange(LIST1, 0, -1))

    def test_eval_lpop(self):
        self.redis.lpush(LIST1, VAL2, VAL1)

        # lpop one value
        script_content = "return redis.call('LPOP', KEYS[1])"
        list_item = self.redis.eval(script_content, 1, LIST1)

        # validate lpop
        self.assertEqual(VAL1, list_item)
        self.assertEqual([VAL2], self.redis.lrange(LIST1, 0, -1))

    def test_evalsha(self):
        self.redis.lpush(LIST1, VAL1)
        script = LPOP_SCRIPT
        sha = self.LPOP_SCRIPT_SHA

        # validate error when the script sha is not registered yet
        with self.assertRaises(RedisError) as redisError:
            self.redis.evalsha(self.LPOP_SCRIPT_SHA, 1, LIST1)

        self.assertEqual("Sha not registered", str(redisError.exception))
        self.assertRaises(RedisError, self.redis.evalsha, self.LPOP_SCRIPT_SHA, 1, LIST1)

        # load script and then evalsha
        self.assertEqual(sha, self.redis.script_load(script))
        self.assertEqual(VAL1, self.redis.evalsha(sha, 1, LIST1))
        self.assertEqual(0, self.redis.llen(LIST1))

    def test_script_exists(self):
        script = LPOP_SCRIPT
        sha = self.LPOP_SCRIPT_SHA
        self.assertEqual([False], self.redis.script_exists(sha))
        self.redis.register_script(script)
        self.assertEqual([True], self.redis.script_exists(sha))

    def test_script_flush(self):
        script = LPOP_SCRIPT
        sha = self.LPOP_SCRIPT_SHA
        self.redis.register_script(script)
        self.assertEqual([True], self.redis.script_exists(sha))
        self.redis.script_flush()
        self.assertEqual([False], self.redis.script_exists(sha))

    def test_script_load(self):
        script = LPOP_SCRIPT
        sha = self.LPOP_SCRIPT_SHA
        self.assertEqual([False], self.redis.script_exists(sha))
        self.assertEqual(sha, self.redis.script_load(script))
        self.assertEqual([True], self.redis.script_exists(sha))
|
<reponame>deltapsifi/Ganitansh
import random
from math import *
import time
import sympy as sym
from PIL import Image
import pyttsx3 # have to install this and pypiwin32
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
from sympy import *
from IPython.display import display, Math, Latex
from threading import Timer
from colorama import Fore, Back, Style, init
import cv2
#def story():
def speak(text):
    """Say *text* aloud via the module-level pyttsx3 engine ``eng``.

    NOTE(review): ``eng`` must be initialised elsewhere (pyttsx3.init());
    it is not defined in this part of the file -- confirm.
    """
    eng.say(text)
    # pyttsx3's API is runAndWait(); the original called runandwait(),
    # which raises AttributeError at runtime.
    eng.runAndWait()
def basic():
    """Vedic-maths speed-drill menu: explain the rules, then loop on choices.

    Fixes from the original: a second, conflicting 3-option menu was printed
    and spoken after the real 4-option one, and ``int(input())`` crashed the
    whole program on non-numeric input.
    """
    print(Back.BLACK + Fore.RED + 'This will include questions that are to be solved using Vedic Maths in a given time limit')
    print(Fore.RED + Back.BLACK + 'These will help you develop your speed of calculation')
    speak('This will include questions that are to be solved using Vedic Maths in a given time limit and they will help you develop your speed of calculation')
    print(Fore.BLUE + Back.BLACK + 'You will have 60 seconds to attempt each question')
    speak('You will have 60 seconds to attempt each question')
    while True:
        time.sleep(1)
        print(Fore.GREEN + Back.BLACK + """1 --> Double Digit Multiplication
2 --> Multiplication of a two-digit number by 11
3 --> Finding square of double digit numbers ending with 5
4 --> Go Back""")
        # Speak the one menu that actually matches the dispatch below.
        speak('Type one of the following options. 1 Double Digit Multiplication; '
              '2 Multiplication of a two-digit number by 11; '
              '3 Finding square of double digit numbers ending with 5; 4 Go Back')
        print(Fore.GREEN + Back.BLACK + "Choose one of the following options.")
        try:
            choice = int(input())
        except ValueError:
            # Re-prompt on non-numeric input instead of crashing.
            continue
        if choice == 1:
            dd()
        elif choice == 2:
            elev()
        elif choice == 3:
            sqr5()
        elif choice == 4:
            break
def dd():
    """Vedic-maths drill: multiplying two random two-digit numbers.

    Explains the cross-multiplication trick, then asks 4 questions and
    reports a percentage score (>= 75% earns praise).
    """
    print(Back.BLACK +'Do you know about various tricks which are given by Vedic Maths?')
    speak('Do you know about various tricks which are given by Vedic Maths?')
    print(Fore.RED + Back.BLACK + """
    We are going to learn about one of the basic rules of vedic maths.""" + Fore.BLUE + Back.BLACK + """
    This involves multiplication of two digit numbers.
    Say you have any two digit numbers : ab and cd, where a is in tenth place of number "ab" and b is in ones place, similarly for "cd".
    So the trick is to take the ones digit of both numbers and multiply it (i.e. b x d) and write the ones digit of their product as the ones digit of a new number.
    And carry forward the tens place digit.
    """ )
    print(Fore.GREEN + """
    Now take the product of "a" and "d" and add it to the product of "b" and "c" (i.e. a x d + b x c).
    Now you need to add the tens place digit of the number obtained on multiplying "b" and "d".
    now take the ones place digit of the number obtained and put it in the tens place of the number and carry forward the tens place digit.
    Now multiply a and c (i.e. a x c) and add the tens place digit that was obtained in the previous step, now write the obtained number in the hundreds place of the new number.
    The number that you have now obtained is the product of ab and cd.
    """)
    # (fixed: the displayed text previously ended with a stray ")" after "cd.")
    speak("We are going to learn about one of the basic rules of vedic maths.")
    speak("This involves multiplication of two digit numbers.")
    speak("Say you have any two digit numbers : ab and cd, where a is in tenth place of number 'ab' and b is in ones place, similarly for 'cd'.")
    speak("So the trick is to take the ones digit of both numbers and multiply it (i.e. b x d) and write the ones digit of their product as the ones digit of a new number.")
    speak("And carry forward the tens place digit.")
    speak("Now take the product of 'a' and 'd' and add it to the product of 'b' and 'c'")
    speak("Now you need to add the tens place digit of the number obtained on multiplying 'b' and 'd'.")
    speak("now take the ones place digit of the number obtained and put it in the tens place of the number and carry forward the tens place digit.")
    speak("Now multiply a and c and add the tens place digit that was obtained in the previous step, now write the obtained number in the hundreds place of the new number.")
    speak("The number that you have now obtained is the product of a b and c d.")
    time.sleep(1)
    print()
    scr = 0  # number of correct answers out of 4
    while True:
        for i1 in range(4):
            # Random two-digit operands (10-98 inclusive).
            no1 = floor(random.uniform(10, 99))
            no2 = floor(random.uniform(10, 99))
            print( Style.DIM +'Find the product when',no1,'is multiplied by',no2)
            speak('Find the product on multiplication of: ')
            speak(no1)
            speak('and')
            speak(no2)
            speak("Enter the answer . If you don't know just press enter")
            right_answer = no1 * no2
            #t= Timer(60, time_up) #x is amount of allowed time in seconds then execute the provided function
            #t.start() #start the timerx = np.linspace(-2,2,100)
            ans = input('Enter the answer, if you dont know it just press enter: ')
            if ans == str(right_answer):
                #t.cancel()
                scr += 1
                print( Fore.GREEN +'Well Done! Your answer is correct')
                speak('Well Done! Your answer is correct')
            elif ans == "":
                #t.cancel()
                print(Back.BLACK + "The correct answer is {}".format(right_answer))
                speak("The correct answer is {}".format(right_answer))
            elif ans != str(right_answer):
                #t.cancel()
                print(Back.BLACK + 'Sorry, incorrect answer')
                speak('Sorry, incorrect answer')
                print(Back.BLACK + 'The correct answer is {}'.format(no1 * no2))
                speak('The correct answer is {}'.format(no1 * no2))
            print()
        break  # exactly one round of 4 questions
    print("You got {} questions right out of 4".format(scr))
    perc = ((int(scr)/4)*100)
    if perc >= 75:
        print("Which means you scored" + Fore.CYAN + " {} %".format(perc))
        print(Fore.GREEN + "WOW !! Nice Score")
    else:
        print("You have scored {} percentage".format(perc))
        print("We know that you can score better."+ Fore.RED +"You should try again")
def had():
    """Heights-and-distances quiz: four fixed trigonometry word problems.

    Answers may be plain numbers, or (for angle questions) expressed in
    radians using 'pi', e.g. 'pi/3', which is converted to degrees.
    """
    print(Fore.RED + Back.BLACK + '''
    Welcome to the Everyday Maths Module.
    Here we will learn how some basic mathematical concepts can be used in our daily life.
    We all make use of distances and heights in daily life.
    For example: Measuring the height of our shadows, Measuring distances between 2 points and much more.
    Its not practically possible to measure large distances. So, we make use of " Trigonometry " for such purposes. Also, trigonometry is majorly used in aviation. It helps in determining distances covered,heights achieved by plane when it took off at a certain angle.
    Trigonometric ratios are ratios of different sides of a right angled triangle. A right angled triangle has perpendicular(P),base(B) and hypotenuse(H).
    Let us call the angle opposite perpendicular A. So,we call the ratio between
    P/B as tanA
    B/P as cotA
    P/H as sinA
    B/H as cosA These are some basic ratios that we use in solving height and distance measurement questions. We try to analyse daily life situations and try to apply these concepts there.
    ''' )
    # Question text -> expected numeric answer (degrees for angle questions).
    lhd = {"A street light pole stands between two parallel roads. The height of the pole is 10m. From the top of the tower, the angles of depression of the roads are 45° and 30°. Find the distance between the roads.":27.32,
           "A tree of height 24 cm stands in my vicinity. On a stormy night, the tree broke and fell in such a way that the upper part remained attached to the stem, forming an angle of 30° with the ground. Find the height from where it broke.":8,
           "The shadow formed by a man on a sunny day is 1/3 times the actual height. Find the sun's angle of elevation?":60,
           "The actual height of a man is 6 feet but due to the position of Sun he casts a shadow of 4 feet. He is standing next to an electricity tower and notices that it casts a shadow of 36 feet. Find the height of the electricity tower.":54,
           }
    for i in range(4):
        hd1 = list(lhd.keys())[i]
        print(Fore.RED + Back.BLACK + "This is your question 👇", hd1, sep='\n')
        print("Please enter answer without the units")
        anshd = input("Enter the answer: ")
        cleaned = anshd.replace(' ', '').lower()
        expected = float(list(lhd.values())[i])
        try:
            if float(cleaned) == expected:
                print("You got the correct answer!")
            else:
                # Previously a wrong numeric answer produced no feedback at all.
                print("Your answer is incorrect.", "Better luck next time.", sep='\n')
        except ValueError:
            # Not a plain number: accept angle answers written in terms of pi.
            if cleaned.find('pi') != -1:
                if cleaned.find('/') != -1:
                    # NOTE(review): only a single-digit denominator is parsed
                    # (e.g. 'pi/3'); 'pi/12' would be read as 'pi/1'.
                    denom = float(cleaned[cleaned.find('/')+1:cleaned.find('/')+2])
                    degrees = 180/denom
                else:
                    # 'pi' alone previously crashed on float(''); treat it as 1*pi.
                    rest = cleaned.replace('pi', '')
                    degrees = (float(rest) if rest else 1.0)*180
                if degrees == expected:
                    print("You got the correct answer!")
                else:
                    # Previously a wrong pi-form answer was also silent.
                    print("Your answer is incorrect.", "Better luck next time.", sep='\n')
            elif cleaned == '':
                print('You did not enter anything.')
            else: print("Your answer is incorrect.", "Better luck next time.", sep='\n')
        print()
        time.sleep(1)
def ci():
    """Compound-interest quiz: four randomly generated investment questions."""
    print('''
    Let's learn about compound interest another mathematical concept used in our daily life.
    When we deposit some money (P) for some time(t) at some rate of interest(R), provided that it is compounded at periodic intervals of time this ,means that after every interval the principal of the next interval is the amount(A) at the previous interval.Thus , it gets compounded after intervals.
    Therefore, the formula for the amount is A = P(1 + R/(100n))^(nt).''' )
    listi = ["Mr. Narendra", "Reliance Industries", "Laksh"]
    for i in range(4):
        # randint(1,4) could return 4, which no branch handled, leaving
        # n/nmx/t undefined (NameError on the first hit) or stale.
        rnnr = random.randint(1,3)
        if rnnr == 1:
            n=1
            nmx='yearly'
            t = 3
        if rnnr == 2:
            n=2
            nmx="halfyearly"
            t = 2
        if rnnr == 3:
            n=4
            nmx = "quarterly"
            t = 1
        lii = random.choice(listi)
        r = random.randint(5,16)       # rate in percent
        p = random.choice(list(range(10000, 60000, 10000)))
        # A = P(1 + R/(100n))^(nt); the original omitted the /100 and
        # treated a rate like 12% as a factor of 12.
        a = lambda p,r,n,t : p*(1+r/(100*n))**(n*t)
        print("{} invests ₹{} for a period of {} years. Find the total amount if it is compounded {} at a rate of {}".format(lii,p,t,nmx,r))
        time.sleep(1)
        answerpls = float(input("Enter your answer : ₹ "))
        if answerpls == round(float(a(p,r,n,t)),2):
            print("Well Done!")
            print("You got the correct answer.")
        else:
            print("Uh-Oh, Incorrect answer.")
            # round() was previously closed too early, so the ",2" was
            # passed to format() and silently ignored.
            print("The correct answer is {}".format(round(float(a(p,r,n,t)),2)))
        print()
        time.sleep(1)
def elev():
    """Vedic-maths drill: multiplying a two-digit number by a repunit (11, 111, ...).

    Explains the digit-sum trick for x11 multiplication, then asks 4
    questions and reports a percentage score.
    """
    print(Back.BLACK +'Do you know about various tricks which are given by Vedic Maths?')
    speak('Do you know about various tricks which are given by Vedic Maths?')
    print( Fore.RED + Back.BLACK +"""
    We will learn about the a trick to ease our calculations when we multiply any 2-digit number by 11.
    When we multiply a 2-digit number for example-43 by 11,using the trick the answer comes out to be 473.
    Let us see how,
    The tens digit(4) of the multiplicant is placed at hundreds place of the product and ones digit (3) of
    the multiplicant is placed at the ones place of product.The sum of the digits of multiplicant is placed
    at tens place in the product.
    Let's take another case our numbers are 67 and 11,
    Here, we will proceed with same steps, the answer will be 737.
    But as we proceed with the 2nd step,the sum of multiplicants (6+7) is 13.So, we put the ones digit(3) at tens place
    and tens digit(1) will be added to the hundreds digit of the answer(6+1)37.
    """)
    speak('''We will learn about the a trick to ease our calculations when we multiply any 2-digit number by 11.
    When we multiply a 2-digit number for example-43 by 11,using the trick the answer comes out to be 473.
    Let us see how,
    The tens digit(4) of the multiplicant is placed at hundreds place of the product and ones digit (3) of
    the multiplicant is placed at the ones place of product.The sum of the digits of multiplicant is placed
    at tens place in the product.
    Let's take another case our numbers are 67 and 11,
    Here, we will proceed with same steps, the answer will be 737.
    But as we proceed with the 2nd step,the sum of multiplicants (6+7) is 13.So, we put the ones digit(3) at tens place
    and tens digit(1) will be added to the hundreds digit of the answer(6+1)37.''')
    time.sleep(4)
    speak('Let\'s Practice')
    print( Fore.GREEN +"Let's Practice")
    time.sleep(2)
    scr1 = 0  # number of correct answers out of 4
    while True:
        for ii in range(4):
            no3 = floor(random.uniform(11, 100))
            lol = int(input('Enter how many digits of 1 you want in the multiplier: '))
            r = '1'*lol  # repunit multiplier, e.g. lol=3 -> 111
            print( Back.BLACK + Fore.MAGENTA + 'Find the product when', no3,'is multiplied by', int(r))
            speak('Find the product when')
            speak(no3)
            # speak() takes a single argument; the original passed two
            # (speak('is multiplied by', int(r))) and raised TypeError.
            speak('is multiplied by {}'.format(int(r)))
            speak('Enter the answer. If you dont know just press enter: ')
            ans = input("Enter the answer. If you dont know just press enter:")
            # t = Timer(60, time_up) # x is amount of allowed time in seconds then execute the provided function
            # t.start() #start the timer
            rgtans = no3 * int(r)
            if ans == str(rgtans):
                # t.cancel()
                scr1 += 1
                print( Back.BLACK + 'Well Done! Your answer is correct')
                speak('Well Done! Your answer is correct')
            elif ans == "":
                print( Back.BLACK + "The correct answer is {}".format(rgtans))
                speak("The correct answer is {}".format(rgtans))
            elif ans != str(rgtans) :
                # t.cancel()
                print( Fore.RED +'Your answer is incorrect')
                speak('Your answer is incorrect')
                print( Fore.GREEN + 'The correct answer is', rgtans)
                speak('The correct answer is')
                speak(rgtans)
            print()
        break  # exactly one round of 4 questions
    print("You got {} questions right out of 4".format(scr1))
    perc1 = ((int(scr1)/4)*100)
    if perc1 >= 75:
        print("Which means you scored" + Fore.CYAN + " {} %".format(perc1))
        print(Fore.GREEN + "WOW !! Nice Score")
    else:
        print("You have scored {} percentage".format(perc1))
        print("We know that you can score better."+ Fore.RED +"You should try again")
def sqr5():
    """Vedic-maths drill: squaring two-digit numbers ending in 5.

    Explains the n5^2 = (n*(n+1)) followed by 25 rule, then asks 4
    questions and reports a percentage score.

    NOTE(review): this function drives the pyttsx3 engine ``eng`` directly
    instead of using the ``speak`` helper the sibling drills use — presumably
    a copy/paste divergence; confirm before unifying.
    """
    print(Back.BLACK +'Do you know about various tricks which are given by Vedic Maths?')
    eng.say('Do you know about various tricks which are given by Vedic Maths?')
    eng.runAndWait()
    print(Fore.RED + Back.BLACK + """
    We are going to learn about one of the basic rules of vedic maths.""" + Fore.BLUE + Back.BLACK + """
    This involves easily finding squares of 2 digit numbers ending with digit 5 .
    STEP-1
    As we all know square of any number ending with 5 must have 25 as digits on it's tens and ones places respectively.
    STEP-2
    Multiply the tens digit of the number with its successor and write down the number obtained in front of 25.
    You got the square of the number.""" )
    print(Fore.GREEN + """
    Example -
    Let the number be 65.
    So, at tens and ones digit we put 25.
    Next we multiply 6 X (6+1)
    6 X 7= 42
    So, square of 65 comes out to be 4225.""")
    eng.say("We are going to learn about one of the basic rules of vedic maths.")
    eng.runAndWait()
    eng.say("This involves easily finding squares of 2 digit numbers ending with digit 5 .")
    eng.say("STEP-1 As we all know square of any number ending with 5 must have 25 as digits on it's tens and ones places respectively.")
    eng.runAndWait()
    eng.say("STEP-2 Multiply the tens digit of the number with its successor and write down the number obtained in front of 25.You got the square of the number.")
    eng.runAndWait()
    eng.say("Example")
    eng.runAndWait()
    eng.say("Let the number be 65.So, at tens and ones digit we put 25.")
    eng.runAndWait()
    eng.say("Next we multiply 6 X (6+1) 6 X 7= 42")
    eng.runAndWait()
    eng.say("So, square of 65 comes out to be 4225.")
    eng.runAndWait()
    time.sleep(1)
    print()
    # Number of correct answers out of 4.
    scr = 0
    while True:
        for i1 in range(4):
            # randrange(15, 96, 10) yields 15, 25, ..., 95 — always ends in 5.
            no1 = floor(randrange(15,96,10 ))
            print( Style.DIM +'Find the square of',no1)
            eng.say('Find the square of ')
            eng.runAndWait()
            eng.say(no1)
            eng.runAndWait()
            eng.say("Enter the answer . If you don't know just press enter")
            eng.runAndWait()
            right_answer = no1 **2
            #t= Timer(60, time_up) #x is amount of allowed time in seconds then execute the provided function
            #t.start() #start the timerx = np.linspace(-2,2,100)
            ans = input('Enter the answer, if you dont know it just press enter: ')
            if ans == str(right_answer):
                #t.cancel()
                scr += 1
                print( Fore.GREEN +'Well Done! Your answer is correct')
                eng.say('Well Done! Your answer is correct')
                eng.runAndWait()
            elif ans == "":
                # Blank answer: reveal the answer, no score change.
                #t.cancel()
                print(Back.BLACK + "The correct answer is {}".format(right_answer))
                eng.say("The correct answer is {}".format(right_answer))
                eng.runAndWait()
            elif ans != str(right_answer):
                #t.cancel()
                print(Back.BLACK + 'Sorry, incorrect answer')
                eng.say('Sorry, incorrect answer')
                eng.runAndWait()
                print(Back.BLACK + 'The correct answer is {}'.format(no1**2))
                eng.say('The correct answer is {}'.format(no1 **2))
                eng.runAndWait()
            print()
        # Exactly one round of 4 questions.
        break
    print("You got {} questions right out of 4".format(scr))
    perc = ((int(scr)/4)*100)
    if perc >= 75:
        print("Which means you scored" + Fore.CYAN + " {} %".format(perc))
        print(Fore.GREEN + "WOW !! Nice Score")
    else:
        print("You have scored {} percentage".format(perc))
        print("We know that you can score better."+ Fore.RED +"You should try again")
# def linear():
# print(Fore.RED + 'These questions will test your basic knowledge of Linear Equations')
# print(Fore.RED + 'A linear equation of the form ax + by + c will be given')
# eng.say('These questions will test your basic knowledge of Linear Equations. A linear equation of the form ax + by + c will be given')
# eng.runAndWait()
# print(Style.DIM + 'You have to find the value of x only')
# eng.say('You have to find the value of x only')
# eng.runAndWait()
# time.sleep(2)
# scr2 = 0
# while True:
# for i in range(4):
# num1 = floor(random.uniform(-10, 10))
# num2 = floor(random.uniform(-10, 10))
# con = floor(random.uniform(-10, 10))
# var = floor(random.uniform(-10, 10))
# sym.init_printing()
# x,y = sym.symbols('x,y')
# a = sym.Eq(num1*x + num2*y + con,0)
# b = sym.Eq((num1+var)*x + num2*var*y + (con/var),0)
# d = {}
# d = sym.solve([a,b],(x,y))
# print( Style.DIM + 'equation 1 is', num1,x, '+', num2,y, '+', con, '=', '0')
# eng.say('equation 1 is')
# eng.runAndWait()
# eng.say(num1)
# eng.runAndWait()
# eng.say('x plus')
# eng.runAndWait()
# eng.say(num2)
# eng.runAndWait()
# eng.say('y plus')
# eng.runAndWait()
# eng.say(con)
# eng.runAndWait()
# eng.say('equal to zero')
# eng.runAndWait()
# print( Back.BLACK + 'equation 2 is', (num1+var),x, '+', (num2*var)+y, '+', con/var, '=', '0')
# print( Fore.CYAN + 'Enter the value of x')
# #display(Math(r'nudef cg():
# print( Back.BLACK + 'These questions will test your basic knowledge of Coordinate Geometry more specifically the section formula')
# print( Back.BLACK + 'The coordinates of the two end points and the ratio with which the line is internally divided will be given')
# print( Back.BLACK + 'You have to find the coordinates of the point which divides the line internally in the given ratio')
# scr3 = 0
# while True:
# for i2 in range(4):
# x1 = floor(random.uniform(-10, 10))
# x2 = floor(random.uniform(-10, 10))
# y1 = floor(random.uniform(-10, 10))
# y2 = floor(random.uniform(-10, 10))
# m = floor(random.uniform(-10, 10))
# n = floor(random.uniform(-10, 10))
# lmb = lambda x1,x2,m,n : (x1*n + x2*m)/m+n
# lmb1 = lambda y1,y2,m,n : (y1*n + y2*m)/m+n
# print(Fore.CYAN + 'coordinates of point1 are', '(',x1,',',y1,')')
# print( Fore.CYAN + 'coordinates of point2 are', '(',x2,',',y2,')')
# print( Back.BLACK +'The line is internally divided in a ratio of m:n where m = {} and n = {}'.format(m,n))
# x3 = int(input('Enter the x coordinate of point '))
# y3 = int(input('Enter the y coordinate of point '))
# kaw = floor(lmb(x1,x2,m,n))
# koo = floor(lmb1(y1,y2,m,n))
# t= Timer(120, time_up) #x is amount of allowed time in seconds then execute the provided function
# t.start() #start the timer
# if x3 == kaw and y3 == koo:
# t.cancel()
# scr3 += 1
# print( Fore.GREEN + 'Good Job! Your answer is correct')
# eng.say('Good Job! Your answer is correct')
# eng.runAndWait()
# else :
# t.cancel()
# print( Style.DIM + 'Your answer is incorrect')
# eng.say('Your answer is incorrect')
# eng.runAndWait()
# print( Fore.YELLOW + 'The correct answer is, x coordinate = {} and y coordinate = {}'.format(kaw, koo))
# eng.say('The correct answer is, x coordinate = {} and y coordinate = {}'.format(kaw, koo))
# eng.runAndWait()
# print()
# time.sleep(5)
# break
# print('You got {} questions right out of 4'.format(scr3))
# perc3 = int((scr3)/4)*100
# if perc3 >= 75:
# print("Which means you scored" + Fore.CYAN + {} + "percentage".format(perc3))
# print(Fore.GREEN + "WOW !! Nice Score")
# else:
# print("You have scored {} percentage".format(perc3))
# print("We know that you can score better."+ Fore.RED +"You should try again")
# m1 x + num2 y + con = 0'))
# # num1,x, '+', num2,y, '+', con, '=', '0')
# eng.say('equation 2 is')
# eng.runAndWait()
# eng.say(num1+var)
# eng.runAndWait()
# eng.say('x plus')
# eng.runAndWait()
# eng.say(num2*var)
# eng.runAndWait()
# eng.say('y plus')
# eng.runAndWait()
# eng.say(con/var)
# eng.runAndWait()
# eng.say('equal to zero')
# eng.runAndWait()
# eng.say("Enter the value of x. If you don't know the answer press space")
# eng.runAndWait()
# inp = int(input())
# k = floor(d[x])
# #t= Timer(120, time_up) #x is amount of allowed time in seconds then execute the provided function
# #t.start() #start the timer
# if inp == k:
# #t.cancel()
# scr2 += 1
# eng.say('Enter the value of x and wait for 2 minutes')
# eng.runAndWait()
# print( Fore.GREEN +'Good Job! Your answer is correct')
# elif inp == "":
# print(Fore.MAGENTA+"The correct answer is {}".format(k))
# eng.say("The correct answer is {}".format(k))
# eng.runAndWait()
# else :
# #t.cancel()
# print( Fore.RED + 'Your answer is incorrect')
# eng.say('Your answer is incorrect')
# eng.runAndWait()
# print( Back.BLACK + 'The correct answer is', k)
# eng.say('The correct answer is' + str(floor(d[x])))
# eng.runAndWait()
# eng.say(floor(d[x]))
# eng.runAndWait()
# print()
# time.sleep(2)
# break
# print('You got {} questions right out of 4'.format(scr2))
# perc2 = int((scr2)/4)*100
# if perc2 >= 75:
# print("Which means you scored" + Fore.CYAN + {} + "percentage".format(perc2))
# print(Fore.GREEN + "WOW !! Nice Score")
# else:
# print("You have scored {} percentage".format(perc2))
# print("We know that you can score better."+ Fore.RED +"You should try again")
def cg():
    """Coordinate-geometry quiz: four section-formula questions.

    Generates two random endpoints and a ratio m:n; the user must supply
    the (floored) coordinates of the internal division point.
    """
    print("Do you know about the method to calculate the coordinates of any point dividing a line in a particular ratio?")
    speak("Do you know about the method to calculate the coordinates of any point dividing a line in a particular ratio?")
    time.sleep(2)
    print("""Consider a line with the coordinates of the end points as (x1, y1) and (x2, y2), which is divided in a ratio m:n by a point (x3, y3).
    Now there is a formula known as the section formula :- x3 = (m*x2 + n*x1)/(m + n), similarly in order to find y3 just replace "x" with "y".
    """)
    speak("""Consider a line with the coordinates of the end points as (x1, y1) and (x2, y2), which is divided in a ratio m:n by a point (x3, y3).
    Now there is a formula known as the section formula :- x3 = (m*x2 + n*x1)/(m + n), similarly in order to find y3 just replace "x" with "y".
    """)
    print(Back.BLACK + "These questions will test your basic knowledge of Coordinate Geometry more specifically the section formula")
    print(Back.BLACK + "The coordinates of the two end points and the ratio with which the line is internally divided will be given")
    print(Back.BLACK + "You have to find the coordinates of the point which divides the line internally in the given ratio")
    speak('These questions will test your basic knowledge of Coordinate Geometry more specifically the section formula. The coordinates of the two end points and the ratio with which the line is internally divided will be given. You have to find the coordinates of the point which divides the line internally in the given ratio')
    scr3 = 0  # number of correct answers out of 4
    while True:
        for i2 in range(4):
            x1 = floor(random.uniform(-10, 10))
            x2 = floor(random.uniform(-10, 10))
            y1 = floor(random.uniform(-10, 10))
            y2 = floor(random.uniform(-10, 10))
            m = floor(random.uniform(-10, 10))
            n = floor(random.uniform(-10, 10))
            while m + n == 0:
                # m + n == 0 would divide by zero in the section formula.
                m = floor(random.uniform(-10, 10))
                n = floor(random.uniform(-10, 10))
            # Section formula; the original read (x1*n + x2*m)/m+n, which by
            # operator precedence computed ((x1*n + x2*m)/m) + n — wrong.
            lmb = lambda x1,x2,m,n : (x1*n + x2*m)/(m+n)
            lmb1 = lambda y1,y2,m,n : (y1*n + y2*m)/(m+n)
            print(Fore.CYAN + "coordinates of point1 are", "(",x1,",",y1,")")
            print( Fore.CYAN + "coordinates of point2 are", "(",x2,",",y2,")")
            print( Back.BLACK +"The line is internally divided in a ratio of m:n where m = {} and n = {}".format(m,n))
            x3 = int(input('Enter the x coordinate of point '))
            y3 = int(input('Enter the y coordinate of point '))
            kaw = floor(lmb(x1,x2,m,n))
            koo = floor(lmb1(y1,y2,m,n))
            # Timer disabled: time_up is not defined anywhere in this module,
            # so a firing timer would raise NameError (siblings also keep it
            # commented out).
            #t= Timer(120, time_up)
            #t.start()
            if x3 == kaw and y3 == koo:
                scr3 += 1
                print( Fore.GREEN + 'Good Job! Your answer is correct')
                speak('Good Job! Your answer is correct')
            else :
                print( Style.DIM + 'Your answer is incorrect')
                speak('Your answer is incorrect')
                print( Fore.YELLOW + 'The correct answer is, x coordinate = {} and y coordinate = {}'.format(kaw, koo))
                speak('The correct answer is, x coordinate = {} and y coordinate = {}'.format(kaw, koo))
            print()
            time.sleep(1)
        break  # exactly one round of 4 questions
    print("You got {} questions right out of 4".format(scr3))
    perc3 = ((int(scr3)/4)*100)
    if perc3 >= 75:
        print("Which means you scored" + Fore.CYAN + " {} %".format(perc3))
        print(Fore.GREEN + "WOW !! Nice Score")
    else:
        print("You have scored {} percentage".format(perc3))
        print("We know that you can score better."+ Fore.RED +"You should try again")
def mod():
    """Intermediate-level menu: linear equations, coordinate geometry, graphs."""
    print(Fore.YELLOW + 'There will be a total of 4 questions')
    print(Fore.YELLOW + 'You will have 2 minutes to solve each question, after the time ends the answer will be displayed.')
    print(Back.BLACK + 'While answering the question enter the closest integer value')
    print()
    keep_running = True
    while keep_running:
        print(Fore.YELLOW + 'Choose one of the following options.')
        print(Fore.RED + '''1 --> Linear Equations
        2 --> Coordinate Geometry (section formula)
        3 --> Visualise Equations
        4 --> Go Back''')
        selection = int(input())
        if selection == 1:
            into()  # ; linear()
        elif selection == 2:
            cg()
        elif selection == 3:
            visualise_eqn1()
            visualise_eqn2()
        elif selection == 4:
            keep_running = False
def calculus():
    """Differentiation MCQ quiz: four questions on a random symbolic f(x).

    Builds f(x) = c1*exp(c2*x^c1)*sin(c3*x^c2) + cos(x) from random small
    coefficients and asks the user to pick its derivative from 3 options.
    """
    print( Back.BLACK + 'These questions will test your basic knowledge of differentiation')
    print( Style.DIM + 'A function will be given, you will have to find the derivative of that function')
    scr4 = 0  # number of correct answers out of 4
    while True:
        for i5 in range(4):
            coeff1 = floor(random.uniform(-3, 3))
            coeff2 = floor(random.uniform(-3, 3))
            coeff3 = floor(random.uniform(-3, 3))
            x = symbols('x')
            init_printing(use_unicode=True)
            expression = coeff1*exp(coeff2*x**coeff1)*sin(coeff3*x**coeff2)+cos(x)
            correct = diff(expression)  # computed once; reused below
            l =[]
            l.append(correct)
            l.append(coeff1*correct+coeff1)   # distractor
            l.append(correct/coeff2+coeff3)   # distractor
            op1 = random.choice(l)
            l.remove(op1)
            op2 = random.choice(l)
            l.remove(op2)
            op3 = l[0]
            print( Back.BLACK + "Find the derivative of f(x) =" ,expression)
            # speak() takes one argument; the original passed two and
            # raised TypeError on every question.
            speak("Find the derivative of f(x) = {}".format(expression))
            print( Fore.CYAN + "Choose the correct option. If you don't know just press enter")
            print(Back.BLACK +'1 -->', op1)
            print(Back.BLACK +'2 -->', op2)
            print(Back.BLACK +'3 -->', op3)
            speak("Choose the correct option. If you don't know just press enter")
            it = input()
            if it == "":
                # The placeholder was missing from this format string, so the
                # answer never actually appeared in the message.
                print("The correct option is {}".format(correct))
                print()
                time.sleep(1)
                continue
            # Map the choice; anything unrecognised counts as wrong instead
            # of leaving `annn` unbound (was: NameError).
            annn = {"1": op1, "2": op2, "3": op3}.get(it)
            if annn == correct:
                scr4 += 1
                print('Good Job! your answer is correct')
                speak('Good Job! your answer is correct')
            else:
                print('Sorry, incorrect answer')
                print("The correct answer is", correct)
                speak('Sorry, incorrect answer')
            print()
            time.sleep(1)
        break  # exactly one round of 4 questions
    print("You got {} questions right out of 4".format(scr4))
    perc4 = ((int(scr4)/4)*100)
    if perc4 >= 75:
        print("Which means you scored" + Fore.CYAN + " {} %".format(perc4))
        print(Fore.GREEN + "WOW !! Nice Score")
    else:
        print("You have scored {} percentage".format(perc4))
        print("We know that you can score better."+ Fore.RED +"You should try again")
def quad():
    """Quadratic-equations quiz: four random equations a*x^2 + b*x + c = 0.

    The user enters either root (floored to the nearest integer, or using
    'I' for the imaginary unit when the roots are complex).
    """
    print("""A quadratic equation is an algebraic equation which can be rearranged in the following standard form: ax^2 + bx + c.
    The solution to such equations can be easily found by using <NAME>'s formula,
    which goes, x = (-b + sqrt(b^2 - 4ac))/(2a) and x = (-b - sqrt(b^2 - 4ac))/(2a).""")
    print()
    time.sleep(1)
    print(Style.DIM + 'These questions will test your basic knowledge of Quadratic Equations')
    print(Back.BLACK + Fore.RED + 'An Equations of the form ax^2 + bx + c = 0 will be given')
    print( Back.BLACK +'You have to find the value of x and input it in the closest possible integer')
    scr5 = 0  # number of correct answers out of 4
    while True:
        for i3 in range(4):
            a1 = floor(random.uniform(-10, 10))
            while a1 == 0:
                # a1 == 0 makes the equation linear: sym.solve then returns a
                # single root and indexing the second one raised IndexError.
                a1 = floor(random.uniform(-10, 10))
            b1 = floor(random.uniform(-10, 10))
            co = floor(random.uniform(-10, 10))
            sym.init_printing()
            x = sym.symbols('x')
            rr = sym.Eq(a1*(x**2) + b1*x + co,0)
            sols = sym.solve([rr],(x))  # renamed from `dict` (shadowed the builtin)
            print( Back.BLACK + 'equation is', a1,'x^2', '+', '('+str(b1)+')','x', '+', '('+str(co)+')', '=', '0')
            answer = str(input("Enter the value of x, if i(iota) is coming in the solution then write it as 'I'. If you don't know just press enter : "))
            try:
                hehe = floor(sols[0][0])
                huh = floor(sols[1][0])
            except TypeError:
                # Complex roots cannot be floored; compare against the exact
                # symbolic values instead (the prompt asks for 'I' notation).
                hehe = sols[0][0]
                huh = sols[1][0]
            # The three outcomes are mutually exclusive. The original used
            # independent if-statements, so a correct non-empty answer also
            # fell into the final else and printed "Your answer is incorrect".
            if answer == str(hehe) or answer == str(huh):
                scr5 += 1
                print( Back.BLACK + 'Good Job! Your answer is correct')
            elif answer == "":
                print("The correct answer is", hehe, "or", huh)
            else:
                print(Back.BLACK + 'Your answer is incorrect')
                print("The correct answer is", hehe, "or", huh)
            print()
            time.sleep(2)
        break  # exactly one round of 4 questions
    print("You got {} questions right out of 4".format(scr5))
    perc5 = ((int(scr5)/4)*100)
    if perc5 >= 75:
        print("Which means you scored" + Fore.CYAN + " {} %".format(perc5))
        print(Fore.GREEN + "WOW !! Nice Score")
    else:
        print("You have scored {} percentage".format(perc5))
        print("We know that you can score better."+ Fore.RED +"You should try again")
def adv():
    """Advanced-level menu: calculus and quadratic-equation quizzes."""
    print('There will be a total of 4 questions')
    print('You will have 2 minutes and 30 seconds to answer each question')
    print('While answering the question enter the closest integer value')
    running = True
    while running:
        print(Back.BLACK + 'Choose one of the following options')
        print( Fore.RED + '''1 --> Calculus
    2 --> Quadratic Equations
    3 --> Go Back''') # 2 --> Quadratic Equations;
        picked = int(input())
        if picked == 1:
            calculus()
        elif picked == 2:
            quad()
        elif picked == 3:
            running = False
def evd():
    """Everyday-maths menu: heights & distances and compound interest."""
    print('There will be a total of 4 questions')
    while True:
        print(Back.BLACK + 'Choose one of the following options')
        print("""1 --> Heights and Distances
    2 --> Compound Interest
    3 --> Go Back""")
        picked = int(input())
        if picked == 3:
            break
        # Dispatch to the selected quiz; any other number re-shows the menu.
        handler = {1: had, 2: ci}.get(picked)
        if handler is not None:
            handler()
def into():
    """Introduce algebraic equations and variables, then show example images.

    If the user already knows what an equation is, jumps straight to
    visualise_eqn1(); otherwise explains variables first. In both cases the
    example-image slides (cv2 windows, 6 seconds each) are shown at the end.
    """
    speak("Do you know what an algebraic equation is ")
    intro_ques = input("Do you know what an algebraic equation is (yes/no): ")
    if intro_ques == "yes":
        print( Style.DIM + "Ok then lets directly move to visualsing linear equations")
        visualise_eqn1()
    if intro_ques == "no":
        print( Fore.GREEN+ "well its basically just a simple equation with variables")
        speak("well its basically just a simple equation with variables")
        speak("Do you know what variables are")
        intro_ques1 = input("Do you know what variables are (yes/no): ")
        if intro_ques1 == "no":
            print( Back.BLACK + """
    In simple terms variables is something whose value can change(or vary).
    Let me explain you this in simpler terms for example Ram has 5 apples. Now if he will never eat those apples the quantity of those apples will never change
    Those apples are 5 and will remain 5 untill someone eats them. So here we have a constant 5 But now Shyam comes
    and he says that he has "some" apples now how can we define that "some" we don't have any definate to put in the place
    of "some" so "some" is a variable quantity it can be 2,3,4,5.... anything.
    """)
            speak("In simple terms variables is something whose value can't change.")
            speak("""Let me explain you this in simpler terms for
    example Ram has 5 apples. Now if he will never eat those apples the quantity of those apples will never change
    Those apples are 5 and will remain 5 untill someone eats them. So here we have a constant 5 But now Shyam comes
    and he says that he has "some" apples now how can we define that "some" we don't have any definate to put in the place
    of "some". So "some" is a variable quantity it can be 2,3,4,5.... anything.
    """ )
        print( Back.BLACK + """Now if we ask ourself,how we can we identify variables,
    well just ask yourself, can we define that particular value? if yes then it's a constant else a variable""")
        speak("Now if we ask ourself,how we can we identify variables, well just ask yourself, can we define that particular value? if yes then it's a constant else a variable")
    print(Fore.CYAN + "Lets move to ")
    speak("Lets move to")
    print("examples of linear equations. :")
    speak("examples of linear equations. ")
    ln_examples = cv2.imread('Types-of-linear-equation.png',1)
    not_ln_examples = cv2.imread('not-linear-equations.png',1)
    cv2.imshow("Types-of-linear-equation",ln_examples)
    k = cv2.waitKey(33)
    # (fixed: the printed message was missing the word "close")
    print("These are the examples of types of linear equation. The window will close automatically after 6 seconds ")
    speak("These are the examples of types of linear equation. The window will close automatically after 6 seconds")
    time.sleep(6)
    cv2.destroyWindow("Types-of-linear-equation")
    cv2.imshow("Not linear equations",not_ln_examples)
    k = cv2.waitKey(33)
    print("They are not linear equation. The window will close automatically after 6 seconds")
    speak("They are not linear equation. The window will close automatically after 6 seconds")
    time.sleep(6)
    cv2.destroyWindow("Not linear equations")
    # examples to be added using image or anything
# examples to be added using image or anything
def img():
    """Show the coordinate-table image (table.png) for 6 seconds, then close it."""
    window_title = "How to get coordinates of linear equation"
    table_image = cv2.imread("table.png", 1)
    cv2.imshow(window_title, table_image)
    k = cv2.waitKey(33)
    notice = "This table explains, how we have to find coordinates. The window will close automatically after 6 seconds"
    print(notice)
    speak(notice)
    time.sleep(6)
    cv2.destroyWindow(window_title)
def visualise_eqn1():
    """Explain how to tabulate points for the line y = 2x + 3 (printed + spoken)."""
    opener = "Lets say if we want a graph of y = 2x+3. How to make that ?"
    print( Back.BLACK + opener)
    speak(opener)
    print( Fore.YELLOW + """
    we basically see variation in x with respect to y. This means that we put y = 2x+3 and we will put random value to x like 1,2,3,4 .... and will see what is the value of y.
    When y becomes 0 we say that the equation satisfies and we have a zero for that equation.
    """)
    speak("we basically see variation in x with respect to y. This means that we put y = 2x+3 and we will put random value to x like 1,2,3,4 .... and will see what is the value of y. When y becomes 0 we say that the equation satisfies and we have a zero for that equation.")
    # Show the worked coordinate table before summarising the plot.
    img()
    print("So we plot using these points/coordinates we found i.e. (0,3), (1,5), (2,7) and (3,9). We join these points and we get the graph of the equation")
    speak("So we plot using these points/coordinates we found that is 0 comma 3 1 comma 5 2 comma 7 and 3 comma 9 . We join these points to get the graph of the equation")
def visualise_eqn2():
    """Plot y = 2x + 3, mark its zero and y-intercept, save it and show the image.

    Fixes vs. the previous version:
    - the figure is saved as GRAPH.jpeg, so the user-facing messages now name
      GRAPH.jpeg instead of the non-existent GRAPH.png;
    - the local image variable no longer shadows the module-level img() function;
    - unused locals (roots, fig, k) removed;
    - uses speak() like the rest of the file instead of raw eng.say/runAndWait.
    """
    x = np.linspace(-2, 2, 100)
    y = 2 * x + 3                # the line being visualised
    x1 = 0
    y1 = 2 * x1 + 3              # y-intercept: (0, 3)
    x2 = -3 / 2
    y2 = 0                       # zero of the equation: 2x + 3 = 0 -> (-1.5, 0)
    # centre the axes at the origin so both marked points are easy to read
    plt.figure(figsize=(12, 7))
    ax = plt.gca()
    ax.spines['top'].set_color('none')
    ax.spines['left'].set_position('zero')
    ax.spines['right'].set_color('none')
    ax.spines['bottom'].set_position('zero')
    plt.plot(x, y, 'purple')
    plt.plot(x1, y1, marker="o")
    plt.plot(x2, y2, marker="o")
    plt.grid()
    plt.savefig("GRAPH.jpeg")
    time.sleep(2)
    print("If you don't see a graph pop out please check the folder for a file named GRAPH.jpeg")
    speak("If you don't see a graph pop out please check the folder for a file named GRAPH.jpeg")
    print(Fore.RED + """
    The orange dot represents the zero of the linear equation here the value of whole equation becomes 0 the blue dot just represents
    the point where the line intersects with Y axis. After you have viewed the program please close it to move forward""")
    speak("The orange dot represents the zero of the linear equation here the value of whole equation becomes 0 the blue dot just represents the point where the line intersects with Y axis.After you have viewed the program please close it to move forward")
    graph_img = cv2.imread("GRAPH.jpeg", 1)
    cv2.imshow("GRAPH.jpeg", graph_img)
    print("This is how the graph will look like. The window will close automatically after 6 seconds")
    speak("This is how the graph will look like. The window will close automatically after 6 seconds")
    cv2.waitKey(33)  # pump the GUI event loop so the window paints
    time.sleep(6)
    cv2.destroyWindow("GRAPH.jpeg")
# eng.say("Do you want to plot a of your own equation:")
# eng.runAndWait()
# user_input()
# def user_input(): # this is how you do recusrsion babyyyy
# inp1 = input("Do you want to plot a of your own equation(yes/no): ")
# if inp1 == "":
# user_input()
# if inp1 == "yes":
# print( Back.BLACK + """
# Write your equation using following rules or the application will not work and may crash
# **Rules**
# Use * for multiplication example 4x = 4*x and (4)(3) = 4*3
# Use ** for exponent example if you want to represent x² = x**2 ; for 2x² = 2*x**2
# use / for divisiond
# use + for addition and - for subtraction
# Write equation in one variable using only 'x'
# Do not use equal to sign, just write LHS part of that equation.
# """) # cahnge colour for last 2 . To highlight them
# eng.say("Write your equation using following rules or the application will not work and may crash")
# eng.runAndWait()
# eng.say("Rules")
# eng.runAndWait()
# eng.say("Use astericks for multiplication")
# eng.runAndWait()
# eng.say("use slash for division")
# eng.runAndWait()
# eng.say("use plus for addition and minus for subtraction")
# eng.runAndWait()
# eng.say("Write equation in one variable using only 'x'")
# eng.runAndWait()
# eng.say("Do not use equal to sign, just write LHS part of that equation.")
# eng.runAndWait()
# x = np.linspace(-2,2,100)
# Y = input("enter your equation here using above rules: ")
# plt.plot(x,Y,color="Brown",title="Your Equation's plot")
# plt.grid()
# plt.show(block=True)
# plt.savefig("Your-Graph.png")
# your_graph = Image.open("Your-Graph.png")
# your_graph.show()
# print("If you don't see a graph pop out please check the folder for a file named GRAPH.png")
# eng.say("If you don't see a graph pop out please check the folder for a file named GRAPH.png")
# eng.runAndWait()
# if inp1 == "no":
# pass
def time_up():
    """Announce (print + speech) that the answer time limit has expired.

    Picks one of three equivalent phrasings at random. The unused local
    `answer = None` from the previous version was removed.
    """
    messages = (
        'You failed to input an answer within the time limit',
        "You failed to answer within the given time limit",
        'You took too long to answer the time is over',
    )
    r = random.choice(messages)
    print(r)
    speak(r)
def welcome():
    """Print the cyan-on-black "WELCOME" ASCII-art banner."""
    print(Fore.CYAN+Back.BLACK+
    """
    ██╗ ██╗███████╗██╗ ██████╗ ██████╗ ███╗ ███╗███████╗
    ██║ ██║██╔════╝██║ ██╔════╝██╔═══██╗████╗ ████║██╔════╝
    ██║ █╗ ██║█████╗ ██║ ██║ ██║ ██║██╔████╔██║█████╗
    ██║███╗██║██╔══╝ ██║ ██║ ██║ ██║██║╚██╔╝██║██╔══╝
    ╚███╔███╔╝███████╗███████╗╚██████╗╚██████╔╝██║ ╚═╝ ██║███████╗
    ╚══╝╚══╝ ╚══════╝╚══════╝ ╚═════╝ ╚═════╝ ╚═╝ ╚═╝╚══════╝
    """
    )
def game_over():
    """Print the cyan-on-black "GAME OVER" ASCII-art banner."""
    print(Fore.CYAN + Back.BLACK +
    """
    ▄██████▄ ▄████████ ▄▄▄▄███▄▄▄▄ ▄████████ ▄██████▄ ▄█ █▄ ▄████████ ▄████████
    ███ ███ ███ ███ ▄██▀▀▀███▀▀▀██▄ ███ ███ ███ ███ ███ ███ ███ ███ ███ ███
    ███ █▀ ███ ███ ███ ███ ███ ███ █▀ ███ ███ ███ ███ ███ █▀ ███ ███
    ▄███ ███ ███ ███ ███ ███ ▄███▄▄▄ ███ ███ ███ ███ ▄███▄▄▄ ▄███▄▄▄▄██▀
    ▀▀███ ████▄ ▀███████████ ███ ███ ███ ▀▀███▀▀▀ ███ ███ ███ ███ ▀▀███▀▀▀ ▀▀███▀▀▀▀▀
    ███ ███ ███ ███ ███ ███ ███ ███ █▄ ███ ███ ███ ███ ███ ███ █▄ ▀███████████
    ███ ███ ███ ███ ███ ███ ███ ███ ███ ███ ███ ███ ███ ███ ███ ███ ███
    ████████▀ ███ █▀ ▀█ ███ █▀ ██████████ ▀██████▀ ▀██████▀ ██████████ ███ ███
    ███ ███
    """
    )
def main():
    """Run the top-level menu loop until the player chooses option 5 (quit).

    Fixes vs. the previous version:
    - `if c == 5` was missing its colon (a SyntaxError that prevented the
      whole script from running);
    - the independent `if` chain is now `elif` (one option per iteration);
    - a non-numeric menu entry no longer crashes `int(input())`.
    """
    while True:
        print(Fore.YELLOW + "Choose one of the following options.")
        print(Fore.RED + '''1 --> Basic Level (VEDIC MATHS)
2 --> Moderate Level(6TH -8TH STANDARD)
3 --> Advance Level(9TH TO 12TH STANDARD)
4 --> Everyday Mathematics
5 --> End Game''')
        try:
            c = int(input())
        except ValueError:
            print(Fore.RED + 'Please enter a number from 1 to 5.')
            continue
        if c == 1:
            basic()
        elif c == 2:
            mod()
        elif c == 3:
            adv()
        elif c == 4:  # scr+scr1+scr2+scr3+scr4+scr5 is not working so removed
            evd()
        elif c == 5:
            print( Fore.RED + 'You have successfully exited the game.')
            speak("exit")
            break
if __name__ == '__main__':
    # text to speech engine
    # NOTE(review): init() is presumably colorama.init() (Fore/Back are used
    # throughout) — confirm against the import block at the top of the file.
    init()
    eng = pyttsx3.init() # not making a function
    volume = eng.getProperty('volume') # volume
    print(Fore.RED + "Your current volume level is:" + str(volume))
    vol_inp = float(input("We recommend you to set volume to max. Max volume is 1 and minimum is 0 you can choose either of them or you can choose between them using decimal: "))
    eng.setProperty('volume', vol_inp) # max volume is 1.0
    eng.setProperty('rate',132)  # speech rate in words per minute
    welcome()
    main()
    game_over()
|
# -*- coding: utf-8 -*-
"""Untitled4.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1jf4Y6d9_9hy0bUGm0I-xlzdKOJO0YVag
"""
import numpy as np
import matplotlib.pyplot as plt
def perpendicular(x_val, y_val):
    """
    Compute the intercepts and slope of the perpendicular bisector of a segment.

    Parameters
    ----------
    x_val, y_val : two-element sequences giving the endpoint coordinates of
        the segment (x_val = [x1, x2], y_val = [y1, y2]).

    Return
    ----------
    cx, cy : x- and y-intercepts of the perpendicular bisector.
    slope : slope of the perpendicular bisector.
    """
    # midpoint of the segment — the bisector passes through it
    mid_x = (x_val[0] + x_val[1]) / 2
    mid_y = (y_val[0] + y_val[1]) / 2
    # bisector slope is the negative reciprocal of the segment slope
    seg_slope = (y_val[1] - y_val[0]) / (x_val[1] - x_val[0])
    bis_slope = -1 / seg_slope
    cy = mid_y - bis_slope * mid_x   # y-intercept (x = 0)
    cx = mid_x - mid_y / bis_slope   # x-intercept (y = 0)
    return cx, cy, bis_slope
def slope(x_val, y_val):
    """
    Compute the slope of a line segment.

    Parameters
    ----------
    x_val, y_val : two-element sequences giving the endpoint coordinates of
        the segment (x_val = [x1, x2], y_val = [y1, y2]).

    Return
    ----------
    Slope of the segment (rise over run).
    """
    rise = y_val[1] - y_val[0]
    run = x_val[1] - x_val[0]
    return rise / run
def base_circle_centre(m, y, c):
    """
    Compute where the perpendicular bisector crosses the horizontal base line.

    Parameters
    ----------
    m : slope of the bisector.
    y : y-coordinate of the base line.
    c : y-intercept of the bisector.

    Return
    ----------
    x-coordinate of the intersection (solves y = m*x + c for x).
    """
    x = (y - c) / m
    return x
def circle_intersection(x_val, y_val, x1, y1, y0, r):
    """
    Find the left-most intersection of a line with the circle.

    The line is parallel to the O'A segment (same slope) and has y-intercept y0.

    Parameters
    ----------
    x_val, y_val : endpoint coordinates of the O'A segment.
    x1, y1 : centre of the circle.
    y0 : y-intercept of the line.
    r : radius of the circle.

    Return
    ----------
    xf, yf : coordinates of the intersection.
    deg : angle in degrees between the y-axis and the line joining the
        origin to (xf, yf).
    """
    m0 = slope([x_val[0], x_val[1]], [y_val[0], y_val[1]])
    # substituting y = m0*x + y0 into the circle equation gives a quadratic
    # whose roots are (-a ± b) / c
    a = m0 * (y0 - y1) - x1
    c = m0 * m0 + 1
    b = np.sqrt(np.square(a) - (x1 * x1 + (y0 - y1) ** 2 - r * r) * c)
    xf = min((-a + b) / c, (-a - b) / c)  # keep the left-most root
    yf = m0 * xf + y0
    # angle of (xf, yf) measured from the positive y-axis
    deg = np.arccos(yf / np.sqrt(yf ** 2 + xf ** 2)) * 180 / np.pi
    return xf, yf, deg
def plot_circle_diagram(ax, i_A=11, i_B=100, pfA=0.2, pfB=0.4, w_o=18920, w_sv=27172.17, scaler = 1, x=0.5):
    """
    Draw the circle diagram of an induction motor from test data.

    Paremeters
    ----------
    ax : Matplotlib axis on which the diagram will be drawn.
    i_A : no load current.
    i_B : short circuit current.
    pfA : power factor of no load test.
    pfB : power factor of short circuit test.
    w_o : power rating.
    w_sv : power consumed.
    scaler : divisor applied to both current phasors (was previously
        overwritten to 1 inside the body, silently ignoring the argument —
        now honoured; the default of 1 preserves old behaviour).
    x : rated cu loss factor.

    Return
    --------
    None, the function draws the diagram on the axis in the window.
    """
    org = [0, 0]
    # current phasors decomposed into (reactive, active) components
    iA = np.array([i_A * (np.sqrt(1-np.square(pfA))), i_A * pfA]) / scaler
    iB = np.array([i_B * (np.sqrt(1-np.square(pfB))), i_B * pfB]) / scaler
    lim = 1.5 * np.sqrt(iB[0]**2 + iB[1]**2)   # plot limits with head-room
    ax.set_xlim([0, lim])
    ax.set_ylim([0, lim])
    ax.plot([org[0], iA[0]],[org[1], iA[1]])
    ax.plot([org[0], iB[0]],[org[1], iB[1]])
    ax.plot([iA[0], iB[0]],[iA[1], iB[1]])
    ax.annotate('O\'', (iA[0], iA[1]))
    ax.annotate('A', (iB[0], iB[1]))
    ax.axhline(y= iA[1], xmin=iA[0]/lim, xmax=1, linestyle=':')
    # perpendicular bisector of O'A locates the circle centre on the base line
    cx, cy, m = perpendicular([iA[0], iB[0]], [iA[1], iB[1]])
    ax.plot([0, cx], [cy, 0], linestyle="--")
    y1 = iA[1]
    x1 = cx * (1 - (y1/cy))          # centre x where the bisector meets y = iA[1]
    r = x1 - iA[0]                   # radius: centre to O'
    c = plt.Circle((x1, y1), radius= r, fill=False)
    ax.add_patch(c)
    ax.annotate('B', (iA[0]+2*r, iA[1]))
    ax.annotate('C', (x1, y1))
    ax.annotate('D', (iB[0], iA[1]))
    ax.axvline(x=iB[0], ymax=iB[1]/lim)
    ax.axvline(x=iB[0], ymin=iB[1]/lim, ymax=(iB[1]*(1+(w_o/w_sv)))/lim)
    ax.plot([iA[0], iB[0]],[iA[1], (iA[1] + iB[1])*x])
    ax.annotate('E', (iB[0], (iB[1]+iA[1])*x))
    # line through A' parallel to O'A
    y0 = -slope([iA[0], iB[0]], [iA[1], iB[1]])*iB[0] + iB[1]*(1+(w_o/w_sv))
    ax.plot([0,iB[0]], [y0, iB[1]*(1+(w_o/w_sv))])
    ax.annotate('A\'', (iB[0],iB[1]*(1+(w_o/w_sv))))
    xp, yp, deg = circle_intersection([iA[0], iB[0]], [iA[1], iB[1]], x1, y1, y0, r)
    ax.plot([org[0], xp], [org[0], yp])
    ax.axvline(x=xp, ymax= yp/lim)
    ax.annotate('P', (xp,yp))
    xq = xp
    yq = ((iB[1]-iA[1])/(iB[0]-iA[0]))*(xp-iA[0])+iA[1]
    ax.annotate('Q', (xq,yq))
    xr = xp
    yr = (((iB[1]-iA[1])*(1-x))/(iB[0]-iA[0]))*(xp-iA[0])+iA[1]
    ax.annotate('R', (xr,yr))
    ax.annotate('S', (xp,iA[1]))
    ax.annotate('T', (xp,0))
    # performance figures read off the diagram
    iL = round(np.sqrt(xp**2 + yp**2), 2)
    slip = round((yq-yr)/(yp-yr),3)
    eff = round((yp-yq)/yp, 4) * 100
    pf = round(yp/iL,2)
    xt = lim - 40
    yt = lim - 5
    ax.text(xt, yt, 'Load Current = '+str(iL)+' A', size = 10)
    ax.text(xt, yt-5, 'Slip = '+str(slip), size = 10)
    ax.text(xt, yt-10, 'Efficiency = '+str(eff) + '%', size = 10)
    ax.text(xt, yt-15, 'Power Factor = '+str(pf), size = 10)
|
"""Tests for Groebner bases. """
from sympy.polys.distributedpolys import (
sdp_from_dict,
)
from sympy.polys.groebnertools import (
sdp_groebner, sig, sig_key, sig_cmp,
lbp, lbp_cmp, lbp_key, critical_pair,
cp_cmp, cp_key, is_rewritable_or_comparable,
Sign, Polyn, Num, s_poly, f5_reduce,
_basis, _representing_matrices,
matrix_fglm,
)
from sympy.polys.monomialtools import (
lex, grlex, grevlex,
)
from sympy.polys.polyerrors import (
ExactQuotientFailed, DomainError,
)
from sympy.polys.domains import ZZ, QQ
from sympy import S, Symbol, symbols, groebner
from sympy.utilities.pytest import raises, skip, XFAIL
from sympy.polys.polyconfig import setup
def helper_test_sdp_groebner():
    """Check sdp_groebner on small 2- and 3-variable systems under lex/grlex.

    Polynomials are built with sdp_from_dict from {exponent-tuple: coeff}
    mappings; each assertion pins the exact reduced Groebner basis.
    """
    f = sdp_from_dict({(1,2): QQ(2,), (2,0): QQ(1)}, lex)
    g = sdp_from_dict({(0,3): QQ(2), (1,1): QQ(1), (0,0): QQ(-1)}, lex)
    a = sdp_from_dict({(1,0): QQ(1,1)}, lex)
    b = sdp_from_dict({(0,3): QQ(1,1), (0,0): QQ(-1,2)}, lex)
    assert sdp_groebner((f, g), 1, lex, QQ) == [a, b]
    f = sdp_from_dict({(2,1): QQ(2,), (0,2): QQ(1)}, lex)
    g = sdp_from_dict({(3,0): QQ(2), (1,1): QQ(1), (0,0): QQ(-1)}, lex)
    a = sdp_from_dict({(0,1): QQ(1,1)}, lex)
    b = sdp_from_dict({(3,0): QQ(1,1), (0,0): QQ(-1,2)}, lex)
    assert sdp_groebner((f, g), 1, lex, QQ) == [b, a]
    f = sdp_from_dict({(0,0,2): QQ(-1), (1,0,0): QQ(1)}, lex)
    g = sdp_from_dict({(0,0,3): QQ(-1), (0,1,0): QQ(1)}, lex)
    assert sdp_groebner((f, g), 1, lex, QQ) == [f, g]
    f = sdp_from_dict({(3,0): QQ(1), (1,1): QQ(-2)}, grlex)
    g = sdp_from_dict({(2,1): QQ(1), (0,2): QQ(-2), (1,0): QQ(1)}, grlex)
    a = sdp_from_dict({(2,0): QQ(1)}, grlex)
    b = sdp_from_dict({(1,1): QQ(1)}, grlex)
    c = sdp_from_dict({(0,2): QQ(1), (1, 0): QQ(-1,2)}, grlex)
    assert sdp_groebner((f, g), 1, grlex, QQ) == [a, b, c]
    # the same pairs of 3-variable binomials under lex and grlex orderings
    f = sdp_from_dict({(2,0,0): -QQ(1), (0,1,0): QQ(1)}, lex)
    g = sdp_from_dict({(3,0,0): -QQ(1), (0,0,1): QQ(1)}, lex)
    assert sdp_groebner((f, g), 2, lex, QQ) == [
        sdp_from_dict({(2,0,0): QQ(1), (0,1,0): -QQ(1)}, lex),
        sdp_from_dict({(1,1,0): QQ(1), (0,0,1): -QQ(1)}, lex),
        sdp_from_dict({(1,0,1): QQ(1), (0,2,0): -QQ(1)}, lex),
        sdp_from_dict({(0,3,0): QQ(1), (0,0,2): -QQ(1)}, lex),
    ]
    f = sdp_from_dict({(2,0,0): -QQ(1), (0,1,0): QQ(1)}, grlex)
    g = sdp_from_dict({(3,0,0): -QQ(1), (0,0,1): QQ(1)}, grlex)
    assert sdp_groebner((f, g), 2, grlex, QQ) == [
        sdp_from_dict({(0,3,0): QQ(1), (0,0,2): -QQ(1)}, grlex),
        sdp_from_dict({(2,0,0): QQ(1), (0,1,0): -QQ(1)}, grlex),
        sdp_from_dict({(1,1,0): QQ(1), (0,0,1): -QQ(1)}, grlex),
        sdp_from_dict({(1,0,1): QQ(1), (0,2,0): -QQ(1)}, grlex),
    ]
    f = sdp_from_dict({(2,0,0): -QQ(1), (0,0,1): QQ(1)}, lex)
    g = sdp_from_dict({(3,0,0): -QQ(1), (0,1,0): QQ(1)}, lex)
    assert sdp_groebner((f, g), 2, lex, QQ) == [
        sdp_from_dict({(2,0,0): QQ(1), (0,0,1): -QQ(1)}, lex),
        sdp_from_dict({(1,1,0): QQ(1), (0,0,2): -QQ(1)}, lex),
        sdp_from_dict({(1,0,1): QQ(1), (0,1,0): -QQ(1)}, lex),
        sdp_from_dict({(0,2,0): QQ(1), (0,0,3): -QQ(1)}, lex),
    ]
    f = sdp_from_dict({(2,0,0): -QQ(1), (0,0,1): QQ(1)}, grlex)
    g = sdp_from_dict({(3,0,0): -QQ(1), (0,1,0): QQ(1)}, grlex)
    assert sdp_groebner((f, g), 2, grlex, QQ) == [
        sdp_from_dict({(0,0,3): QQ(1), (0,2,0): -QQ(1)}, grlex),
        sdp_from_dict({(2,0,0): QQ(1), (0,0,1): -QQ(1)}, grlex),
        sdp_from_dict({(1,1,0): QQ(1), (0,0,2): -QQ(1)}, grlex),
        sdp_from_dict({(1,0,1): QQ(1), (0,1,0): -QQ(1)}, grlex),
    ]
    f = sdp_from_dict({(0,2,0): -QQ(1), (1,0,0): QQ(1)}, lex)
    g = sdp_from_dict({(0,3,0): -QQ(1), (0,0,1): QQ(1)}, lex)
    assert sdp_groebner((f, g), 2, lex, QQ) == [
        sdp_from_dict({(1,0,0): QQ(1), (0,2,0): -QQ(1)}, lex),
        sdp_from_dict({(0,3,0): QQ(1), (0,0,1): -QQ(1)}, lex),
    ]
    f = sdp_from_dict({(0,2,0): -QQ(1), (1,0,0): QQ(1)}, grlex)
    g = sdp_from_dict({(0,3,0): -QQ(1), (0,0,1): QQ(1)}, grlex)
    assert sdp_groebner((f, g), 2, grlex, QQ) == [
        sdp_from_dict({(2,0,0): QQ(1), (0,1,1): -QQ(1)}, grlex),
        sdp_from_dict({(1,1,0): QQ(1), (0,0,1): -QQ(1)}, grlex),
        sdp_from_dict({(0,2,0): QQ(1), (1,0,0): -QQ(1)}, grlex),
    ]
    f = sdp_from_dict({(0,0,2): -QQ(1), (1,0,0): QQ(1)}, lex)
    g = sdp_from_dict({(0,0,3): -QQ(1), (0,1,0): QQ(1)}, lex)
    assert sdp_groebner((f, g), 2, lex, QQ) == [
        sdp_from_dict({(1,0,0): QQ(1), (0,0,2): -QQ(1)}, lex),
        sdp_from_dict({(0,1,0): QQ(1), (0,0,3): -QQ(1)}, lex),
    ]
    f = sdp_from_dict({(0,0,2): -QQ(1), (1,0,0): QQ(1)}, grlex)
    g = sdp_from_dict({(0,0,3): -QQ(1), (0,1,0): QQ(1)}, grlex)
    assert sdp_groebner((f, g), 2, grlex, QQ) == [
        sdp_from_dict({(2,0,0): QQ(1), (0,1,1): -QQ(1)}, grlex),
        sdp_from_dict({(1,0,1): QQ(1), (0,1,0): -QQ(1)}, grlex),
        sdp_from_dict({(0,0,2): QQ(1), (1,0,0): -QQ(1)}, grlex),
    ]
    f = sdp_from_dict({(0,2,0): -QQ(1), (0,0,1): QQ(1)}, lex)
    g = sdp_from_dict({(0,3,0): -QQ(1), (1,0,0): QQ(1)}, lex)
    assert sdp_groebner((f, g), 2, lex, QQ) == [
        sdp_from_dict({(1,0,0): QQ(1), (0,1,1): -QQ(1)}, lex),
        sdp_from_dict({(0,2,0): QQ(1), (0,0,1): -QQ(1)}, lex),
    ]
    f = sdp_from_dict({(0,2,0): -QQ(1), (0,0,1): QQ(1)}, grlex)
    g = sdp_from_dict({(0,3,0): -QQ(1), (1,0,0): QQ(1)}, grlex)
    assert sdp_groebner((f, g), 2, grlex, QQ) == [
        sdp_from_dict({(0,0,3): QQ(1), (2,0,0): -QQ(1)}, grlex),
        sdp_from_dict({(1,1,0): QQ(1), (0,0,2): -QQ(1)}, grlex),
        sdp_from_dict({(0,2,0): QQ(1), (0,0,1): -QQ(1)}, grlex),
        sdp_from_dict({(0,1,1): QQ(1), (1,0,0): -QQ(1)}, grlex),
    ]
    f = sdp_from_dict({(0,0,2): -QQ(1), (0,1,0): QQ(1)}, lex)
    g = sdp_from_dict({(0,0,3): -QQ(1), (1,0,0): QQ(1)}, lex)
    assert sdp_groebner((f, g), 2, lex, QQ) == [
        sdp_from_dict({(1,0,0): QQ(1), (0,0,3): -QQ(1)}, lex),
        sdp_from_dict({(0,1,0): QQ(1), (0,0,2): -QQ(1)}, lex),
    ]
    f = sdp_from_dict({(0,0,2): -QQ(1), (0,1,0): QQ(1)}, grlex)
    g = sdp_from_dict({(0,0,3): -QQ(1), (1,0,0): QQ(1)}, grlex)
    assert sdp_groebner((f, g), 2, grlex, QQ) == [
        sdp_from_dict({(0,3,0): QQ(1), (2,0,0): -QQ(1)}, grlex),
        sdp_from_dict({(1,0,1): QQ(1), (0,2,0): -QQ(1)}, grlex),
        sdp_from_dict({(0,1,1): QQ(1), (1,0,0): -QQ(1)}, grlex),
        sdp_from_dict({(0,0,2): QQ(1), (0,1,0): -QQ(1)}, grlex),
    ]
    f = sdp_from_dict({(2,2): QQ(4), (1,1): QQ(4), (0,0): QQ(1)}, lex)
    g = sdp_from_dict({(2,0): QQ(1), (0,2): QQ(1), (0,0):-QQ(1)}, lex)
    assert sdp_groebner((f, g), 1, lex, QQ) == [
        sdp_from_dict({(1,0): QQ(1,1), (0,7): QQ(-4,1), (0,5): QQ(8,1), (0,3): QQ(-7,1), (0,1): QQ(3,1)}, lex),
        sdp_from_dict({(0,8): QQ(1,1), (0,6): QQ(-2,1), (0,4): QQ(3,2), (0,2): QQ(-1,2), (0,0): QQ(1,16)}, lex),
    ]
    # an integer (non-field) ground domain must be rejected
    raises(DomainError, "sdp_groebner([], 1, lex, ZZ)")
def test_sdp_groebner():
    """Run the sdp_groebner checks under both implementations (F5B, Buchberger)."""
    for method in ('f5b', 'buchberger'):
        setup('GB_METHOD', method)
        helper_test_sdp_groebner()
def helper_test_benchmark_minpoly():
    """Minimal-polynomial benchmark: lex Groebner basis over ZZ and over QQ."""
    x, y, z = symbols('x,y,z')
    I = [x**3 + x + 1, y**2 + y + 1, (x + y) * z - (x**2 + y)]
    assert groebner(I, x, y, z, order='lex') == [
        -975 + 2067*x + 6878*z - 11061*z**2 + 6062*z**3 - 1065*z**4 + 155*z**5,
        -308 + 159*y + 1043*z - 1161*z**2 + 523*z**3 - 91*z**4 + 12*z**5,
        13 - 46*z + 89*z**2 - 82*z**3 + 41*z**4 - 7*z**5 + z**6,
    ]
    # field=True clears leading coefficients to 1 where possible
    assert groebner(I, x, y, z, order='lex', field=True) == [
        -S(25)/53 + x + 6878*z/2067 - 3687*z**2/689 + 6062*z**3/2067 - 355*z**4/689 + 155*z**5/2067,
        -S(308)/159 + y + 1043*z/159 - 387*z**2/53 + 523*z**3/159 - 91*z**4/159 + 4*z**5/53,
        13 - 46*z + 89*z**2 - 82*z**3 + 41*z**4 - 7*z**5 + z**6,
    ]
def test_benchmark_minpoly():
    """Minimal-polynomial benchmark under both back-ends."""
    for method in ('f5b', 'buchberger'):
        setup('GB_METHOD', method)
        helper_test_benchmark_minpoly()
@XFAIL
def test_benchmark_coloring():
    """Graph 3-coloring via Groebner bases on a 12-vertex graph (skipped: slow)."""
    skip('takes too much time')
    V = range(1, 12+1)
    E = [(1,2),(2,3),(1,4),(1,6),(1,12),(2,5),(2,7),(3,8),(3,10),
        (4,11),(4,9),(5,6),(6,7),(7,8),(8,9),(9,10),(10,11),
        (11,12),(5,12),(5,9),(6,10),(7,11),(8,12),(3,4)]
    V = [Symbol('x' + str(i)) for i in V]
    E = [(V[i-1], V[j-1]) for i, j in E]
    x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12 = V
    # x**3 - 1 forces each vertex variable to a cube root of unity (a colour);
    # x**2 + x*y + y**2 forces adjacent vertices to differ
    I3 = [x**3 - 1 for x in V]
    Ig = [x**2 + x*y + y**2 for x, y in E]
    I = I3 + Ig
    assert groebner(I[:-1], V, order='lex') == [
        x1 + x11 + x12,
        x2 - x11,
        x3 - x12,
        x4 - x12,
        x5 + x11 + x12,
        x6 - x11,
        x7 - x12,
        x8 + x11 + x12,
        x9 - x11,
        x10 + x11 + x12,
        x11**2 + x11*x12 + x12**2,
        x12**3 - 1,
    ]
    # with the full edge set the ideal is trivial: the graph is not 3-colorable
    assert groebner(I, V, order='lex') == [1]
def helper_test_benchmark_katsura_3():
    """Katsura-3 benchmark system under lex and grlex orderings."""
    x0, x1, x2 = symbols('x:3')
    I = [x0 + 2*x1 + 2*x2 - 1,
        x0**2 + 2*x1**2 + 2*x2**2 - x0,
        2*x0*x1 + 2*x1*x2 - x1]
    assert groebner(I, x0, x1, x2, order='lex') == [
        -7 + 7*x0 + 8*x2 + 158*x2**2 - 420*x2**3,
        7*x1 + 3*x2 - 79*x2**2 + 210*x2**3,
        x2 + x2**2 - 40*x2**3 + 84*x2**4,
    ]
    assert groebner(I, x0, x1, x2, order='grlex') == [
        7*x1 + 3*x2 - 79*x2**2 + 210*x2**3,
        -x1 + x2 - 3*x2**2 + 5*x1**2,
        -x1 - 4*x2 + 10*x1*x2 + 12*x2**2,
        -1 + x0 + 2*x1 + 2*x2,
    ]
def test_benchmark_katsura3():
    """Katsura-3 benchmark under both back-ends."""
    for method in ('f5b', 'buchberger'):
        setup('GB_METHOD', method)
        helper_test_benchmark_katsura_3()
def helper_test_benchmark_katsura_4():
    """Katsura-4 benchmark system under lex and grlex orderings."""
    x0, x1, x2, x3 = symbols('x:4')
    I = [x0 + 2*x1 + 2*x2 + 2*x3 - 1,
        x0**2 + 2*x1**2 + 2*x2**2 + 2*x3**2 - x0,
        2*x0*x1 + 2*x1*x2 + 2*x2*x3 - x1,
        x1**2 + 2*x0*x2 + 2*x1*x3 - x2]
    assert groebner(I, x0, x1, x2, x3, order='lex') == [
        5913075*x0 - 159690237696*x3**7 + 31246269696*x3**6 + 27439610544*x3**5 - 6475723368*x3**4 - 838935856*x3**3 + 275119624*x3**2 + 4884038*x3 - 5913075,
        1971025*x1 - 97197721632*x3**7 + 73975630752*x3**6 - 12121915032*x3**5 - 2760941496*x3**4 + 814792828*x3**3 - 1678512*x3**2 - 9158924*x3,
        5913075*x2 + 371438283744*x3**7 - 237550027104*x3**6 + 22645939824*x3**5 + 11520686172*x3**4 - 2024910556*x3**3 - 132524276*x3**2 + 30947828*x3,
        128304*x3**8 - 93312*x3**7 + 15552*x3**6 + 3144*x3**5 - 1120*x3**4 + 36*x3**3 + 15*x3**2 - x3,
    ]
    assert groebner(I, x0, x1, x2, x3, order='grlex') == [
        393*x1 - 4662*x2**2 + 4462*x2*x3 - 59*x2 + 224532*x3**4 - 91224*x3**3 - 678*x3**2 + 2046*x3,
        -x1 + 196*x2**3 - 21*x2**2 + 60*x2*x3 - 18*x2 - 168*x3**3 + 83*x3**2 - 9*x3,
        -6*x1 + 1134*x2**2*x3 - 189*x2**2 - 466*x2*x3 + 32*x2 - 630*x3**3 + 57*x3**2 + 51*x3,
        33*x1 + 63*x2**2 + 2268*x2*x3**2 - 188*x2*x3 + 34*x2 + 2520*x3**3 - 849*x3**2 + 3*x3,
        7*x1**2 - x1 - 7*x2**2 - 24*x2*x3 + 3*x2 - 15*x3**2 + 5*x3,
        14*x1*x2 - x1 + 14*x2**2 + 18*x2*x3 - 4*x2 + 6*x3**2 - 2*x3,
        14*x1*x3 - x1 + 7*x2**2 + 32*x2*x3 - 4*x2 + 27*x3**2 - 9*x3,
        x0 + 2*x1 + 2*x2 + 2*x3 - 1,
    ]
def test_benchmark_kastura_4():
    """Katsura-4 benchmark under both back-ends.

    NOTE(review): the function name misspells "katsura" as "kastura";
    renaming would change the public test id, so it is kept.
    """
    for method in ('f5b', 'buchberger'):
        setup('GB_METHOD', method)
        helper_test_benchmark_katsura_4()
def helper_test_benchmark_czichowski():
    """Czichowski's integration benchmark: large-coefficient bases in lex/grlex."""
    x, t = symbols('x t')
    I = [9*x**8 + 36*x**7 - 32*x**6 - 252*x**5 - 78*x**4 + 468*x**3 + 288*x**2 - 108*x + 9, (-72 - 72*t)*x**7 + (-256 - 252*t)*x**6 + (192 + 192*t)*x**5 + (1280 + 1260*t)*x**4 + (312 + 312*t)*x**3 + (-404*t)*x**2 + (-576 - 576*t)*x + 96 + 108*t]
    assert groebner(I, x, t, order='lex') == [
        -160420835591776763325581422211936558925462474417709511019228211783493866564923546661604487873*t**7 - 1406108495478033395547109582678806497509499966197028487131115097902188374051595011248311352864*t**6 - 5241326875850889518164640374668786338033653548841427557880599579174438246266263602956254030352*t**5 - 10758917262823299139373269714910672770004760114329943852726887632013485035262879510837043892416*t**4 - 13119383576444715672578819534846747735372132018341964647712009275306635391456880068261130581248*t**3 - 9491412317016197146080450036267011389660653495578680036574753839055748080962214787557853941760*t**2 - 3767520915562795326943800040277726397326609797172964377014046018280260848046603967211258368000*t + 3725588592068034903797967297424801242396746870413359539263038139343329273586196480000*x - 632314652371226552085897259159210286886724229880266931574701654721512325555116066073245696000,
        610733380717522355121*t**8 + 6243748742141230639968*t**7 + 27761407182086143225024*t**6 + 70066148869420956398592*t**5 + 109701225644313784229376*t**4 + 109009005495588442152960*t**3 + 67072101084384786432000*t**2 + 23339979742629593088000*t + 3513592776846090240000
    ]
    assert groebner(I, x, t, order='grlex') == [
        16996618586000601590732959134095643086442*t**3*x - 32936701459297092865176560282688198064839*t**3 + 78592411049800639484139414821529525782364*t**2*x - 120753953358671750165454009478961405619916*t**2 + 120988399875140799712152158915653654637280*t*x - 144576390266626470824138354942076045758736*t + 60017634054270480831259316163620768960*x**2 + 61976058033571109604821862786675242894400*x - 56266268491293858791834120380427754600960,
        576689018321912327136790519059646508441672750656050290242749*t**4 + 2326673103677477425562248201573604572527893938459296513327336*t**3 + 110743790416688497407826310048520299245819959064297990236000*t**2*x + 3308669114229100853338245486174247752683277925010505284338016*t**2 + 323150205645687941261103426627818874426097912639158572428800*t*x + 1914335199925152083917206349978534224695445819017286960055680*t + 861662882561803377986838989464278045397192862768588480000*x**2 + 235296483281783440197069672204341465480107019878814196672000*x + 361850798943225141738895123621685122544503614946436727532800,
        -117584925286448670474763406733005510014188341867*t**3 + 68566565876066068463853874568722190223721653044*t**2*x - 435970731348366266878180788833437896139920683940*t**2 + 196297602447033751918195568051376792491869233408*t*x - 525011527660010557871349062870980202067479780112*t + 517905853447200553360289634770487684447317120*x**3 + 569119014870778921949288951688799397569321920*x**2 + 138877356748142786670127389526667463202210102080*x - 205109210539096046121625447192779783475018619520,
        -3725142681462373002731339445216700112264527*t**3 + 583711207282060457652784180668273817487940*t**2*x - 12381382393074485225164741437227437062814908*t**2 + 151081054097783125250959636747516827435040*t*x**2 + 1814103857455163948531448580501928933873280*t*x - 13353115629395094645843682074271212731433648*t + 236415091385250007660606958022544983766080*x**2 + 1390443278862804663728298060085399578417600*x - 4716885828494075789338754454248931750698880
    ]
@XFAIL
def test_benchmark_czichowski():
    """Czichowski benchmark under both back-ends (skipped: too slow without gmpy)."""
    skip('This takes too much time (without gmpy)')
    for method in ('f5b', 'buchberger'):
        setup('GB_METHOD', method)
        helper_test_benchmark_czichowski()
def helper_test_benchmark_cyclic_4():
    """Cyclic-4 benchmark system under lex and grlex orderings."""
    a, b, c, d = symbols('a b c d')
    I = [a + b + c + d, a*b + a*d + b*c + b*d, a*b*c + a*b*d + a*c*d + b*c*d, a*b*c*d - 1]
    assert groebner(I, a, b, c, d, order='lex') == [
        4*a + 3*d**9 - 4*d**5 - 3*d,
        4*b + 4*c - 3*d**9 + 4*d**5 + 7*d,
        4*c**2 + 3*d**10 - 4*d**6 - 3*d**2,
        4*c*d**4 + 4*c - d**9 + 4*d**5 + 5*d, d**12 - d**8 - d**4 + 1
    ]
    assert groebner(I, a, b, c, d, order='grlex') == [
        3*b*c - c**2 + d**6 - 3*d**2,
        -b + 3*c**2*d**3 - c - d**5 - 4*d,
        -b + 3*c*d**4 + 2*c + 2*d**5 + 2*d,
        c**4 + 2*c**2*d**2 - d**4 - 2,
        c**3*d + c*d**3 + d**4 + 1,
        b*c**2 - c**3 - c**2*d - 2*c*d**2 - d**3,
        b**2 - c**2, b*d + c**2 + c*d + d**2,
        a + b + c + d
    ]
def test_benchmark_cyclic_4():
    """Cyclic-4 benchmark under both back-ends."""
    for method in ('f5b', 'buchberger'):
        setup('GB_METHOD', method)
        helper_test_benchmark_cyclic_4()
def test_sig_key():
    """Basic ordering checks for sig_key under the lex ordering."""
    sig_a = sig((0,) * 3, 2)
    sig_b = sig((1,) * 3, 4)
    sig_c = sig((2,) * 3, 2)
    assert sig_key(sig_a, lex) > sig_key(sig_b, lex)
    assert sig_key(sig_b, lex) < sig_key(sig_c, lex)
def test_lbp_key():
    """Basic ordering checks for lbp_key under the lex ordering."""
    poly_a = lbp(sig((0,) * 4, 3), [], 12)
    poly_b = lbp(sig((0,) * 4, 4), [], 13)
    poly_c = lbp(sig((0,) * 4, 4), [], 12)
    assert lbp_key(poly_a, lex) > lbp_key(poly_b, lex)
    assert lbp_key(poly_b, lex) < lbp_key(poly_c, lex)
def test_critical_pair():
    """critical_pair on labeled polynomials taken from the cyclic-4/grlex run."""
    # from cyclic4 with grlex
    p1 = (((0, 0, 0, 0), 4), [((0, 1, 1, 2), QQ(1,1)), ((0, 0, 2, 2), QQ(1,1)), ((0, 0, 0, 4), QQ(-1,1)), ((0, 0, 0, 0), QQ(-1,1))], 4)
    q1 = (((0, 0, 0, 0), 2), [((0, 2, 0, 0), QQ(-1,1)), ((0, 1, 0, 1), QQ(-1,1)), ((0, 0, 1, 1), QQ(-1,1)), ((0, 0, 0, 2), QQ(-1,1))], 2)
    p2 = (((0, 0, 0, 2), 3), [((0, 0, 3, 2), QQ(1,1)), ((0, 0, 2, 3), QQ(1,1)), ((0, 0, 1, 0), QQ(-1,1)), ((0, 0, 0, 1), QQ(-1,1))], 5)
    q2 = (((0, 0, 2, 2), 2), [((0, 0, 1, 5), QQ(1,1)), ((0, 0, 0, 6), QQ(1,1)), ((0, 1, 1, 0), QQ(1,1)), ((0, 0, 1, 1), QQ(1,1))], 13)
    assert critical_pair(p1, q1, 3, grlex, QQ) == (((0, 0, 1, 2), 2), ((0, 0, 1, 2), QQ(-1,1)), (((0, 0, 0, 0), 2), [((0, 2, 0, 0), QQ(-1,1)), ((0, 1, 0, 1), QQ(-1,1)), ((0, 0, 1, 1), QQ(-1,1)), ((0, 0, 0, 2), QQ(-1,1))], 2), ((0, 1, 0, 0), 4), ((0, 1, 0, 0), QQ(1,1)), (((0, 0, 0, 0), 4), [((0, 1, 1, 2), QQ(1,1)), ((0, 0, 2, 2), QQ(1,1)), ((0, 0, 0, 4), QQ(-1,1)), ((0, 0, 0, 0), QQ(-1,1))], 4))
    assert critical_pair(p2, q2, 3, grlex, QQ) == (((0, 0, 4, 2), 2), ((0, 0, 2, 0), QQ(1,1)), (((0, 0, 2, 2), 2), [((0, 0, 1, 5), QQ(1,1)), ((0, 0, 0, 6), QQ(1,1)), ((0, 1, 1, 0), QQ(1,1)), ((0, 0, 1, 1), QQ(1,1))], 13), ((0, 0, 0, 5), 3), ((0, 0, 0, 3), QQ(1,1)), (((0, 0, 0, 2), 3), [((0, 0, 3, 2), QQ(1,1)), ((0, 0, 2, 3), QQ(1,1)), ((0, 0, 1, 0), QQ(-1,1)), ((0, 0, 0, 1), QQ(-1,1))], 5))
def test_cp_key():
    """cp_key orders critical pairs built from the cyclic-4/grlex data."""
    # from cyclic4 with grlex
    p1 = (((0, 0, 0, 0), 4), [((0, 1, 1, 2), QQ(1,1)), ((0, 0, 2, 2), QQ(1,1)), ((0, 0, 0, 4), QQ(-1,1)), ((0, 0, 0, 0), QQ(-1,1))], 4)
    q1 = (((0, 0, 0, 0), 2), [((0, 2, 0, 0), QQ(-1,1)), ((0, 1, 0, 1), QQ(-1,1)), ((0, 0, 1, 1), QQ(-1,1)), ((0, 0, 0, 2), QQ(-1,1))], 2)
    p2 = (((0, 0, 0, 2), 3), [((0, 0, 3, 2), QQ(1,1)), ((0, 0, 2, 3), QQ(1,1)), ((0, 0, 1, 0), QQ(-1,1)), ((0, 0, 0, 1), QQ(-1,1))], 5)
    q2 = (((0, 0, 2, 2), 2), [((0, 0, 1, 5), QQ(1,1)), ((0, 0, 0, 6), QQ(1,1)), ((0, 1, 1, 0), QQ(1,1)), ((0, 0, 1, 1), QQ(1,1))], 13)
    cp1 = critical_pair(p1, q1, 3, grlex, QQ)
    cp2 = critical_pair(p2, q2, 3, grlex, QQ)
    assert cp_key(cp1, grlex) < cp_key(cp2, grlex)
    cp1 = critical_pair(p1, p2, 3, grlex, QQ)
    cp2 = critical_pair(q1, q2, 3, grlex, QQ)
    assert cp_key(cp1, grlex) < cp_key(cp2, grlex)
def test_is_rewritable_or_comparable():
    """F5B rewritability/comparability criteria on katsura-4/grlex data.

    The `== True` comparisons were replaced by plain asserts (the function's
    truthiness is what matters; comparing to True with == is an anti-idiom).
    """
    # from katsura4 with grlex
    p = lbp(sig((0, 0, 2, 1), 2), [], 2)
    B = [lbp(sig((0, 0, 0, 1), 2), [((0, 0, 2, 1), QQ(1,1)), ((0, 0, 1, 2), QQ(76,35)), ((0, 0, 0, 3), QQ(13,7)), ((0, 2, 0, 0), QQ(2,45)), ((0, 1, 1, 0), QQ(1,5)), ((0, 1, 0, 1), QQ(5,63)), ((0, 0, 2, 0), QQ(4,45)), ((0, 0, 1, 1), QQ(-32,105)), ((0, 0, 0, 2), QQ(-13,21))], 6)]
    # rewritable:
    assert is_rewritable_or_comparable(Sign(p), Num(p), B, 3, QQ)
    p = lbp(sig((0, 1, 1, 0), 2), [], 7)
    B = [lbp(sig((0, 0, 0, 0), 3), [((0, 1, 1, 0), QQ(10,3)), ((0, 1, 0, 1), QQ(4,3)), ((0, 0, 2, 0), QQ(4,1)), ((0, 0, 1, 1), QQ(22,3)), ((0, 0, 0, 2), QQ(4,1)), ((0, 1, 0, 0), QQ(-1,3)), ((0, 0, 1, 0), QQ(-4,3)), ((0, 0, 0, 1), QQ(-4,3))], 3)]
    # comparable:
    assert is_rewritable_or_comparable(Sign(p), Num(p), B, 3, QQ)
def test_f5_reduce():
    """f5_reduce on katsura-3/lex data: full reduction, and the no-op case.

    The second check bumps the signature index to 100 so no reducer in F is
    admissible, and the polynomial must come back unchanged.
    """
    # katsura3 with lex
    F = [(((0, 0, 0), 1), [((1, 0, 0), QQ(1,1)), ((0, 1, 0), QQ(2,1)), ((0, 0, 1), QQ(2,1)), ((0, 0, 0), QQ(-1,1))], 1), (((0, 0, 0), 2), [((0, 2, 0), QQ(6,1)), ((0, 1, 1), QQ(8,1)), ((0, 1, 0), QQ(-2,1)), ((0, 0, 2), QQ(6,1)), ((0, 0, 1), QQ(-2,1))], 2), (((0, 0, 0), 3), [((0, 1, 1), QQ(10,3)), ((0, 1, 0), QQ(-1,3)), ((0, 0, 2), QQ(4,1)), ((0, 0, 1), QQ(-4,3))], 3), (((0, 0, 1), 2), [((0, 1, 0), QQ(1,1)), ((0, 0, 3), QQ(30,1)), ((0, 0, 2), QQ(-79,7)), ((0, 0, 1), QQ(3,7))], 4), (((0, 0, 2), 2), [((0, 0, 4), QQ(1,1)), ((0, 0, 3), QQ(-10,21)), ((0, 0, 2), QQ(1,84)), ((0, 0, 1), QQ(1,84))], 5)]
    cp = critical_pair(F[0], F[1], 2, lex, QQ)
    s = s_poly(cp, 2, lex, QQ)
    assert f5_reduce(s, F, 2, lex, QQ) == (((0, 2, 0), 1), [], 1)
    s = lbp(sig(Sign(s)[0], 100), Polyn(s), Num(s))
    assert f5_reduce(s, F, 2, lex, QQ) == s
def test_matrix_fglm():
    """Placeholder: matrix_fglm is exercised in test_polytools.py instead."""
    pass # see test_polytools.py
def test_representing_matrices():
    """_representing_matrices: multiplication matrices w.r.t. a monomial basis."""
    basis = [(0, 0), (0, 1), (1, 0), (1, 1)]
    F = [[((2, 0), QQ(1,1)), ((1, 0), QQ(-1,1)), ((0, 1), QQ(-3,1)), ((0, 0), QQ(1,1))],
        [((0, 2), QQ(1,1)), ((1, 0), QQ(-2,1)), ((0, 1), QQ(1,1)), ((0, 0), QQ(-1,1))]]
    assert _representing_matrices(basis, F, 1, grlex, QQ) ==[ \
        [[QQ(0,1), QQ(0,1), QQ(-1,1), QQ(3,1)],
        [QQ(0,1), QQ(0,1), QQ(3,1), QQ(-4,1)],
        [QQ(1,1), QQ(0,1), QQ(1,1), QQ(6,1)],
        [QQ(0,1), QQ(1,1), QQ(0,1), QQ(1,1)]],
        [[QQ(0,1), QQ(1,1), QQ(0,1), QQ(-2,1)],
        [QQ(1,1), QQ(-1,1), QQ(0,1), QQ(6,1)],
        [QQ(0,1), QQ(2,1), QQ(0,1), QQ(3,1)],
        [QQ(0,1), QQ(0,1), QQ(1,1), QQ(-1,1)]]]
|
import sys
import math
import random
import xml.etree.ElementTree as ET
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import treelib
from pandas.core.frame import DataFrame
from scipy.spatial import distance_matrix
from sklearn.manifold import MDS
from treelib.node import Node
from treelib.tree import Tree
from src.taxodist import cs_algorithms
from src.taxodist import ic_algorithms
from src.taxodist import setsim_algorithms
from numpy import ndarray
max_ic = None
def iterateOverDiags(parent: ET.Element, parent_node: Node, tree: Tree):
    """Recursively add every <diag> element under *parent* to *tree*.

    Each diagnosis is keyed by the text of its <name> child; already-present
    names are skipped (and their subtrees are not descended into).
    """
    for child in parent.iter('diag'):
        child_name = child.find('name').text
        if tree.contains(child_name):
            continue
        child_node = tree.create_node(child_name, child_name, parent=parent_node)
        iterateOverDiags(child, child_node, tree)
def getIC(concept: str, tree: Tree, ic_mode: str):
    """
    Return the information content of a given concept.

    Based on the IC algorithms from https://doi.org/10.1186/s12911-019-0807-y.
    Exits the process after printing the error when ic_mode is unknown.
    """
    try:
        if ic_mode == 'levels':
            # Boriah et al. (https://doi.org/10.1137/1.9781611972788.22):
            # the concept's depth in the taxonomy serves as its IC
            return tree.depth(concept)
        if ic_mode == 'sanchez':
            return ic_algorithms.getICSanchez(concept, tree)
        raise ValueError('Unsupported IC-mode: ', ic_mode)
    except ValueError as err:
        print(err.args)
        sys.exit()
def getLCA(concept1: str, concept2: str, tree: Tree, ic_mode: str) -> str:
    """Return the lowest common ancestor of two concepts.

    The LCA is the shared ancestor with the highest information content.
    Returns 0 (the original sentinel) when the concepts share no ancestor.
    """
    common = list(getAncestors(concept1, tree).intersection(getAncestors(concept2, tree)))
    if not common:
        return 0
    # TODO discuss: does this make sense? cant i just take the depth?
    return max(common, key=lambda concept: getIC(concept, tree, ic_mode))
def getAncestors(concept: str, tree: Tree):
    """Return the set of ancestor identifiers of *concept* in *tree*.

    The root concept has no ancestors; otherwise the parent chain is walked
    upward until the root is reached (the root itself is included).
    """
    if concept == tree.root:
        return set()
    ancestors = set()
    current: Node = tree.parent(concept)
    while True:
        ancestors.add(current.identifier)
        if current.is_root():
            return ancestors
        current = tree.parent(current.identifier)
def getShortestPath(concept1: str, concept2: str, depth_lca: int, tree: Tree):
    """Return the path length between two concepts through their LCA.

    Path length = (level of c1 - LCA depth) + (level of c2 - LCA depth).
    """
    level_1 = tree.level(concept1)
    level_2 = tree.level(concept2)
    return (level_1 - depth_lca) + (level_2 - depth_lca)
def getCS(concept1: str, concept2: str, tree: Tree, depth: int, ic_mode: str, cs_mode: str):
    """Returns concept similarity of two concepts based on CS-algorithms from https://doi.org/10.1186/s12911-019-0807-y

    Identical concepts short-circuit to 1.0 for the Wu-Palmer variants.
    Every other mode delegates to cs_algorithms; an unknown mode prints the
    error and exits the process.
    """
    if concept1 == concept2 and cs_mode in ('wu_palmer', 'simple_wu_palmer'):
        return 1.0
    lca = getLCA(concept1, concept2, tree, ic_mode)
    ic_lca = getIC(lca, tree, ic_mode)
    ic_1 = getIC(concept1, tree, ic_mode)
    ic_2 = getIC(concept2, tree, ic_mode)
    try:
        if cs_mode == 'wu_palmer':
            return cs_algorithms.getCSWuPalmer(ic_1, ic_2, ic_lca)
        if cs_mode == 'li':
            return cs_algorithms.getCSLi(ic_1, ic_2, ic_lca)
        if cs_mode == 'simple_wu_palmer':
            return cs_algorithms.getCSSimpleWuPalmer(ic_lca, depth)
        if cs_mode == 'leacock_chodorow':
            return cs_algorithms.getCSLeacockChodorow(ic_1, ic_2, ic_lca, ic_mode, tree, depth)
        if cs_mode == 'nguyen_almubaid':
            return cs_algorithms.getCSNguyenAlMubaid(concept1, concept2, lca, tree, depth)
        if cs_mode == 'batet':
            return cs_algorithms.getCSBatet(concept1, concept2, tree)
        raise ValueError('Unsupported CS-mode: ', cs_mode)
    except ValueError as err:
        print(err.args)
        sys.exit()
def getSetSim(concepts_1: set, concepts_2: set, setsim: str, tree: Tree, cs_mode: str, ic_mode: str) -> float:
    """Return the set similarity of two concept sets.

    Dispatches to the requested setsim algorithm; empty input sets and
    unknown algorithm names print the error and exit the process.
    """
    try:
        if not concepts_1 or not concepts_2:
            raise ValueError('Empty Concept Set(s)')
        if setsim == 'jaccard':
            return setsim_algorithms.getJaccardSetSim(concepts_1, concepts_2)
        if setsim == 'dice':
            return setsim_algorithms.getDiceSetSim(concepts_1, concepts_2)
        if setsim == 'cosine':
            return setsim_algorithms.getCosineSetSim(concepts_1, concepts_2)
        if setsim == 'overlap':
            return setsim_algorithms.getOverlapSetSim(concepts_1, concepts_2)
        if setsim == 'mean_cs':
            return setsim_algorithms.getMeanCSSetSim(concepts_1, concepts_2, tree, cs_mode, ic_mode)
        if setsim == 'hierarchical':
            return setsim_algorithms.getHierachicalDistSetSim(concepts_1, concepts_2, tree, cs_mode, ic_mode)
        if setsim == 'bipartite_matching':
            return setsim_algorithms.getMaxWeightedBipartiteMatchingSim(concepts_1, concepts_2, tree, ic_mode, cs_mode)
        raise ValueError("Unsupported setsim algorithm: ", setsim)
    except ValueError as err:
        print(err.args)
        sys.exit()
def getAllConcepts(tree: Tree):
    """Return the identifiers of every node in *tree*, minus identifier 0.

    NOTE(review): list.remove(0) assumes a node with identifier 0 exists
    (presumably the root) and raises ValueError otherwise — confirm.
    """
    concepts = [node.identifier for node in tree.all_nodes()]
    concepts.remove(0)
    return concepts
def getDistMatrix(concepts: list, tree: Tree, worker_index, max_workers, ic_mode, cs_mode):
    """
    Function for the parallelized processes. \n
    Computes the part of the (absolute) distance matrix of the given concepts,
    that corresponds to the worker index of the calling process.

    Returns (matrix_slice, worker_index); the slice holds CS values for rows
    start..stop, filled only in the upper triangle (columns >= row's absolute
    index) — the caller is expected to mirror the full matrix.
    """
    depth = tree.depth()
    length = len(concepts)
    start = getStart(worker_index, max_workers, length)
    stop = getStop(worker_index, max_workers, length)
    dist_matrix = np.zeros(shape=(stop - start, length))
    # enumerate() replaces the original concepts.index() calls, which scanned
    # the whole list (O(n)) for every element; positional indices are
    # equivalent because concept identifiers are unique tree node ids.
    for row, concept1 in enumerate(concepts[start:stop]):
        concept1_index = start + row
        # safe CS values in matrix (only upper triangular)
        for col, concept2 in enumerate(concepts[concept1_index:], start=concept1_index):
            dist_matrix[row, col] = getCS(concept1, concept2, tree, depth, ic_mode, cs_mode)
    return dist_matrix, worker_index
def getStop(worker_index, max_workers, length):
    """Returns logarithmically spaced stop index.

    The last worker's slice runs to the end of the list; every other
    worker stops where the next worker starts.
    """
    return length if worker_index == max_workers else getStart(worker_index + 1, max_workers, length)
def getStart(worker_index, max_workers, length):
    """Returns logarithmically spaced start index (worker_index is 1-based)."""
    spacing = getSpacing(max_workers, length)
    return math.ceil(spacing[worker_index - 1])
def getSpacing(max_workers, length):
    """Returns spacing for the concept list.

    Produces max_workers logarithmically spaced split points scaled by
    length/10, shifted so the first entry is 0 (offset removal).
    """
    raw = (length / 10) * np.logspace(start=-1, stop=1, num=max_workers, endpoint=True)
    return raw - raw[0]
def getDistMatrixWrapper(p):
    """Wrapper for the parallel-process-function.

    Unpacks the single argument tuple *p* into getDistMatrix, as required
    by pool-map style APIs that pass one object per task.
    """
    return getDistMatrix(*p)
def getMDSMatrix(dist_matrix):
    """Computes multi-dimensionally-scaled two-dimensional concept-coordinates based on a pairwise-distance-matrix"""
    # use MDS to compute the relative distances of the distinct concepts
    embedding = MDS(n_components=2)
    coordinates = embedding.fit_transform(dist_matrix)
    return pd.DataFrame(coordinates)
def mirrorMatrix(dist_matrix):
    """mirrors uppertriangular distance matrix along its diagonal

    Adding the transpose doubles the diagonal, so the diagonal is
    subtracted back out once.
    """
    diagonal = np.diag(np.diag(dist_matrix))
    return dist_matrix + dist_matrix.T - diagonal
def plotConcepts(df_mds_coordinates: DataFrame, concepts: list):
    """Scatter-plot the 2-D MDS coordinates, labelling each point with its concept."""
    fig, axis = plt.subplots()
    df_mds_coordinates.plot(0, 1, kind='scatter', ax=axis)
    for row_index, row in df_mds_coordinates.iterrows():
        axis.annotate(concepts[row_index], row)
    plt.show()
def saveConceptDistancesInExcel(df_mds_coordinates: DataFrame, concepts: list):
    """Saves pairwise concept-distances to excel.

    NOTE: the *concepts* parameter is currently unused; the sheet contains
    only the numeric distance matrix of the MDS coordinates.
    """
    coords = df_mds_coordinates.to_numpy()
    distances = pd.DataFrame(distance_matrix(coords, coords))
    distances.to_excel('concept_distances.xlsx')
def getRandomConcepts(concept_cnt: int, tree: treelib.Tree) -> list:
    """Returns list with concept_cnt random concepts from the given taxonomy tree."""
    sampled_nodes = random.sample(tree.all_nodes(), concept_cnt)
    return [node.identifier for node in sampled_nodes]
def getConceptCount(tree: treelib.Tree):
    """Returns the number of concepts in a taxonomy.

    NOTE(review): this counts only the *leaf* nodes, not all nodes in the
    tree — confirm whether inner (non-leaf) concepts should be included.
    """
    return len(tree.leaves())
def setMaxIC(tree: Tree, ic_mode: str) -> float:
    """Compute and store the maximum information content over all concepts.

    The maximum IC is attached to the tree as a dedicated 'max_ic' node
    (data=max_ic, parent=0 — assumes the root uses identifier 0, TODO
    confirm) so other functions can read it back from the tree.

    Returns the maximum IC. NOTE(review): the original returned None
    despite the '-> float' annotation; returning the value is
    backward-compatible for callers that ignored the result.
    """
    max_ic = 0
    for node in tree.all_nodes():
        ic = getIC(node.identifier, tree, ic_mode)
        if ic > max_ic:
            max_ic = ic
    tree.create_node('max_ic', 'max_ic', data=max_ic, parent=0)
    return max_ic
def getCSMatrix(concepts_1: list, concepts_2: list, tree: Tree, ic_mode, cs_mode) -> ndarray:
    """Return the len(concepts_1) x len(concepts_2) concept-similarity matrix.

    Cell [i, j] holds getCS(concepts_1[i], concepts_2[j]).
    """
    cs_matrix = np.zeros(shape=(len(concepts_1), len(concepts_2)))
    depth = tree.depth()
    # enumerate() replaces the original list.index() lookups, which scanned
    # each list (O(n)) on every iteration; equivalent because concept
    # identifiers are unique tree node ids.
    for c1_index, concept1 in enumerate(concepts_1):
        for c2_index, concept2 in enumerate(concepts_2):
            cs_matrix[c1_index, c2_index] = getCS(concept1, concept2, tree, depth, ic_mode, cs_mode)
    return cs_matrix
<filename>view_commands.py
import sublime
import sublime_plugin
import os
import serial_constants
class SerialMonitorWriteCommand(sublime_plugin.TextCommand):
    """
    Writes text (or a file) to the serial output view the command is run on
    """
    def run(self, edit, **args):
        """
        Runs the command to write to a serial output view
        :param args: The args for writing to the view. Needs to contain:
            "text": string of text to write to the view
            or
            "view_id": The id of the input view
            "region_begin": Starting index for the input view
            "region_end": Ending index for the input view
        :type args: dict
        :return:
        """
        # Check if the end of the output file is visible. If so, enable the auto-scroll
        should_autoscroll = self.view.visible_region().contains(self.view.size())
        self.view.set_read_only(False)
        if "text" in args:
            self.view.insert(edit, self.view.size(), args["text"])
        else:
            source_view = sublime.View(args["view_id"])
            source_region = sublime.Region(args["region_begin"], args["region_end"])
            self.view.insert(edit, self.view.size(), source_view.substr(source_region))
        self.view.set_read_only(True)
        # Scroll only if the insert pushed the end out of the visible region
        if should_autoscroll and not self.view.visible_region().contains(self.view.size()):
            self.view.window().run_command("serial_monitor_scroll", {"view_id": self.view.id()})
class SerialMonitorEraseCommand(sublime_plugin.TextCommand):
    """
    Clears the view
    """
    def run(self, edit, **args):
        self.view.set_read_only(False)
        whole_buffer = sublime.Region(0, self.view.size())
        self.view.erase(edit, whole_buffer)
        self.view.set_read_only(True)
class SerialMonitorScrollCommand(sublime_plugin.WindowCommand):
    """
    Scrolls to the end of a view
    """
    def run(self, view_id):
        # Remember the focused view so focus can be restored after scrolling
        previously_focused = self.window.active_view()
        target = sublime.View(view_id)
        self.window.focus_view(target)
        target.show(target.size())
        self.window.focus_view(previously_focused)
class SerialMonitorNewFilterCommand(sublime_plugin.TextCommand):
    """
    Creates a new Serial Monitor Filter file with the default filter set to the text provided
    """
    def run(self, edit, text="sample filter"):
        template_path = os.path.join(os.path.split(__file__)[0], "filter", "default_serial_filter.json")
        with open(template_path) as template_file:
            template = "".join(template_file.readlines())
        # Escape slashes and quotes (backslashes first so quotes aren't double-escaped)
        escaped = text.replace("\\", "\\\\").replace("\"", "\\\"")
        template = template.replace("$1", escaped.strip("\r\n"))
        new_view = self.view.window().new_file()
        new_view.insert(edit, 0, template)
        new_view.set_name("new filter")
        new_view.assign_syntax("Packages/JavaScript/JSON.sublime-syntax")
class SerialMonitorNewFilterFromTextCommand(SerialMonitorNewFilterCommand):
    """
    Creates a new Serial Monitor Filter file based on the text selected
    """
    def run(self, edit, text=""):
        # The *text* argument is ignored; the first selection's contents win
        selected_text = self.view.substr(self.view.sel()[0])
        super(SerialMonitorNewFilterFromTextCommand, self).run(edit, selected_text)

    def is_visible(self):
        # Only show this command if the file syntax is a Serial Monitor syntax
        # And exactly 1 region is selected that is not multi-line
        if self.view.settings().get("syntax") == serial_constants.SYNTAX_FILE:
            selections = self.view.sel()
            if len(selections) == 1 and not selections[0].empty():
                return len(self.view.substr(selections[0]).splitlines()) == 1
|
<reponame>levilucio/SyVOLT<gh_stars>1-10
from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
class HUnitDaughter2Woman_CompleteLHS(HimesisPreConditionPatternLHS):
    # NOTE: generated transformation-rule pattern; node indices, labels and
    # attribute strings are positionally coupled — edit with care.
    def __init__(self):
        """
        Creates the himesis graph representing the AToM3 model HUnitDaughter2Woman_CompleteLHS
        """
        # Flag this instance as compiled now
        self.is_compiled = True
        super(HUnitDaughter2Woman_CompleteLHS, self).__init__(name='HUnitDaughter2Woman_CompleteLHS', num_nodes=0, edges=[])
        # Add the edges
        self.add_edges([])
        # Set the graph attributes
        self["mm__"] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
        self["MT_constraint__"] = """return True"""
        self["name"] = """"""
        self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'HUnitDaughter2Woman_CompleteLHS')
        self["equations"] = []
        # Set the node attributes
        # match class Family(Fam) node
        self.add_node()
        self.vs[0]["MT_pre__attr1"] = """return True"""
        self.vs[0]["MT_label__"] = """1"""
        self.vs[0]["mm__"] = """MT_pre__Family"""
        self.vs[0]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'Fam')
        # match class Child(Child) node
        self.add_node()
        self.vs[1]["MT_pre__attr1"] = """return True"""
        self.vs[1]["MT_label__"] = """2"""
        self.vs[1]["mm__"] = """MT_pre__Child"""
        self.vs[1]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'Child')
        # apply class Woman(Woman) node
        self.add_node()
        self.vs[2]["MT_pre__attr1"] = """return True"""
        self.vs[2]["MT_label__"] = """3"""
        self.vs[2]["mm__"] = """MT_pre__Woman"""
        self.vs[2]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'Woman')
        # match association null--daughters-->nullnode
        self.add_node()
        self.vs[3]["MT_pre__attr1"] = """return attr_value == "daughters" """
        self.vs[3]["MT_label__"] = """4"""
        self.vs[3]["mm__"] = """MT_pre__directLink_S"""
        self.vs[3]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'Famassoc3Child')
        # trace association null--trace-->nullnode
        self.add_node()
        self.vs[4]["MT_label__"] = """5"""
        self.vs[4]["mm__"] = """MT_pre__trace_link"""
        self.vs[4]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'Womanassoc4Child')
        # equation: Woman.fullName = concat(Child.firstName, Family.lastName)
        self['equations'].append(((2,'fullName'),('concat',((1,'firstName'),(0,'lastName')))))
        # Add the edges
        self.add_edges([
            (0,3), # match class null(Fam) -> association daughters
            (3,1), # association null -> match class null(Child)
            (2,4), # apply class null(Child) -> backward_association
            (4,1), # backward_associationnull -> match_class null(Child)
        ])
    # define evaluation methods for each match class.
    def eval_attr11(self, attr_value, this):
        # Family node: unconditional match
        return True
    def eval_attr12(self, attr_value, this):
        # Child node: unconditional match
        return True
    # define evaluation methods for each apply class.
    def eval_attr13(self, attr_value, this):
        # Woman node: unconditional match
        return True
    # define evaluation methods for each match association.
    def eval_attr14(self, attr_value, this):
        # directLink_S association: only 'daughters' links match
        return attr_value == "daughters"
    # define evaluation methods for each apply association.
    def constraint(self, PreNode, graph):
        # Global rule constraint: always satisfied
        return True
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.